Move the tokenizer into the CPU cache as well

This commit is contained in:
Lincoln Stein
2022-10-12 03:03:29 -04:00
parent 7c06849c4d
commit b537e92789
2 changed files with 9 additions and 3 deletions

View File

@ -678,8 +678,6 @@ class Generate:
self.embedding_path, self.precision == 'float32' or self.precision == 'autocast'
)
# model.to doesn't change the cond_stage_model.device used to move the tokenizer output, so set it here
self.model.cond_stage_model.device = self.device
self._set_sampler()
for m in self.model.modules():