pipeline: remove code for legacy model

Author: Kevin Turner
Date: 2023-03-09 18:04:11 -08:00
Parent: 9d339e94f2
Commit: 1a829bb998
2 changed files with 0 additions and 36 deletions

@@ -495,18 +495,6 @@ class Generate:
         torch.cuda.reset_peak_memory_stats()
         results = list()
         init_image = None
         mask_image = None
-
-        try:
-            if (
-                self.free_gpu_mem
-                and self.model.cond_stage_model.device != self.model.device
-            ):
-                self.model.cond_stage_model.device = self.model.device
-                self.model.cond_stage_model.to(self.model.device)
-        except AttributeError:
-            pass
-
         try:
             uc, c, extra_conditioning_info = get_uc_and_c_and_ec(

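For context on this hunk: the deleted block force-moved the legacy checkpoint's cond_stage_model (the CLIP text encoder) onto the main model device whenever free_gpu_mem was set. A minimal sketch of the diffusers-native mechanism that makes this kind of manual bookkeeping unnecessary (illustrative only, not code from this commit; the model id and fp16 dtype are assumptions):

# Illustrative sketch, not part of this commit: diffusers pipelines manage
# submodel device placement themselves, so manual cond_stage_model moves
# like the deleted block above are not needed.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed model id, for illustration
    torch_dtype=torch.float16,
)
# Moves each submodel (text encoder, UNet, VAE) to the GPU only while it
# runs, offloading it back to CPU afterwards to keep GPU memory free.
pipe.enable_model_cpu_offload()
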
@@ -54,16 +54,6 @@ class PipelineIntermediateState:
     attention_map_saver: Optional[AttentionMapSaver] = None
 
 
-# copied from configs/stable-diffusion/v1-inference.yaml
-_default_personalization_config_params = dict(
-    placeholder_strings=["*"],
-    initializer_wods=["sculpture"],
-    per_image_tokens=False,
-    num_vectors_per_token=1,
-    progressive_words=False,
-)
-
-
 @dataclass
 class AddsMaskLatents:
     """Add the channels required for inpainting model input.
@@ -917,20 +907,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
             device=self._model_group.device_for(self.unet),
         )
 
-    @property
-    def cond_stage_model(self):
-        return self.embeddings_provider
-
-    @torch.inference_mode()
-    def _tokenize(self, prompt: Union[str, List[str]]):
-        return self.tokenizer(
-            prompt,
-            padding="max_length",
-            max_length=self.tokenizer.model_max_length,
-            truncation=True,
-            return_tensors="pt",
-        )
-
     @property
     def channels(self) -> int:
         """Compatible with DiffusionWrapper"""