Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
Fix Inpainting Issues (#3744)

- fix: Inpaint not working with some schedulers (resolves #3732)
- fix: LoRAs not working at all while inpainting

Commit d4ec8873f7
@@ -154,40 +154,42 @@ class InpaintInvocation(BaseInvocation):
 
     @contextmanager
     def load_model_old_way(self, context, scheduler):
+        def _lora_loader():
+            for lora in self.unet.loras:
+                lora_info = context.services.model_manager.get_model(
+                    **lora.dict(exclude={"weight"}))
+                yield (lora_info.context.model, lora.weight)
+                del lora_info
+            return
+
         unet_info = context.services.model_manager.get_model(**self.unet.unet.dict())
         vae_info = context.services.model_manager.get_model(**self.vae.vae.dict())
 
-        #unet = unet_info.context.model
-        #vae = vae_info.context.model
-
-        with ExitStack() as stack:
-            loras = [(stack.enter_context(context.services.model_manager.get_model(**lora.dict(exclude={"weight"}))), lora.weight) for lora in self.unet.loras]
-
-            with vae_info as vae,\
-                 unet_info as unet,\
-                 ModelPatcher.apply_lora_unet(unet, loras):
-
-                device = context.services.model_manager.mgr.cache.execution_device
-                dtype = context.services.model_manager.mgr.cache.precision
-
-                pipeline = StableDiffusionGeneratorPipeline(
-                    vae=vae,
-                    text_encoder=None,
-                    tokenizer=None,
-                    unet=unet,
-                    scheduler=scheduler,
-                    safety_checker=None,
-                    feature_extractor=None,
-                    requires_safety_checker=False,
-                    precision="float16" if dtype == torch.float16 else "float32",
-                    execution_device=device,
-                )
-
-                yield OldModelInfo(
-                    name=self.unet.unet.model_name,
-                    hash="<NO-HASH>",
-                    model=pipeline,
-                )
+        with vae_info as vae,\
+             ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()),\
+             unet_info as unet:
+
+            device = context.services.model_manager.mgr.cache.execution_device
+            dtype = context.services.model_manager.mgr.cache.precision
+
+            pipeline = StableDiffusionGeneratorPipeline(
+                vae=vae,
+                text_encoder=None,
+                tokenizer=None,
+                unet=unet,
+                scheduler=scheduler,
+                safety_checker=None,
+                feature_extractor=None,
+                requires_safety_checker=False,
+                precision="float16" if dtype == torch.float16 else "float32",
+                execution_device=device,
+            )
+
+            yield OldModelInfo(
+                name=self.unet.unet.model_name,
+                hash="<NO-HASH>",
+                model=pipeline,
+            )
 
     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = (
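Note on the LoRA fix above: the eager ExitStack-based loading is replaced by a small generator, so each LoRA is fetched from the model manager only when ModelPatcher consumes it, the patch is applied to unet_info.context.model before the unet context is entered, and each LoRA is released right after use. A minimal sketch of that generator-based patching pattern follows; ToyPatcher, lora_loader, and the fake delta tensors are illustrative stand-ins and not InvokeAI's real ModelPatcher or model-manager API.

import torch
from contextlib import contextmanager

class ToyPatcher:
    """Illustrative stand-in for a LoRA patcher: add deltas on enter, restore on exit."""
    @classmethod
    @contextmanager
    def apply_lora(cls, model, loras):
        original = {n: p.detach().clone() for n, p in model.named_parameters()}
        try:
            for delta, weight in loras:           # the generator is consumed lazily here
                model.weight.data += weight * delta
            yield model
        finally:
            for n, p in model.named_parameters():
                p.data.copy_(original[n])         # unpatch when the context exits

def lora_loader():
    # Stand-in for fetching each LoRA from the model cache on demand.
    for weight in (0.75, 0.25):
        delta = torch.full((4, 4), 0.01)
        yield (delta, weight)
        del delta                                 # drop it before fetching the next one

model = torch.nn.Linear(4, 4, bias=False)
before = model.weight.detach().clone()
with ToyPatcher.apply_lora(model, lora_loader()):
    assert not torch.equal(model.weight, before)  # patched while the context is active
assert torch.equal(model.weight, before)          # original weights restored afterwards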
@@ -226,21 +228,21 @@ class InpaintInvocation(BaseInvocation):
                ),  # Shorthand for passing all of the parameters above manually
            )
 
        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
        # each time it is called. We only need the first one.
        generator_output = next(outputs)
 
        image_dto = context.services.images.create(
            image=generator_output.image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            session_id=context.graph_execution_state_id,
            node_id=self.id,
            is_intermediate=self.is_intermediate,
        )
 
        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
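For context, the unchanged invoke() code in the hunk above relies on generate() returning an infinite iterator and takes only its first element via next(). A minimal sketch of that pattern; the generate() stub below is illustrative only and not InvokeAI's InvokeAIGenerator API.

def generate(prompt: str):
    """Illustrative stub: yields a fresh result each time it is advanced, forever."""
    seed = 0
    while True:
        yield f"output for {prompt!r} with seed {seed}"
        seed += 1

outputs = generate("an example prompt")
generator_output = next(outputs)   # only the first output is ever computed
print(generator_output)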
@@ -127,7 +127,7 @@ class AddsMaskGuidance:
 
     def _t_for_field(self, field_name: str, t):
         if field_name == "pred_original_sample":
-            return torch.zeros_like(t, dtype=t.dtype) # it represents t=0
+            return self.scheduler.timesteps[-1]
         return t
 
     def apply_mask(self, latents: torch.Tensor, t) -> torch.Tensor:
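On the scheduler fix above: the hard-coded zero tensor for pred_original_sample is replaced with the scheduler's own final timestep. A plausible reading is that some schedulers' discretized timestep tables do not contain 0 (or they key internal state off the configured timesteps), so a literal t=0 can fall outside the schedule during mask guidance, while scheduler.timesteps[-1] always stays on it. A toy illustration follows; ToyScheduler is an assumed stand-in, not a diffusers or InvokeAI scheduler.

import torch

class ToyScheduler:
    """Illustrative scheduler whose discretized timesteps end at 1, not 0."""
    def __init__(self, num_train_timesteps=1000, num_inference_steps=30):
        self.timesteps = torch.linspace(
            num_train_timesteps - 1, 1, num_inference_steps
        ).round().long()

scheduler = ToyScheduler()
t = scheduler.timesteps[10]

old_t = torch.zeros_like(t, dtype=t.dtype)  # old behaviour: hard-coded "t = 0"
new_t = scheduler.timesteps[-1]             # new behaviour: the schedule's final step

print(old_t.item() in scheduler.timesteps.tolist())  # False: 0 is not a scheduled step
print(new_t.item() in scheduler.timesteps.tolist())  # True by construction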