diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py
index 6cb9a73976..59b76da01b 100644
--- a/invokeai/app/invocations/controlnet_image_processors.py
+++ b/invokeai/app/invocations/controlnet_image_processors.py
@@ -34,7 +34,7 @@ from .image import ImageOutput, build_image_output, PILInvocationConfig
 class ControlField(BaseModel):
     image: ImageField = Field(default=None, description="processed image")
     control_model: Optional[str] = Field(default=None, description="control model used")
-    control_weight: Optional[float] = Field(default=None, description="weight given to controlnet")
+    control_weight: Optional[float] = Field(default=1, description="weight given to controlnet")
     begin_step_percent: float = Field(default=0, ge=0, le=1,
                                       description="% of total steps at which controlnet is first applied")
     end_step_percent: float = Field(default=1, ge=0, le=1,
@@ -61,7 +61,7 @@ class ControlNetInvocation(BaseInvocation):
     # Inputs
     image: ImageField = Field(default=None, description="image to process")
     control_model: str = Field(default=None, description="control model to use")
-    control_weight: float = Field(default=0.5, ge=0, le=1, description="weight given to controlnet")
+    control_weight: float = Field(default=1.0, ge=0, le=1, description="weight given to controlnet")
     # TODO: add support in backend core for begin_step_percent, end_step_percent, guess_mode
     begin_step_percent: float = Field(default=0, ge=0, le=1,
                                       description="% of total steps at which controlnet is first applied")
diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index ad4adc6fbb..65f2bf22a9 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -260,24 +260,25 @@ class TextToLatentsInvocation(BaseInvocation):
         conditioning_data = self.get_conditioning_data(context, model)
         # print("type of control input: ", type(self.control))
         if self.control is None:
-            print("control input is None")
+            # print("control input is None")
             control_list = None
         elif isinstance(self.control, list) and len(self.control) == 0:
-            print("control input is empty list")
+            # print("control input is empty list")
             control_list = None
         elif isinstance(self.control, ControlField):
-            print("control input is ControlField")
+            # print("control input is ControlField")
             control_list = [self.control]
         elif isinstance(self.control, list) and len(self.control) > 0 and isinstance(self.control[0], ControlField):
-            print("control input is list[ControlField]")
+            # print("control input is list[ControlField]")
             control_list = self.control
         else:
-            print("input control is unrecognized:", type(self.control))
+            #print("input control is unrecognized:", type(self.control))
             control_list = None
         #if (self.control is None or (isinstance(self.control, list) and len(self.control)==0)):
         if (control_list is None):
             control_models = None
+            control_data = None
         # from above handling, any control that is not None should now be of type list[ControlField]
         else:
             # FIXME: add checks to skip entry if model or image is None
diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index 5fe8289741..ec2902e4d6 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -670,7 +670,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         else:
             latent_control_input = latent_model_input
         # control_data should be type List[ControlNetData]
-
         # this loop covers both ControlNet (one ControlNetData in list)
         # and MultiControlNet (multiple ControlNetData in list)
         for i, control_datum in enumerate(control_data):