diff --git a/docker/.env.sample b/docker/.env.sample index c0a56402fc..98ad307c04 100644 --- a/docker/.env.sample +++ b/docker/.env.sample @@ -11,5 +11,5 @@ INVOKEAI_ROOT= # HUGGING_FACE_HUB_TOKEN= ## optional variables specific to the docker setup. -# GPU_DRIVER=cuda -# CONTAINER_UID=1000 \ No newline at end of file +# GPU_DRIVER=cuda # or rocm +# CONTAINER_UID=1000 diff --git a/docker/Dockerfile b/docker/Dockerfile index 73852ec66e..6aa6a43a1a 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -18,8 +18,8 @@ ENV INVOKEAI_SRC=/opt/invokeai ENV VIRTUAL_ENV=/opt/venv/invokeai ENV PATH="$VIRTUAL_ENV/bin:$PATH" -ARG TORCH_VERSION=2.0.1 -ARG TORCHVISION_VERSION=0.15.2 +ARG TORCH_VERSION=2.1.0 +ARG TORCHVISION_VERSION=0.16 ARG GPU_DRIVER=cuda ARG TARGETPLATFORM="linux/amd64" # unused but available @@ -35,7 +35,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \ if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \ elif [ "$GPU_DRIVER" = "rocm" ]; then \ - extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.4.2"; \ + extra_index_url_arg="--index-url https://download.pytorch.org/whl/rocm5.6"; \ else \ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \ fi &&\ diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 85deac428e..f7e92d6bf5 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -15,6 +15,10 @@ services: - driver: nvidia count: 1 capabilities: [gpu] + # For AMD support, comment out the deploy section above and uncomment the devices section below: + #devices: + # - /dev/kfd:/dev/kfd + # - /dev/dri:/dev/dri build: context: .. dockerfile: docker/Dockerfile diff --git a/docker/run.sh b/docker/run.sh index 0306c4ddab..4b595b06df 100755 --- a/docker/run.sh +++ b/docker/run.sh @@ -7,5 +7,5 @@ set -e SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}") cd "$SCRIPTDIR" || exit 1 -docker compose up --build -d +docker compose up -d docker compose logs -f diff --git a/installer/lib/installer.py b/installer/lib/installer.py index bf48e3b06d..f59a358b6c 100644 --- a/installer/lib/installer.py +++ b/installer/lib/installer.py @@ -460,10 +460,10 @@ def get_torch_source() -> (Union[str, None], str): url = "https://download.pytorch.org/whl/cpu" if device == "cuda": - url = "https://download.pytorch.org/whl/cu118" + url = "https://download.pytorch.org/whl/cu121" optional_modules = "[xformers,onnx-cuda]" if device == "cuda_and_dml": - url = "https://download.pytorch.org/whl/cu118" + url = "https://download.pytorch.org/whl/cu121" optional_modules = "[xformers,onnx-directml]" # in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13 diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 945df4bd83..c5347bee38 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -92,6 +92,10 @@ class FieldDescriptions: inclusive_low = "The inclusive low value" exclusive_high = "The exclusive high value" decimal_places = "The number of decimal places to round to" + freeu_s1 = 'Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.' + freeu_s2 = 'Scaling factor for stage 2 to attenuate the contributions of the skip features. 
This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.' + freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features." + freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features." class Input(str, Enum): diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 56c13e6816..fac680c78b 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -710,6 +710,8 @@ class DenoiseLatentsInvocation(BaseInvocation): ) with ( ExitStack() as exit_stack, + ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()), + ModelPatcher.apply_freeu(unet_info.context.model, self.unet.freeu_config), set_seamless(unet_info.context.model, self.unet.seamless_axes), unet_info as unet, # Apply the LoRA after unet has been moved to its target device for faster patching. diff --git a/invokeai/app/invocations/math.py b/invokeai/app/invocations/math.py index 3ed325802e..ad676e824c 100644 --- a/invokeai/app/invocations/math.py +++ b/invokeai/app/invocations/math.py @@ -182,8 +182,8 @@ class IntegerMathInvocation(BaseInvocation): operation: INTEGER_OPERATIONS = InputField( default="ADD", description="The operation to perform", ui_choice_labels=INTEGER_OPERATIONS_LABELS ) - a: int = InputField(default=0, description=FieldDescriptions.num_1) - b: int = InputField(default=0, description=FieldDescriptions.num_2) + a: int = InputField(default=1, description=FieldDescriptions.num_1) + b: int = InputField(default=1, description=FieldDescriptions.num_2) @field_validator("b") def no_unrepresentable_results(cls, v: int, info: ValidationInfo): @@ -256,8 +256,8 @@ class FloatMathInvocation(BaseInvocation): operation: FLOAT_OPERATIONS = InputField( default="ADD", description="The operation to perform", ui_choice_labels=FLOAT_OPERATIONS_LABELS ) - a: float = InputField(default=0, description=FieldDescriptions.num_1) - b: float = InputField(default=0, description=FieldDescriptions.num_2) + a: float = InputField(default=1, description=FieldDescriptions.num_1) + b: float = InputField(default=1, description=FieldDescriptions.num_2) @field_validator("b") def no_unrepresentable_results(cls, v: float, info: ValidationInfo): diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py index dfa1075d6e..4c57996e67 100644 --- a/invokeai/app/invocations/model.py +++ b/invokeai/app/invocations/model.py @@ -17,6 +17,22 @@ from .baseinvocation import ( invocation_output, ) +# TODO: Permanent fix for this +# from invokeai.app.invocations.shared import FreeUConfig + + +class FreeUConfig(BaseModel): + """ + Configuration for the FreeU hyperparameters. 
+ - https://huggingface.co/docs/diffusers/main/en/using-diffusers/freeu + - https://github.com/ChenyangSi/FreeU + """ + + s1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s1) + s2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s2) + b1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b1) + b2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b2) + class ModelInfo(BaseModel): model_name: str = Field(description="Info to load submodel") @@ -36,6 +52,7 @@ class UNetField(BaseModel): scheduler: ModelInfo = Field(description="Info to load scheduler submodel") loras: List[LoraInfo] = Field(description="Loras to apply on model loading") seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless') + freeu_config: Optional[FreeUConfig] = Field(default=None, description="FreeU configuration") class ClipField(BaseModel): @@ -51,15 +68,34 @@ class VaeField(BaseModel): seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless') -@invocation_output("model_loader_output") -class ModelLoaderOutput(BaseInvocationOutput): - """Model loader output""" +@invocation_output("unet_output") +class UNetOutput(BaseInvocationOutput): + """Base class for invocations that output a UNet field""" unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet") - clip: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP") + + +@invocation_output("vae_output") +class VAEOutput(BaseInvocationOutput): + """Base class for invocations that output a VAE field""" + vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE") +@invocation_output("clip_output") +class CLIPOutput(BaseInvocationOutput): + """Base class for invocations that output a CLIP field""" + + clip: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP") + + +@invocation_output("model_loader_output") +class ModelLoaderOutput(UNetOutput, CLIPOutput, VAEOutput): + """Model loader output""" + + pass + + class MainModelField(BaseModel): """Main model field""" @@ -366,13 +402,6 @@ class VAEModelField(BaseModel): model_config = ConfigDict(protected_namespaces=()) -@invocation_output("vae_loader_output") -class VaeLoaderOutput(BaseInvocationOutput): - """VAE output""" - - vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE") - - @invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.0") class VaeLoaderInvocation(BaseInvocation): """Loads a VAE model, outputting a VaeLoaderOutput""" @@ -384,7 +413,7 @@ class VaeLoaderInvocation(BaseInvocation): title="VAE", ) - def invoke(self, context: InvocationContext) -> VaeLoaderOutput: + def invoke(self, context: InvocationContext) -> VAEOutput: base_model = self.vae_model.base_model model_name = self.vae_model.model_name model_type = ModelType.Vae @@ -395,7 +424,7 @@ class VaeLoaderInvocation(BaseInvocation): model_type=model_type, ): raise Exception(f"Unkown vae name: {model_name}!") - return VaeLoaderOutput( + return VAEOutput( vae=VaeField( vae=ModelInfo( model_name=model_name, @@ -457,3 +486,24 @@ class SeamlessModeInvocation(BaseInvocation): vae.seamless_axes = seamless_axes_list return SeamlessModeOutput(unet=unet, vae=vae) + + +@invocation("freeu", title="FreeU", tags=["freeu"], category="unet", version="1.0.0") +class FreeUInvocation(BaseInvocation): + """ + Applies FreeU to the UNet. 
Suggested values (b1/b2/s1/s2): + + SD1.5: 1.2/1.4/0.9/0.2, + SD2: 1.1/1.2/0.9/0.2, + SDXL: 1.1/1.2/0.6/0.4, + """ + + unet: UNetField = InputField(description=FieldDescriptions.unet, input=Input.Connection, title="UNet") + b1: float = InputField(default=1.2, ge=-1, le=3, description=FieldDescriptions.freeu_b1) + b2: float = InputField(default=1.4, ge=-1, le=3, description=FieldDescriptions.freeu_b2) + s1: float = InputField(default=0.9, ge=-1, le=3, description=FieldDescriptions.freeu_s1) + s2: float = InputField(default=0.2, ge=-1, le=3, description=FieldDescriptions.freeu_s2) + + def invoke(self, context: InvocationContext) -> UNetOutput: + self.unet.freeu_config = FreeUConfig(s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2) + return UNetOutput(unet=self.unet) diff --git a/invokeai/app/invocations/shared.py b/invokeai/app/invocations/shared.py new file mode 100644 index 0000000000..db742a3433 --- /dev/null +++ b/invokeai/app/invocations/shared.py @@ -0,0 +1,16 @@ +from pydantic import BaseModel, Field + +from invokeai.app.invocations.baseinvocation import FieldDescriptions + + +class FreeUConfig(BaseModel): + """ + Configuration for the FreeU hyperparameters. + - https://huggingface.co/docs/diffusers/main/en/using-diffusers/freeu + - https://github.com/ChenyangSi/FreeU + """ + + s1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s1) + s2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_s2) + b1: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b1) + b2: float = Field(ge=-1, le=3, description=FieldDescriptions.freeu_b2) diff --git a/invokeai/backend/model_management/lora.py b/invokeai/backend/model_management/lora.py index 2a0e465e03..18eff002b4 100644 --- a/invokeai/backend/model_management/lora.py +++ b/invokeai/backend/model_management/lora.py @@ -12,6 +12,8 @@ from diffusers.models import UNet2DConditionModel from safetensors.torch import load_file from transformers import CLIPTextModel, CLIPTokenizer +from invokeai.app.invocations.shared import FreeUConfig + from .models.lora import LoRAModel """ @@ -240,6 +242,25 @@ class ModelPatcher: while len(skipped_layers) > 0: text_encoder.text_model.encoder.layers.append(skipped_layers.pop()) + @classmethod + @contextmanager + def apply_freeu( + cls, + unet: UNet2DConditionModel, + freeu_config: Optional[FreeUConfig] = None, + ): + did_apply_freeu = False + try: + if freeu_config is not None: + unet.enable_freeu(b1=freeu_config.b1, b2=freeu_config.b2, s1=freeu_config.s1, s2=freeu_config.s2) + did_apply_freeu = True + + yield + + finally: + if did_apply_freeu: + unet.disable_freeu() + class TextualInversionModel: embedding: torch.Tensor # [n, 768]|[n, 1280] diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 0943b78bf8..1b65326f6e 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -546,11 +546,13 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): # Handle ControlNet(s) and T2I-Adapter(s) down_block_additional_residuals = None mid_block_additional_residual = None - if control_data is not None and t2i_adapter_data is not None: - # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility - # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. 
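# --- Illustrative sketch, not part of the patch --------------------------------
# With the diffusers 0.22 bump, T2I-Adapter residuals get a dedicated
# `down_intrablock_additional_residuals` argument on UNet2DConditionModel.forward,
# so they no longer compete with ControlNet for `down_block_additional_residuals`,
# and the mutual-exclusion guard removed below becomes unnecessary. A minimal
# sketch of the resulting routing (function and argument names here are
# illustrative, not the pipeline's own):
import torch
from diffusers import UNet2DConditionModel


def denoise_step(
    unet: UNet2DConditionModel,
    sample: torch.Tensor,
    timestep: torch.Tensor,
    encoder_hidden_states: torch.Tensor,
    controlnet_down: list[torch.Tensor] | None,  # ControlNet: added to the skip connections
    controlnet_mid: torch.Tensor | None,  # ControlNet: added to the mid-block output
    t2i_intrablock: list[torch.Tensor] | None,  # T2I-Adapter: injected inside the down blocks
) -> torch.Tensor:
    # Both adapter types can now be active in the same denoising step.
    return unet(
        sample,
        timestep,
        encoder_hidden_states=encoder_hidden_states,
        down_block_additional_residuals=controlnet_down,
        mid_block_additional_residual=controlnet_mid,
        down_intrablock_additional_residuals=t2i_intrablock,
    ).sample
# --- end sketch -----------------------------------------------------------------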
- raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") - elif control_data is not None: + down_intrablock_additional_residuals = None + # if control_data is not None and t2i_adapter_data is not None: + # TODO(ryand): This is a limitation of the UNet2DConditionModel API, not a fundamental incompatibility + # between ControlNets and T2I-Adapters. We will try to fix this upstream in diffusers. + # raise Exception("ControlNet(s) and T2I-Adapter(s) cannot be used simultaneously (yet).") + # elif control_data is not None: + if control_data is not None: down_block_additional_residuals, mid_block_additional_residual = self.invokeai_diffuser.do_controlnet_step( control_data=control_data, sample=latent_model_input, @@ -559,7 +561,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): total_step_count=total_step_count, conditioning_data=conditioning_data, ) - elif t2i_adapter_data is not None: + # elif t2i_adapter_data is not None: + if t2i_adapter_data is not None: accum_adapter_state = None for single_t2i_adapter_data in t2i_adapter_data: # Determine the T2I-Adapter weights for the current denoising step. @@ -584,7 +587,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): for idx, value in enumerate(single_t2i_adapter_data.adapter_state): accum_adapter_state[idx] += value * t2i_adapter_weight - down_block_additional_residuals = accum_adapter_state + # down_block_additional_residuals = accum_adapter_state + down_intrablock_additional_residuals = accum_adapter_state uc_noise_pred, c_noise_pred = self.invokeai_diffuser.do_unet_step( sample=latent_model_input, @@ -593,8 +597,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): total_step_count=total_step_count, conditioning_data=conditioning_data, # extra: - down_block_additional_residuals=down_block_additional_residuals, - mid_block_additional_residual=mid_block_additional_residual, + down_block_additional_residuals=down_block_additional_residuals, # for ControlNet + mid_block_additional_residual=mid_block_additional_residual, # for ControlNet + down_intrablock_additional_residuals=down_intrablock_additional_residuals, # for T2I-Adapter ) guidance_scale = conditioning_data.guidance_scale diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index 943fe7b307..b7c0058fe9 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -260,7 +260,6 @@ class InvokeAIDiffuserComponent: conditioning_data, **kwargs, ) - else: ( unconditioned_next_x, @@ -410,6 +409,15 @@ class InvokeAIDiffuserComponent: uncond_down_block.append(_uncond_down) cond_down_block.append(_cond_down) + uncond_down_intrablock, cond_down_intrablock = None, None + down_intrablock_additional_residuals = kwargs.pop("down_intrablock_additional_residuals", None) + if down_intrablock_additional_residuals is not None: + uncond_down_intrablock, cond_down_intrablock = [], [] + for down_intrablock in down_intrablock_additional_residuals: + _uncond_down, _cond_down = down_intrablock.chunk(2) + uncond_down_intrablock.append(_uncond_down) + cond_down_intrablock.append(_cond_down) + uncond_mid_block, cond_mid_block = None, None mid_block_additional_residual = kwargs.pop("mid_block_additional_residual", None) if mid_block_additional_residual is not None: @@ -441,6 +449,7 @@ class InvokeAIDiffuserComponent: 
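# --- Illustrative sketch, not part of the patch --------------------------------
# The `down_intrablock_additional_residuals` handling added above mirrors the
# existing ControlNet plumbing: under batched classifier-free guidance, adapter
# residuals are computed on a stacked [unconditioned, conditioned] batch, so
# each tensor is split with chunk(2) before the separate uncond/cond UNet calls.
# A standalone version of that split (names are illustrative):
import torch


def split_for_cfg(
    residuals: list[torch.Tensor] | None,
) -> tuple[list[torch.Tensor] | None, list[torch.Tensor] | None]:
    if residuals is None:
        return None, None
    uncond, cond = [], []
    for r in residuals:  # each r has shape [2 * batch, ...]
        r_uncond, r_cond = r.chunk(2)  # split along dim 0, the batch dimension
        uncond.append(r_uncond)
        cond.append(r_cond)
    return uncond, cond


# e.g.: uncond_down_intrablock, cond_down_intrablock = split_for_cfg(intrablock_residuals)
# --- end sketch -----------------------------------------------------------------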
cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=uncond_down_block, mid_block_additional_residual=uncond_mid_block, + down_intrablock_additional_residuals=uncond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -470,6 +479,7 @@ class InvokeAIDiffuserComponent: cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=cond_down_block, mid_block_additional_residual=cond_mid_block, + down_intrablock_additional_residuals=cond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -494,6 +504,15 @@ class InvokeAIDiffuserComponent: uncond_down_block.append(_uncond_down) cond_down_block.append(_cond_down) + uncond_down_intrablock, cond_down_intrablock = None, None + down_intrablock_additional_residuals = kwargs.pop("down_intrablock_additional_residuals", None) + if down_intrablock_additional_residuals is not None: + uncond_down_intrablock, cond_down_intrablock = [], [] + for down_intrablock in down_intrablock_additional_residuals: + _uncond_down, _cond_down = down_intrablock.chunk(2) + uncond_down_intrablock.append(_uncond_down) + cond_down_intrablock.append(_cond_down) + uncond_mid_block, cond_mid_block = None, None mid_block_additional_residual = kwargs.pop("mid_block_additional_residual", None) if mid_block_additional_residual is not None: @@ -522,6 +541,7 @@ class InvokeAIDiffuserComponent: {"swap_cross_attn_context": cross_attn_processor_context}, down_block_additional_residuals=uncond_down_block, mid_block_additional_residual=uncond_mid_block, + down_intrablock_additional_residuals=uncond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) @@ -541,6 +561,7 @@ class InvokeAIDiffuserComponent: {"swap_cross_attn_context": cross_attn_processor_context}, down_block_additional_residuals=cond_down_block, mid_block_additional_residual=cond_mid_block, + down_intrablock_additional_residuals=cond_down_intrablock, added_cond_kwargs=added_cond_kwargs, **kwargs, ) diff --git a/invokeai/frontend/install/invokeai_update.py b/invokeai/frontend/install/invokeai_update.py index b065fcfe5b..551f2acdf2 100644 --- a/invokeai/frontend/install/invokeai_update.py +++ b/invokeai/frontend/install/invokeai_update.py @@ -63,8 +63,8 @@ def welcome(latest_release: str, latest_prerelease: str): yield "[bold yellow]Options:" yield f"""[1] Update to the latest [bold]official release[/bold] ([italic]{latest_release}[/italic]) [2] Update to the latest [bold]pre-release[/bold] (may be buggy; caveat emptor!) 
([italic]{latest_prerelease}[/italic]) -[2] Manually enter the [bold]tag name[/bold] for the version you wish to update to -[3] Manually enter the [bold]branch name[/bold] for the version you wish to update to""" +[3] Manually enter the [bold]tag name[/bold] for the version you wish to update to +[4] Manually enter the [bold]branch name[/bold] for the version you wish to update to""" console.rule() print( diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts index cbc88966a7..772ea216c0 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts @@ -12,6 +12,7 @@ import { addFirstListImagesListener } from './listeners/addFirstListImagesListen import { addAnyEnqueuedListener } from './listeners/anyEnqueued'; import { addAppConfigReceivedListener } from './listeners/appConfigReceived'; import { addAppStartedListener } from './listeners/appStarted'; +import { addBatchEnqueuedListener } from './listeners/batchEnqueued'; import { addDeleteBoardAndImagesFulfilledListener } from './listeners/boardAndImagesDeleted'; import { addBoardIdSelectedListener } from './listeners/boardIdSelected'; import { addCanvasCopiedToClipboardListener } from './listeners/canvasCopiedToClipboard'; @@ -71,8 +72,6 @@ import { addStagingAreaImageSavedListener } from './listeners/stagingAreaImageSa import { addTabChangedListener } from './listeners/tabChanged'; import { addUpscaleRequestedListener } from './listeners/upscaleRequested'; import { addWorkflowLoadedListener } from './listeners/workflowLoaded'; -import { addBatchEnqueuedListener } from './listeners/batchEnqueued'; -import { addControlAdapterAddedOrEnabledListener } from './listeners/controlAdapterAddedOrEnabled'; export const listenerMiddleware = createListenerMiddleware(); @@ -200,7 +199,3 @@ addTabChangedListener(); // Dynamic prompts addDynamicPromptsListener(); - -// Display toast when controlnet or t2i adapter enabled -// TODO: Remove when they can both be enabled at same time -addControlAdapterAddedOrEnabledListener(); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts deleted file mode 100644 index bc5387c1fb..0000000000 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlAdapterAddedOrEnabled.ts +++ /dev/null @@ -1,87 +0,0 @@ -import { isAnyOf } from '@reduxjs/toolkit'; -import { - controlAdapterAdded, - controlAdapterAddedFromImage, - controlAdapterIsEnabledChanged, - controlAdapterRecalled, - selectControlAdapterAll, - selectControlAdapterById, -} from 'features/controlAdapters/store/controlAdaptersSlice'; -import { ControlAdapterType } from 'features/controlAdapters/store/types'; -import { addToast } from 'features/system/store/systemSlice'; -import i18n from 'i18n'; -import { startAppListening } from '..'; - -const isAnyControlAdapterAddedOrEnabled = isAnyOf( - controlAdapterAdded, - controlAdapterAddedFromImage, - controlAdapterRecalled, - controlAdapterIsEnabledChanged -); - -/** - * Until we can have both controlnet and t2i adapter enabled at once, they are mutually exclusive - * This displays a toast when one is enabled and the other is already enabled, or one is added - * with the other enabled - */ -export const 
addControlAdapterAddedOrEnabledListener = () => { - startAppListening({ - matcher: isAnyControlAdapterAddedOrEnabled, - effect: async (action, { dispatch, getOriginalState }) => { - const controlAdapters = getOriginalState().controlAdapters; - - const hasEnabledControlNets = selectControlAdapterAll( - controlAdapters - ).some((ca) => ca.isEnabled && ca.type === 'controlnet'); - - const hasEnabledT2IAdapters = selectControlAdapterAll( - controlAdapters - ).some((ca) => ca.isEnabled && ca.type === 't2i_adapter'); - - let caType: ControlAdapterType | null = null; - - if (controlAdapterAdded.match(action)) { - caType = action.payload.type; - } - - if (controlAdapterAddedFromImage.match(action)) { - caType = action.payload.type; - } - - if (controlAdapterRecalled.match(action)) { - caType = action.payload.type; - } - - if (controlAdapterIsEnabledChanged.match(action)) { - const _caType = selectControlAdapterById( - controlAdapters, - action.payload.id - )?.type; - if (!_caType) { - return; - } - caType = _caType; - } - - if ( - (caType === 'controlnet' && hasEnabledT2IAdapters) || - (caType === 't2i_adapter' && hasEnabledControlNets) - ) { - const title = - caType === 'controlnet' - ? i18n.t('controlnet.controlNetEnabledT2IDisabled') - : i18n.t('controlnet.t2iEnabledControlNetDisabled'); - - const description = i18n.t('controlnet.controlNetT2IMutexDesc'); - - dispatch( - addToast({ - title, - description, - status: 'warning', - }) - ); - } - }, - }); -}; diff --git a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts index a3645fad9d..9e293f1104 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts @@ -88,61 +88,6 @@ export const selectValidT2IAdapters = (controlAdapters: ControlAdaptersState) => (ca.processorType === 'none' && Boolean(ca.controlImage))) ); -// TODO: I think we can safely remove this? 
-// const disableAllIPAdapters = ( -// state: ControlAdaptersState, -// exclude?: string -// ) => { -// const updates: Update[] = selectAllIPAdapters(state) -// .filter((ca) => ca.id !== exclude) -// .map((ca) => ({ -// id: ca.id, -// changes: { isEnabled: false }, -// })); -// caAdapter.updateMany(state, updates); -// }; - -const disableAllControlNets = ( - state: ControlAdaptersState, - exclude?: string -) => { - const updates: Update[] = selectAllControlNets(state) - .filter((ca) => ca.id !== exclude) - .map((ca) => ({ - id: ca.id, - changes: { isEnabled: false }, - })); - caAdapter.updateMany(state, updates); -}; - -const disableAllT2IAdapters = ( - state: ControlAdaptersState, - exclude?: string -) => { - const updates: Update[] = selectAllT2IAdapters(state) - .filter((ca) => ca.id !== exclude) - .map((ca) => ({ - id: ca.id, - changes: { isEnabled: false }, - })); - caAdapter.updateMany(state, updates); -}; - -const disableIncompatibleControlAdapters = ( - state: ControlAdaptersState, - type: ControlAdapterType, - exclude?: string -) => { - if (type === 'controlnet') { - // we cannot do controlnet + t2i adapter, if we are enabled a controlnet, disable all t2is - disableAllT2IAdapters(state, exclude); - } - if (type === 't2i_adapter') { - // we cannot do controlnet + t2i adapter, if we are enabled a t2i, disable controlnets - disableAllControlNets(state, exclude); - } -}; - export const controlAdaptersSlice = createSlice({ name: 'controlAdapters', initialState: initialControlAdapterState, @@ -158,7 +103,6 @@ export const controlAdaptersSlice = createSlice({ ) => { const { id, type, overrides } = action.payload; caAdapter.addOne(state, buildControlAdapter(id, type, overrides)); - disableIncompatibleControlAdapters(state, type, id); }, prepare: ({ type, @@ -175,8 +119,6 @@ export const controlAdaptersSlice = createSlice({ action: PayloadAction ) => { caAdapter.addOne(state, action.payload); - const { type, id } = action.payload; - disableIncompatibleControlAdapters(state, type, id); }, controlAdapterDuplicated: { reducer: ( @@ -196,8 +138,6 @@ export const controlAdaptersSlice = createSlice({ isEnabled: true, }); caAdapter.addOne(state, newControlAdapter); - const { type } = newControlAdapter; - disableIncompatibleControlAdapters(state, type, newId); }, prepare: (id: string) => { return { payload: { id, newId: uuidv4() } }; @@ -217,7 +157,6 @@ export const controlAdaptersSlice = createSlice({ state, buildControlAdapter(id, type, { controlImage }) ); - disableIncompatibleControlAdapters(state, type, id); }, prepare: (payload: { type: ControlAdapterType; @@ -235,12 +174,6 @@ export const controlAdaptersSlice = createSlice({ ) => { const { id, isEnabled } = action.payload; caAdapter.updateOne(state, { id, changes: { isEnabled } }); - if (isEnabled) { - // we are enabling a control adapter. 
due to limitations in the current system, we may need to disable other adapters - // TODO: disable when multiple IP adapters are supported - const ca = selectControlAdapterById(state, id); - ca && disableIncompatibleControlAdapters(state, ca.type, id); - } }, controlAdapterImageChanged: ( state, diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index 6ff9d49bf9..8ac2e2dcb2 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -8808,11 +8808,11 @@ export type components = { ui_order: number | null; }; /** - * StableDiffusionOnnxModelFormat + * IPAdapterModelFormat * @description An enumeration. * @enum {string} */ - StableDiffusionOnnxModelFormat: "olive" | "onnx"; + IPAdapterModelFormat: "invokeai"; /** * IPAdapterModelFormat * @description An enumeration. @@ -8832,11 +8832,11 @@ export type components = { */ StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; /** - * CLIPVisionModelFormat + * StableDiffusionOnnxModelFormat * @description An enumeration. * @enum {string} */ - CLIPVisionModelFormat: "diffusers"; + StableDiffusionOnnxModelFormat: "olive" | "onnx"; /** * StableDiffusion1ModelFormat * @description An enumeration. diff --git a/pyproject.toml b/pyproject.toml index b7a851ce75..ba87e6a68b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,7 +42,7 @@ dependencies = [ "datasets", # When bumping diffusers beyond 0.21, make sure to address this: # https://github.com/invoke-ai/InvokeAI/blob/fc09ab7e13cb7ca5389100d149b6422ace7b8ed3/invokeai/app/invocations/latent.py#L513 - "diffusers[torch]~=0.21.0", + "diffusers[torch]~=0.22.0", "dnspython~=2.4.0", "dynamicprompts", "easing-functions", @@ -81,8 +81,8 @@ dependencies = [ "semver~=3.0.1", "send2trash", "test-tube~=0.7.5", - "torch~=2.0.1", - "torchvision~=0.15.2", + "torch~=2.1.0", + "torchvision~=0.16", "torchmetrics~=0.11.0", "torchsde~=0.2.5", "transformers~=4.31.0", @@ -110,8 +110,8 @@ dependencies = [ "pytest-datadir", ] "xformers" = [ - "xformers~=0.0.19; sys_platform!='darwin'", - "triton; sys_platform=='linux'", + "xformers==0.0.22post7; sys_platform!='darwin'", + "triton; sys_platform=='linux'", ] "onnx" = ["onnxruntime"] "onnx-cuda" = ["onnxruntime-gpu"]
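# --- Illustrative sketch, not part of the patch --------------------------------
# ModelPatcher.apply_freeu (added above in lora.py) delegates to diffusers'
# UNet2DConditionModel.enable_freeu/disable_freeu, which are available in the
# 0.22 line pinned in pyproject.toml. A minimal standalone version of the same
# enable/restore pattern, shown with the SD1.5 values suggested in the
# FreeUInvocation docstring:
from contextlib import contextmanager

from diffusers import UNet2DConditionModel


@contextmanager
def freeu_enabled(unet: UNet2DConditionModel, b1: float, b2: float, s1: float, s2: float):
    # b1/b2 amplify backbone features; s1/s2 attenuate skip features to
    # counter the oversmoothing the amplification would otherwise cause.
    unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
    try:
        yield unet
    finally:
        unet.disable_freeu()


# Usage (SD1.5 suggested values, b1/b2/s1/s2 = 1.2/1.4/0.9/0.2), where `pipe`
# is assumed to be a loaded StableDiffusionPipeline:
# with freeu_enabled(pipe.unet, b1=1.2, b2=1.4, s1=0.9, s2=0.2):
#     image = pipe(prompt).images[0]
# --- end sketch -----------------------------------------------------------------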