diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index 77ac23fbdd..c9a0ca4423 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -755,7 +755,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
             denoising_end=self.denoising_end,
         )

-        (result_latents, result_attention_map_saver,) = pipeline.latents_from_embeddings(
+        (
+            result_latents,
+            result_attention_map_saver,
+        ) = pipeline.latents_from_embeddings(
             latents=latents,
             timesteps=timesteps,
             init_timestep=init_timestep,
diff --git a/invokeai/app/invocations/math.py b/invokeai/app/invocations/math.py
index fab689ae2b..585122d091 100644
--- a/invokeai/app/invocations/math.py
+++ b/invokeai/app/invocations/math.py
@@ -207,7 +207,7 @@ class IntegerMathInvocation(BaseInvocation):
         elif self.operation == "DIV":
             return IntegerOutput(value=int(self.a / self.b))
         elif self.operation == "EXP":
-            return IntegerOutput(value=self.a ** self.b)
+            return IntegerOutput(value=self.a**self.b)
         elif self.operation == "MOD":
             return IntegerOutput(value=self.a % self.b)
         elif self.operation == "ABS":
@@ -281,7 +281,7 @@ class FloatMathInvocation(BaseInvocation):
         elif self.operation == "DIV":
             return FloatOutput(value=self.a / self.b)
         elif self.operation == "EXP":
-            return FloatOutput(value=self.a ** self.b)
+            return FloatOutput(value=self.a**self.b)
         elif self.operation == "SQRT":
             return FloatOutput(value=np.sqrt(self.a))
         elif self.operation == "ABS":
diff --git a/invokeai/backend/ip_adapter/resampler.py b/invokeai/backend/ip_adapter/resampler.py
index 0c07a7fa1b..84224fd359 100644
--- a/invokeai/backend/ip_adapter/resampler.py
+++ b/invokeai/backend/ip_adapter/resampler.py
@@ -33,7 +33,7 @@ def reshape_tensor(x, heads):
 class PerceiverAttention(nn.Module):
     def __init__(self, *, dim, dim_head=64, heads=8):
         super().__init__()
-        self.scale = dim_head ** -0.5
+        self.scale = dim_head**-0.5
         self.dim_head = dim_head
         self.heads = heads
         inner_dim = dim_head * heads
@@ -91,7 +91,7 @@ class Resampler(nn.Module):
     ):
         super().__init__()

-        self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim ** 0.5)
+        self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)

         self.proj_in = nn.Linear(embedding_dim, dim)
diff --git a/invokeai/backend/model_management/memory_snapshot.py b/invokeai/backend/model_management/memory_snapshot.py
index e5c98f2307..fe54af191c 100644
--- a/invokeai/backend/model_management/memory_snapshot.py
+++ b/invokeai/backend/model_management/memory_snapshot.py
@@ -6,7 +6,7 @@ import torch

 from invokeai.backend.model_management.libc_util import LibcUtil, Struct_mallinfo2

-GB = 2 ** 30  # 1 GB
+GB = 2**30  # 1 GB


 class MemorySnapshot:
diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index 117dc01eb9..83af789219 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -49,7 +49,7 @@ DEFAULT_MAX_VRAM_CACHE_SIZE = 2.75
 # actual size of a gig
 GIG = 1073741824
 # Size of a MB in bytes.
-MB = 2 ** 20
+MB = 2**20


 @dataclass
diff --git a/invokeai/backend/model_manager/config.py b/invokeai/backend/model_manager/config.py
index bf4a7f5a44..ff835b1f3f 100644
--- a/invokeai/backend/model_manager/config.py
+++ b/invokeai/backend/model_manager/config.py
@@ -268,7 +268,14 @@ AnyModelConfig = Union[
     T2IConfig,
 ]

-# Preferred alternative is a discriminated Union, but it breaks FastAPI when applied to a route.
+AnyModelConfigValidator = TypeAdapter(AnyModelConfig)
+
+# IMPLEMENTATION NOTE:
+# The preferred alternative to the above is a discriminated Union as shown
+# below. However, it breaks FastAPI when used as the input Body parameter in a route.
+# This is a known issue. Please see:
+# https://github.com/tiangolo/fastapi/discussions/9761 and
+# https://github.com/tiangolo/fastapi/discussions/9287
 # AnyModelConfig = Annotated[
 #     Union[
 #         _MainModelConfig,
@@ -284,8 +291,6 @@ AnyModelConfig = Union[
 #     Field(discriminator="type"),
 # ]

-AnyModelConfigValidator = TypeAdapter(AnyModelConfig)
-

 class ModelConfigFactory(object):
     """Class for parsing config dicts into StableDiffusion Config obects."""
diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
index 4931ab7b50..3cb1862004 100644
--- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
+++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
@@ -261,7 +261,7 @@ class InvokeAICrossAttentionMixin:
         if q.shape[1] <= 4096:  # (512x512) max q.shape[1]: 4096
             return self.einsum_lowest_level(q, k, v, None, None, None)
         else:
-            slice_size = math.floor(2 ** 30 / (q.shape[0] * q.shape[1]))
+            slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1]))
             return self.einsum_op_slice_dim1(q, k, v, slice_size)

     def einsum_op_mps_v2(self, q, k, v):
diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
index b4f984b676..b7c0058fe9 100644
--- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
+++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
@@ -175,7 +175,10 @@ class InvokeAIDiffuserComponent:
                     dim=0,
                 ),
             }
-        (encoder_hidden_states, encoder_attention_mask,) = self._concat_conditionings_for_batch(
+        (
+            encoder_hidden_states,
+            encoder_attention_mask,
+        ) = self._concat_conditionings_for_batch(
             conditioning_data.unconditioned_embeddings.embeds,
             conditioning_data.text_embeddings.embeds,
         )
@@ -237,7 +240,10 @@ class InvokeAIDiffuserComponent:
         wants_cross_attention_control = len(cross_attention_control_types_to_do) > 0

         if wants_cross_attention_control:
-            (unconditioned_next_x, conditioned_next_x,) = self._apply_cross_attention_controlled_conditioning(
+            (
+                unconditioned_next_x,
+                conditioned_next_x,
+            ) = self._apply_cross_attention_controlled_conditioning(
                 sample,
                 timestep,
                 conditioning_data,
@@ -245,14 +251,20 @@ class InvokeAIDiffuserComponent:
                 **kwargs,
             )
         elif self.sequential_guidance:
-            (unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning_sequentially(
+            (
+                unconditioned_next_x,
+                conditioned_next_x,
+            ) = self._apply_standard_conditioning_sequentially(
                 sample,
                 timestep,
                 conditioning_data,
                 **kwargs,
             )
         else:
-            (unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning(
+            (
+                unconditioned_next_x,
+                conditioned_next_x,
+            ) = self._apply_standard_conditioning(
                 sample,
                 timestep,
                 conditioning_data,
diff --git a/invokeai/backend/training/textual_inversion_training.py b/invokeai/backend/training/textual_inversion_training.py
index 84473a9ffa..9bc1d188bc 100644
--- a/invokeai/backend/training/textual_inversion_training.py
+++ b/invokeai/backend/training/textual_inversion_training.py
@@ -470,7 +470,10 @@ class TextualInversionDataset(Dataset):

         if self.center_crop:
             crop = min(img.shape[0], img.shape[1])
-            (h, w,) = (
+            (
+                h,
+                w,
+            ) = (
                 img.shape[0],
                 img.shape[1],
             )
diff --git a/invokeai/backend/util/mps_fixes.py b/invokeai/backend/util/mps_fixes.py
index dc428e9d51..ce21d33b88 100644
--- a/invokeai/backend/util/mps_fixes.py
+++ b/invokeai/backend/util/mps_fixes.py
@@ -203,7 +203,7 @@ class ChunkedSlicedAttnProcessor:
         if attn.upcast_attention:
             out_item_size = 4

-        chunk_size = 2 ** 29
+        chunk_size = 2**29

         out_size = query.shape[1] * key.shape[1] * out_item_size
         chunks_count = min(query.shape[1], math.ceil((out_size - 1) / chunk_size))
diff --git a/invokeai/backend/util/util.py b/invokeai/backend/util/util.py
index 2f31145f5b..d10d5a0a27 100644
--- a/invokeai/backend/util/util.py
+++ b/invokeai/backend/util/util.py
@@ -210,7 +210,7 @@ def parallel_data_prefetch(

     return gather_res


-def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3):
+def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3):
     delta = (res[0] / shape[0], res[1] / shape[1])
     d = (shape[0] // res[0], shape[1] // res[1])
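
A note on the `config.py` hunk above: `AnyModelConfigValidator` is a Pydantic v2 `TypeAdapter` wrapped around the plain `AnyModelConfig` union, so callers can validate raw config dicts without the `Annotated[..., Field(discriminator="type")]` form that the IMPLEMENTATION NOTE says breaks FastAPI routes. Below is a minimal sketch of that pattern; the two model classes are hypothetical stand-ins, not the real `AnyModelConfig` members defined in `invokeai/backend/model_manager/config.py`.

```python
# Minimal sketch of the TypeAdapter pattern; the model classes here are
# hypothetical stand-ins, not the real AnyModelConfig members.
from typing import Literal, Union

from pydantic import BaseModel, TypeAdapter


class VaeConfigExample(BaseModel):
    type: Literal["vae"] = "vae"
    path: str


class LoRAConfigExample(BaseModel):
    type: Literal["lora"] = "lora"
    path: str


AnyConfigExample = Union[VaeConfigExample, LoRAConfigExample]

# Plays the same role as AnyModelConfigValidator = TypeAdapter(AnyModelConfig).
validator = TypeAdapter(AnyConfigExample)

# validate_python() returns an instance of the union member that matches the dict.
config = validator.validate_python({"type": "lora", "path": "/models/example.safetensors"})
print(type(config).__name__)  # LoRAConfigExample
```

Keeping the union non-discriminated is the workaround the IMPLEMENTATION NOTE describes; the commented-out `Annotated[..., Field(discriminator="type")]` form would dispatch directly on the `type` field but runs into the FastAPI issues linked in the diff.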