From 2b36565e9e5ad05fba59a2ea9f7883f05195e536 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Fri, 10 Nov 2023 21:32:44 -0500
Subject: [PATCH] awkward workaround for double-Annotated in model_record route

---
 invokeai/app/api/routers/model_records.py     | 10 +++++-----
 invokeai/app/invocations/latent.py            |  5 +----
 invokeai/app/invocations/math.py              |  4 ++--
 invokeai/backend/ip_adapter/resampler.py      |  4 ++--
 .../model_management/memory_snapshot.py       |  2 +-
 .../backend/model_management/model_cache.py   |  2 +-
 invokeai/backend/model_manager/config.py      |  3 ++-
 .../diffusion/cross_attention_control.py      |  2 +-
 .../diffusion/shared_invokeai_diffusion.py    | 20 ++++----------------
 .../training/textual_inversion_training.py    |  5 +----
 invokeai/backend/util/mps_fixes.py            |  2 +-
 invokeai/backend/util/util.py                 |  2 +-
 .../model_records/test_model_records_sql.py   |  2 ++
 13 files changed, 24 insertions(+), 39 deletions(-)

diff --git a/invokeai/app/api/routers/model_records.py b/invokeai/app/api/routers/model_records.py
index 00ee3471d1..7b66ad876e 100644
--- a/invokeai/app/api/routers/model_records.py
+++ b/invokeai/app/api/routers/model_records.py
@@ -2,14 +2,13 @@
 
 """FastAPI route for model configuration records."""
 
-from hashlib import sha1
-from random import randbytes
 from typing import List, Optional
 
 from fastapi import Body, Path, Query, Response
 from fastapi.routing import APIRouter
 from pydantic import BaseModel, ConfigDict, TypeAdapter
 from starlette.exceptions import HTTPException
+from typing_extensions import Annotated
 
 from invokeai.app.services.model_records import DuplicateModelException, InvalidModelException, UnknownModelException
 from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType
@@ -85,8 +84,9 @@ async def get_model_record(
     response_model=AnyModelConfig,
 )
 async def update_model_record(
-    key: str = Path(description="Unique key of model"),
-    info: AnyModelConfig = Body(description="Model configuration"),
+    key: Annotated[str, Path(description="Unique key of model")],
+    # info: Annotated[AnyModelConfig, Body(description="Model configuration")],
+    info: AnyModelConfig,
 ) -> AnyModelConfig:
     """Update model contents with a new config. If the model name or base fields are changed, then the model is renamed."""
     logger = ApiDependencies.invoker.services.logger
@@ -134,7 +134,7 @@ async def del_model_record(
     status_code=201,
 )
 async def add_model_record(
-    config: AnyModelConfig = Body(description="Model configuration"),
+    config: AnyModelConfig,
 ) -> AnyModelConfig:
     """
     Add a model using the configuration information appropriate for its type.
diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index c9a0ca4423..77ac23fbdd 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -755,10 +755,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
             denoising_end=self.denoising_end,
         )
 
-        (
-            result_latents,
-            result_attention_map_saver,
-        ) = pipeline.latents_from_embeddings(
+        (result_latents, result_attention_map_saver,) = pipeline.latents_from_embeddings(
             latents=latents,
             timesteps=timesteps,
             init_timestep=init_timestep,
diff --git a/invokeai/app/invocations/math.py b/invokeai/app/invocations/math.py
index 585122d091..fab689ae2b 100644
--- a/invokeai/app/invocations/math.py
+++ b/invokeai/app/invocations/math.py
@@ -207,7 +207,7 @@ class IntegerMathInvocation(BaseInvocation):
         elif self.operation == "DIV":
             return IntegerOutput(value=int(self.a / self.b))
         elif self.operation == "EXP":
-            return IntegerOutput(value=self.a**self.b)
+            return IntegerOutput(value=self.a ** self.b)
         elif self.operation == "MOD":
             return IntegerOutput(value=self.a % self.b)
         elif self.operation == "ABS":
@@ -281,7 +281,7 @@ class FloatMathInvocation(BaseInvocation):
         elif self.operation == "DIV":
             return FloatOutput(value=self.a / self.b)
         elif self.operation == "EXP":
-            return FloatOutput(value=self.a**self.b)
+            return FloatOutput(value=self.a ** self.b)
         elif self.operation == "SQRT":
             return FloatOutput(value=np.sqrt(self.a))
         elif self.operation == "ABS":
diff --git a/invokeai/backend/ip_adapter/resampler.py b/invokeai/backend/ip_adapter/resampler.py
index 84224fd359..0c07a7fa1b 100644
--- a/invokeai/backend/ip_adapter/resampler.py
+++ b/invokeai/backend/ip_adapter/resampler.py
@@ -33,7 +33,7 @@ def reshape_tensor(x, heads):
 class PerceiverAttention(nn.Module):
     def __init__(self, *, dim, dim_head=64, heads=8):
         super().__init__()
-        self.scale = dim_head**-0.5
+        self.scale = dim_head ** -0.5
         self.dim_head = dim_head
         self.heads = heads
         inner_dim = dim_head * heads
@@ -91,7 +91,7 @@ class Resampler(nn.Module):
     ):
         super().__init__()
 
-        self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
+        self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim ** 0.5)
 
         self.proj_in = nn.Linear(embedding_dim, dim)
 
diff --git a/invokeai/backend/model_management/memory_snapshot.py b/invokeai/backend/model_management/memory_snapshot.py
index fe54af191c..e5c98f2307 100644
--- a/invokeai/backend/model_management/memory_snapshot.py
+++ b/invokeai/backend/model_management/memory_snapshot.py
@@ -6,7 +6,7 @@ import torch
 
 from invokeai.backend.model_management.libc_util import LibcUtil, Struct_mallinfo2
 
-GB = 2**30  # 1 GB
+GB = 2 ** 30  # 1 GB
 
 
 class MemorySnapshot:
diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index 83af789219..117dc01eb9 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -49,7 +49,7 @@ DEFAULT_MAX_VRAM_CACHE_SIZE = 2.75
 # actual size of a gig
 GIG = 1073741824
 
 # Size of a MB in bytes.
-MB = 2**20
+MB = 2 ** 20
 
 @dataclass
diff --git a/invokeai/backend/model_manager/config.py b/invokeai/backend/model_manager/config.py
index 6d9248a18b..cf28bbeb17 100644
--- a/invokeai/backend/model_manager/config.py
+++ b/invokeai/backend/model_manager/config.py
@@ -22,6 +22,7 @@ Validation errors will raise an InvalidModelConfigException error.
 from enum import Enum
 from typing import Literal, Optional, Type, Union
 
+from fastapi import Body
 from pydantic import BaseModel, ConfigDict, Field, TypeAdapter
 from typing_extensions import Annotated
 
@@ -268,7 +269,7 @@ AnyModelConfig = Annotated[
         CLIPVisionDiffusersConfig,
         T2IConfig,
     ],
-    Field(discriminator="type"),
+    Body(discriminator="type"),
 ]
 
 AnyModelConfigValidator = TypeAdapter(AnyModelConfig)
diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
index 3cb1862004..4931ab7b50 100644
--- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
+++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
@@ -261,7 +261,7 @@ class InvokeAICrossAttentionMixin:
         if q.shape[1] <= 4096:  # (512x512) max q.shape[1]: 4096
             return self.einsum_lowest_level(q, k, v, None, None, None)
         else:
-            slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1]))
+            slice_size = math.floor(2 ** 30 / (q.shape[0] * q.shape[1]))
             return self.einsum_op_slice_dim1(q, k, v, slice_size)
 
     def einsum_op_mps_v2(self, q, k, v):
diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
index b7c0058fe9..b4f984b676 100644
--- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
+++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
@@ -175,10 +175,7 @@ class InvokeAIDiffuserComponent:
                     dim=0,
                 ),
             }
-            (
-                encoder_hidden_states,
-                encoder_attention_mask,
-            ) = self._concat_conditionings_for_batch(
+            (encoder_hidden_states, encoder_attention_mask,) = self._concat_conditionings_for_batch(
                 conditioning_data.unconditioned_embeddings.embeds,
                 conditioning_data.text_embeddings.embeds,
             )
@@ -240,10 +237,7 @@ class InvokeAIDiffuserComponent:
         wants_cross_attention_control = len(cross_attention_control_types_to_do) > 0
 
         if wants_cross_attention_control:
-            (
-                unconditioned_next_x,
-                conditioned_next_x,
-            ) = self._apply_cross_attention_controlled_conditioning(
+            (unconditioned_next_x, conditioned_next_x,) = self._apply_cross_attention_controlled_conditioning(
                 sample,
                 timestep,
                 conditioning_data,
@@ -251,20 +245,14 @@ class InvokeAIDiffuserComponent:
                 **kwargs,
             )
         elif self.sequential_guidance:
-            (
-                unconditioned_next_x,
-                conditioned_next_x,
-            ) = self._apply_standard_conditioning_sequentially(
+            (unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning_sequentially(
                 sample,
                 timestep,
                 conditioning_data,
                 **kwargs,
             )
         else:
-            (
-                unconditioned_next_x,
-                conditioned_next_x,
-            ) = self._apply_standard_conditioning(
+            (unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning(
                 sample,
                 timestep,
                 conditioning_data,
diff --git a/invokeai/backend/training/textual_inversion_training.py b/invokeai/backend/training/textual_inversion_training.py
index 9bc1d188bc..84473a9ffa 100644
--- a/invokeai/backend/training/textual_inversion_training.py
+++ b/invokeai/backend/training/textual_inversion_training.py
@@ -470,10 +470,7 @@ class TextualInversionDataset(Dataset):
 
         if self.center_crop:
             crop = min(img.shape[0], img.shape[1])
-            (
-                h,
-                w,
-            ) = (
+            (h, w,) = (
                 img.shape[0],
                 img.shape[1],
             )
diff --git a/invokeai/backend/util/mps_fixes.py b/invokeai/backend/util/mps_fixes.py
index ce21d33b88..dc428e9d51 100644
--- a/invokeai/backend/util/mps_fixes.py
+++ b/invokeai/backend/util/mps_fixes.py
@@ -203,7 +203,7 @@ class ChunkedSlicedAttnProcessor:
         if attn.upcast_attention:
             out_item_size = 4
 
-        chunk_size = 2**29
+        chunk_size = 2 ** 29
 
         out_size = query.shape[1] * key.shape[1] * out_item_size
         chunks_count = min(query.shape[1], math.ceil((out_size - 1) / chunk_size))
diff --git a/invokeai/backend/util/util.py b/invokeai/backend/util/util.py
index d10d5a0a27..2f31145f5b 100644
--- a/invokeai/backend/util/util.py
+++ b/invokeai/backend/util/util.py
@@ -210,7 +210,7 @@ def parallel_data_prefetch(
     return gather_res
 
 
-def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3):
+def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3):
     delta = (res[0] / shape[0], res[1] / shape[1])
     d = (shape[0] // res[0], shape[1] // res[1])
 
diff --git a/tests/app/services/model_records/test_model_records_sql.py b/tests/app/services/model_records/test_model_records_sql.py
index f20204f252..c856cb7c02 100644
--- a/tests/app/services/model_records/test_model_records_sql.py
+++ b/tests/app/services/model_records/test_model_records_sql.py
@@ -16,6 +16,7 @@ from invokeai.app.services.model_records import (
 from invokeai.app.services.shared.sqlite import SqliteDatabase
 from invokeai.backend.model_manager.config import (
     BaseModelType,
+    MainCheckpointConfig,
     MainDiffusersConfig,
     ModelType,
     TextualInversionConfig,
@@ -57,6 +58,7 @@ def test_add(store: ModelRecordServiceBase):
     store.add_model("key1", raw)
     config1 = store.get_model("key1")
     assert config1 is not None
+    assert type(config1) == MainCheckpointConfig
     assert config1.base == BaseModelType("sd-1")
     assert config1.name == "model1"
     assert config1.original_hash == "111222333444"
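
Note on the workaround: AnyModelConfig is already an Annotated union whose discriminator rides along as Annotated metadata, so declaring a route parameter as "info: AnyModelConfig = Body(...)" wraps it in a second Annotated layer that FastAPI fails to resolve. Moving the discriminator from Field(...) to Body(discriminator="type") inside the alias itself keeps discriminated validation working while letting the routes declare the bare type. A minimal sketch of the pattern, assuming FastAPI with Pydantic v2; the two config models below are hypothetical stand-ins, not the real InvokeAI classes:

    from typing import Literal, Union

    from fastapi import Body, FastAPI
    from pydantic import BaseModel
    from typing_extensions import Annotated

    # Hypothetical stand-ins for the real model config classes.
    class DiffusersConfig(BaseModel):
        type: Literal["diffusers"] = "diffusers"
        name: str

    class CheckpointConfig(BaseModel):
        type: Literal["checkpoint"] = "checkpoint"
        name: str

    # The discriminator lives on the union itself, so a route can declare
    # the bare alias without adding a second Annotated/Body layer.
    AnyConfig = Annotated[Union[DiffusersConfig, CheckpointConfig], Body(discriminator="type")]

    app = FastAPI()

    @app.post("/models", response_model=AnyConfig)
    async def add_model(config: AnyConfig):
        # Pydantic dispatches on the "type" field and hands the route the
        # matching concrete model class, not a bare dict.
        return config

POSTing {"type": "checkpoint", "name": "m"} then validates to CheckpointConfig. The same discriminated dispatch happens on the storage side through AnyModelConfigValidator, which is what the new type(config1) == MainCheckpointConfig assertion in test_model_records_sql.py exercises.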