From c27d59baf7c215eb79b699a92ae45d538872cbfb Mon Sep 17 00:00:00 2001
From: Brandon Rising
Date: Wed, 21 Aug 2024 15:37:27 -0400
Subject: [PATCH] Run ruff

---
 invokeai/app/invocations/model.py              |  5 +----
 .../app/services/shared/invocation_context.py  |  1 -
 .../model_manager/load/model_loaders/flux.py   | 20 +++++++++++++++-----
 3 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py
index 75ed9fceab..eadf3002d5 100644
--- a/invokeai/app/invocations/model.py
+++ b/invokeai/app/invocations/model.py
@@ -1,6 +1,5 @@
 import copy
-from time import sleep
-from typing import Dict, List, Literal, Optional
+from typing import List, Literal, Optional
 
 import yaml
 from pydantic import BaseModel, Field
@@ -13,14 +12,12 @@ from invokeai.app.invocations.baseinvocation import (
     invocation_output,
 )
 from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
-from invokeai.app.services.model_records import ModelRecordChanges
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.shared.models import FreeUConfig
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
     BaseModelType,
     CheckpointConfigBase,
-    ModelFormat,
     ModelType,
     SubModelType,
 )
diff --git a/invokeai/app/services/shared/invocation_context.py b/invokeai/app/services/shared/invocation_context.py
index 3f378b663e..01662335e4 100644
--- a/invokeai/app/services/shared/invocation_context.py
+++ b/invokeai/app/services/shared/invocation_context.py
@@ -13,7 +13,6 @@ from invokeai.app.services.config.config_default import InvokeAIAppConfig
 from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
 from invokeai.app.services.images.images_common import ImageDTO
 from invokeai.app.services.invocation_services import InvocationServices
-from invokeai.app.services.model_records import ModelRecordChanges
 from invokeai.app.services.model_records.model_records_base import UnknownModelException
 from invokeai.app.util.step_callback import stable_diffusion_step_callback
 from invokeai.backend.model_manager.config import (
diff --git a/invokeai/backend/model_manager/load/model_loaders/flux.py b/invokeai/backend/model_manager/load/model_loaders/flux.py
index ebc3333eea..bb57e4413c 100644
--- a/invokeai/backend/model_manager/load/model_loaders/flux.py
+++ b/invokeai/backend/model_manager/load/model_loaders/flux.py
@@ -87,7 +87,9 @@ class ClipCheckpointModel(ModelLoader):
             case SubModelType.TextEncoder:
                 return CLIPTextModel.from_pretrained(config.path)
 
-        raise ValueError(f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}")
+        raise ValueError(
+            f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
+        )
 
 
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.T5Encoder, format=ModelFormat.T5Encoder8b)
@@ -108,7 +110,9 @@ class T5Encoder8bCheckpointModel(ModelLoader):
             case SubModelType.TextEncoder2:
                 return FastQuantizedTransformersModel.from_pretrained(Path(config.path) / "text_encoder_2")
 
-        raise ValueError(f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}")
+        raise ValueError(
+            f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
+        )
 
 
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.T5Encoder, format=ModelFormat.T5Encoder)
@@ -131,7 +135,9 @@ class T5EncoderCheckpointModel(ModelLoader):
                     Path(config.path) / "text_encoder_2"
                 )  # TODO: Fix hf subfolder install
 
-        raise ValueError(f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}")
+        raise ValueError(
+            f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
+        )
 
 
 @ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.Main, format=ModelFormat.Checkpoint)
@@ -154,7 +160,9 @@ class FluxCheckpointModel(ModelLoader):
             case SubModelType.Transformer:
                 return self._load_from_singlefile(config, flux_conf)
 
-        raise ValueError(f"Only Transformer submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}")
+        raise ValueError(
+            f"Only Transformer submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
+        )
 
     def _load_from_singlefile(
         self,
@@ -194,7 +202,9 @@ class FluxBnbQuantizednf4bCheckpointModel(ModelLoader):
             case SubModelType.Transformer:
                 return self._load_from_singlefile(config, flux_conf)
 
-        raise ValueError(f"Only Transformer submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}")
+        raise ValueError(
+            f"Only Transformer submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
+        )
 
     def _load_from_singlefile(
         self,