Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)

Commit: 3d251b4b93 ("Run ruff")
Parent: fc760d3719
@@ -1,6 +1,5 @@
 import copy
-from time import sleep
-from typing import Dict, List, Literal, Optional
+from typing import List, Literal, Optional
 
 import yaml
 from pydantic import BaseModel, Field
@@ -13,14 +12,12 @@ from invokeai.app.invocations.baseinvocation import (
     invocation_output,
 )
 from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
-from invokeai.app.services.model_records import ModelRecordChanges
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.app.shared.models import FreeUConfig
 from invokeai.backend.model_manager.config import (
     AnyModelConfig,
     BaseModelType,
     CheckpointConfigBase,
-    ModelFormat,
     ModelType,
     SubModelType,
 )
@@ -13,7 +13,6 @@ from invokeai.app.services.config.config_default import InvokeAIAppConfig
 from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
 from invokeai.app.services.images.images_common import ImageDTO
 from invokeai.app.services.invocation_services import InvocationServices
-from invokeai.app.services.model_records import ModelRecordChanges
 from invokeai.app.services.model_records.model_records_base import UnknownModelException
 from invokeai.app.util.step_callback import stable_diffusion_step_callback
 from invokeai.backend.model_manager.config import (
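The import hunks above only delete names that are no longer referenced (sleep, Dict, ModelRecordChanges, ModelFormat). That is the cleanup `ruff check --fix` performs through the unused-import rule (F401); whether this project enables that rule is an assumption here, not something the diff states. A minimal standalone sketch of the autofix on a hypothetical module, not taken from InvokeAI:

# Hypothetical module illustrating ruff's unused-import fix (rule F401).
# Before `ruff check --fix`, the file also contained:
#     from time import sleep   # never called, flagged as F401 and deleted
#     from typing import Dict  # unused name, dropped from the import list
from typing import List, Optional


def first_or_none(items: List[int]) -> Optional[int]:
    # Only List and Optional are referenced, so only they survive the fix.
    return items[0] if items else None


print(first_or_none([3, 1, 4]))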
@@ -87,7 +87,9 @@ class ClipCheckpointModel(ModelLoader):
             case SubModelType.TextEncoder:
                 return CLIPTextModel.from_pretrained(config.path)
 
-        raise ValueError(f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}")
+        raise ValueError(
+            f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
+        )
 
 
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.T5Encoder, format=ModelFormat.T5Encoder8b)
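For context, the `case` and `raise` fragments in this hunk (and the similar hunks below) sit at the tail of a loader's dispatch over requested submodel types: handled submodels return a loaded object, and anything else falls through to the ValueError. Below is a self-contained sketch of that pattern using a toy enum and plain strings; apart from the SubModelType member names and the error message, the names here are illustrative and are not InvokeAI's API.

# Standalone sketch of the dispatch pattern edited in the hunk above.
from enum import Enum
from typing import Optional


class SubModelType(str, Enum):
    Tokenizer = "tokenizer"
    TextEncoder = "text_encoder"


def load_submodel(submodel_type: Optional[SubModelType], path: str) -> str:
    # match/case dispatches on the requested submodel; anything unhandled falls
    # through to the raise, which the commit wraps at the call parentheses,
    # presumably because the single-line form exceeded ruff's line-length limit.
    match submodel_type:
        case SubModelType.Tokenizer:
            return f"tokenizer loaded from {path}"
        case SubModelType.TextEncoder:
            return f"text encoder loaded from {path}"

    raise ValueError(
        f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
    )


print(load_submodel(SubModelType.TextEncoder, "/models/clip"))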
@@ -108,7 +110,9 @@ class T5Encoder8bCheckpointModel(ModelLoader):
             case SubModelType.TextEncoder2:
                 return FastQuantizedTransformersModel.from_pretrained(Path(config.path) / "text_encoder_2")
 
-        raise ValueError(f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}")
+        raise ValueError(
+            f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
+        )
 
 
 @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.T5Encoder, format=ModelFormat.T5Encoder)
@@ -131,7 +135,9 @@ class T5EncoderCheckpointModel(ModelLoader):
                     Path(config.path) / "text_encoder_2"
                 )  # TODO: Fix hf subfolder install
 
-        raise ValueError(f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}")
+        raise ValueError(
+            f"Only Tokenizer and TextEncoder submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
+        )
 
 
 @ModelLoaderRegistry.register(base=BaseModelType.Flux, type=ModelType.Main, format=ModelFormat.Checkpoint)
@@ -154,7 +160,9 @@ class FluxCheckpointModel(ModelLoader):
             case SubModelType.Transformer:
                 return self._load_from_singlefile(config, flux_conf)
 
-        raise ValueError(f"Only Transformer submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}")
+        raise ValueError(
+            f"Only Transformer submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
+        )
 
     def _load_from_singlefile(
         self,
@@ -194,7 +202,9 @@ class FluxBnbQuantizednf4bCheckpointModel(ModelLoader):
             case SubModelType.Transformer:
                 return self._load_from_singlefile(config, flux_conf)
 
-        raise ValueError(f"Only Transformer submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}")
+        raise ValueError(
+            f"Only Transformer submodels are currently supported. Received: {submodel_type.value if submodel_type else 'None'}"
+        )
 
     def _load_from_singlefile(
         self,