Mirror of https://github.com/invoke-ai/InvokeAI, synced 2025-07-26 05:17:55 +00:00.
Use definitions in config

This commit replaces ad-hoc parsing of the OMI modelspec.architecture metadata (splitting on "/" and substring-matching the lowercased base) with direct equality checks against the architecture constants imported from invokeai.backend.model_manager.omi, so the set of supported architectures is defined in one place.
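For reference, a plausible sketch of what those shared definitions might look like. The constant names come from the diff below, but the string values are assumptions inferred from the old substring checks; the real values live in invokeai/backend/model_manager/omi.

# Hypothetical sketch of the omi definitions module.
# Only the names stable_diffusion_xl_1_lora and flux_dev_1_lora appear in
# the diff; the string values are assumptions based on the old checks for
# "stable-diffusion-xl-v1-base" and "flux" in the architecture string.
stable_diffusion_xl_1_lora = "stable-diffusion-xl-v1-base/lora"
flux_dev_1_lora = "Flux.1-dev/lora"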
@@ -37,6 +37,7 @@ from invokeai.app.util.misc import uuid_string
 from invokeai.backend.model_hash.hash_validator import validate_hash
 from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS
 from invokeai.backend.model_manager.model_on_disk import ModelOnDisk
+from invokeai.backend.model_manager.omi import stable_diffusion_xl_1_lora, flux_dev_1_lora
 from invokeai.backend.model_manager.taxonomy import (
     AnyVariant,
     BaseModelType,
@@ -352,15 +353,14 @@ class LoRAOmiConfig(LoRAConfigBase, ModelConfigBase):
     @classmethod
     def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
         metadata = mod.metadata()
-        base_str, _ = metadata["modelspec.architecture"].split("/")
-        base_str = base_str.lower()
+        architecture = metadata["modelspec.architecture"]
 
-        if "stable-diffusion-xl-v1-base" in base_str:
+        if architecture == stable_diffusion_xl_1_lora:
             base = BaseModelType.StableDiffusionXL
-        elif "flux" in base_str:
+        elif architecture == flux_dev_1_lora:
             base = BaseModelType.Flux
         else:
-            raise InvalidModelConfigException(f"Unrecognised/unsupported base architecture for OMI LoRA: {base_str}")
+            raise InvalidModelConfigException(f"Unrecognised/unsupported architecture for OMI LoRA: {architecture}")
 
         return {"base": base}
 
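To see the resulting dispatch in isolation, here is a minimal, self-contained sketch that runs outside InvokeAI. Only the comparison logic and the identifier names mirror the diff; the enum values, the constant strings, and the free-standing parse function taking a plain metadata dict are stand-ins for the real classes.

# Minimal sketch of the dispatch performed by LoRAOmiConfig.parse.
# Stand-in types and assumed constant values; only the comparison logic
# mirrors the actual change.
from enum import Enum
from typing import Any


class BaseModelType(str, Enum):
    StableDiffusionXL = "sdxl"
    Flux = "flux"


class InvalidModelConfigException(Exception):
    pass


# Assumed OMI architecture identifiers (hypothetical values).
stable_diffusion_xl_1_lora = "stable-diffusion-xl-v1-base/lora"
flux_dev_1_lora = "Flux.1-dev/lora"


def parse(metadata: dict[str, Any]) -> dict[str, Any]:
    architecture = metadata["modelspec.architecture"]

    # Exact comparison against the shared constants replaces the old
    # split("/") plus substring matching, so an architecture string only
    # matches if it is listed in the definitions module.
    if architecture == stable_diffusion_xl_1_lora:
        base = BaseModelType.StableDiffusionXL
    elif architecture == flux_dev_1_lora:
        base = BaseModelType.Flux
    else:
        raise InvalidModelConfigException(
            f"Unrecognised/unsupported architecture for OMI LoRA: {architecture}"
        )

    return {"base": base}


# Example:
# parse({"modelspec.architecture": "stable-diffusion-xl-v1-base/lora"})
# -> {"base": BaseModelType.StableDiffusionXL}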