chore: ruff check - fix flake8-comprehensions

Author: psychedelicious
Date: 2023-11-11 10:44:43 +11:00
parent 43f2398e14
commit 3a136420d5
60 changed files with 489 additions and 512 deletions
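
All changes below are mechanical rewrites applied by ruff's flake8-comprehensions (C4) rules: literal {} and [] instead of dict() and list() calls (C408), dict literals instead of keyword-style dict(...) (C408), set comprehensions instead of set() wrapped around comprehensions (C403), bare generator expressions inside any() (C419), list()/set() instead of identity comprehensions (C416), and generator expressions instead of map(lambda ...) (C417). A minimal, self-contained sketch of the before/after patterns; the values are illustrative, not code from this diff:

# C408: prefer literals over dict()/list() calls
kwargs = {}  # was: kwargs = dict()
paths = []   # was: paths = list()

# C408: dict literal instead of a keyword-argument dict() call
attributes = {"path": "models/foo", "format": "diffusers"}
# was: attributes = dict(path="models/foo", format="diffusers")

# C403: set comprehension instead of set() around a list comprehension
recommended = {x for x in ("a", "b", "c") if x != "b"}
# was: recommended = set([x for x in ("a", "b", "c") if x != "b"])

# C416: list() instead of an identity comprehension
values = list(range(3))  # was: values = [x for x in range(3)]

# C419: generator expression inside any(); no temporary list is built
name = "model.ckpt"
has_weights = any(name.endswith(ext) for ext in (".ckpt", ".safetensors"))
# was: has_weights = any([name.endswith(ext) for ext in (".ckpt", ".safetensors")])

# C417: generator expression instead of map(lambda ...)
width, height = (x - x % 64 for x in (513, 769))  # floors to (512, 768)
# was: width, height = map(lambda x: x - x % 64, (513, 769))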

View File

@@ -88,7 +88,7 @@ class PromptFormatter:
t2i = self.t2i
opt = self.opt
switches = list()
switches = []
switches.append(f'"{opt.prompt}"')
switches.append(f"-s{opt.steps or t2i.steps}")
switches.append(f"-W{opt.width or t2i.width}")

View File

@@ -40,7 +40,7 @@ class InitImageResizer:
(rw, rh) = (int(scale * im.width), int(scale * im.height))
# round everything to multiples of 64
width, height, rw, rh = map(lambda x: x - x % 64, (width, height, rw, rh))
width, height, rw, rh = (x - x % 64 for x in (width, height, rw, rh))
# no resize necessary, but return a copy
if im.width == width and im.height == height:

View File

@@ -197,7 +197,7 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
def download_conversion_models():
target_dir = config.models_path / "core/convert"
kwargs = dict() # for future use
kwargs = {} # for future use
try:
logger.info("Downloading core tokenizers and text encoders")
@@ -252,26 +252,26 @@ def download_conversion_models():
def download_realesrgan():
logger.info("Installing ESRGAN Upscaling models...")
URLs = [
dict(
url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
dest="core/upscaling/realesrgan/RealESRGAN_x4plus.pth",
description="RealESRGAN_x4plus.pth",
),
dict(
url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
dest="core/upscaling/realesrgan/RealESRGAN_x4plus_anime_6B.pth",
description="RealESRGAN_x4plus_anime_6B.pth",
),
dict(
url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
dest="core/upscaling/realesrgan/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
description="ESRGAN_SRx4_DF2KOST_official.pth",
),
dict(
url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
dest="core/upscaling/realesrgan/RealESRGAN_x2plus.pth",
description="RealESRGAN_x2plus.pth",
),
{
"url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
"dest": "core/upscaling/realesrgan/RealESRGAN_x4plus.pth",
"description": "RealESRGAN_x4plus.pth",
},
{
"url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
"dest": "core/upscaling/realesrgan/RealESRGAN_x4plus_anime_6B.pth",
"description": "RealESRGAN_x4plus_anime_6B.pth",
},
{
"url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
"dest": "core/upscaling/realesrgan/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
"description": "ESRGAN_SRx4_DF2KOST_official.pth",
},
{
"url": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
"dest": "core/upscaling/realesrgan/RealESRGAN_x2plus.pth",
"description": "RealESRGAN_x2plus.pth",
},
]
for model in URLs:
download_with_progress_bar(model["url"], config.models_path / model["dest"], model["description"])
@@ -680,7 +680,7 @@ def default_user_selections(program_opts: Namespace) -> InstallSelections:
if program_opts.default_only
else [models[x].path or models[x].repo_id for x in installer.recommended_models()]
if program_opts.yes_to_all
else list(),
else [],
)

View File

@@ -182,10 +182,10 @@ class MigrateTo3(object):
"""
dest_directory = self.dest_models
kwargs = dict(
cache_dir=self.root_directory / "models/hub",
kwargs = {
"cache_dir": self.root_directory / "models/hub",
# local_files_only = True
)
}
try:
logger.info("Migrating core tokenizers and text encoders")
target_dir = dest_directory / "core" / "convert"
@@ -316,11 +316,11 @@ class MigrateTo3(object):
dest_dir = self.dest_models
cache = self.root_directory / "models/hub"
kwargs = dict(
cache_dir=cache,
safety_checker=None,
kwargs = {
"cache_dir": cache,
"safety_checker": None,
# local_files_only = True,
)
}
owner, repo_name = repo_id.split("/")
model_name = model_name or repo_name

View File

@@ -120,7 +120,7 @@ class ModelInstall(object):
be treated uniformly. It also sorts the models alphabetically
by their name, to improve the display somewhat.
"""
model_dict = dict()
model_dict = {}
# first populate with the entries in INITIAL_MODELS.yaml
for key, value in self.datasets.items():
@@ -134,7 +134,7 @@ class ModelInstall(object):
model_dict[key] = model_info
# supplement with entries in models.yaml
installed_models = [x for x in self.mgr.list_models()]
installed_models = list(self.mgr.list_models())
for md in installed_models:
base = md["base_model"]
@@ -184,7 +184,7 @@ class ModelInstall(object):
def recommended_models(self) -> Set[str]:
starters = self.starter_models(all_models=True)
return set([x for x in starters if self.datasets[x].get("recommended", False)])
return {x for x in starters if self.datasets[x].get("recommended", False)}
def default_model(self) -> str:
starters = self.starter_models()
@@ -234,7 +234,7 @@ class ModelInstall(object):
"""
if not models_installed:
models_installed = dict()
models_installed = {}
model_path_id_or_url = str(model_path_id_or_url).strip("\"' ")
@@ -252,8 +252,7 @@ class ModelInstall(object):
# folders style or similar
elif path.is_dir() and any(
[
(path / x).exists()
(path / x).exists()
for x in {
"config.json",
"model_index.json",
@@ -261,7 +260,6 @@ class ModelInstall(object):
"pytorch_lora_weights.bin",
"pytorch_lora_weights.safetensors",
}
]
):
models_installed.update({str(model_path_id_or_url): self._install_path(path)})
@@ -433,17 +431,17 @@ class ModelInstall(object):
rel_path = self.relative_to_root(path, self.config.models_path)
attributes = dict(
path=str(rel_path),
description=str(description),
model_format=info.format,
)
attributes = {
"path": str(rel_path),
"description": str(description),
"model_format": info.format,
}
legacy_conf = None
if info.model_type == ModelType.Main or info.model_type == ModelType.ONNX:
attributes.update(
dict(
variant=info.variant_type,
)
{
"variant": info.variant_type,
}
)
if info.format == "checkpoint":
try:
@@ -474,7 +472,7 @@ class ModelInstall(object):
)
if legacy_conf:
attributes.update(dict(config=str(legacy_conf)))
attributes.update({"config": str(legacy_conf)})
return attributes
def relative_to_root(self, path: Path, root: Optional[Path] = None) -> Path:
@@ -519,7 +517,7 @@ class ModelInstall(object):
def _download_hf_model(self, repo_id: str, files: List[str], staging: Path, subfolder: None) -> Path:
_, name = repo_id.split("/")
location = staging / name
paths = list()
paths = []
for filename in files:
filePath = Path(filename)
p = hf_download_with_resume(

View File

@@ -104,7 +104,7 @@ class ModelPatcher:
loras: List[Tuple[LoRAModel, float]],
prefix: str,
):
original_weights = dict()
original_weights = {}
try:
with torch.no_grad():
for lora, lora_weight in loras:
@@ -324,7 +324,7 @@ class TextualInversionManager(BaseTextualInversionManager):
tokenizer: CLIPTokenizer
def __init__(self, tokenizer: CLIPTokenizer):
self.pad_tokens = dict()
self.pad_tokens = {}
self.tokenizer = tokenizer
def expand_textual_inversion_token_ids_if_necessary(self, token_ids: list[int]) -> list[int]:
@@ -385,10 +385,10 @@ class ONNXModelPatcher:
if not isinstance(model, IAIOnnxRuntimeModel):
raise Exception("Only IAIOnnxRuntimeModel models supported")
orig_weights = dict()
orig_weights = {}
try:
blended_loras = dict()
blended_loras = {}
for lora, lora_weight in loras:
for layer_key, layer in lora.layers.items():
@@ -404,7 +404,7 @@ class ONNXModelPatcher:
else:
blended_loras[layer_key] = layer_weight
node_names = dict()
node_names = {}
for node in model.nodes.values():
node_names[node.name.replace("/", "_").replace(".", "_").lstrip("_")] = node.name

View File

@@ -132,7 +132,7 @@ class ModelCache(object):
snapshots, so it is recommended to disable this feature unless you are actively inspecting the model cache's
behaviour.
"""
self.model_infos: Dict[str, ModelBase] = dict()
self.model_infos: Dict[str, ModelBase] = {}
# allow lazy offloading only when vram cache enabled
self.lazy_offloading = lazy_offloading and max_vram_cache_size > 0
self.precision: torch.dtype = precision
@@ -147,8 +147,8 @@ class ModelCache(object):
# used for stats collection
self.stats = None
self._cached_models = dict()
self._cache_stack = list()
self._cached_models = {}
self._cache_stack = []
def _capture_memory_snapshot(self) -> Optional[MemorySnapshot]:
if self._log_memory_usage:

View File

@@ -363,7 +363,7 @@ class ModelManager(object):
else:
return
self.models = dict()
self.models = {}
for model_key, model_config in config.items():
if model_key.startswith("_"):
continue
@@ -374,7 +374,7 @@ class ModelManager(object):
self.models[model_key] = model_class.create_config(**model_config)
# check config version number and update on disk/RAM if necessary
self.cache_keys = dict()
self.cache_keys = {}
# add controlnet, lora and textual_inversion models from disk
self.scan_models_directory()
@@ -902,7 +902,7 @@ class ModelManager(object):
"""
Write current configuration out to the indicated file.
"""
data_to_save = dict()
data_to_save = {}
data_to_save["__metadata__"] = self.config_meta.model_dump()
for model_key, model_config in self.models.items():
@@ -1034,7 +1034,7 @@ class ModelManager(object):
self.ignore = ignore
def on_search_started(self):
self.new_models_found = dict()
self.new_models_found = {}
def on_model_found(self, model: Path):
if model not in self.ignore:
@@ -1106,7 +1106,7 @@ class ModelManager(object):
# avoid circular import here
from invokeai.backend.install.model_install_backend import ModelInstall
successfully_installed = dict()
successfully_installed = {}
installer = ModelInstall(
config=self.app_config, prediction_type_helper=prediction_type_helper, model_manager=self

View File

@@ -92,7 +92,7 @@ class ModelMerger(object):
**kwargs - the default DiffusionPipeline.get_config_dict kwargs:
cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map
"""
model_paths = list()
model_paths = []
config = self.manager.app_config
base_model = BaseModelType(base_model)
vae = None
@@ -124,13 +124,13 @@ class ModelMerger(object):
dump_path = (dump_path / merged_model_name).as_posix()
merged_pipe.save_pretrained(dump_path, safe_serialization=True)
attributes = dict(
path=dump_path,
description=f"Merge of models {', '.join(model_names)}",
model_format="diffusers",
variant=ModelVariantType.Normal.value,
vae=vae,
)
attributes = {
"path": dump_path,
"description": f"Merge of models {', '.join(model_names)}",
"model_format": "diffusers",
"variant": ModelVariantType.Normal.value,
"vae": vae,
}
return self.manager.add_model(
merged_model_name,
base_model=base_model,

View File

@@ -59,7 +59,7 @@ class ModelSearch(ABC):
for root, dirs, files in os.walk(path, followlinks=True):
if str(Path(root).name).startswith("."):
self._pruned_paths.add(root)
if any([Path(root).is_relative_to(x) for x in self._pruned_paths]):
if any(Path(root).is_relative_to(x) for x in self._pruned_paths):
continue
self._items_scanned += len(dirs) + len(files)
@@ -69,8 +69,7 @@ class ModelSearch(ABC):
self._scanned_dirs.add(path)
continue
if any(
[
(path / x).exists()
(path / x).exists()
for x in {
"config.json",
"model_index.json",
@@ -78,7 +77,6 @@ class ModelSearch(ABC):
"pytorch_lora_weights.bin",
"image_encoder.txt",
}
]
):
try:
self.on_model_found(path)
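
Dropping the inner square brackets in any(...) here is not purely cosmetic: given a generator expression, any() stops at the first truthy result, while the list form runs every check before any() sees a single value. A small sketch, with a hypothetical probe() helper standing in for the (path / x).exists() calls above:

calls = []

def probe(name: str) -> bool:
    # hypothetical stand-in for (path / name).exists()
    calls.append(name)
    return name == "config.json"

names = ["config.json", "model_index.json", "learned_embeds.bin"]

assert any(probe(n) for n in names)  # generator: short-circuits after one call
assert len(calls) == 1

calls.clear()
assert any([probe(n) for n in names])  # list: all three probes run first
assert len(calls) == 3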

View File

@@ -97,8 +97,8 @@ MODEL_CLASSES = {
# },
}
MODEL_CONFIGS = list()
OPENAPI_MODEL_CONFIGS = list()
MODEL_CONFIGS = []
OPENAPI_MODEL_CONFIGS = []
class OpenAPIModelInfoBase(BaseModel):
@@ -133,7 +133,7 @@ for base_model, models in MODEL_CLASSES.items():
def get_model_config_enums():
enums = list()
enums = []
for model_config in MODEL_CONFIGS:
if hasattr(inspect, "get_annotations"):

View File

@@ -164,7 +164,7 @@ class ModelBase(metaclass=ABCMeta):
with suppress(Exception):
return cls.__configs
configs = dict()
configs = {}
for name in dir(cls):
if name.startswith("__"):
continue
@@ -246,8 +246,8 @@ class DiffusersModel(ModelBase):
def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType):
super().__init__(model_path, base_model, model_type)
self.child_types: Dict[str, Type] = dict()
self.child_sizes: Dict[str, int] = dict()
self.child_types: Dict[str, Type] = {}
self.child_sizes: Dict[str, int] = {}
try:
config_data = DiffusionPipeline.load_config(self.model_path)
@@ -326,8 +326,8 @@ def calc_model_size_by_fs(model_path: str, subfolder: Optional[str] = None, vari
all_files = os.listdir(model_path)
all_files = [f for f in all_files if os.path.isfile(os.path.join(model_path, f))]
fp16_files = set([f for f in all_files if ".fp16." in f or ".fp16-" in f])
bit8_files = set([f for f in all_files if ".8bit." in f or ".8bit-" in f])
fp16_files = {f for f in all_files if ".fp16." in f or ".fp16-" in f}
bit8_files = {f for f in all_files if ".8bit." in f or ".8bit-" in f}
other_files = set(all_files) - fp16_files - bit8_files
if variant is None:
@@ -413,7 +413,7 @@ def _calc_onnx_model_by_data(model) -> int:
def _fast_safetensors_reader(path: str):
checkpoint = dict()
checkpoint = {}
device = torch.device("meta")
with open(path, "rb") as f:
definition_len = int.from_bytes(f.read(8), "little")
@@ -483,7 +483,7 @@ class IAIOnnxRuntimeModel:
class _tensor_access:
def __init__(self, model):
self.model = model
self.indexes = dict()
self.indexes = {}
for idx, obj in enumerate(self.model.proto.graph.initializer):
self.indexes[obj.name] = idx
@@ -524,7 +524,7 @@ class IAIOnnxRuntimeModel:
class _access_helper:
def __init__(self, raw_proto):
self.indexes = dict()
self.indexes = {}
self.raw_proto = raw_proto
for idx, obj in enumerate(raw_proto):
self.indexes[obj.name] = idx
@@ -549,7 +549,7 @@ class IAIOnnxRuntimeModel:
return self.indexes.keys()
def values(self):
return [obj for obj in self.raw_proto]
return list(self.raw_proto)
def __init__(self, model_path: str, provider: Optional[str]):
self.path = model_path

View File

@@ -104,7 +104,7 @@ class ControlNetModel(ModelBase):
return ControlNetModelFormat.Diffusers
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "pth"]]):
if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "pth"]):
return ControlNetModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {path}")

View File

@@ -73,7 +73,7 @@ class LoRAModel(ModelBase):
return LoRAModelFormat.Diffusers
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]):
return LoRAModelFormat.LyCORIS
raise InvalidModelException(f"Not a valid model: {path}")
@@ -499,7 +499,7 @@ class LoRAModelRaw: # (torch.nn.Module):
stability_unet_keys = list(SDXL_UNET_STABILITY_TO_DIFFUSERS_MAP)
stability_unet_keys.sort()
new_state_dict = dict()
new_state_dict = {}
for full_key, value in state_dict.items():
if full_key.startswith("lora_unet_"):
search_key = full_key.replace("lora_unet_", "")
@@ -545,7 +545,7 @@ class LoRAModelRaw: # (torch.nn.Module):
model = cls(
name=file_path.stem, # TODO:
layers=dict(),
layers={},
)
if file_path.suffix == ".safetensors":
@@ -593,12 +593,12 @@ class LoRAModelRaw: # (torch.nn.Module):
@staticmethod
def _group_state(state_dict: dict):
state_dict_groupped = dict()
state_dict_groupped = {}
for key, value in state_dict.items():
stem, leaf = key.split(".", 1)
if stem not in state_dict_groupped:
state_dict_groupped[stem] = dict()
state_dict_groupped[stem] = {}
state_dict_groupped[stem][leaf] = value
return state_dict_groupped

View File

@@ -110,7 +110,7 @@ class StableDiffusion1Model(DiffusersModel):
return StableDiffusion1ModelFormat.Diffusers
if os.path.isfile(model_path):
if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
if any(model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]):
return StableDiffusion1ModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {model_path}")
@@ -221,7 +221,7 @@ class StableDiffusion2Model(DiffusersModel):
return StableDiffusion2ModelFormat.Diffusers
if os.path.isfile(model_path):
if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
if any(model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]):
return StableDiffusion2ModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {model_path}")

View File

@@ -71,7 +71,7 @@ class TextualInversionModel(ModelBase):
return None # diffusers-ti
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "bin"]]):
if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "bin"]):
return None
raise InvalidModelException(f"Not a valid model: {path}")

View File

@@ -89,7 +89,7 @@ class VaeModel(ModelBase):
return VaeModelFormat.Diffusers
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]):
return VaeModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {path}")

View File

@@ -16,28 +16,28 @@ from diffusers import (
UniPCMultistepScheduler,
)
SCHEDULER_MAP = dict(
ddim=(DDIMScheduler, dict()),
ddpm=(DDPMScheduler, dict()),
deis=(DEISMultistepScheduler, dict()),
lms=(LMSDiscreteScheduler, dict(use_karras_sigmas=False)),
lms_k=(LMSDiscreteScheduler, dict(use_karras_sigmas=True)),
pndm=(PNDMScheduler, dict()),
heun=(HeunDiscreteScheduler, dict(use_karras_sigmas=False)),
heun_k=(HeunDiscreteScheduler, dict(use_karras_sigmas=True)),
euler=(EulerDiscreteScheduler, dict(use_karras_sigmas=False)),
euler_k=(EulerDiscreteScheduler, dict(use_karras_sigmas=True)),
euler_a=(EulerAncestralDiscreteScheduler, dict()),
kdpm_2=(KDPM2DiscreteScheduler, dict()),
kdpm_2_a=(KDPM2AncestralDiscreteScheduler, dict()),
dpmpp_2s=(DPMSolverSinglestepScheduler, dict(use_karras_sigmas=False)),
dpmpp_2s_k=(DPMSolverSinglestepScheduler, dict(use_karras_sigmas=True)),
dpmpp_2m=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=False)),
dpmpp_2m_k=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=True)),
dpmpp_2m_sde=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=False, algorithm_type="sde-dpmsolver++")),
dpmpp_2m_sde_k=(DPMSolverMultistepScheduler, dict(use_karras_sigmas=True, algorithm_type="sde-dpmsolver++")),
dpmpp_sde=(DPMSolverSDEScheduler, dict(use_karras_sigmas=False, noise_sampler_seed=0)),
dpmpp_sde_k=(DPMSolverSDEScheduler, dict(use_karras_sigmas=True, noise_sampler_seed=0)),
unipc=(UniPCMultistepScheduler, dict(cpu_only=True)),
lcm=(LCMScheduler, dict()),
)
SCHEDULER_MAP = {
"ddim": (DDIMScheduler, {}),
"ddpm": (DDPMScheduler, {}),
"deis": (DEISMultistepScheduler, {}),
"lms": (LMSDiscreteScheduler, {"use_karras_sigmas": False}),
"lms_k": (LMSDiscreteScheduler, {"use_karras_sigmas": True}),
"pndm": (PNDMScheduler, {}),
"heun": (HeunDiscreteScheduler, {"use_karras_sigmas": False}),
"heun_k": (HeunDiscreteScheduler, {"use_karras_sigmas": True}),
"euler": (EulerDiscreteScheduler, {"use_karras_sigmas": False}),
"euler_k": (EulerDiscreteScheduler, {"use_karras_sigmas": True}),
"euler_a": (EulerAncestralDiscreteScheduler, {}),
"kdpm_2": (KDPM2DiscreteScheduler, {}),
"kdpm_2_a": (KDPM2AncestralDiscreteScheduler, {}),
"dpmpp_2s": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": False}),
"dpmpp_2s_k": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": True}),
"dpmpp_2m": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False}),
"dpmpp_2m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True}),
"dpmpp_2m_sde": (DPMSolverMultistepScheduler, {"use_karras_sigmas": False, "algorithm_type": "sde-dpmsolver++"}),
"dpmpp_2m_sde_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True, "algorithm_type": "sde-dpmsolver++"}),
"dpmpp_sde": (DPMSolverSDEScheduler, {"use_karras_sigmas": False, "noise_sampler_seed": 0}),
"dpmpp_sde_k": (DPMSolverSDEScheduler, {"use_karras_sigmas": True, "noise_sampler_seed": 0}),
"unipc": (UniPCMultistepScheduler, {"cpu_only": True}),
"lcm": (LCMScheduler, {}),
}
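
For identifier keys the keyword dict(...) form and the literal are equivalent; the literal just avoids a builtin lookup and call, and spells the keys as explicit strings. A sketch of how a (scheduler class, extra kwargs) entry like these can be consumed, with a stand-in FakeScheduler since the real diffusers classes are imported at the top of the file; how InvokeAI actually instantiates its schedulers is not shown in this diff:

class FakeScheduler:
    # stand-in for the diffusers scheduler classes imported above
    def __init__(self, use_karras_sigmas: bool = False):
        self.use_karras_sigmas = use_karras_sigmas

DEMO_MAP = {
    "euler": (FakeScheduler, {"use_karras_sigmas": False}),
    "euler_k": (FakeScheduler, {"use_karras_sigmas": True}),
}

scheduler_class, extra_kwargs = DEMO_MAP["euler_k"]
scheduler = scheduler_class(**extra_kwargs)
assert scheduler.use_karras_sigmas is True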

View File

@@ -615,7 +615,7 @@ def do_textual_inversion_training(
vae_info = model_manager.get_model(*model_meta, submodel=SubModelType.Vae)
unet_info = model_manager.get_model(*model_meta, submodel=SubModelType.UNet)
pipeline_args = dict(local_files_only=True)
pipeline_args = {"local_files_only": True}
if tokenizer_name:
tokenizer = CLIPTokenizer.from_pretrained(tokenizer_name, **pipeline_args)
else:

View File

@@ -225,34 +225,34 @@ def basicConfig(**kwargs):
_FACILITY_MAP = (
dict(
LOG_KERN=syslog.LOG_KERN,
LOG_USER=syslog.LOG_USER,
LOG_MAIL=syslog.LOG_MAIL,
LOG_DAEMON=syslog.LOG_DAEMON,
LOG_AUTH=syslog.LOG_AUTH,
LOG_LPR=syslog.LOG_LPR,
LOG_NEWS=syslog.LOG_NEWS,
LOG_UUCP=syslog.LOG_UUCP,
LOG_CRON=syslog.LOG_CRON,
LOG_SYSLOG=syslog.LOG_SYSLOG,
LOG_LOCAL0=syslog.LOG_LOCAL0,
LOG_LOCAL1=syslog.LOG_LOCAL1,
LOG_LOCAL2=syslog.LOG_LOCAL2,
LOG_LOCAL3=syslog.LOG_LOCAL3,
LOG_LOCAL4=syslog.LOG_LOCAL4,
LOG_LOCAL5=syslog.LOG_LOCAL5,
LOG_LOCAL6=syslog.LOG_LOCAL6,
LOG_LOCAL7=syslog.LOG_LOCAL7,
)
{
"LOG_KERN": syslog.LOG_KERN,
"LOG_USER": syslog.LOG_USER,
"LOG_MAIL": syslog.LOG_MAIL,
"LOG_DAEMON": syslog.LOG_DAEMON,
"LOG_AUTH": syslog.LOG_AUTH,
"LOG_LPR": syslog.LOG_LPR,
"LOG_NEWS": syslog.LOG_NEWS,
"LOG_UUCP": syslog.LOG_UUCP,
"LOG_CRON": syslog.LOG_CRON,
"LOG_SYSLOG": syslog.LOG_SYSLOG,
"LOG_LOCAL0": syslog.LOG_LOCAL0,
"LOG_LOCAL1": syslog.LOG_LOCAL1,
"LOG_LOCAL2": syslog.LOG_LOCAL2,
"LOG_LOCAL3": syslog.LOG_LOCAL3,
"LOG_LOCAL4": syslog.LOG_LOCAL4,
"LOG_LOCAL5": syslog.LOG_LOCAL5,
"LOG_LOCAL6": syslog.LOG_LOCAL6,
"LOG_LOCAL7": syslog.LOG_LOCAL7,
}
if SYSLOG_AVAILABLE
else dict()
else {}
)
_SOCK_MAP = dict(
SOCK_STREAM=socket.SOCK_STREAM,
SOCK_DGRAM=socket.SOCK_DGRAM,
)
_SOCK_MAP = {
"SOCK_STREAM": socket.SOCK_STREAM,
"SOCK_DGRAM": socket.SOCK_DGRAM,
}
class InvokeAIFormatter(logging.Formatter):
@@ -344,7 +344,7 @@ LOG_FORMATTERS = {
class InvokeAILogger(object):
loggers = dict()
loggers = {}
@classmethod
def get_logger(
@@ -364,7 +364,7 @@ class InvokeAILogger(object):
@classmethod
def get_loggers(cls, config: InvokeAIAppConfig) -> list[logging.Handler]:
handler_strs = config.log_handlers
handlers = list()
handlers = []
for handler in handler_strs:
handler_name, *args = handler.split("=", 2)
args = args[0] if len(args) > 0 else None
@@ -398,7 +398,7 @@ class InvokeAILogger(object):
raise ValueError("syslog is not available on this system")
if not args:
args = "/dev/log" if Path("/dev/log").exists() else "address:localhost:514"
syslog_args = dict()
syslog_args = {}
try:
for a in args.split(","):
arg_name, *arg_value = a.split(":", 2)
@@ -434,7 +434,7 @@ class InvokeAILogger(object):
path = url.path
port = url.port or 80
syslog_args = dict()
syslog_args = {}
for a in arg_list:
arg_name, *arg_value = a.split(":", 2)
if arg_name == "method":

View File

@@ -26,7 +26,7 @@ def log_txt_as_img(wh, xc, size=10):
# wh a tuple of (width, height)
# xc a list of captions to plot
b = len(xc)
txts = list()
txts = []
for bi in range(b):
txt = Image.new("RGB", wh, color="white")
draw = ImageDraw.Draw(txt)
@@ -90,7 +90,7 @@ def instantiate_from_config(config, **kwargs):
elif config == "__is_unconditional__":
return None
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()), **kwargs)
return get_obj_from_str(config["target"])(**config.get("params", {}), **kwargs)
def get_obj_from_str(string, reload=False):
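
One subtlety in the last hunk: the default argument to .get() is evaluated eagerly, so both config.get("params", dict()) and config.get("params", {}) build a fresh empty dict on every call, whether or not "params" is present; the rewrite changes cost and style, not behavior. A toy sketch of the instantiate_from_config contract, using collections.Counter as the target and a simplified get_obj_from_str_demo that omits the real helper's reload flag:

import importlib

def get_obj_from_str_demo(string: str):
    # simplified version of get_obj_from_str above, without reload support
    module, cls = string.rsplit(".", 1)
    return getattr(importlib.import_module(module), cls)

config = {"target": "collections.Counter", "params": {"a": 2}}
obj = get_obj_from_str_demo(config["target"])(**config.get("params", {}))
assert obj["a"] == 2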