Mirror of https://github.com/invoke-ai/InvokeAI
Resolving merge conflicts for flake8
commit 537ae2f901
parent f6db9da06c
committed by psychedelicious
@@ -1,5 +1,5 @@
 """
 Initialization file for invokeai.backend
 """
-from .model_management import ModelManager, ModelCache, BaseModelType, ModelType, SubModelType, ModelInfo
-from .model_management.models import SilenceWarnings
+from .model_management import ModelManager, ModelCache, BaseModelType, ModelType, SubModelType, ModelInfo # noqa: F401
+from .model_management.models import SilenceWarnings # noqa: F401
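Most of this commit applies a handful of flake8 fixes file by file. The first is F401: an import the module itself never uses. In an __init__.py such an import usually exists to re-export a name, so the commit keeps it and silences the warning instead of deleting it. A minimal sketch of the pattern (the module and alias below are illustrative, not from the commit):

    # hypothetical package __init__.py, for illustration only
    from os.path import join as path_join  # noqa: F401  (re-exported, not used here)
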
@@ -1,14 +1,16 @@
 """
 Initialization file for invokeai.backend.image_util methods.
 """
-from .patchmatch import PatchMatch
-from .pngwriter import PngWriter, PromptFormatter, retrieve_metadata, write_metadata
-from .seamless import configure_model_padding
-from .txt2mask import Txt2Mask
-from .util import InitImageResizer, make_grid
+from .patchmatch import PatchMatch # noqa: F401
+from .pngwriter import PngWriter, PromptFormatter, retrieve_metadata, write_metadata # noqa: F401
+from .seamless import configure_model_padding # noqa: F401
+from .txt2mask import Txt2Mask # noqa: F401
+from .util import InitImageResizer, make_grid # noqa: F401
+
+
 def debug_image(debug_image, debug_text, debug_show=True, debug_result=False, debug_status=False):
     from PIL import ImageDraw

     if not debug_status:
         return

@@ -26,7 +26,7 @@ class PngWriter:
         dirlist = sorted(os.listdir(self.outdir), reverse=True)
         # find the first filename that matches our pattern or return 000000.0.png
         existing_name = next(
-            (f for f in dirlist if re.match("^(\d+)\..*\.png", f)),
+            (f for f in dirlist if re.match(r"^(\d+)\..*\.png", f)),
             "0000000.0.png",
         )
         basecount = int(existing_name.split(".", 1)[0]) + 1
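The pngwriter change is flake8 W605: `\d` is not a valid escape in an ordinary string literal, and newer Pythons emit a SyntaxWarning for it. A raw string passes the backslash through to the regex engine unchanged. A small self-contained check of the fixed pattern against a typical output filename:

    import re

    # the fixed pattern from the hunk above; filename is illustrative
    assert re.match(r"^(\d+)\..*\.png", "000001.1234.png") is not None
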
@@ -98,11 +98,11 @@ class PromptFormatter:
         # to do: put model name into the t2i object
         # switches.append(f'--model{t2i.model_name}')
         if opt.seamless or t2i.seamless:
-            switches.append(f"--seamless")
+            switches.append("--seamless")
         if opt.init_img:
             switches.append(f"-I{opt.init_img}")
         if opt.fit:
-            switches.append(f"--fit")
+            switches.append("--fit")
         if opt.strength and opt.init_img is not None:
             switches.append(f"-f{opt.strength or t2i.strength}")
         if opt.gfpgan_strength:
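Dropping the f-prefix from "--seamless" and "--fit" is flake8 F541: an f-string with no placeholders is just a string literal with extra noise. The neighbouring switches keep their prefix because they actually interpolate. A sketch (the value is hypothetical):

    init_img = "init.png"  # illustrative value
    switches = []
    switches.append("--seamless")     # nothing to interpolate: plain literal
    switches.append(f"-I{init_img}")  # interpolation present: f-string earns its keep
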
@@ -52,7 +52,6 @@ from invokeai.frontend.install.widgets import (
     SingleSelectColumns,
     CenteredButtonPress,
     FileBox,
-    IntTitleSlider,
     set_min_terminal_size,
     CyclingForm,
     MIN_COLS,
@@ -308,7 +307,7 @@ class editOptsForm(CyclingForm, npyscreen.FormMultiPage):
         first_time = not (config.root_path / "invokeai.yaml").exists()
         access_token = HfFolder.get_token()
         window_width, window_height = get_terminal_size()
-        label = """Configure startup settings. You can come back and change these later. 
+        label = """Configure startup settings. You can come back and change these later.
 Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields.
 Use cursor arrows to make a checkbox selection, and space to toggle.
 """
@@ -525,7 +525,7 @@ def do_migrate(src_directory: Path, dest_directory: Path):
     if version_3: # write into the dest directory
         try:
             shutil.copy(dest_directory / "configs" / "models.yaml", config_file)
-        except:
+        except Exception:
             MigrateTo3.initialize_yaml(config_file)
         mgr = ModelManager(config_file) # important to initialize BEFORE moving the models directory
         (dest_directory / "models").replace(dest_models)
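The `except:` to `except Exception:` substitution that recurs through the rest of the commit is flake8 E722. A bare except also traps BaseException subclasses such as KeyboardInterrupt and SystemExit, so a Ctrl-C inside the guarded block would be silently converted into the fallback path. Catching Exception keeps the same fallback for ordinary errors while letting shutdown signals propagate. A minimal illustrative sketch:

    try:
        value = int("not a number")
    except Exception:  # ValueError etc. are handled; Ctrl-C still propagates
        value = 0
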
@@ -553,7 +553,7 @@ def main():
     parser = argparse.ArgumentParser(
         prog="invokeai-migrate3",
         description="""
-This will copy and convert the models directory and the configs/models.yaml from the InvokeAI 2.3 format 
+This will copy and convert the models directory and the configs/models.yaml from the InvokeAI 2.3 format
 '--from-directory' root to the InvokeAI 3.0 '--to-directory' root. These may be abbreviated '--from' and '--to'.a

 The old models directory and config file will be renamed 'models.orig' and 'models.yaml.orig' respectively.
@@ -12,7 +12,6 @@ from typing import Optional, List, Dict, Callable, Union, Set
 import requests
 from diffusers import DiffusionPipeline
 from diffusers import logging as dlogging
-import onnx
 import torch
 from huggingface_hub import hf_hub_url, HfFolder, HfApi
 from omegaconf import OmegaConf
@@ -1,10 +1,10 @@
 """
 Initialization file for invokeai.backend.model_management
 """
-from .model_manager import ModelManager, ModelInfo, AddModelResult, SchedulerPredictionType
-from .model_cache import ModelCache
-from .lora import ModelPatcher, ONNXModelPatcher
-from .models import (
+from .model_manager import ModelManager, ModelInfo, AddModelResult, SchedulerPredictionType # noqa: F401
+from .model_cache import ModelCache # noqa: F401
+from .lora import ModelPatcher, ONNXModelPatcher # noqa: F401
+from .models import ( # noqa: F401
     BaseModelType,
     ModelType,
     SubModelType,
@@ -12,5 +12,4 @@ from .models import (
     ModelNotFoundException,
     DuplicateModelException,
 )
-from .model_merge import ModelMerger, MergeInterpolationMethod
-from .lora import ModelPatcher
+from .model_merge import ModelMerger, MergeInterpolationMethod # noqa: F401
@@ -5,21 +5,16 @@ from contextlib import contextmanager
 from typing import Optional, Dict, Tuple, Any, Union, List
 from pathlib import Path

-import torch
-from safetensors.torch import load_file
-from torch.utils.hooks import RemovableHandle
-
-from diffusers.models import UNet2DConditionModel
-from transformers import CLIPTextModel
-from onnx import numpy_helper
-from onnxruntime import OrtValue
-import numpy as np

+import torch
+from compel.embeddings_provider import BaseTextualInversionManager
 from diffusers.models import UNet2DConditionModel
+from safetensors.torch import load_file
+from transformers import CLIPTextModel, CLIPTokenizer

 from .models.lora import LoRAModel


 """
 loras = [
     (lora_model1, 0.7),
@@ -52,7 +47,7 @@ class ModelPatcher:
                 module = module.get_submodule(submodule_name)
                 module_key += "." + submodule_name
                 submodule_name = key_parts.pop(0)
-            except:
+            except Exception:
                 submodule_name += "_" + key_parts.pop(0)

         module = module.get_submodule(submodule_name)
@@ -341,7 +336,7 @@ class ONNXModelPatcher:
     def apply_lora(
         cls,
         model: IAIOnnxRuntimeModel,
-        loras: List[Tuple[LoraModel, float]],
+        loras: List[Tuple[LoRAModel, float]],
         prefix: str,
     ):
         from .models.base import IAIOnnxRuntimeModel
@@ -273,7 +273,7 @@ class ModelCache(object):
                 self.cache.logger.debug(f"Locking {self.key} in {self.cache.execution_device}")
                 self.cache._print_cuda_stats()

-            except:
+            except Exception:
                 self.cache_entry.unlock()
                 raise

@@ -419,12 +419,12 @@ class ModelManager(object):
         base_model_str, model_type_str, model_name = model_key.split("/", 2)
         try:
             model_type = ModelType(model_type_str)
-        except:
+        except Exception:
             raise Exception(f"Unknown model type: {model_type_str}")

         try:
             base_model = BaseModelType(base_model_str)
-        except:
+        except Exception:
             raise Exception(f"Unknown base model: {base_model_str}")

         return (model_name, base_model, model_type)
@@ -855,7 +855,7 @@ class ModelManager(object):
             info.pop("config")

             result = self.add_model(model_name, base_model, model_type, model_attributes=info, clobber=True)
-        except:
+        except Exception:
             # something went wrong, so don't leave dangling diffusers model in directory or it will cause a duplicate model error!
             rmtree(new_diffusers_path)
             raise
@@ -1042,7 +1042,7 @@ class ModelManager(object):
         # Patch in the SD VAE from core so that it is available for use by the UI
         try:
             self.heuristic_import({str(self.resolve_model_path("core/convert/sd-vae-ft-mse"))})
-        except:
+        except Exception:
             pass

         installer = ModelInstall(
@@ -217,9 +217,9 @@ class ModelProbe(object):
             raise "The model {model_name} is potentially infected by malware. Aborting import."


-###################################################3
+# ##################################################3
 # Checkpoint probing
-###################################################3
+# ##################################################3
 class ProbeBase(object):
     def get_base_type(self) -> BaseModelType:
         pass
@@ -431,7 +431,7 @@ class PipelineFolderProbe(FolderProbeBase):
                     return ModelVariantType.Depth
                 elif in_channels == 4:
                     return ModelVariantType.Normal
-        except:
+        except Exception:
             pass
         return ModelVariantType.Normal

@@ -2,7 +2,7 @@ import inspect
 from enum import Enum
 from pydantic import BaseModel
 from typing import Literal, get_origin
-from .base import (
+from .base import ( # noqa: F401
     BaseModelType,
     ModelType,
     SubModelType,
@@ -118,7 +118,7 @@ def get_model_config_enums():
         fields = model_config.__annotations__
         try:
             field = fields["model_format"]
-        except:
+        except Exception:
             raise Exception("format field not found")

         # model_format: None
@@ -3,27 +3,28 @@ import os
 import sys
 import typing
 import inspect
-from enum import Enum
+import warnings
 from abc import ABCMeta, abstractmethod
 from contextlib import suppress
+from enum import Enum
+from pathlib import Path
 from picklescan.scanner import scan_file_path

 import torch
 import numpy as np
 import safetensors.torch
-from pathlib import Path
-from diffusers import DiffusionPipeline, ConfigMixin, OnnxRuntimeModel
-
-from contextlib import suppress
-from pydantic import BaseModel, Field
-from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
-
 import onnx
-import safetensors.torch
 from diffusers import DiffusionPipeline, ConfigMixin
+from onnx import numpy_helper
 from onnxruntime import (
     InferenceSession,
     SessionOptions,
     get_available_providers,
 )
+from pydantic import BaseModel, Field
+from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
+from diffusers import logging as diffusers_logging
+from transformers import logging as transformers_logging
+
+
 class DuplicateModelException(Exception):
@@ -171,7 +172,7 @@ class ModelBase(metaclass=ABCMeta):
         fields = value.__annotations__
         try:
             field = fields["model_format"]
-        except:
+        except Exception:
             raise Exception(f"Invalid config definition - format field not found({cls.__qualname__})")

         if isinstance(field, type) and issubclass(field, str) and issubclass(field, Enum):
@@ -244,7 +245,7 @@ class DiffusersModel(ModelBase):
         try:
             config_data = DiffusionPipeline.load_config(self.model_path)
             # config_data = json.loads(os.path.join(self.model_path, "model_index.json"))
-        except:
+        except Exception:
             raise Exception("Invalid diffusers model! (model_index.json not found or invalid)")

         config_data.pop("_ignore_files", None)
@@ -343,7 +344,7 @@ def calc_model_size_by_fs(model_path: str, subfolder: Optional[str] = None, vari
             with open(os.path.join(model_path, file), "r") as f:
                 index_data = json.loads(f.read())
             return int(index_data["metadata"]["total_size"])
-        except:
+        except Exception:
             pass

     # calculate files size if there is no index file
@@ -440,7 +441,7 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = False):
     if str(path).endswith(".safetensors"):
         try:
             checkpoint = _fast_safetensors_reader(path)
-        except:
+        except Exception:
             # TODO: create issue for support "meta"?
             checkpoint = safetensors.torch.load_file(path, device="cpu")
     else:
@@ -452,11 +453,6 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = False):
     return checkpoint


-import warnings
-from diffusers import logging as diffusers_logging
-from transformers import logging as transformers_logging
-
-
 class SilenceWarnings(object):
     def __init__(self):
         self.transformers_verbosity = transformers_logging.get_verbosity()
@@ -639,7 +635,7 @@ class IAIOnnxRuntimeModel:
             raise Exception("You should call create_session before running model")

         inputs = {k: np.array(v) for k, v in kwargs.items()}
-        output_names = self.session.get_outputs()
+        # output_names = self.session.get_outputs()
         # for k in inputs:
         #     self.io_binding.bind_cpu_input(k, inputs[k])
         # for name in output_names:
@@ -43,7 +43,7 @@ class ControlNetModel(ModelBase):
         try:
             config = EmptyConfigLoader.load_config(self.model_path, config_name="config.json")
             # config = json.loads(os.path.join(self.model_path, "config.json"))
-        except:
+        except Exception:
             raise Exception("Invalid controlnet model! (config.json not found or invalid)")

         model_class_name = config.get("_class_name", None)
@@ -53,7 +53,7 @@ class ControlNetModel(ModelBase):
         try:
             self.model_class = self._hf_definition_to_type(["diffusers", model_class_name])
             self.model_size = calc_model_size_by_fs(self.model_path)
-        except:
+        except Exception:
             raise Exception("Invalid ControlNet model!")

     def get_size(self, child_type: Optional[SubModelType] = None):
@@ -78,7 +78,7 @@ class ControlNetModel(ModelBase):
                     variant=variant,
                 )
                 break
-            except:
+            except Exception:
                 pass
         if not model:
             raise ModelNotFoundException()
@@ -330,5 +330,5 @@ def _select_ckpt_config(version: BaseModelType, variant: ModelVariantType):
             config_path = config_path.relative_to(app_config.root_path)
         return str(config_path)

-    except:
+    except Exception:
         return None
@@ -1,25 +1,16 @@
-import os
-import json
 from enum import Enum
-from pydantic import Field
-from pathlib import Path
-from typing import Literal, Optional, Union
+from typing import Literal
 from .base import (
-    ModelBase,
     ModelConfigBase,
     BaseModelType,
     ModelType,
     SubModelType,
-    ModelVariantType,
     DiffusersModel,
     SchedulerPredictionType,
-    SilenceWarnings,
-    read_checkpoint_meta,
     classproperty,
-    OnnxRuntimeModel,
     IAIOnnxRuntimeModel,
 )
 from invokeai.app.services.config import InvokeAIAppConfig


 class StableDiffusionOnnxModelFormat(str, Enum):
@@ -44,14 +44,14 @@ class VaeModel(ModelBase):
         try:
             config = EmptyConfigLoader.load_config(self.model_path, config_name="config.json")
             # config = json.loads(os.path.join(self.model_path, "config.json"))
-        except:
+        except Exception:
             raise Exception("Invalid vae model! (config.json not found or invalid)")

         try:
             vae_class_name = config.get("_class_name", "AutoencoderKL")
             self.vae_class = self._hf_definition_to_type(["diffusers", vae_class_name])
             self.model_size = calc_model_size_by_fs(self.model_path)
-        except:
+        except Exception:
             raise Exception("Invalid vae model! (Unkown vae type)")

     def get_size(self, child_type: Optional[SubModelType] = None):
@@ -1,11 +1,15 @@
 """
 Initialization file for the invokeai.backend.stable_diffusion package
 """
-from .diffusers_pipeline import (
+from .diffusers_pipeline import ( # noqa: F401
     ConditioningData,
     PipelineIntermediateState,
     StableDiffusionGeneratorPipeline,
 )
-from .diffusion import InvokeAIDiffuserComponent
-from .diffusion.cross_attention_map_saving import AttentionMapSaver
-from .diffusion.shared_invokeai_diffusion import PostprocessingSettings, BasicConditioningInfo, SDXLConditioningInfo
+from .diffusion import InvokeAIDiffuserComponent # noqa: F401
+from .diffusion.cross_attention_map_saving import AttentionMapSaver # noqa: F401
+from .diffusion.shared_invokeai_diffusion import ( # noqa: F401
+    PostprocessingSettings,
+    BasicConditioningInfo,
+    SDXLConditioningInfo,
+)
@@ -2,10 +2,8 @@ from __future__ import annotations

 import dataclasses
 import inspect
-import math
-import secrets
 from dataclasses import dataclass, field
-from typing import Any, Callable, Generic, List, Optional, Type, Union
+from typing import Any, Callable, List, Optional, Union

 import PIL.Image
 import einops
@@ -1,9 +1,9 @@
 """
 Initialization file for invokeai.models.diffusion
 """
-from .cross_attention_control import InvokeAICrossAttentionMixin
-from .cross_attention_map_saving import AttentionMapSaver
-from .shared_invokeai_diffusion import (
+from .cross_attention_control import InvokeAICrossAttentionMixin # noqa: F401
+from .cross_attention_map_saving import AttentionMapSaver # noqa: F401
+from .shared_invokeai_diffusion import ( # noqa: F401
     InvokeAIDiffuserComponent,
     PostprocessingSettings,
     BasicConditioningInfo,
@@ -4,6 +4,7 @@

 import enum
+import math
 from dataclasses import dataclass, field
 from typing import Callable, Optional

 import diffusers
@@ -12,6 +13,11 @@ import torch
 from compel.cross_attention_control import Arguments
 from diffusers.models.unet_2d_condition import UNet2DConditionModel
 from diffusers.models.attention_processor import AttentionProcessor
+from diffusers.models.attention_processor import (
+    Attention,
+    AttnProcessor,
+    SlicedAttnProcessor,
+)
 from torch import nn

 import invokeai.backend.util.logging as logger
@@ -522,14 +528,6 @@ class AttnProcessor:
         return hidden_states

 """
-from dataclasses import dataclass, field
-
-import torch
-from diffusers.models.attention_processor import (
-    Attention,
-    AttnProcessor,
-    SlicedAttnProcessor,
-)


 @dataclass
@@ -5,8 +5,6 @@ import torch
 from torchvision.transforms.functional import InterpolationMode
 from torchvision.transforms.functional import resize as tv_resize

-from .cross_attention_control import CrossAttentionType, get_cross_attention_modules
-

 class AttentionMapSaver:
     def __init__(self, token_ids: range, latents_shape: torch.Size):
@@ -3,15 +3,12 @@ from __future__ import annotations
 from contextlib import contextmanager
 from dataclasses import dataclass
 import math
-from typing import Any, Callable, Dict, Optional, Union, List
+from typing import Any, Callable, Optional, Union

 import numpy as np
 import torch
-from diffusers import UNet2DConditionModel
-from diffusers.models.attention_processor import AttentionProcessor
-from typing_extensions import TypeAlias

 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig

 from .cross_attention_control import (
@@ -579,7 +576,7 @@ class InvokeAIDiffuserComponent:
             latents.to(device="cpu")

             if (
-                h_symmetry_time_pct != None
+                h_symmetry_time_pct is not None
                 and self.last_percent_through < h_symmetry_time_pct
                 and percent_through >= h_symmetry_time_pct
             ):
@@ -595,7 +592,7 @@ class InvokeAIDiffuserComponent:
                 )

             if (
-                v_symmetry_time_pct != None
+                v_symmetry_time_pct is not None
                 and self.last_percent_through < v_symmetry_time_pct
                 and percent_through >= v_symmetry_time_pct
             ):
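Both symmetry checks switch from `!= None` to `is not None`, which is flake8 E711. Equality against None goes through __eq__/__ne__, which a class may override arbitrarily; identity is the reliable test. A short self-contained demonstration (the class is contrived for illustration):

    class AlwaysEqual:
        def __eq__(self, other):
            return True  # claims equality with everything, None included

    obj = AlwaysEqual()
    print(obj != None)       # False -- the override lies
    print(obj is not None)   # True  -- identity cannot be overridden
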
@@ -1,6 +1,6 @@
-from ldm.modules.image_degradation.bsrgan import (
+from ldm.modules.image_degradation.bsrgan import ( # noqa: F401
     degradation_bsrgan_variant as degradation_fn_bsr,
 )
-from ldm.modules.image_degradation.bsrgan_light import (
+from ldm.modules.image_degradation.bsrgan_light import ( # noqa: F401
     degradation_bsrgan_variant as degradation_fn_bsr_light,
 )
@@ -573,14 +573,15 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
         hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
     """
     image = util.uint2single(image)
-    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
-    sf_ori = sf
+    jpeg_prob, scale2_prob = 0.9, 0.25
+    # isp_prob = 0.25 # uncomment with `if i== 6` block below
+    # sf_ori = sf # uncomment with `if i== 6` block below

     h1, w1 = image.shape[:2]
     image = image.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...] # mod crop
     h, w = image.shape[:2]

-    hq = image.copy()
+    # hq = image.copy() # uncomment with `if i== 6` block below

     if sf == 4 and random.random() < scale2_prob: # downsample1
         if np.random.rand() < 0.5:
@@ -777,7 +778,7 @@ if __name__ == "__main__":
        img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"]
        print(img_lq.shape)
        print("bicubic", img_lq_bicubic.shape)
-       print(img_hq.shape)
+       # print(img_hq.shape)
        lq_nearest = cv2.resize(
            util.single2uint(img_lq),
            (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
@@ -788,5 +789,6 @@ if __name__ == "__main__":
            (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
            interpolation=0,
        )
-       img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
+       # img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
+       img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest], axis=1)
        util.imsave(img_concat, str(i) + ".png")
@@ -577,14 +577,15 @@ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
         hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
     """
     image = util.uint2single(image)
-    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
-    sf_ori = sf
+    jpeg_prob, scale2_prob = 0.9, 0.25
+    # isp_prob = 0.25 # uncomment with `if i== 6` block below
+    # sf_ori = sf # uncomment with `if i== 6` block below

     h1, w1 = image.shape[:2]
     image = image.copy()[: w1 - w1 % sf, : h1 - h1 % sf, ...] # mod crop
     h, w = image.shape[:2]

-    hq = image.copy()
+    # hq = image.copy() # uncomment with `if i== 6` block below

     if sf == 4 and random.random() < scale2_prob: # downsample1
         if np.random.rand() < 0.5:
@@ -8,8 +8,6 @@ import numpy as np
 import torch
 from torchvision.utils import make_grid
-
 # import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py
-
 import invokeai.backend.util.logging as logger

 os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
@@ -50,6 +48,8 @@ def get_timestamp():


 def imshow(x, title=None, cbar=False, figsize=None):
+    import matplotlib.pyplot as plt
+
     plt.figure(figsize=figsize)
     plt.imshow(np.squeeze(x), interpolation="nearest", cmap="gray")
     if title:
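Moving `import matplotlib.pyplot as plt` inside imshow (and surf, below) keeps a heavy, display-dependent dependency out of module import time: the module can be imported on a headless machine and only pays for matplotlib when a plot is actually requested. A sketch of the pattern, assuming matplotlib is installed wherever the function is eventually called:

    import numpy as np

    def show_gray(x):
        import matplotlib.pyplot as plt  # deferred until a plot is requested

        plt.imshow(np.squeeze(x), interpolation="nearest", cmap="gray")
        plt.show()
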
@@ -60,6 +60,8 @@ def imshow(x, title=None, cbar=False, figsize=None):


 def surf(Z, cmap="rainbow", figsize=None):
+    import matplotlib.pyplot as plt
+
     plt.figure(figsize=figsize)
     ax3 = plt.axes(projection="3d")

@@ -1 +1 @@
-from .schedulers import SCHEDULER_MAP
+from .schedulers import SCHEDULER_MAP # noqa: F401
@@ -1,4 +1,4 @@
 """
 Initialization file for invokeai.backend.training
 """
-from .textual_inversion_training import do_textual_inversion_training, parse_args
+from .textual_inversion_training import do_textual_inversion_training, parse_args # noqa: F401
@@ -1,7 +1,7 @@
 """
 Initialization file for invokeai.backend.util
 """
-from .devices import (
+from .devices import ( # noqa: F401
     CPU_DEVICE,
     CUDA_DEVICE,
     MPS_DEVICE,
@@ -10,5 +10,5 @@ from .devices import (
     normalize_device,
     torch_dtype,
 )
-from .log import write_log
-from .util import ask_user, download_with_resume, instantiate_from_config, url_attachment_name, Chdir
+from .log import write_log # noqa: F401
+from .util import ask_user, download_with_resume, instantiate_from_config, url_attachment_name, Chdir # noqa: F401
@@ -25,10 +25,15 @@ from diffusers.models.unet_2d_condition import UNet2DConditionModel
 import diffusers
 from diffusers.models.controlnet import ControlNetConditioningEmbedding, ControlNetOutput, zero_module

+from invokeai.backend.util.logging import InvokeAILogger
+
 # TODO: create PR to diffusers
 # Modified ControlNetModel with encoder_attention_mask argument added


+logger = InvokeAILogger.getLogger(__name__)
+
+
 class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
     """
     A ControlNet model.
@@ -111,7 +116,7 @@ class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
             "DownBlock2D",
         ),
         only_cross_attention: Union[bool, Tuple[bool]] = False,
-        block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
+        block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280),
         layers_per_block: int = 2,
         downsample_padding: int = 1,
         mid_block_scale_factor: float = 1,
@@ -27,8 +27,8 @@ def write_log_message(results, output_cntr):
     log_lines = [f"{path}: {prompt}\n" for path, prompt in results]
     if len(log_lines) > 1:
         subcntr = 1
-        for l in log_lines:
-            print(f"[{output_cntr}.{subcntr}] {l}", end="")
+        for ll in log_lines:
+            print(f"[{output_cntr}.{subcntr}] {ll}", end="")
             subcntr += 1
     else:
         print(f"[{output_cntr}] {log_lines[0]}", end="")
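Renaming the loop variable `l` to `ll` is flake8 E741: in many fonts `l` is indistinguishable from the digit `1` (and `I`), so the single letters `l`, `O`, and `I` are flagged as ambiguous names. Any other short name clears the warning:

    log_lines = ["a.png: first prompt\n", "b.png: second prompt\n"]  # illustrative data
    for ll in log_lines:  # `ll` (or `line`) instead of the ambiguous `l`
        print(ll, end="")
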
@@ -182,13 +182,13 @@ import urllib.parse
 from abc import abstractmethod
 from pathlib import Path

-from invokeai.app.services.config import InvokeAIAppConfig, get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig

 try:
     import syslog

     SYSLOG_AVAILABLE = True
-except:
+except ImportError:
     SYSLOG_AVAILABLE = False

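This is the one place the commit narrows a bare except to something more specific than Exception: the guarded block can only fail at the import itself (syslog is a POSIX-only module, absent on Windows), so ImportError documents exactly what is being tolerated. The optional-dependency pattern in isolation:

    try:
        import syslog  # POSIX-only; missing on Windows

        SYSLOG_AVAILABLE = True
    except ImportError:
        SYSLOG_AVAILABLE = False
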
@@ -417,7 +417,7 @@ class InvokeAILogger(object):
                     syslog_args["socktype"] = _SOCK_MAP[arg_value[0]]
                 else:
                     syslog_args["address"] = arg_name
-        except:
+        except Exception:
             raise ValueError(f"{args} is not a value argument list for syslog logging")
         return logging.handlers.SysLogHandler(**syslog_args)

@@ -191,7 +191,7 @@ class ChunkedSlicedAttnProcessor:
         assert value.shape[0] == 1
         assert hidden_states.shape[0] == 1

-        dtype = query.dtype
+        # dtype = query.dtype
         if attn.upcast_attention:
             query = query.float()
             key = key.float()
@@ -84,7 +84,7 @@ def count_params(model, verbose=False):


 def instantiate_from_config(config, **kwargs):
-    if not "target" in config:
+    if "target" not in config:
         if config == "__is_first_stage__":
             return None
         elif config == "__is_unconditional__":
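The instantiate_from_config fix is flake8 E713: `not "target" in config` parses as `not ("target" in config)`, but reads as if it negated only the string, so the dedicated `not in` operator is preferred. In isolation:

    config = {"params": {"depth": 4}}  # illustrative dict, not from the codebase
    if "target" not in config:
        print("no target key -- nothing to instantiate")
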
@@ -234,16 +234,17 @@ def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t**5 - 15 * t**4 + 10
         .repeat_interleave(d[1], 1)
     )

-    dot = lambda grad, shift: (
-        torch.stack(
-            (
-                grid[: shape[0], : shape[1], 0] + shift[0],
-                grid[: shape[0], : shape[1], 1] + shift[1],
-            ),
-            dim=-1,
-        )
-        * grad[: shape[0], : shape[1]]
-    ).sum(dim=-1)
+    def dot(grad, shift):
+        return (
+            torch.stack(
+                (
+                    grid[: shape[0], : shape[1], 0] + shift[0],
+                    grid[: shape[0], : shape[1], 1] + shift[1],
+                ),
+                dim=-1,
+            )
+            * grad[: shape[0], : shape[1]]
+        ).sum(dim=-1)

     n00 = dot(tile_grads([0, -1], [0, -1]), [0, 0]).to(device)
     n10 = dot(tile_grads([1, None], [0, -1]), [-1, 0]).to(device)
|
||||
if dest.is_dir():
|
||||
try:
|
||||
file_name = re.search('filename="(.+)"', resp.headers.get("Content-Disposition")).group(1)
|
||||
except:
|
||||
except AttributeError:
|
||||
file_name = os.path.basename(url)
|
||||
dest = dest / file_name
|
||||
else:
|
||||
@ -342,7 +343,7 @@ def url_attachment_name(url: str) -> dict:
|
||||
resp = requests.get(url, stream=True)
|
||||
match = re.search('filename="(.+)"', resp.headers.get("Content-Disposition"))
|
||||
return match.group(1)
|
||||
except:
|
||||
except Exception:
|
||||
return None
|
||||
|