isort wip 2

Authored by Martin Kristiansen on 2023-08-18 11:13:28 -04:00; committed by Kent Keirsey
parent 5615c31799
commit caea6d11c6
35 changed files with 168 additions and 180 deletions
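For context on the hunks below: isort normalizes a module's import block by grouping imports into sections (standard library, third-party, first-party, local), separating sections with a blank line, placing plain `import` statements before `from` imports within each section, and sorting names inside parenthesized import lists (constants, then classes, then functions). The following sketch illustrates the effect on a hypothetical module; it is not a file from this commit, and it assumes settings close to isort's defaults (as applied by running `isort .` in the repository root).

```python
# Hypothetical before/after sketch of what isort does -- not a file from this commit.
#
# Before:
#   import torch
#   import os
#   from invokeai.app.services.config import InvokeAIAppConfig
#   import invokeai.backend.util.logging as logger
#
# After isort, the same four imports come out as:

import os  # standard library section first

import torch  # third-party section, separated by a blank line

import invokeai.backend.util.logging as logger  # first-party: plain imports precede from-imports
from invokeai.app.services.config import InvokeAIAppConfig
```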

@@ -5,6 +5,7 @@ InvokeAI Installer
 import argparse
 import os
 from pathlib import Path
+
 from installer import Installer
 
 if __name__ == "__main__":

@@ -1,5 +1,5 @@
 """
 Initialization file for invokeai.backend
 """
-from .model_management import ModelManager, ModelCache, BaseModelType, ModelType, SubModelType, ModelInfo  # noqa: F401
+from .model_management import BaseModelType, ModelCache, ModelInfo, ModelManager, ModelType, SubModelType  # noqa: F401
 from .model_management.models import SilenceWarnings  # noqa: F401

@@ -3,12 +3,13 @@ This module defines a singleton object, "invisible_watermark" that
 wraps the invisible watermark model. It respects the global "invisible_watermark"
 configuration variable, that allows the watermarking to be supressed.
 """
-import numpy as np
 import cv2
-from PIL import Image
+import numpy as np
 from imwatermark import WatermarkEncoder
-from invokeai.app.services.config import InvokeAIAppConfig
+from PIL import Image
+
 import invokeai.backend.util.logging as logger
+from invokeai.app.services.config import InvokeAIAppConfig
 
 config = InvokeAIAppConfig.get_config()

@@ -5,6 +5,7 @@ wraps the actual patchmatch object. It respects the global
 be suppressed or deferred
 """
 import numpy as np
+
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig

@@ -5,10 +5,11 @@ configuration variable, that allows the checker to be supressed.
 """
 import numpy as np
 from PIL import Image
-from invokeai.backend import SilenceWarnings
-from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.util.devices import choose_torch_device
+
 import invokeai.backend.util.logging as logger
+from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.backend import SilenceWarnings
+from invokeai.backend.util.devices import choose_torch_device
 
 config = InvokeAIAppConfig.get_config()

@@ -2,9 +2,8 @@
 Check that the invokeai_root is correctly configured and exit if not.
 """
 import sys
-from invokeai.app.services.config import (
-    InvokeAIAppConfig,
-)
+
+from invokeai.app.services.config import InvokeAIAppConfig
 
 def check_invokeai_root(config: InvokeAIAppConfig):

@@ -6,16 +6,13 @@
 #
 # Coauthor: Kevin Turner http://github.com/keturn
 #
-import sys
 import argparse
 import io
 import os
-import psutil
 import shutil
+import sys
 import textwrap
-import torch
 import traceback
-import yaml
 import warnings
 from argparse import Namespace
 from enum import Enum
@@ -25,26 +22,25 @@ from typing import get_type_hints, get_args, Any
 from urllib import request
 
 import npyscreen
-import transformers
 import omegaconf
+import psutil
+import torch
+import transformers
+import yaml
 from diffusers import AutoencoderKL
 from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from huggingface_hub import HfFolder
 from huggingface_hub import login as hf_hub_login
 from omegaconf import OmegaConf
+from pydantic.error_wrappers import ValidationError
 from tqdm import tqdm
-from transformers import (
-    CLIPTextModel,
-    CLIPTextConfig,
-    CLIPTokenizer,
-    AutoFeatureExtractor,
-    BertTokenizerFast,
-)
-import invokeai.configs as configs
+from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
-from invokeai.app.services.config import (
-    InvokeAIAppConfig,
-)
+import invokeai.configs as configs
+from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.backend.install.legacy_arg_parsing import legacy_parser
+from invokeai.backend.install.model_install_backend import InstallSelections, ModelInstall, hf_download_from_pretrained
+from invokeai.backend.model_management.model_probe import BaseModelType, ModelType
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.frontend.install.model_install import addModelsForm, process_and_execute
@@ -58,16 +54,12 @@ from invokeai.frontend.install.widgets import (
-    CyclingForm,
     MIN_COLS,
     MIN_LINES,
     CenteredButtonPress,
+    CyclingForm,
     FileBox,
     WindowTooSmallException,
     set_min_terminal_size,
 )
-from invokeai.backend.install.legacy_arg_parsing import legacy_parser
-from invokeai.backend.install.model_install_backend import (
-    hf_download_from_pretrained,
-    InstallSelections,
-    ModelInstall,
-)
-from invokeai.backend.model_management.model_probe import ModelType, BaseModelType
-from pydantic.error_wrappers import ValidationError
 
 warnings.filterwarnings("ignore")
 transformers.logging.set_verbosity_error()

@@ -3,33 +3,26 @@ Migrate the models directory and models.yaml file from an existing
 InvokeAI 2.3 installation to 3.0.0.
 """
-import os
 import argparse
+import os
 import shutil
-import yaml
-import transformers
-import diffusers
 import warnings
 from dataclasses import dataclass
 from pathlib import Path
-from omegaconf import OmegaConf, DictConfig
 from typing import Union
-from diffusers import StableDiffusionPipeline, AutoencoderKL
+
+import diffusers
+import transformers
+import yaml
+from diffusers import AutoencoderKL, StableDiffusionPipeline
 from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
-from transformers import (
-    CLIPTextModel,
-    CLIPTokenizer,
-    AutoFeatureExtractor,
-    BertTokenizerFast,
-)
+from omegaconf import DictConfig, OmegaConf
+from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextModel, CLIPTokenizer
+
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.backend.model_management import ModelManager
-from invokeai.backend.model_management.model_probe import ModelProbe, ModelType, BaseModelType, ModelProbeInfo
+from invokeai.backend.model_management.model_probe import BaseModelType, ModelProbe, ModelProbeInfo, ModelType
 
 warnings.filterwarnings("ignore")
 transformers.logging.set_verbosity_error()

@@ -7,23 +7,23 @@ import warnings
 from dataclasses import dataclass, field
 from pathlib import Path
 from tempfile import TemporaryDirectory
-from typing import Optional, List, Dict, Callable, Union, Set
+from typing import Callable, Dict, List, Optional, Set, Union
 
 import requests
+import torch
 from diffusers import DiffusionPipeline
 from diffusers import logging as dlogging
-import torch
-from huggingface_hub import hf_hub_url, HfFolder, HfApi
+from huggingface_hub import HfApi, HfFolder, hf_hub_url
 from omegaconf import OmegaConf
 from tqdm import tqdm
 
 import invokeai.configs as configs
 from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.model_management import ModelManager, ModelType, BaseModelType, ModelVariantType, AddModelResult
-from invokeai.backend.model_management.model_probe import ModelProbe, SchedulerPredictionType, ModelProbeInfo
+from invokeai.backend.model_management import AddModelResult, BaseModelType, ModelManager, ModelType, ModelVariantType
+from invokeai.backend.model_management.model_probe import ModelProbe, ModelProbeInfo, SchedulerPredictionType
 from invokeai.backend.util import download_with_resume
-from invokeai.backend.util.devices import torch_dtype, choose_torch_device
+from invokeai.backend.util.devices import choose_torch_device, torch_dtype
 from ..util.logging import InvokeAILogger
 
 warnings.filterwarnings("ignore")

@@ -1,29 +1,30 @@
 import inspect
 from enum import Enum
-from pydantic import BaseModel
 from typing import Literal, get_origin
+
+from pydantic import BaseModel
+
 from .base import (  # noqa: F401
     BaseModelType,
-    ModelType,
-    SubModelType,
+    DuplicateModelException,
+    InvalidModelException,
     ModelBase,
     ModelConfigBase,
+    ModelError,
+    ModelNotFoundException,
+    ModelType,
     ModelVariantType,
     SchedulerPredictionType,
-    ModelError,
     SilenceWarnings,
-    ModelNotFoundException,
-    InvalidModelException,
-    DuplicateModelException,
+    SubModelType,
 )
-from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
-from .sdxl import StableDiffusionXLModel
-from .vae import VaeModel
-from .lora import LoRAModel
 from .controlnet import ControlNetModel  # TODO:
-from .textual_inversion import TextualInversionModel
+from .lora import LoRAModel
+from .sdxl import StableDiffusionXLModel
+from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
 from .stable_diffusion_onnx import ONNXStableDiffusion1Model, ONNXStableDiffusion2Model
+from .textual_inversion import TextualInversionModel
+from .vae import VaeModel
 
 MODEL_CLASSES = {
     BaseModelType.StableDiffusion1: {

@@ -1,29 +1,25 @@
+import inspect
 import json
 import os
 import sys
 import typing
-import inspect
 import warnings
 from abc import ABCMeta, abstractmethod
 from contextlib import suppress
 from enum import Enum
 from pathlib import Path
-from picklescan.scanner import scan_file_path
+from typing import Any, Callable, Dict, Generic, List, Literal, Optional, Type, TypeVar, Union
 
-import torch
 import numpy as np
 import onnx
 import safetensors.torch
-from diffusers import DiffusionPipeline, ConfigMixin
-from onnx import numpy_helper
-from onnxruntime import (
-    InferenceSession,
-    SessionOptions,
-    get_available_providers,
-)
-from pydantic import BaseModel, Field
-from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
+import torch
+from diffusers import ConfigMixin, DiffusionPipeline
 from diffusers import logging as diffusers_logging
+from onnx import numpy_helper
+from onnxruntime import InferenceSession, SessionOptions, get_available_providers
+from picklescan.scanner import scan_file_path
+from pydantic import BaseModel, Field
 from transformers import logging as transformers_logging

@@ -1,23 +1,26 @@
 import os
-import torch
 from enum import Enum
 from pathlib import Path
-from typing import Optional, Literal
+from typing import Literal, Optional
+
+import torch
+
+import invokeai.backend.util.logging as logger
+from invokeai.app.services.config import InvokeAIAppConfig
 from .base import (
+    BaseModelType,
+    EmptyConfigLoader,
+    InvalidModelException,
     ModelBase,
     ModelConfigBase,
-    BaseModelType,
+    ModelNotFoundException,
     ModelType,
     SubModelType,
-    EmptyConfigLoader,
-    calc_model_size_by_fs,
     calc_model_size_by_data,
+    calc_model_size_by_fs,
     classproperty,
-    InvalidModelException,
-    ModelNotFoundException,
 )
-from invokeai.app.services.config import InvokeAIAppConfig
-import invokeai.backend.util.logging as logger
 
 class ControlNetModelFormat(str, Enum):

@@ -1,19 +1,21 @@
-import os
 import json
+import os
 from enum import Enum
-from pydantic import Field
 from typing import Literal, Optional
+
+from omegaconf import OmegaConf
+from pydantic import Field
+
 from .base import (
-    ModelConfigBase,
     BaseModelType,
+    DiffusersModel,
+    InvalidModelException,
+    ModelConfigBase,
     ModelType,
     ModelVariantType,
-    DiffusersModel,
-    read_checkpoint_meta,
     classproperty,
-    InvalidModelException,
+    read_checkpoint_meta,
 )
-from omegaconf import OmegaConf
 
 class StableDiffusionXLModelFormat(str, Enum):

@@ -1,26 +1,29 @@
-import os
 import json
+import os
 from enum import Enum
-from pydantic import Field
 from pathlib import Path
 from typing import Literal, Optional, Union
+
 from diffusers import StableDiffusionInpaintPipeline, StableDiffusionPipeline
-from .base import (
-    ModelConfigBase,
-    BaseModelType,
-    ModelType,
-    ModelVariantType,
-    DiffusersModel,
-    SilenceWarnings,
-    read_checkpoint_meta,
-    classproperty,
-    InvalidModelException,
-    ModelNotFoundException,
-)
-from .sdxl import StableDiffusionXLModel
-from omegaconf import OmegaConf
+from omegaconf import OmegaConf
+from pydantic import Field
+
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig
+
+from .base import (
+    BaseModelType,
+    DiffusersModel,
+    InvalidModelException,
+    ModelConfigBase,
+    ModelNotFoundException,
+    ModelType,
+    ModelVariantType,
+    SilenceWarnings,
+    classproperty,
+    read_checkpoint_meta,
+)
+from .sdxl import StableDiffusionXLModel
 
 class StableDiffusion1ModelFormat(str, Enum):
@@ -272,8 +275,8 @@ def _convert_ckpt_and_cache(
         return output_path
 
     # to avoid circular import errors
-    from ..convert_ckpt_to_diffusers import convert_ckpt_to_diffusers
     from ...util.devices import choose_torch_device, torch_dtype
+    from ..convert_ckpt_to_diffusers import convert_ckpt_to_diffusers
 
     model_base_to_model_type = {
         BaseModelType.StableDiffusion1: "FrozenCLIPEmbedder",

@@ -2,15 +2,16 @@ from enum import Enum
 from typing import Literal
 
 from diffusers import OnnxRuntimeModel
 
 from .base import (
-    ModelConfigBase,
     BaseModelType,
+    DiffusersModel,
+    IAIOnnxRuntimeModel,
+    ModelConfigBase,
     ModelType,
     ModelVariantType,
-    DiffusersModel,
     SchedulerPredictionType,
     classproperty,
-    IAIOnnxRuntimeModel,
 )

@@ -1,19 +1,20 @@
 import os
-import torch
 from typing import Optional
-from .base import (
-    ModelBase,
-    ModelConfigBase,
-    BaseModelType,
-    ModelType,
-    SubModelType,
-    classproperty,
-    ModelNotFoundException,
-    InvalidModelException,
-)
+
+import torch
 
 # TODO: naming
 from ..lora import TextualInversionModel as TextualInversionModelRaw
+from .base import (
+    BaseModelType,
+    InvalidModelException,
+    ModelBase,
+    ModelConfigBase,
+    ModelNotFoundException,
+    ModelType,
+    SubModelType,
+    classproperty,
+)
 
 class TextualInversionModel(ModelBase):

@@ -8,19 +8,20 @@ import torch
 from omegaconf import OmegaConf
 
 from invokeai.app.services.config import InvokeAIAppConfig
 
 from .base import (
+    BaseModelType,
+    EmptyConfigLoader,
+    InvalidModelException,
     ModelBase,
     ModelConfigBase,
-    BaseModelType,
-    ModelType,
-    SubModelType,
-    ModelVariantType,
-    EmptyConfigLoader,
-    calc_model_size_by_fs,
-    calc_model_size_by_data,
-    classproperty,
-    InvalidModelException,
     ModelNotFoundException,
+    ModelType,
+    ModelVariantType,
+    SubModelType,
+    calc_model_size_by_data,
+    calc_model_size_by_fs,
+    classproperty,
 )

@@ -9,7 +9,7 @@ from .diffusers_pipeline import (  # noqa: F401
 from .diffusion import InvokeAIDiffuserComponent  # noqa: F401
 from .diffusion.cross_attention_map_saving import AttentionMapSaver  # noqa: F401
 from .diffusion.shared_invokeai_diffusion import (  # noqa: F401
-    PostprocessingSettings,
     BasicConditioningInfo,
+    PostprocessingSettings,
     SDXLConditioningInfo,
 )

@@ -5,20 +5,16 @@ import inspect
 from dataclasses import dataclass, field
 from typing import Any, Callable, List, Optional, Union
 
-import PIL.Image
 import einops
+import PIL.Image
 import psutil
 import torch
 import torchvision.transforms as T
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
 from diffusers.models.controlnet import ControlNetModel
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
-from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import (
-    StableDiffusionPipeline,
-)
-from diffusers.pipelines.stable_diffusion.safety_checker import (
-    StableDiffusionSafetyChecker,
-)
+from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipeline
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput
 from diffusers.utils.import_utils import is_xformers_available

@@ -4,8 +4,8 @@ Initialization file for invokeai.models.diffusion
 from .cross_attention_control import InvokeAICrossAttentionMixin  # noqa: F401
 from .cross_attention_map_saving import AttentionMapSaver  # noqa: F401
 from .shared_invokeai_diffusion import (  # noqa: F401
+    BasicConditioningInfo,
     InvokeAIDiffuserComponent,
     PostprocessingSettings,
-    BasicConditioningInfo,
     SDXLConditioningInfo,
 )

@@ -11,16 +11,12 @@ import diffusers
 import psutil
 import torch
 from compel.cross_attention_control import Arguments
+from diffusers.models.attention_processor import Attention, AttentionProcessor, AttnProcessor, SlicedAttnProcessor
 from diffusers.models.unet_2d_condition import UNet2DConditionModel
-from diffusers.models.attention_processor import AttentionProcessor
-from diffusers.models.attention_processor import (
-    Attention,
-    AttnProcessor,
-    SlicedAttnProcessor,
-)
 from torch import nn
 
 import invokeai.backend.util.logging as logger
 
 from ...util import torch_dtype

@@ -1,8 +1,8 @@
 from __future__ import annotations
 
+import math
 from contextlib import contextmanager
 from dataclasses import dataclass
-import math
 from typing import Any, Callable, Optional, Union
 
 import torch

@@ -1,18 +1,18 @@
 from diffusers import (
     DDIMScheduler,
+    DDPMScheduler,
+    DEISMultistepScheduler,
     DPMSolverMultistepScheduler,
-    KDPM2DiscreteScheduler,
-    KDPM2AncestralDiscreteScheduler,
-    EulerDiscreteScheduler,
+    DPMSolverSDEScheduler,
+    DPMSolverSinglestepScheduler,
     EulerAncestralDiscreteScheduler,
+    EulerDiscreteScheduler,
     HeunDiscreteScheduler,
+    KDPM2AncestralDiscreteScheduler,
+    KDPM2DiscreteScheduler,
     LMSDiscreteScheduler,
     PNDMScheduler,
     UniPCMultistepScheduler,
-    DPMSolverSinglestepScheduler,
-    DEISMultistepScheduler,
-    DDPMScheduler,
-    DPMSolverSDEScheduler,
 )
 
 SCHEDULER_MAP = dict(

@@ -24,13 +24,8 @@ import torch.utils.checkpoint
 import transformers
 from accelerate import Accelerator
 from accelerate.logging import get_logger
-from accelerate.utils import set_seed, ProjectConfiguration
-from diffusers import (
-    AutoencoderKL,
-    DDPMScheduler,
-    StableDiffusionPipeline,
-    UNet2DConditionModel,
-)
+from accelerate.utils import ProjectConfiguration, set_seed
+from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
 from diffusers.optimization import get_scheduler
 from diffusers.utils import check_min_version
 from diffusers.utils.import_utils import is_xformers_available

@@ -1,12 +1,13 @@
 from __future__ import annotations
 
-from contextlib import nullcontext
-from packaging import version
 import platform
+from contextlib import nullcontext
+from typing import Union
 
 import torch
+from packaging import version
 from torch import autocast
-from typing import Union
+
 from invokeai.app.services.config import InvokeAIAppConfig
 
 CPU_DEVICE = torch.device("cpu")

@@ -178,7 +178,6 @@ InvokeAI:
 import logging.handlers
 import socket
 import urllib.parse
-
 from abc import abstractmethod
 from pathlib import Path

@@ -1,11 +1,10 @@
+import base64
 import importlib
+import io
 import math
 import multiprocessing as mp
 import os
 import re
-import io
-import base64
 from collections import abc
 from inspect import isfunction
 from pathlib import Path
@@ -19,6 +18,7 @@ from PIL import Image, ImageDraw, ImageFont
 from tqdm import tqdm
 
 import invokeai.backend.util.logging as logger
+
 from .devices import torch_dtype

@@ -2,6 +2,7 @@
 # Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
 import warnings
+
 from invokeai.frontend.install.invokeai_configure import invokeai_configure as configure
 
 if __name__ == "__main__":

@@ -2,6 +2,7 @@
 """This script reads the "Invoke" Stable Diffusion prompt embedded in files generated by invoke.py"""
 import sys
+
 from PIL import Image
 
 if len(sys.argv) < 2:

@@ -2,8 +2,8 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
-import os
 import logging
+import os
 
 logging.getLogger("xformers").addFilter(lambda record: "A matching Triton is not available" not in record.getMessage())

@@ -2,8 +2,8 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
-import os
 import logging
+import os
 
 logging.getLogger("xformers").addFilter(lambda record: "A matching Triton is not available" not in record.getMessage())

@@ -5,9 +5,10 @@ This script is used at release time to generate a markdown table describing the
 starter models. This text is then manually copied into 050_INSTALL_MODELS.md.
 """
-from omegaconf import OmegaConf
 from pathlib import Path
+
+from omegaconf import OmegaConf
 
 def main():
     initial_models_file = Path(__file__).parent / "../invokeai/configs/INITIAL_MODELS.yaml"

@@ -2,6 +2,7 @@
 import argparse
 from pathlib import Path
+
 from invokeai.backend.model_management.model_probe import ModelProbe
 
 parser = argparse.ArgumentParser(description="Probe model type")

@@ -4,11 +4,11 @@
 Scan the models directory and print out a new models.yaml
 """
+import argparse
 import os
 import sys
-import argparse
 from pathlib import Path
 
 from omegaconf import OmegaConf

@@ -1,7 +1,8 @@
 #!/usr/bin/env python
-import sys
 import json
+import sys
+
 from invokeai.backend.image_util import retrieve_metadata
 
 if len(sys.argv) < 2: