isort wip 2

Martin Kristiansen 2023-08-18 11:13:28 -04:00 committed by Kent Keirsey
parent 5615c31799
commit caea6d11c6
35 changed files with 168 additions and 180 deletions
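
Every hunk in this commit is a mechanical import re-ordering of the kind isort produces: imports are regrouped into standard-library, third-party, and first-party blocks, each block is sorted alphabetically (straight `import x` statements ahead of `from x import y` within a block), and parenthesized from-imports are collapsed or re-sorted. As a minimal sketch, the snippet below reproduces the invisible-watermark hunk with isort's Python API; the `profile` and `known_first_party` values are illustrative assumptions, not the project's confirmed configuration.

# A minimal sketch, assuming isort >= 5; the config kwargs are assumptions
# for illustration and may differ from the project's real settings.
import isort

messy = (
    "import numpy as np\n"
    "import cv2\n"
    "from PIL import Image\n"
    "from imwatermark import WatermarkEncoder\n"
    "from invokeai.app.services.config import InvokeAIAppConfig\n"
    "import invokeai.backend.util.logging as logger\n"
)

# isort regroups imports (stdlib, third-party, first-party) and sorts each
# group alphabetically, which yields exactly this kind of reshuffling.
print(
    isort.code(
        messy,
        profile="black",                 # assumption: black-compatible profile
        known_first_party=["invokeai"],  # treat invokeai.* as first-party
    )
)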

View File

@@ -5,6 +5,7 @@ InvokeAI Installer
 import argparse
 import os
 from pathlib import Path
+
 from installer import Installer

 if __name__ == "__main__":

View File

@@ -1,5 +1,5 @@
 """
 Initialization file for invokeai.backend
 """
-from .model_management import ModelManager, ModelCache, BaseModelType, ModelType, SubModelType, ModelInfo  # noqa: F401
+from .model_management import BaseModelType, ModelCache, ModelInfo, ModelManager, ModelType, SubModelType  # noqa: F401
 from .model_management.models import SilenceWarnings  # noqa: F401

View File

@@ -3,12 +3,13 @@ This module defines a singleton object, "invisible_watermark" that
 wraps the invisible watermark model. It respects the global "invisible_watermark"
 configuration variable, that allows the watermarking to be supressed.
 """
-import numpy as np
 import cv2
-from PIL import Image
+import numpy as np
 from imwatermark import WatermarkEncoder
-from invokeai.app.services.config import InvokeAIAppConfig
+from PIL import Image
+
 import invokeai.backend.util.logging as logger
+from invokeai.app.services.config import InvokeAIAppConfig

 config = InvokeAIAppConfig.get_config()

View File

@@ -5,6 +5,7 @@ wraps the actual patchmatch object. It respects the global
 be suppressed or deferred
 """
 import numpy as np
+
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig

View File

@@ -5,10 +5,11 @@ configuration variable, that allows the checker to be supressed.
 """
 import numpy as np
 from PIL import Image
-from invokeai.backend import SilenceWarnings
-from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.util.devices import choose_torch_device
+
 import invokeai.backend.util.logging as logger
+from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.backend import SilenceWarnings
+from invokeai.backend.util.devices import choose_torch_device

 config = InvokeAIAppConfig.get_config()

View File

@@ -2,9 +2,8 @@
 Check that the invokeai_root is correctly configured and exit if not.
 """
 import sys
-from invokeai.app.services.config import (
-    InvokeAIAppConfig,
-)
+
+from invokeai.app.services.config import InvokeAIAppConfig

 def check_invokeai_root(config: InvokeAIAppConfig):

View File

@@ -6,16 +6,13 @@
 #
 # Coauthor: Kevin Turner http://github.com/keturn
 #
-import sys
 import argparse
 import io
 import os
-import psutil
 import shutil
+import sys
 import textwrap
-import torch
 import traceback
-import yaml
 import warnings
 from argparse import Namespace
 from enum import Enum
@@ -25,26 +22,25 @@ from typing import get_type_hints, get_args, Any
 from urllib import request

 import npyscreen
-import transformers
 import omegaconf
+import psutil
+import torch
+import transformers
+import yaml
 from diffusers import AutoencoderKL
 from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from huggingface_hub import HfFolder
 from huggingface_hub import login as hf_hub_login
 from omegaconf import OmegaConf
+from pydantic.error_wrappers import ValidationError
 from tqdm import tqdm
-from transformers import (
-    CLIPTextModel,
-    CLIPTextConfig,
-    CLIPTokenizer,
-    AutoFeatureExtractor,
-    BertTokenizerFast,
-)
-import invokeai.configs as configs
-from invokeai.app.services.config import (
-    InvokeAIAppConfig,
-)
+from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextConfig, CLIPTextModel, CLIPTokenizer
+
+import invokeai.configs as configs
+from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.backend.install.legacy_arg_parsing import legacy_parser
+from invokeai.backend.install.model_install_backend import InstallSelections, ModelInstall, hf_download_from_pretrained
+from invokeai.backend.model_management.model_probe import BaseModelType, ModelType
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.frontend.install.model_install import addModelsForm, process_and_execute
@@ -58,16 +54,12 @@ from invokeai.frontend.install.widgets import (
     CyclingForm,
     MIN_COLS,
     MIN_LINES,
-    CenteredButtonPress,
-    CyclingForm,
-    FileBox,
     WindowTooSmallException,
-    set_min_terminal_size,
 )
-from invokeai.backend.install.legacy_arg_parsing import legacy_parser
-from invokeai.backend.install.model_install_backend import (
-    hf_download_from_pretrained,
-    InstallSelections,
-    ModelInstall,
-)
-from invokeai.backend.model_management.model_probe import ModelType, BaseModelType
-from pydantic.error_wrappers import ValidationError

 warnings.filterwarnings("ignore")
 transformers.logging.set_verbosity_error()

View File

@@ -3,33 +3,26 @@ Migrate the models directory and models.yaml file from an existing
 InvokeAI 2.3 installation to 3.0.0.
 """
-import os
 import argparse
+import os
 import shutil
-import yaml
-import transformers
-import diffusers
 import warnings
 from dataclasses import dataclass
 from pathlib import Path
-from omegaconf import OmegaConf, DictConfig
 from typing import Union
-from diffusers import StableDiffusionPipeline, AutoencoderKL
+
+import diffusers
+import transformers
+import yaml
+from diffusers import AutoencoderKL, StableDiffusionPipeline
 from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
-from transformers import (
-    CLIPTextModel,
-    CLIPTokenizer,
-    AutoFeatureExtractor,
-    BertTokenizerFast,
-)
+from omegaconf import DictConfig, OmegaConf
+from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextModel, CLIPTokenizer
+
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.backend.model_management import ModelManager
-from invokeai.backend.model_management.model_probe import ModelProbe, ModelType, BaseModelType, ModelProbeInfo
+from invokeai.backend.model_management.model_probe import BaseModelType, ModelProbe, ModelProbeInfo, ModelType

 warnings.filterwarnings("ignore")
 transformers.logging.set_verbosity_error()

View File

@@ -7,23 +7,23 @@ import warnings
 from dataclasses import dataclass, field
 from pathlib import Path
 from tempfile import TemporaryDirectory
-from typing import Optional, List, Dict, Callable, Union, Set
+from typing import Callable, Dict, List, Optional, Set, Union

 import requests
+import torch
 from diffusers import DiffusionPipeline
 from diffusers import logging as dlogging
-import torch
-from huggingface_hub import hf_hub_url, HfFolder, HfApi
+from huggingface_hub import HfApi, HfFolder, hf_hub_url
 from omegaconf import OmegaConf
 from tqdm import tqdm

 import invokeai.configs as configs
 from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.model_management import ModelManager, ModelType, BaseModelType, ModelVariantType, AddModelResult
-from invokeai.backend.model_management.model_probe import ModelProbe, SchedulerPredictionType, ModelProbeInfo
+from invokeai.backend.model_management import AddModelResult, BaseModelType, ModelManager, ModelType, ModelVariantType
+from invokeai.backend.model_management.model_probe import ModelProbe, ModelProbeInfo, SchedulerPredictionType
 from invokeai.backend.util import download_with_resume
-from invokeai.backend.util.devices import torch_dtype, choose_torch_device
+from invokeai.backend.util.devices import choose_torch_device, torch_dtype

 from ..util.logging import InvokeAILogger

 warnings.filterwarnings("ignore")

View File

@@ -1,29 +1,30 @@
 import inspect
 from enum import Enum
-from pydantic import BaseModel
 from typing import Literal, get_origin

+from pydantic import BaseModel
+
 from .base import (  # noqa: F401
     BaseModelType,
-    ModelType,
-    SubModelType,
+    DuplicateModelException,
+    InvalidModelException,
     ModelBase,
     ModelConfigBase,
+    ModelError,
+    ModelNotFoundException,
+    ModelType,
     ModelVariantType,
     SchedulerPredictionType,
-    ModelError,
     SilenceWarnings,
-    ModelNotFoundException,
-    InvalidModelException,
-    DuplicateModelException,
+    SubModelType,
 )
-from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
-from .sdxl import StableDiffusionXLModel
-from .vae import VaeModel
-from .lora import LoRAModel
 from .controlnet import ControlNetModel  # TODO:
-from .textual_inversion import TextualInversionModel
+from .lora import LoRAModel
+from .sdxl import StableDiffusionXLModel
+from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
 from .stable_diffusion_onnx import ONNXStableDiffusion1Model, ONNXStableDiffusion2Model
+from .textual_inversion import TextualInversionModel
+from .vae import VaeModel

 MODEL_CLASSES = {
     BaseModelType.StableDiffusion1: {

View File

@@ -1,29 +1,25 @@
+import inspect
 import json
 import os
 import sys
 import typing
-import inspect
 import warnings
 from abc import ABCMeta, abstractmethod
 from contextlib import suppress
 from enum import Enum
 from pathlib import Path
-from picklescan.scanner import scan_file_path
-import torch
+from typing import Any, Callable, Dict, Generic, List, Literal, Optional, Type, TypeVar, Union
+
 import numpy as np
 import onnx
 import safetensors.torch
-from diffusers import DiffusionPipeline, ConfigMixin
-from onnx import numpy_helper
-from onnxruntime import (
-    InferenceSession,
-    SessionOptions,
-    get_available_providers,
-)
-from pydantic import BaseModel, Field
-from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
+import torch
+from diffusers import ConfigMixin, DiffusionPipeline
 from diffusers import logging as diffusers_logging
+from onnx import numpy_helper
+from onnxruntime import InferenceSession, SessionOptions, get_available_providers
+from picklescan.scanner import scan_file_path
+from pydantic import BaseModel, Field
 from transformers import logging as transformers_logging

View File

@@ -1,23 +1,26 @@
 import os
-import torch
 from enum import Enum
 from pathlib import Path
-from typing import Optional, Literal
+from typing import Literal, Optional
+
+import torch
+
+import invokeai.backend.util.logging as logger
+from invokeai.app.services.config import InvokeAIAppConfig

 from .base import (
+    BaseModelType,
+    EmptyConfigLoader,
+    InvalidModelException,
     ModelBase,
     ModelConfigBase,
-    BaseModelType,
+    ModelNotFoundException,
     ModelType,
     SubModelType,
-    EmptyConfigLoader,
-    calc_model_size_by_fs,
     calc_model_size_by_data,
+    calc_model_size_by_fs,
     classproperty,
-    InvalidModelException,
-    ModelNotFoundException,
 )
-from invokeai.app.services.config import InvokeAIAppConfig
-import invokeai.backend.util.logging as logger

 class ControlNetModelFormat(str, Enum):

View File

@@ -1,19 +1,21 @@
-import os
 import json
+import os
 from enum import Enum
-from pydantic import Field
 from typing import Literal, Optional

+from omegaconf import OmegaConf
+from pydantic import Field
+
 from .base import (
-    ModelConfigBase,
     BaseModelType,
+    DiffusersModel,
+    InvalidModelException,
+    ModelConfigBase,
     ModelType,
     ModelVariantType,
-    DiffusersModel,
-    read_checkpoint_meta,
     classproperty,
-    InvalidModelException,
+    read_checkpoint_meta,
 )
-from omegaconf import OmegaConf

 class StableDiffusionXLModelFormat(str, Enum):

View File

@@ -1,26 +1,29 @@
-import os
 import json
+import os
 from enum import Enum
-from pydantic import Field
 from pathlib import Path
 from typing import Literal, Optional, Union

 from diffusers import StableDiffusionInpaintPipeline, StableDiffusionPipeline
-from .base import (
-    ModelConfigBase,
-    BaseModelType,
-    ModelType,
-    ModelVariantType,
-    DiffusersModel,
-    SilenceWarnings,
-    read_checkpoint_meta,
-    classproperty,
-    InvalidModelException,
-    ModelNotFoundException,
-)
-from .sdxl import StableDiffusionXLModel
+from omegaconf import OmegaConf
+from pydantic import Field
+
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig
-from omegaconf import OmegaConf
+
+from .base import (
+    BaseModelType,
+    DiffusersModel,
+    InvalidModelException,
+    ModelConfigBase,
+    ModelNotFoundException,
+    ModelType,
+    ModelVariantType,
+    SilenceWarnings,
+    classproperty,
+    read_checkpoint_meta,
+)
+from .sdxl import StableDiffusionXLModel

 class StableDiffusion1ModelFormat(str, Enum):
@@ -272,8 +275,8 @@ def _convert_ckpt_and_cache(
    return output_path

    # to avoid circular import errors
-   from ..convert_ckpt_to_diffusers import convert_ckpt_to_diffusers
    from ...util.devices import choose_torch_device, torch_dtype
+   from ..convert_ckpt_to_diffusers import convert_ckpt_to_diffusers

    model_base_to_model_type = {
        BaseModelType.StableDiffusion1: "FrozenCLIPEmbedder",

View File

@@ -2,15 +2,16 @@ from enum import Enum
 from typing import Literal

 from diffusers import OnnxRuntimeModel

 from .base import (
-    ModelConfigBase,
     BaseModelType,
+    DiffusersModel,
+    IAIOnnxRuntimeModel,
+    ModelConfigBase,
     ModelType,
     ModelVariantType,
-    DiffusersModel,
     SchedulerPredictionType,
     classproperty,
-    IAIOnnxRuntimeModel,
 )

View File

@@ -1,19 +1,20 @@
 import os
-import torch
 from typing import Optional
-from .base import (
-    ModelBase,
-    ModelConfigBase,
-    BaseModelType,
-    ModelType,
-    SubModelType,
-    classproperty,
-    ModelNotFoundException,
-    InvalidModelException,
-)
+
+import torch

 # TODO: naming
 from ..lora import TextualInversionModel as TextualInversionModelRaw
+from .base import (
+    BaseModelType,
+    InvalidModelException,
+    ModelBase,
+    ModelConfigBase,
+    ModelNotFoundException,
+    ModelType,
+    SubModelType,
+    classproperty,
+)

 class TextualInversionModel(ModelBase):

View File

@@ -8,19 +8,20 @@ import torch
 from omegaconf import OmegaConf

 from invokeai.app.services.config import InvokeAIAppConfig

 from .base import (
+    BaseModelType,
+    EmptyConfigLoader,
+    InvalidModelException,
     ModelBase,
     ModelConfigBase,
-    BaseModelType,
-    ModelType,
-    SubModelType,
-    ModelVariantType,
-    EmptyConfigLoader,
-    calc_model_size_by_fs,
-    calc_model_size_by_data,
-    classproperty,
-    InvalidModelException,
     ModelNotFoundException,
+    ModelType,
+    ModelVariantType,
+    SubModelType,
+    calc_model_size_by_data,
+    calc_model_size_by_fs,
+    classproperty,
 )

View File

@@ -9,7 +9,7 @@ from .diffusers_pipeline import (  # noqa: F401
 from .diffusion import InvokeAIDiffuserComponent  # noqa: F401
 from .diffusion.cross_attention_map_saving import AttentionMapSaver  # noqa: F401
 from .diffusion.shared_invokeai_diffusion import (  # noqa: F401
-    PostprocessingSettings,
     BasicConditioningInfo,
+    PostprocessingSettings,
     SDXLConditioningInfo,
 )

View File

@@ -5,20 +5,16 @@ import inspect
 from dataclasses import dataclass, field
 from typing import Any, Callable, List, Optional, Union

-import PIL.Image
 import einops
+import PIL.Image
 import psutil
 import torch
 import torchvision.transforms as T
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
 from diffusers.models.controlnet import ControlNetModel
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
-from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import (
-    StableDiffusionPipeline,
-)
-from diffusers.pipelines.stable_diffusion.safety_checker import (
-    StableDiffusionSafetyChecker,
-)
+from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipeline
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput
 from diffusers.utils.import_utils import is_xformers_available

View File

@@ -4,8 +4,8 @@ Initialization file for invokeai.models.diffusion
 from .cross_attention_control import InvokeAICrossAttentionMixin  # noqa: F401
 from .cross_attention_map_saving import AttentionMapSaver  # noqa: F401
 from .shared_invokeai_diffusion import (  # noqa: F401
+    BasicConditioningInfo,
     InvokeAIDiffuserComponent,
     PostprocessingSettings,
-    BasicConditioningInfo,
     SDXLConditioningInfo,
 )

View File

@@ -11,16 +11,12 @@ import diffusers
 import psutil
 import torch
 from compel.cross_attention_control import Arguments
+from diffusers.models.attention_processor import Attention, AttentionProcessor, AttnProcessor, SlicedAttnProcessor
 from diffusers.models.unet_2d_condition import UNet2DConditionModel
-from diffusers.models.attention_processor import AttentionProcessor
-from diffusers.models.attention_processor import (
-    Attention,
-    AttnProcessor,
-    SlicedAttnProcessor,
-)
 from torch import nn

 import invokeai.backend.util.logging as logger

 from ...util import torch_dtype

View File

@@ -1,8 +1,8 @@
 from __future__ import annotations

+import math
 from contextlib import contextmanager
 from dataclasses import dataclass
-import math
 from typing import Any, Callable, Optional, Union

 import torch

View File

@@ -1,18 +1,18 @@
 from diffusers import (
     DDIMScheduler,
+    DDPMScheduler,
+    DEISMultistepScheduler,
     DPMSolverMultistepScheduler,
-    KDPM2DiscreteScheduler,
-    KDPM2AncestralDiscreteScheduler,
-    EulerDiscreteScheduler,
+    DPMSolverSDEScheduler,
+    DPMSolverSinglestepScheduler,
     EulerAncestralDiscreteScheduler,
+    EulerDiscreteScheduler,
     HeunDiscreteScheduler,
+    KDPM2AncestralDiscreteScheduler,
+    KDPM2DiscreteScheduler,
     LMSDiscreteScheduler,
     PNDMScheduler,
     UniPCMultistepScheduler,
-    DPMSolverSinglestepScheduler,
-    DEISMultistepScheduler,
-    DDPMScheduler,
-    DPMSolverSDEScheduler,
 )

 SCHEDULER_MAP = dict(

View File

@@ -24,13 +24,8 @@ import torch.utils.checkpoint
 import transformers
 from accelerate import Accelerator
 from accelerate.logging import get_logger
-from accelerate.utils import set_seed, ProjectConfiguration
-from diffusers import (
-    AutoencoderKL,
-    DDPMScheduler,
-    StableDiffusionPipeline,
-    UNet2DConditionModel,
-)
+from accelerate.utils import ProjectConfiguration, set_seed
+from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
 from diffusers.optimization import get_scheduler
 from diffusers.utils import check_min_version
 from diffusers.utils.import_utils import is_xformers_available

View File

@@ -1,12 +1,13 @@
 from __future__ import annotations

-from contextlib import nullcontext
-from packaging import version
 import platform
+from contextlib import nullcontext
+from typing import Union

 import torch
+from packaging import version
 from torch import autocast
-from typing import Union

 from invokeai.app.services.config import InvokeAIAppConfig

 CPU_DEVICE = torch.device("cpu")

View File

@@ -178,7 +178,6 @@ InvokeAI:
 import logging.handlers
 import socket
 import urllib.parse
-
 from abc import abstractmethod
 from pathlib import Path

View File

@@ -1,11 +1,10 @@
+import base64
 import importlib
+import io
 import math
 import multiprocessing as mp
 import os
 import re
-import io
-import base64
 from collections import abc
 from inspect import isfunction
 from pathlib import Path
@@ -19,6 +18,7 @@ from PIL import Image, ImageDraw, ImageFont
 from tqdm import tqdm
+
 import invokeai.backend.util.logging as logger

 from .devices import torch_dtype

View File

@@ -2,6 +2,7 @@
 # Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
 import warnings
+
 from invokeai.frontend.install.invokeai_configure import invokeai_configure as configure

 if __name__ == "__main__":

View File

@@ -2,6 +2,7 @@
 """This script reads the "Invoke" Stable Diffusion prompt embedded in files generated by invoke.py"""
 import sys
+
 from PIL import Image

 if len(sys.argv) < 2:

View File

@@ -2,8 +2,8 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

-import os
 import logging
+import os

 logging.getLogger("xformers").addFilter(lambda record: "A matching Triton is not available" not in record.getMessage())

View File

@@ -2,8 +2,8 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

-import os
 import logging
+import os

 logging.getLogger("xformers").addFilter(lambda record: "A matching Triton is not available" not in record.getMessage())

View File

@@ -5,9 +5,10 @@ This script is used at release time to generate a markdown table describing the
 starter models. This text is then manually copied into 050_INSTALL_MODELS.md.
 """
-from omegaconf import OmegaConf
 from pathlib import Path
+
+from omegaconf import OmegaConf

 def main():
     initial_models_file = Path(__file__).parent / "../invokeai/configs/INITIAL_MODELS.yaml"

View File

@@ -2,6 +2,7 @@
 import argparse
 from pathlib import Path
+
 from invokeai.backend.model_management.model_probe import ModelProbe

 parser = argparse.ArgumentParser(description="Probe model type")

View File

@@ -4,11 +4,11 @@
 Scan the models directory and print out a new models.yaml
 """
+import argparse
 import os
 import sys
-import argparse
 from pathlib import Path

 from omegaconf import OmegaConf

View File

@@ -1,7 +1,8 @@
 #!/usr/bin/env python

-import sys
 import json
+import sys
+
 from invokeai.backend.image_util import retrieve_metadata

 if len(sys.argv) < 2:
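
Re-orderings like these can be kept stable in CI with isort's check mode. A short sketch using the isort >= 5 Python API (the project's actual CI wiring is not shown in this commit):

import isort

# check_code() returns True when imports are already sorted; with
# show_diff=True it prints the diff isort would apply when they are not.
assert isort.check_code("import argparse\nimport os\n", show_diff=True)
assert not isort.check_code("import os\nimport argparse\n", show_diff=True)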