blackify and isort

Lincoln Stein
2023-09-15 22:19:29 -04:00
parent 08952b9aa0
commit b9a90fbd28
43 changed files with 160 additions and 214 deletions
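
The hunks below are mechanical: isort regroups and alphabetizes the import blocks, and black normalizes layout and operator spacing. A minimal, hypothetical reproduction of such a pass is sketched here; the 120-character line length and the isort profile are assumptions, not this repository's confirmed configuration:

# A sketch of the formatting pass, assuming black and isort with a
# 120-character line length; the project's real settings would live
# in its pyproject.toml.
import subprocess

subprocess.run(["black", "--line-length", "120", "invokeai/"], check=True)
subprocess.run(["isort", "--profile", "black", "--line-length", "120", "invokeai/"], check=True)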


@@ -25,7 +25,6 @@ from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsSto
from ..services.model_manager_service import ModelManagerService
from ..services.processor import DefaultInvocationProcessor
from ..services.sqlite import SqliteItemStorage
from ..services.invocation_stats import InvocationStatsService
from .events import FastAPIEventService


@@ -10,13 +10,13 @@ from pydantic import BaseModel, parse_obj_as
from starlette.exceptions import HTTPException
from invokeai.backend import BaseModelType, ModelType
from invokeai.backend.model_manager import MergeInterpolationMethod
from invokeai.backend.model_manager import (
OPENAPI_MODEL_CONFIGS,
ModelConfigBase,
InvalidModelException,
UnknownModelException,
MergeInterpolationMethod,
ModelConfigBase,
SchedulerPredictionType,
UnknownModelException,
)
from ..dependencies import ApiDependencies
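
The reordered block above illustrates isort's default ordering inside a parenthesized from-import: uppercase constants sort ahead of CamelCase classes (order_by_type), and each group is alphabetized. A minimal sketch of the same transformation, reusing names from this hunk:

# before isort (order as it had accumulated):
# from invokeai.backend.model_manager import (
#     OPENAPI_MODEL_CONFIGS,
#     ModelConfigBase,
#     InvalidModelException,
#     UnknownModelException,
#     MergeInterpolationMethod,
# )

# after isort: the constant first, then the classes alphabetically
from invokeai.backend.model_manager import (
    OPENAPI_MODEL_CONFIGS,
    InvalidModelException,
    MergeInterpolationMethod,
    ModelConfigBase,
    UnknownModelException,
)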


@@ -28,7 +28,6 @@ from pydantic import BaseModel, Field, validator
from invokeai.app.invocations.primitives import ImageField, ImageOutput
from ...backend.model_manager import BaseModelType
from ..models.image import ImageCategory, ResourceOrigin
from .baseinvocation import (


@@ -34,8 +34,8 @@ from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.model_manager import BaseModelType, ModelType, SilenceWarnings
from ...backend.model_manager.lora import ModelPatcher
from ...backend.model_manager.seamless import set_seamless
from ...backend.model_manager.models import BaseModelType
from ...backend.model_manager.seamless import set_seamless
from ...backend.stable_diffusion import PipelineIntermediateState
from ...backend.stable_diffusion.diffusers_pipeline import (
ConditioningData,


@@ -178,7 +178,7 @@ class IntegerMathInvocation(BaseInvocation):
elif self.operation == "DIV":
return IntegerOutput(value=int(self.a / self.b))
elif self.operation == "EXP":
return IntegerOutput(value=self.a**self.b)
return IntegerOutput(value=self.a ** self.b)
elif self.operation == "MOD":
return IntegerOutput(value=self.a % self.b)
elif self.operation == "ABS":
@@ -252,7 +252,7 @@ class FloatMathInvocation(BaseInvocation):
elif self.operation == "DIV":
return FloatOutput(value=self.a / self.b)
elif self.operation == "EXP":
return FloatOutput(value=self.a**self.b)
return FloatOutput(value=self.a ** self.b)
elif self.operation == "SQRT":
return FloatOutput(value=np.sqrt(self.a))
elif self.operation == "ABS":
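
Both hunks above (and the matching ones near the end of this commit) show black's power-operator hugging: when each operand of ** is simple (a name, a number, or an attribute access such as self.a), black drops the surrounding spaces. A runnable illustration with hypothetical values:

a, b, t = 2, 10, 0.5

print(a**b)   # blackified from: a ** b
print(2**30)  # blackified from: 2 ** 30
# Hugging is per operator: * keeps its spaces while ** loses them.
print(6 * t**5 - 15 * t**4 + 10 * t**3)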


@@ -5,10 +5,13 @@ Model download service.
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Optional, List, Union
from typing import List, Optional, Union
from pydantic.networks import AnyHttpUrl
from invokeai.backend.model_manager.download import DownloadEventHandler, DownloadJobBase, DownloadQueue
from .events import EventServiceBase
from invokeai.backend.model_manager.download import DownloadQueue, DownloadJobBase, DownloadEventHandler
class DownloadQueueServiceBase(ABC):


@@ -5,12 +5,6 @@ from typing import Any, Optional
from invokeai.app.models.image import ProgressImage
from invokeai.app.services.model_manager_service import BaseModelType, ModelInfo, ModelType, SubModelType
from invokeai.app.util.misc import get_timestamp
from invokeai.app.services.model_manager_service import (
BaseModelType,
ModelType,
SubModelType,
ModelInfo,
)
from invokeai.backend.model_manager.download import DownloadJobBase


@@ -1,5 +1,6 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:


@@ -44,7 +44,6 @@ from ..invocations.baseinvocation import BaseInvocation
from .graph import GraphExecutionState
from .item_storage import ItemStorageABC
from .model_manager_service import ModelManagerService
from invokeai.backend.model_manager.cache import CacheStats
# size of GIG in bytes
GIG = 1073741824


@@ -5,9 +5,15 @@ from __future__ import annotations
import shutil
from abc import ABC, abstractmethod
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from pydantic import Field
from pydantic.networks import AnyHttpUrl
from invokeai.app.models.exceptions import CanceledException
from invokeai.backend.model_manager import (
BaseModelType,
DuplicateModelException,
MergeInterpolationMethod,
ModelConfigBase,
ModelInfo,
@@ -18,15 +24,9 @@ from invokeai.backend.model_manager import (
ModelType,
SubModelType,
UnknownModelException,
DuplicateModelException,
)
from invokeai.backend.model_manager.cache import CacheStats
from typing import TYPE_CHECKING, List, Optional, Union, Dict, Any
from pydantic import Field
from pydantic.networks import AnyHttpUrl
from invokeai.app.models.exceptions import CanceledException
from .config import InvokeAIAppConfig
from .events import EventServiceBase


@@ -3,10 +3,11 @@ from PIL import Image
from invokeai.app.models.exceptions import CanceledException
from invokeai.app.models.image import ProgressImage
from ..invocations.baseinvocation import InvocationContext
from ...backend.util.util import image_to_dataURL
from ...backend.stable_diffusion import PipelineIntermediateState
from ...backend.model_manager import BaseModelType
from ...backend.stable_diffusion import PipelineIntermediateState
from ...backend.util.util import image_to_dataURL
from ..invocations.baseinvocation import InvocationContext
def sample_to_lowres_estimated_image(samples, latent_rgb_factors, smooth_matrix=None):


@@ -2,15 +2,15 @@
Initialization file for invokeai.backend
"""
from .model_manager import ( # noqa F401
ModelLoader,
ModelInstall,
ModelConfigStore,
SilenceWarnings,
BaseModelType,
DuplicateModelException,
InvalidModelException,
BaseModelType,
ModelConfigStore,
ModelInstall,
ModelLoader,
ModelType,
SubModelType,
SchedulerPredictionType,
ModelVariantType,
SchedulerPredictionType,
SilenceWarnings,
SubModelType,
)


@@ -1,7 +1,6 @@
"""
Initialization file for invokeai.backend.model_manager.config
"""
from .models import read_checkpoint_meta, OPENAPI_MODEL_CONFIGS # noqa F401
from .config import ( # noqa F401
BaseModelType,
InvalidModelConfigException,
@@ -11,19 +10,20 @@ from .config import ( # noqa F401
ModelType,
ModelVariantType,
SchedulerPredictionType,
SubModelType,
SilenceWarnings,
SubModelType,
)
from .lora import ONNXModelPatcher, ModelPatcher
from .loader import ModelLoader, ModelInfo # noqa F401
from .install import ModelInstall, ModelInstallJob # noqa F401
from .probe import ModelProbe, InvalidModelException # noqa F401
from .storage import (
UnknownModelException,
from .loader import ModelInfo, ModelLoader # noqa F401
from .lora import ModelPatcher, ONNXModelPatcher
from .merge import MergeInterpolationMethod, ModelMerger
from .models import OPENAPI_MODEL_CONFIGS, read_checkpoint_meta # noqa F401
from .probe import InvalidModelException, ModelProbe # noqa F401
from .search import ModelSearch # noqa F401
from .storage import ( # noqa F401
DuplicateModelException,
ModelConfigStore,
ModelConfigStoreYAML,
ModelConfigStoreSQL,
) # noqa F401
from .search import ModelSearch # noqa F401
from .merge import MergeInterpolationMethod, ModelMerger
ModelConfigStoreYAML,
UnknownModelException,
)


@@ -20,16 +20,16 @@ Validation errors will raise an InvalidModelConfigException error.
"""
import warnings
from enum import Enum
from typing import Optional, Literal, List, Union, Type
from omegaconf.listconfig import ListConfig # to support the yaml backend
from typing import List, Literal, Optional, Type, Union
import pydantic
from pydantic import BaseModel, Field, Extra
from pydantic.error_wrappers import ValidationError
# import these so that we can silence them
from diffusers import logging as diffusers_logging
from omegaconf.listconfig import ListConfig # to support the yaml backend
from pydantic import BaseModel, Extra, Field
from pydantic.error_wrappers import ValidationError
from transformers import logging as transformers_logging


@@ -1,14 +1,13 @@
"""Initialization file for threaded download manager."""
from .base import ( # noqa F401
DownloadQueueBase,
DownloadJobStatus,
DownloadEventHandler,
UnknownJobIDException,
DownloadJobBase,
ModelSourceMetadata,
REPO_ID_RE,
HTTP_RE,
REPO_ID_RE,
DownloadEventHandler,
DownloadJobBase,
DownloadJobStatus,
DownloadQueueBase,
ModelSourceMetadata,
UnknownJobIDException,
)
from .queue import DownloadQueue # noqa F401


@@ -7,7 +7,8 @@ from abc import ABC, abstractmethod
from enum import Enum
from functools import total_ordering
from pathlib import Path
from typing import List, Optional, Callable, Union
from typing import Callable, List, Optional, Union
from pydantic import BaseModel, Field
from pydantic.networks import AnyHttpUrl


@@ -1,37 +1,36 @@
# Copyright (c) 2023, Lincoln D. Stein
"""Implementation of multithreaded download queue for invokeai."""
import re
import os
import requests
import re
import shutil
import threading
import time
import traceback
from json import JSONDecodeError
from pathlib import Path
from requests import HTTPError
from typing import Dict, Optional, Set, List, Tuple, Union
from pydantic import Field, validator, ValidationError
from pydantic.networks import AnyHttpUrl
from queue import PriorityQueue
from typing import Dict, List, Optional, Set, Tuple, Union
import requests
from huggingface_hub import HfApi, hf_hub_url
from pydantic import Field, ValidationError, validator
from pydantic.networks import AnyHttpUrl
from requests import HTTPError
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.app.services.config import InvokeAIAppConfig
from . import REPO_ID_RE, HTTP_RE
from .base import (
DownloadQueueBase,
DownloadJobStatus,
DownloadEventHandler,
UnknownJobIDException,
DownloadJobBase,
ModelSourceMetadata,
)
from invokeai.backend.util.logging import InvokeAILogger
from ..storage import DuplicateModelException
from . import HTTP_RE, REPO_ID_RE
from .base import (
DownloadEventHandler,
DownloadJobBase,
DownloadJobStatus,
DownloadQueueBase,
ModelSourceMetadata,
UnknownJobIDException,
)
# Maximum number of bytes to download during each call to requests.iter_content()
DOWNLOAD_CHUNK_SIZE = 100000
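
The constant above caps how many bytes each requests.iter_content() call yields. A minimal sketch of that pattern on its own, apart from the queue machinery (the function name and arguments are hypothetical):

import requests

DOWNLOAD_CHUNK_SIZE = 100000  # same cap as above

def fetch(url: str, dest: str) -> None:
    """Stream url into dest, at most DOWNLOAD_CHUNK_SIZE bytes per iteration."""
    with requests.get(url, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        with open(dest, "wb") as out:
            for chunk in resp.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):
                out.write(chunk)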


@@ -8,12 +8,13 @@ from invokeai.backend.model_managre.model_hash import FastModelHash
'a8e693a126ea5b831c96064dc569956f'
"""
import os
import hashlib
from imohash import hashfile
import os
from pathlib import Path
from typing import Dict, Union
from imohash import hashfile
class FastModelHash(object):
"""FastModelHash obect provides one public class method, hash()."""


@@ -53,32 +53,29 @@ import tempfile
from abc import ABC, abstractmethod
from pathlib import Path
from shutil import rmtree
from typing import Optional, List, Union, Dict, Set, Any, Callable
from typing import Any, Callable, Dict, List, Optional, Set, Union
from pydantic import Field
from pydantic.networks import AnyHttpUrl
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.util.logging import InvokeAILogger
from .search import ModelSearch
from .storage import ModelConfigStore, DuplicateModelException, get_config_store
from .config import BaseModelType, ModelFormat, ModelType, ModelVariantType, SchedulerPredictionType
from .download import (
DownloadQueueBase,
DownloadQueue,
DownloadJobBase,
ModelSourceMetadata,
DownloadEventHandler,
REPO_ID_RE,
HTTP_RE,
REPO_ID_RE,
DownloadEventHandler,
DownloadJobBase,
DownloadQueue,
DownloadQueueBase,
ModelSourceMetadata,
)
from .download.queue import DownloadJobURL, DownloadJobRepoID, DownloadJobPath
from .download.queue import DownloadJobPath, DownloadJobRepoID, DownloadJobURL
from .hash import FastModelHash
from .probe import ModelProbe, ModelProbeInfo, InvalidModelException
from .config import (
ModelType,
BaseModelType,
ModelVariantType,
ModelFormat,
SchedulerPredictionType,
)
from .probe import InvalidModelException, ModelProbe, ModelProbeInfo
from .search import ModelSearch
from .storage import DuplicateModelException, ModelConfigStore, get_config_store
class ModelInstallJob(DownloadJobBase):


@@ -5,18 +5,19 @@ import hashlib
from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import Union, Optional, List
from typing import List, Optional, Union
import torch
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.util import choose_precision, choose_torch_device, InvokeAILogger, Chdir
from .config import BaseModelType, ModelType, SubModelType, ModelConfigBase
from .install import ModelInstallBase, ModelInstall
from .storage import ModelConfigStore, get_config_store
from .cache import ModelCache, ModelLocker, CacheStats
from .models import InvalidModelException, ModelBase, MODEL_CLASSES
from invokeai.backend.util import Chdir, InvokeAILogger, choose_precision, choose_torch_device
from .cache import CacheStats, ModelCache, ModelLocker
from .config import BaseModelType, ModelConfigBase, ModelType, SubModelType
from .download import DownloadEventHandler
from .install import ModelInstall, ModelInstallBase
from .models import MODEL_CLASSES, InvalidModelException, ModelBase
from .storage import ModelConfigStore, get_config_store
@dataclass


@@ -16,7 +16,7 @@ from diffusers import logging as dlogging
import invokeai.backend.util.logging as logger
from . import ModelLoader, ModelType, BaseModelType, ModelVariantType, ModelConfigBase
from . import BaseModelType, ModelConfigBase, ModelLoader, ModelType, ModelVariantType
class MergeInterpolationMethod(str, Enum):


@@ -11,10 +11,9 @@ from .base import ( # noqa: F401
ModelConfigBase,
ModelNotFoundException,
ModelType,
SubModelType,
ModelVariantType,
SchedulerPredictionType,
InvalidModelException,
SubModelType,
read_checkpoint_meta,
)
from .controlnet import ControlNetModel # TODO:


@@ -2,33 +2,30 @@ import inspect
import json
import os
import sys
import torch
import typing
from abc import ABCMeta, abstractmethod
from contextlib import suppress
from enum import Enum
from pathlib import Path
from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
from typing import Any, Callable, Dict, Generic, List, Literal, Optional, Type, TypeVar, Union
import numpy as np
import onnx
import safetensors.torch
from diffusers import DiffusionPipeline, ConfigMixin
import torch
from diffusers import ConfigMixin, DiffusionPipeline
from onnx import numpy_helper
from onnxruntime import (
InferenceSession,
SessionOptions,
get_available_providers,
)
from onnxruntime import InferenceSession, SessionOptions, get_available_providers
from picklescan.scanner import scan_file_path
from ..config import ( # noqa F401
BaseModelType,
ModelType,
SubModelType,
ModelVariantType,
ModelFormat,
SchedulerPredictionType,
ModelConfigBase,
ModelFormat,
ModelType,
ModelVariantType,
SchedulerPredictionType,
SubModelType,
)


@@ -7,7 +7,8 @@ import torch
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from ..config import ControlNetDiffusersConfig, ControlNetCheckpointConfig
from ..config import ControlNetCheckpointConfig, ControlNetDiffusersConfig
from .base import (
BaseModelType,
EmptyConfigLoader,


@@ -2,10 +2,11 @@ import bisect
import os
from enum import Enum
from pathlib import Path
from typing import Dict, Optional, Union, Literal
from typing import Dict, Literal, Optional, Union
import torch
from safetensors.torch import load_file
from ..config import LoRAConfig
from .base import (
BaseModelType,


@@ -5,7 +5,8 @@ from typing import Literal, Optional
from omegaconf import OmegaConf
from pydantic import Field
from ..config import MainDiffusersConfig, MainCheckpointConfig
from ..config import MainCheckpointConfig, MainDiffusersConfig
from .base import (
BaseModelType,
DiffusersModel,


@@ -4,15 +4,14 @@ from enum import Enum
from pathlib import Path
from typing import Literal, Optional, Union
from diffusers import StableDiffusionInpaintPipeline, StableDiffusionPipeline
from omegaconf import OmegaConf
from pydantic import Field
from diffusers import StableDiffusionInpaintPipeline, StableDiffusionPipeline
from ..config import SilenceWarnings
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from ..config import MainCheckpointConfig, MainDiffusersConfig
from ..config import MainCheckpointConfig, MainDiffusersConfig, SilenceWarnings
from .base import (
BaseModelType,
DiffusersModel,


@@ -2,6 +2,7 @@ from enum import Enum
from typing import Literal
from diffusers import OnnxRuntimeModel
from ..config import ONNXSD1Config, ONNXSD2Config
from .base import (
BaseModelType,


@@ -1,11 +1,12 @@
import os
from typing import Optional, Literal
from typing import Literal, Optional
import torch
from ..config import ModelFormat, TextualInversionConfig
# TODO: naming
from ..lora import TextualInversionModel as TextualInversionModelRaw
from ..config import ModelFormat, TextualInversionConfig
from .base import (
BaseModelType,
InvalidModelException,


@@ -1,14 +1,15 @@
import os
from enum import Enum
from pathlib import Path
from typing import Optional, Literal
from typing import Literal, Optional
import safetensors
import torch
from omegaconf import OmegaConf
from invokeai.app.services.config import InvokeAIAppConfig
from ..config import VaeDiffusersConfig, VaeCheckpointConfig
from ..config import VaeCheckpointConfig, VaeDiffusersConfig
from .base import (
BaseModelType,
EmptyConfigLoader,


@@ -10,21 +10,14 @@ import json
from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Callable
from typing import Callable, Optional
import safetensors.torch
import torch
from picklescan.scanner import scan_file_path
import torch
import safetensors.torch
from .util import read_checkpoint_meta
from .config import (
ModelType,
BaseModelType,
ModelVariantType,
ModelFormat,
SchedulerPredictionType,
)
from .util import SilenceWarnings, lora_token_vector_length
from .config import BaseModelType, ModelFormat, ModelType, ModelVariantType, SchedulerPredictionType
from .util import SilenceWarnings, lora_token_vector_length, read_checkpoint_meta
class InvalidModelException(Exception):


@@ -22,11 +22,12 @@ Example usage:
import os
from abc import ABC, abstractmethod
from typing import Set, Optional, Callable, Union
from pathlib import Path
from typing import Callable, Optional, Set, Union
from pydantic import BaseModel, Field
from invokeai.backend.util.logging import InvokeAILogger
from pydantic import Field, BaseModel
default_logger = InvokeAILogger.getLogger()


@@ -3,9 +3,9 @@ Initialization file for invokeai.backend.model_manager.storage
"""
import pathlib
from .base import ModelConfigStore, UnknownModelException, DuplicateModelException # noqa F401
from .yaml import ModelConfigStoreYAML # noqa F401
from .base import DuplicateModelException, ModelConfigStore, UnknownModelException # noqa F401
from .sql import ModelConfigStoreSQL # noqa F401
from .yaml import ModelConfigStoreYAML # noqa F401
def get_config_store(location: pathlib.Path) -> ModelConfigStore:


@@ -4,9 +4,9 @@ Abstract base class for storing and retrieving model configuration records.
"""
from abc import ABC, abstractmethod
from typing import Union, Set, List, Optional
from typing import List, Optional, Set, Union
from ..config import ModelConfigBase, BaseModelType, ModelType
from ..config import BaseModelType, ModelConfigBase, ModelType
# should match the InvokeAI version when this is first released.
CONFIG_FILE_VERSION = "3.1.1"


@@ -40,26 +40,14 @@ Typical usage:
configs = store.search_by_name(base_model='sd-2', model_type='main')
"""
import threading
import sqlite3
import json
import sqlite3
import threading
from pathlib import Path
from typing import Union, List, Optional, Set
from typing import List, Optional, Set, Union
from ..config import (
ModelConfigBase,
ModelConfigFactory,
BaseModelType,
ModelType,
)
from .base import (
DuplicateModelException,
UnknownModelException,
ModelConfigStore,
CONFIG_FILE_VERSION,
)
from ..config import BaseModelType, ModelConfigBase, ModelConfigFactory, ModelType
from .base import CONFIG_FILE_VERSION, DuplicateModelException, ModelConfigStore, UnknownModelException
class ModelConfigStoreSQL(ModelConfigStore):


@@ -41,26 +41,16 @@
"""
import threading
import yaml
from enum import Enum
from pathlib import Path
from typing import Union, Set, List, Optional
from typing import List, Optional, Set, Union
import yaml
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
from ..config import (
ModelConfigBase,
ModelConfigFactory,
BaseModelType,
ModelType,
)
from .base import (
DuplicateModelException,
UnknownModelException,
ModelConfigStore,
CONFIG_FILE_VERSION,
)
from ..config import BaseModelType, ModelConfigBase, ModelConfigFactory, ModelType
from .base import CONFIG_FILE_VERSION, DuplicateModelException, ModelConfigStore, UnknownModelException
class ModelConfigStoreYAML(ModelConfigStore):


@@ -4,13 +4,14 @@ Various utilities used by the model manager.
"""
import json
import warnings
import torch
import safetensors
from pathlib import Path
from typing import Optional, Union
import safetensors
import torch
from diffusers import logging as diffusers_logging
from transformers import logging as transformers_logging
from picklescan.scanner import scan_file_path
from transformers import logging as transformers_logging
class SilenceWarnings(object):


@@ -261,7 +261,7 @@ class InvokeAICrossAttentionMixin:
if q.shape[1] <= 4096: # (512x512) max q.shape[1]: 4096
return self.einsum_lowest_level(q, k, v, None, None, None)
else:
slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1]))
slice_size = math.floor(2 ** 30 / (q.shape[0] * q.shape[1]))
return self.einsum_op_slice_dim1(q, k, v, slice_size)
def einsum_op_mps_v2(self, q, k, v):
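
The slice_size heuristic above divides a fixed 2**30 budget by q.shape[0] * q.shape[1], so larger attention maps get proportionally smaller slices. A standalone sketch of the same arithmetic (the function name and the sample shapes are hypothetical):

import math

def slice_budget(batch: int, q_len: int, budget: int = 2**30) -> int:
    # Reproduces the arithmetic above: divide a fixed budget by the
    # leading dimensions of q to size each dim-1 slice.
    return max(1, math.floor(budget / (batch * q_len)))

print(slice_budget(8, 4096))  # -> 32768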


@@ -215,10 +215,7 @@ class InvokeAIDiffuserComponent:
dim=0,
),
}
(
encoder_hidden_states,
encoder_attention_mask,
) = self._concat_conditionings_for_batch(
(encoder_hidden_states, encoder_attention_mask,) = self._concat_conditionings_for_batch(
conditioning_data.unconditioned_embeddings.embeds,
conditioning_data.text_embeddings.embeds,
)
@@ -280,10 +277,7 @@
wants_cross_attention_control = len(cross_attention_control_types_to_do) > 0
if wants_cross_attention_control:
(
unconditioned_next_x,
conditioned_next_x,
) = self._apply_cross_attention_controlled_conditioning(
(unconditioned_next_x, conditioned_next_x,) = self._apply_cross_attention_controlled_conditioning(
sample,
timestep,
conditioning_data,
@@ -291,10 +285,7 @@
**kwargs,
)
elif self.sequential_guidance:
(
unconditioned_next_x,
conditioned_next_x,
) = self._apply_standard_conditioning_sequentially(
(unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning_sequentially(
sample,
timestep,
conditioning_data,
@@ -302,10 +293,7 @@
)
else:
(
unconditioned_next_x,
conditioned_next_x,
) = self._apply_standard_conditioning(
(unconditioned_next_x, conditioned_next_x,) = self._apply_standard_conditioning(
sample,
timestep,
conditioning_data,
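
The four hunks above collapse multi-line tuple targets onto a single line while keeping the trailing comma, behavior consistent with a long line length and a black setup that skips the magic trailing comma; both are assumptions, since the commit does not show its configuration. A minimal sketch with a hypothetical helper:

def _split(pair):  # hypothetical stand-in for the diffuser component's helpers
    return pair[0], pair[1]

# before blackify:
# (
#     unconditioned_next_x,
#     conditioned_next_x,
# ) = _split(("u", "c"))

# after blackify (collapsed, trailing comma preserved):
(unconditioned_next_x, conditioned_next_x,) = _split(("u", "c"))
print(unconditioned_next_x, conditioned_next_x)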


@@ -470,10 +470,7 @@ class TextualInversionDataset(Dataset):
if self.center_crop:
crop = min(img.shape[0], img.shape[1])
(
h,
w,
) = (
(h, w,) = (
img.shape[0],
img.shape[1],
)


@@ -11,12 +11,5 @@ from .devices import ( # noqa: F401
normalize_device,
torch_dtype,
)
from .util import ( # noqa: F401
ask_user,
download_with_resume,
instantiate_from_config,
url_attachment_name,
Chdir,
)
from .attention import auto_detect_slice_size # noqa: F401
from .logging import InvokeAILogger # noqa: F401
from .util import Chdir, ask_user, download_with_resume, instantiate_from_config, url_attachment_name # noqa: F401


@@ -203,7 +203,7 @@ class ChunkedSlicedAttnProcessor:
if attn.upcast_attention:
out_item_size = 4
chunk_size = 2**29
chunk_size = 2 ** 29
out_size = query.shape[1] * key.shape[1] * out_item_size
chunks_count = min(query.shape[1], math.ceil((out_size - 1) / chunk_size))


@@ -207,7 +207,7 @@ def parallel_data_prefetch(
return gather_res
def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3):
def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3):
delta = (res[0] / shape[0], res[1] / shape[1])
d = (shape[0] // res[0], shape[1] // res[1])