attempt to fix flake8 lint errors

This commit is contained in:
Lincoln Stein
2023-09-17 17:13:56 -04:00
parent 238d7fa0ee
commit d051c0868e
11 changed files with 7 additions and 15 deletions

View File

@ -34,7 +34,6 @@ from invokeai.app.util.step_callback import stable_diffusion_step_callback
from invokeai.backend.model_manager import BaseModelType, ModelType, SilenceWarnings
from ...backend.model_manager.lora import ModelPatcher
from ...backend.model_manager.models import BaseModelType
from ...backend.model_manager.seamless import set_seamless
from ...backend.stable_diffusion import PipelineIntermediateState
from ...backend.stable_diffusion.diffusers_pipeline import (

View File

@ -3,7 +3,7 @@
from typing import Any, Optional
from invokeai.app.models.image import ProgressImage
from invokeai.app.services.model_manager_service import BaseModelType, ModelInfo, ModelType, SubModelType
from invokeai.app.services.model_manager_service import ModelInfo, SubModelType
from invokeai.app.util.misc import get_timestamp
from invokeai.backend.model_manager.download import DownloadJobBase
from invokeai.backend.util.logging import InvokeAILogger

View File

@ -1,7 +1,7 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from logging import Logger

View File

@ -15,7 +15,7 @@ from .config import ( # noqa F401
)
from .install import ModelInstall, ModelInstallJob # noqa F401
from .loader import ModelInfo, ModelLoad # noqa F401
from .lora import ModelPatcher, ONNXModelPatcher
from .lora import ModelPatcher, ONNXModelPatcher # noqa F401
from .models import OPENAPI_MODEL_CONFIGS, read_checkpoint_meta # noqa F401
from .probe import InvalidModelException, ModelProbe, ModelProbeInfo # noqa F401
from .search import ModelSearch # noqa F401

View File

@ -226,7 +226,6 @@ class ModelLoad(ModelLoadBase):
model_path, is_submodel_override = self._get_model_path(model_config, submodel_type)
if is_submodel_override:
model_type = submodel_type
submodel_type = None
model_class = self._get_implementation(model_config.base_model, model_config.model_type)

View File

@ -18,7 +18,6 @@ import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from . import ModelConfigBase, ModelConfigStore, ModelInstall, ModelType
from .probe import ModelProbe, ModelProbeInfo
class MergeInterpolationMethod(str, Enum):

View File

@ -1,16 +1,13 @@
import json
import os
from enum import Enum
from pathlib import Path
from typing import Literal, Optional, Union
from typing import Literal
from omegaconf import OmegaConf
from pydantic import Field
from ..config import CheckpointConfig, MainCheckpointConfig, MainDiffusersConfig
from ..config import MainDiffusersConfig
from .base import (
BaseModelType,
DiffusersModel,
InvalidModelException,
ModelConfigBase,
ModelType,

View File

@ -2,7 +2,7 @@ import json
import os
from enum import Enum
from pathlib import Path
from typing import Literal, Optional, Union
from typing import Literal, Optional
from diffusers import StableDiffusionInpaintPipeline, StableDiffusionPipeline
from omegaconf import OmegaConf

View File

@ -8,7 +8,6 @@ its base type, model type, format and variant.
import json
from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import Callable, Optional

View File

@ -12,7 +12,6 @@ These will mostly be empty after conversion, but will be populated
when new models are downloaded from HuggingFace or Civitai.
"""
import argparse
import sys
from pathlib import Path
from omegaconf import OmegaConf

View File

@ -3,7 +3,7 @@ from pathlib import Path
import pytest
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend import BaseModelType, ModelConfigStore, ModelType, SubModelType
from invokeai.backend import SubModelType
from invokeai.backend.model_manager import ModelLoad
BASIC_MODEL_NAME = "sdxl-base-1-0"