This commit is contained in:
Ryan Dick 2023-09-15 13:18:00 -04:00
parent b57acb7353
commit 343df03a92
5 changed files with 7 additions and 31 deletions

View File

@@ -16,9 +16,7 @@ from invokeai.app.invocations.baseinvocation import (
 )
 from invokeai.app.invocations.primitives import ImageField
 from invokeai.backend.model_management.models.base import BaseModelType, ModelType
-from invokeai.backend.model_management.models.ip_adapter import (
-    get_ip_adapter_image_encoder_model_id,
-)
+from invokeai.backend.model_management.models.ip_adapter import get_ip_adapter_image_encoder_model_id


 class IPAdapterModelField(BaseModel):

View File

@@ -35,10 +35,7 @@ from invokeai.app.util.controlnet_utils import prepare_control_image
 from invokeai.app.util.step_callback import stable_diffusion_step_callback
 from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus
 from invokeai.backend.model_management.models import ModelType, SilenceWarnings
-from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
-    ConditioningData,
-    IPAdapterConditioningInfo,
-)
+from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningData, IPAdapterConditioningInfo

 from ...backend.model_management.lora import ModelPatcher
 from ...backend.model_management.models import BaseModelType
@@ -50,9 +47,7 @@ from ...backend.stable_diffusion.diffusers_pipeline import (
     StableDiffusionGeneratorPipeline,
     image_resized_to_grid_as_tensor,
 )
-from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import (
-    PostprocessingSettings,
-)
+from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings
 from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
 from ...backend.util.devices import choose_precision, choose_torch_device
 from ..models.image import ImageCategory, ResourceOrigin

View File

@@ -13,12 +13,7 @@ from pydantic import BaseModel, Field, validator
 from tqdm import tqdm

 from invokeai.app.invocations.metadata import CoreMetadata
-from invokeai.app.invocations.primitives import (
-    ConditioningField,
-    ConditioningOutput,
-    ImageField,
-    ImageOutput,
-)
+from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput, ImageField, ImageOutput
 from invokeai.app.util.step_callback import stable_diffusion_step_callback
 from invokeai.backend import BaseModelType, ModelType, SubModelType
@@ -40,13 +35,7 @@ from .baseinvocation import (
     invocation_output,
 )
 from .controlnet_image_processors import ControlField
-from .latent import (
-    SAMPLER_NAME_VALUES,
-    LatentsField,
-    LatentsOutput,
-    build_latents_output,
-    get_scheduler,
-)
+from .latent import SAMPLER_NAME_VALUES, LatentsField, LatentsOutput, build_latents_output, get_scheduler
 from .model import ClipField, ModelInfo, UNetField, VaeField

 ORT_TO_NP_TYPE = {

View File

@@ -7,9 +7,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from diffusers.models.attention_processor import AttnProcessor as DiffusersAttnProcessor
-from diffusers.models.attention_processor import (
-    AttnProcessor2_0 as DiffusersAttnProcessor2_0,
-)
+from diffusers.models.attention_processor import AttnProcessor2_0 as DiffusersAttnProcessor2_0

 # Create versions of AttnProcessor and AttnProcessor2_0 that are sub-classes of nn.Module. This is required for

View File

@@ -5,11 +5,7 @@ from typing import Literal, Optional
 import torch

-from invokeai.backend.ip_adapter.ip_adapter import (
-    IPAdapter,
-    IPAdapterPlus,
-    build_ip_adapter,
-)
+from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus, build_ip_adapter
 from invokeai.backend.model_management.models.base import (
     BaseModelType,
     InvalidModelException,