Merge branch 'main' into enhance/invokeai-logs

- resolve conflicts
- remove unused code identified by pyflakes
Lincoln Stein committed 2023-04-28 10:09:46 -04:00
427 changed files with 18057 additions and 2831 deletions

View File

@@ -10,7 +10,7 @@ from .generator import (
     Img2Img,
     Inpaint
 )
-from .model_management import ModelManager
+from .model_management import ModelManager, SDModelComponent
 from .safety_checker import SafetyChecker
 from .args import Args
 from .globals import Globals
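The new SDModelComponent export points at the same feature throughout this merge: callers can now ask the model manager for an individual part of a loaded Stable Diffusion pipeline instead of the whole pipeline. A minimal sketch of the idea, assuming SDModelComponent is an enum naming the standard sub-models (the enum members and the get_sub_model helper below are illustrative, not the exact InvokeAI API):

from enum import Enum

class SDModelComponent(Enum):
    # assumed members, mirroring the usual diffusers pipeline parts
    vae = "vae"
    text_encoder = "text_encoder"
    tokenizer = "tokenizer"
    unet = "unet"
    scheduler = "scheduler"

def get_sub_model(pipeline, component: SDModelComponent):
    # hypothetical helper: pull one sub-model off a loaded pipeline by name
    return getattr(pipeline, component.value)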

View File

@@ -5,6 +5,7 @@ from .convert_ckpt_to_diffusers import (
     convert_ckpt_to_diffusers,
     load_pipeline_from_original_stable_diffusion_ckpt,
 )
-from .model_manager import ModelManager
+from .model_manager import ModelManager, SDModelComponent

View File

@@ -58,7 +58,7 @@ class HuggingFaceConceptsLibrary(object):
                 self.concept_list.extend(list(local_concepts_to_add))
                 return self.concept_list
             return self.concept_list
-        else:
+        elif Globals.internet_available is True:
             try:
                 models = self.hf_api.list_models(
                     filter=ModelFilter(model_name="sd-concepts-library/")
@@ -74,6 +74,8 @@ class HuggingFaceConceptsLibrary(object):
                     "You may load .bin and .pt file(s) manually using the --embedding_directory argument."
                 )
                 return self.concept_list
+        else:
+            return self.concept_list
 
     def get_concept_model_path(self, concept_name: str) -> str:
         """

View File

@@ -445,8 +445,15 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
     @property
     def _submodels(self) -> Sequence[torch.nn.Module]:
         module_names, _, _ = self.extract_init_dict(dict(self.config))
-        values = [getattr(self, name) for name in module_names.keys()]
-        return [m for m in values if isinstance(m, torch.nn.Module)]
+        submodels = []
+        for name in module_names.keys():
+            if hasattr(self, name):
+                value = getattr(self, name)
+            else:
+                value = getattr(self.config, name)
+            if isinstance(value, torch.nn.Module):
+                submodels.append(value)
+        return submodels
 
     def image_from_embeddings(
         self,
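The rewritten _submodels guards against init parameters that newer diffusers versions keep only in the pipeline's config rather than as attributes, where the old blind getattr(self, name) can raise AttributeError. The same fallback in isolation (SimpleNamespace stands in for a real pipeline; the attribute names are illustrative):

from types import SimpleNamespace
import torch

pipe = SimpleNamespace(
    unet=torch.nn.Linear(4, 4),  # a sub-module stored as an attribute
    config=SimpleNamespace(requires_safety_checker=True),  # config-only entry
)

for name in ("unet", "requires_safety_checker"):
    # prefer the attribute, fall back to the config entry
    value = getattr(pipe, name) if hasattr(pipe, name) else getattr(pipe.config, name)
    if isinstance(value, torch.nn.Module):
        print(f"{name} is a submodel")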
@@ -544,7 +551,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
             yield PipelineIntermediateState(
                 run_id=run_id,
                 step=-1,
-                timestep=self.scheduler.num_train_timesteps,
+                timestep=self.scheduler.config.num_train_timesteps,
                 latents=latents,
             )
@@ -915,7 +922,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
     @property
     def channels(self) -> int:
         """Compatible with DiffusionWrapper"""
-        return self.unet.in_channels
+        return self.unet.config.in_channels
 
     def decode_latents(self, latents):
         # Explicit call to get the vae loaded, since `decode` isn't the forward method.
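Both one-line changes in this file follow the same diffusers deprecation: schedulers and models are ConfigMixin subclasses, and init parameters such as num_train_timesteps or in_channels are now read through .config instead of as direct attributes. A quick illustration with a default-constructed scheduler (any diffusers scheduler behaves the same way):

from diffusers import DDIMScheduler

scheduler = DDIMScheduler()
# post-deprecation access path goes through .config
print(scheduler.config.num_train_timesteps)  # 1000 by default
# models follow suit, e.g. unet.config.in_channels rather than unet.in_channels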

View File

@@ -10,8 +10,7 @@ import diffusers
 import psutil
 import torch
 from compel.cross_attention_control import Arguments
-from diffusers.models.cross_attention import AttnProcessor
-from diffusers.models.unet_2d_condition import UNet2DConditionModel
+from diffusers.models.attention_processor import AttentionProcessor
 from torch import nn
 
 import invokeai.backend.util.logging as log
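The import churn in this file tracks the diffusers 0.15/0.16 attention refactor: CrossAttention became Attention, CrossAttnProcessor became AttnProcessor, and the classes moved from diffusers.models.cross_attention to diffusers.models.attention_processor. Projects that had to straddle both versions typically used a shim like this (illustrative; this commit simply switches to the new names):

try:
    # diffusers >= 0.15: new module and class names
    from diffusers.models.attention_processor import Attention, AttnProcessor
except ImportError:
    # older diffusers: fall back to the pre-refactor names
    from diffusers.models.cross_attention import (
        CrossAttention as Attention,
        CrossAttnProcessor as AttnProcessor,
    )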
@@ -188,7 +187,7 @@ class Context:
 class InvokeAICrossAttentionMixin:
     """
-    Enable InvokeAI-flavoured CrossAttention calculation, which does aggressive low-memory slicing and calls
+    Enable InvokeAI-flavoured Attention calculation, which does aggressive low-memory slicing and calls
     through both to an attention_slice_wrangler and a slicing_strategy_getter for custom attention map wrangling
     and dynamic slicing strategy selection.
     """
@@ -209,7 +208,7 @@ class InvokeAICrossAttentionMixin:
         Set custom attention calculator to be called when attention is calculated
         :param wrangler: Callback, with args (module, suggested_attention_slice, dim, offset, slice_size),
                          which returns either the suggested_attention_slice or an adjusted equivalent.
-            `module` is the current CrossAttention module for which the callback is being invoked.
+            `module` is the current Attention module for which the callback is being invoked.
             `suggested_attention_slice` is the default-calculated attention slice
             `dim` is -1 if the attention map has not been sliced, or 0 or 1 for dimension-0 or dimension-1 slicing.
             If `dim` is >= 0, `offset` and `slice_size` specify the slice start and length.
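Since the docstring spells out the wrangler contract completely, a toy callback is easy to write down (a minimal sketch of the documented signature; the 0.9 scaling is arbitrary):

def dampen_attention_wrangler(module, suggested_attention_slice, dim, offset, slice_size):
    # dim == -1 means we were handed the whole attention map
    if dim == -1:
        return suggested_attention_slice * 0.9
    # otherwise this is a dim-0 or dim-1 slice starting at `offset` with
    # length `slice_size`; pass it through unchanged
    return suggested_attention_slice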
@@ -345,11 +344,11 @@ class InvokeAICrossAttentionMixin:
 def restore_default_cross_attention(
     model,
     is_running_diffusers: bool,
-    restore_attention_processor: Optional[AttnProcessor] = None,
+    restore_attention_processor: Optional[AttentionProcessor] = None,
 ):
     if is_running_diffusers:
         unet = model
-        unet.set_attn_processor(restore_attention_processor or CrossAttnProcessor())
+        unet.set_attn_processor(restore_attention_processor or AttnProcessor())
     else:
         remove_attention_function(model)
@@ -408,12 +407,9 @@ def override_cross_attention(model, context: Context, is_running_diffusers=False
 def get_cross_attention_modules(
     model, which: CrossAttentionType
 ) -> list[tuple[str, InvokeAICrossAttentionMixin]]:
-    from ldm.modules.attention import CrossAttention  # avoid circular import
-
-    cross_attention_class: type = (
-        InvokeAIDiffusersCrossAttention
-        if isinstance(model, UNet2DConditionModel)
-        else CrossAttention
-    )
+    cross_attention_class: type = InvokeAIDiffusersCrossAttention
     which_attn = "attn1" if which is CrossAttentionType.SELF else "attn2"
     attention_module_tuples = [
@@ -550,7 +546,7 @@ def get_mem_free_total(device):
 
 class InvokeAIDiffusersCrossAttention(
-    diffusers.models.attention.CrossAttention, InvokeAICrossAttentionMixin
+    diffusers.models.attention.Attention, InvokeAICrossAttentionMixin
 ):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -572,8 +568,8 @@ class InvokeAIDiffusersCrossAttention(
 """
 # base implementation
 
-class CrossAttnProcessor:
-    def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None):
+class AttnProcessor:
+    def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):
         batch_size, sequence_length, _ = hidden_states.shape
         attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length)
@@ -601,9 +597,9 @@ class CrossAttnProcessor:
 from dataclasses import dataclass, field
 
 import torch
-from diffusers.models.cross_attention import (
-    CrossAttention,
-    CrossAttnProcessor,
+from diffusers.models.attention_processor import (
+    Attention,
+    AttnProcessor,
     SlicedAttnProcessor,
 )
@@ -653,7 +649,7 @@ class SlicedSwapCrossAttnProcesser(SlicedAttnProcessor):
     def __call__(
         self,
-        attn: CrossAttention,
+        attn: Attention,
         hidden_states,
         encoder_hidden_states=None,
         attention_mask=None,

View File

@@ -5,7 +5,7 @@ from typing import Any, Callable, Dict, Optional, Union
 import numpy as np
 import torch
-from diffusers.models.cross_attention import AttnProcessor
+from diffusers.models.attention_processor import AttentionProcessor
 from typing_extensions import TypeAlias
 
 import invokeai.backend.util.logging as log
@@ -102,7 +102,7 @@ class InvokeAIDiffuserComponent:
     def override_cross_attention(
         self, conditioning: ExtraConditioningInfo, step_count: int
-    ) -> Dict[str, AttnProcessor]:
+    ) -> Dict[str, AttentionProcessor]:
         """
         setup cross attention .swap control. for diffusers this replaces the attention processor, so
         the previous attention processor is returned so that the caller can restore it later.
@@ -119,7 +119,7 @@ class InvokeAIDiffuserComponent:
         )
 
     def restore_default_cross_attention(
-        self, restore_attention_processor: Optional["AttnProcessor"] = None
+        self, restore_attention_processor: Optional["AttentionProcessor"] = None
     ):
         self.conditioning = None
         self.cross_attention_control_context = None
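override_cross_attention returns the processors it displaces precisely so that restore_default_cross_attention (or the caller) can undo the swap afterwards. A sketch of that save-and-restore discipline against a plain diffusers UNet (unet stands for any loaded UNet2DConditionModel; run_generation is a hypothetical placeholder):

from diffusers.models.attention_processor import AttnProcessor

# remember what is currently installed (a dict of module path -> processor)
saved_processors = unet.attn_processors

try:
    # swap in a replacement for the duration of one generation
    unet.set_attn_processor(AttnProcessor())  # stand-in for a custom processor
    run_generation()
finally:
    # restore the originals no matter what happened above
    unet.set_attn_processor(saved_processors)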