InvokeAI (mirror of https://github.com/invoke-ai/InvokeAI)

Compare commits: bugfix/con...v2.3.5-rc3 (27 commits)
Commits (SHA1):
e52e7418bb, 73be58a0b5, 5a7d11bca8, 5bbf7fe34a, bfb968bbe8, 6db72f83a2, 432e526999, 830740b93b,
ff3f289342, 34abbb3589, c0eb1a9921, 2ddd0301f4, ce6629b6f5, 0f3c456d59, a45b3387c0, 2a2c86896a,
aabe79686e, 216b1c3a4a, 47883860a6, 8f17d17208, c6ecf3afc5, 0bc5dcc663, 16c97ca0cb, e24dd97b80,
5a54039dd7, 9385edb453, 66364501d5
.gitignore (vendored): 2 lines changed
@@ -233,5 +233,3 @@ installer/install.sh
-installer/update.bat
-installer/update.sh
 
 # no longer stored in source directory
 models
@@ -1 +1 @@
-__version__='2.3.5-rc1'
+__version__='2.3.5-rc3'
@@ -111,7 +111,6 @@ def install_requested_models(
     if len(external_models)>0:
         print("== INSTALLING EXTERNAL MODELS ==")
         for path_url_or_repo in external_models:
-            print(f'DEBUG: path_url_or_repo = {path_url_or_repo}')
             try:
                 model_manager.heuristic_import(
                     path_url_or_repo,
@@ -400,8 +400,15 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
     @property
     def _submodels(self) -> Sequence[torch.nn.Module]:
         module_names, _, _ = self.extract_init_dict(dict(self.config))
-        values = [getattr(self, name) for name in module_names.keys()]
-        return [m for m in values if isinstance(m, torch.nn.Module)]
+        submodels = []
+        for name in module_names.keys():
+            if hasattr(self, name):
+                value = getattr(self, name)
+            else:
+                value = getattr(self.config, name)
+            if isinstance(value, torch.nn.Module):
+                submodels.append(value)
+        return submodels
 
     def image_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int,
                               conditioning_data: ConditioningData,
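The rewritten _submodels walks the pipeline's init arguments and falls back to the config for any name that is not an attribute of the pipeline object, presumably to avoid the AttributeError the old list comprehension could hit on config-only keys under diffusers 0.15. A rough equivalent using only the public diffusers API is sketched below (assumptions: a standard Stable Diffusion pipeline; the model id and printed component set are illustrative, not part of this diff). pipe.components maps init-argument names to the objects that were passed in, so filtering for nn.Module yields the same kind of sub-model list.

import torch
from diffusers import StableDiffusionPipeline

# Sketch only: downloads the model weights on first run.
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
submodels = [m for m in pipe.components.values() if isinstance(m, torch.nn.Module)]
print(sorted(type(m).__name__ for m in submodels))
# e.g. ['AutoencoderKL', 'CLIPTextModel', 'UNet2DConditionModel', ...]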
@@ -472,7 +479,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
                 step_count=len(self.scheduler.timesteps)
         ):
 
-            yield PipelineIntermediateState(run_id=run_id, step=-1, timestep=self.scheduler.num_train_timesteps,
+            yield PipelineIntermediateState(run_id=run_id, step=-1, timestep=self.scheduler.config.num_train_timesteps,
                                             latents=latents)
 
             batch_size = latents.shape[0]
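The timestep is now read from self.scheduler.config.num_train_timesteps because diffusers 0.15 routes scheduler hyper-parameters through the config object; reading them directly off the scheduler still resolves there but is flagged as deprecated. A minimal sketch of the difference (assuming diffusers >= 0.15 and its default DDIM settings):

from diffusers import DDIMScheduler

scheduler = DDIMScheduler()                  # default config
print(scheduler.config.num_train_timesteps)  # 1000; the supported access path
# scheduler.num_train_timesteps              # direct access, emits a deprecation warning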
@@ -756,7 +763,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
     @property
     def channels(self) -> int:
         """Compatible with DiffusionWrapper"""
-        return self.unet.in_channels
+        return self.unet.config.in_channels
 
     def decode_latents(self, latents):
         # Explicit call to get the vae loaded, since `decode` isn't the forward method.
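The same config-access migration applies to models: in_channels is an architecture hyper-parameter stored on unet.config. A tiny sketch of the access pattern (the miniature UNet configuration below is illustrative, chosen only so the example instantiates quickly):

from diffusers import UNet2DConditionModel

# Miniature, randomly initialized UNet; the sizes are arbitrary for the example.
unet = UNet2DConditionModel(
    sample_size=32, in_channels=4, out_channels=4,
    block_out_channels=(32, 64), cross_attention_dim=32,
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
)
print(unet.config.in_channels)  # 4, read via .config as in the change above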
@@ -1156,7 +1156,7 @@ class ModelManager(object):
         return self.device.type == "cuda"
 
     def _diffuser_sha256(
-        self, name_or_path: Union[str, Path], chunksize=4096
+        self, name_or_path: Union[str, Path], chunksize=16777216
    ) -> Union[str, bytes]:
         path = None
         if isinstance(name_or_path, Path):
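The hashing chunk size grows from 4 KiB to 16 MiB (16777216 bytes), which sharply reduces the number of read calls when fingerprinting multi-gigabyte checkpoints. A self-contained sketch of the chunked-hashing pattern (a hypothetical helper, not InvokeAI's exact code):

import hashlib
from pathlib import Path

def sha256_of_file(path: Path, chunksize: int = 16_777_216) -> str:
    # Hash the file in fixed-size chunks so large checkpoints never have to be
    # loaded into memory at once; 16 MiB chunks mean far fewer read() calls
    # than the old 4 KiB default.
    sha = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunksize):
            sha.update(chunk)
    return sha.hexdigest()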
@@ -14,7 +14,6 @@ from torch import nn
 
 from compel.cross_attention_control import Arguments
 from diffusers.models.unet_2d_condition import UNet2DConditionModel
-from diffusers.models.cross_attention import AttnProcessor
 from ldm.invoke.devices import torch_dtype
 
 
@@ -163,7 +162,7 @@ class Context:
 
 class InvokeAICrossAttentionMixin:
     """
-    Enable InvokeAI-flavoured CrossAttention calculation, which does aggressive low-memory slicing and calls
+    Enable InvokeAI-flavoured Attention calculation, which does aggressive low-memory slicing and calls
     through both to an attention_slice_wrangler and a slicing_strategy_getter for custom attention map wrangling
     and dymamic slicing strategy selection.
     """
@@ -178,7 +177,7 @@ class InvokeAICrossAttentionMixin:
         Set custom attention calculator to be called when attention is calculated
         :param wrangler: Callback, with args (module, suggested_attention_slice, dim, offset, slice_size),
                          which returns either the suggested_attention_slice or an adjusted equivalent.
-                         `module` is the current CrossAttention module for which the callback is being invoked.
+                         `module` is the current Attention module for which the callback is being invoked.
                          `suggested_attention_slice` is the default-calculated attention slice
                          `dim` is -1 if the attenion map has not been sliced, or 0 or 1 for dimension-0 or dimension-1 slicing.
                          If `dim` is >= 0, `offset` and `slice_size` specify the slice start and length.
@@ -326,7 +325,7 @@ def setup_cross_attention_control_attention_processors(unet: UNet2DConditionModel
 
 
 def get_cross_attention_modules(model, which: CrossAttentionType) -> list[tuple[str, InvokeAICrossAttentionMixin]]:
-    from ldm.modules.attention import CrossAttention # avoid circular import
+    from ldm.modules.attention import CrossAttention # avoid circular import # TODO: rename as in diffusers?
     cross_attention_class: type = InvokeAIDiffusersCrossAttention if isinstance(model,UNet2DConditionModel) else CrossAttention
     which_attn = "attn1" if which is CrossAttentionType.SELF else "attn2"
     attention_module_tuples = [(name,module) for name, module in model.named_modules() if
@@ -432,7 +431,7 @@ def get_mem_free_total(device):
 
 
 
-class InvokeAIDiffusersCrossAttention(diffusers.models.attention.CrossAttention, InvokeAICrossAttentionMixin):
+class InvokeAIDiffusersCrossAttention(diffusers.models.attention.Attention, InvokeAICrossAttentionMixin):
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -457,8 +456,8 @@ class InvokeAIDiffusersCrossAttention(diffusers.models.attention.CrossAttention, InvokeAICrossAttentionMixin):
     """
     # base implementation
 
-    class CrossAttnProcessor:
-        def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None):
+    class AttnProcessor:
+        def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):
             batch_size, sequence_length, _ = hidden_states.shape
             attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length)
 
@@ -487,7 +486,7 @@ from dataclasses import field, dataclass
 
 import torch
 
-from diffusers.models.cross_attention import CrossAttention, CrossAttnProcessor, SlicedAttnProcessor
+from diffusers.models.attention_processor import Attention, AttnProcessor, SlicedAttnProcessor
 
 
 @dataclass
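All of the CrossAttention edits in this comparison follow the diffusers 0.15 rename: diffusers.models.cross_attention became diffusers.models.attention_processor, CrossAttention became Attention, and CrossAttnProcessor became AttnProcessor, with the old module path kept for a while as a deprecated alias. For code that has to straddle both releases, a compatibility import along these lines would work (a sketch, not something this diff adds):

try:  # diffusers >= 0.15
    from diffusers.models.attention_processor import (
        Attention,
        AttnProcessor,
        SlicedAttnProcessor,
    )
except ImportError:  # diffusers <= 0.14
    from diffusers.models.cross_attention import (
        CrossAttention as Attention,
        CrossAttnProcessor as AttnProcessor,
        SlicedAttnProcessor,
    )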
@@ -532,7 +531,7 @@ class SlicedSwapCrossAttnProcesser(SlicedAttnProcessor):
 
     # TODO: dynamically pick slice size based on memory conditions
 
-    def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None,
+    def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None,
                  # kwargs
                  swap_cross_attn_context: SwapCrossAttnContext=None):
 
@@ -5,7 +5,6 @@ from typing import Callable, Optional, Union, Any
 
 import numpy as np
 import torch
 
 from diffusers import UNet2DConditionModel
 from typing_extensions import TypeAlias
 
@@ -9,7 +9,7 @@ from safetensors.torch import load_file
 from torch.utils.hooks import RemovableHandle
 from transformers import CLIPTextModel
 
-from ..invoke.globals import global_lora_models_dir
+from ..invoke.globals import global_lora_models_dir, Globals
 from ..invoke.devices import choose_torch_device
 
 """
@@ -456,16 +456,25 @@ class LoRA:
 
 
 class KohyaLoraManager:
-    lora_path = Path(global_lora_models_dir())
-    vector_length_cache_path = lora_path / '.vectorlength.cache'
 
     def __init__(self, pipe):
+        self.vector_length_cache_path = self.lora_path / '.vectorlength.cache'
         self.unet = pipe.unet
         self.wrapper = LoRAModuleWrapper(pipe.unet, pipe.text_encoder)
         self.text_encoder = pipe.text_encoder
         self.device = torch.device(choose_torch_device())
         self.dtype = pipe.unet.dtype
 
+    @classmethod
+    @property
+    def lora_path(cls)->Path:
+        return Path(global_lora_models_dir())
+
+    @classmethod
+    @property
+    def vector_length_cache_path(cls)->Path:
+        return cls.lora_path / '.vectorlength.cache'
+
     def load_lora_module(self, name, path_file, multiplier: float = 1.0):
         print(f" | Found lora {name} at {path_file}")
         if path_file.suffix == ".safetensors":
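The old class attributes were evaluated once at import time, freezing whatever global_lora_models_dir() returned when the module was first loaded; the chained @classmethod/@property recomputes the paths on every access, so a later change to the configured LoRA directory is picked up. An illustrative sketch of the difference (hypothetical names, not InvokeAI code; note that chaining classmethod and property works on Python 3.9/3.10 but was deprecated in 3.11):

from pathlib import Path

_root = Path("/tmp/invokeai-a")

def models_dir() -> Path:        # stand-in for global_lora_models_dir()
    return _root / "loras"

class EagerPaths:
    lora_path = models_dir()     # frozen at class-definition time

class LazyPaths:
    @classmethod
    @property
    def lora_path(cls) -> Path:
        return models_dir()      # looked up on every access

_root = Path("/tmp/invokeai-b")  # e.g. the user switches the runtime root
print(EagerPaths.lora_path)      # /tmp/invokeai-a/loras (stale)
print(LazyPaths.lora_path)       # /tmp/invokeai-b/loras (current)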
@@ -34,7 +34,7 @@ dependencies = [
     "clip_anytorch",
     "compel~=1.1.0",
     "datasets",
-    "diffusers[torch]==0.14",
+    "diffusers[torch]~=0.15.0",
     "dnspython==2.2.1",
     "einops",
     "eventlet",
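The dependency pin moves from an exact ==0.14 to the compatible-release specifier ~=0.15.0, which accepts 0.15.x patch releases while excluding 0.16. A quick way to check the specifier's behaviour (uses the packaging library, which pip vendors; shown only as an illustration, not part of this diff):

from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=0.15.0")   # equivalent to >=0.15.0,<0.16
print("0.15.1" in spec)           # True
print("0.16.0" in spec)           # False
print("0.14.0" in spec)           # False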