Merge remote-tracking branch 'origin/main' into api/add-trigger-string-retrieval

# Conflicts:
#	ldm/generate.py
Kevin Turner
2023-02-18 17:44:59 -08:00
424 changed files with 209144 additions and 15732 deletions

View File

@@ -863,6 +863,7 @@ def optimize_model(model_name_or_path: Union[Path,str], gen, opt, completer):
     model_name_or_path = model_name_or_path.replace('\\','/') # windows
     manager = gen.model_manager
     ckpt_path = None
+    original_config_file=None
     if model_name_or_path == gen.model_name:
         print("** Can't convert the active model. !switch to another model first. **")

View File

@@ -2,3 +2,12 @@ from ._version import __version__
 __app_id__= 'invoke-ai/InvokeAI'
 __app_name__= 'InvokeAI'
+def _ignore_xformers_triton_message_on_windows():
+    import logging
+    logging.getLogger("xformers").addFilter(
+        lambda record: 'A matching Triton is not available' not in record.getMessage())
+# In order to be effective, this needs to happen before anything could possibly import xformers.
+_ignore_xformers_triton_message_on_windows()
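For context, a minimal self-contained sketch of the mechanism the new _ignore_xformers_triton_message_on_windows() helper relies on: a filter callable attached with addFilter() drops any record whose message contains the Triton warning. The logger name and message mirror the diff; the demo setup around them is illustrative only.

import logging

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger("xformers")
# A filter that returns False drops the record before it reaches any handler.
log.addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())

log.warning("A matching Triton is not available, some optimizations will not be enabled.")  # suppressed
log.warning("some other xformers warning")  # still printed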

View File

@@ -53,6 +53,7 @@ from diffusers import (
 )
 from diffusers.pipelines.latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel
 from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder, PaintByExamplePipeline
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from diffusers.utils import is_safetensors_available
 from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig
@@ -984,6 +985,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     elif model_type in ['FrozenCLIPEmbedder','WeightedFrozenCLIPEmbedder']:
         text_model = convert_ldm_clip_checkpoint(checkpoint)
         tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14",cache_dir=cache_dir)
+        safety_checker = StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker',cache_dir=global_cache_dir("hub"))
         feature_extractor = AutoFeatureExtractor.from_pretrained("CompVis/stable-diffusion-safety-checker",cache_dir=cache_dir)
         pipe = pipeline_class(
             vae=vae,
@@ -991,7 +993,7 @@
             tokenizer=tokenizer,
             unet=unet,
             scheduler=scheduler,
-            safety_checker=None,
+            safety_checker=safety_checker,
             feature_extractor=feature_extractor,
         )
     else:

View File

@@ -93,7 +93,7 @@ def _get_conditioning_for_prompt(parsed_prompt: Union[Blend, FlattenedPrompt], p
     Process prompt structure and tokens, and return (conditioning, unconditioning, extra_conditioning_info)
     """
-    if log_tokens or Globals.log_tokenization:
+    if log_tokens or getattr(Globals, "log_tokenization", False):
         print(f"\n>> [TOKENLOG] Parsed Prompt: {parsed_prompt}")
         print(f"\n>> [TOKENLOG] Parsed Negative Prompt: {parsed_negative_prompt}")
@@ -236,7 +236,7 @@ def _get_embeddings_and_tokens_for_prompt(model, flattened_prompt: FlattenedProm
     fragments = [x.text for x in flattened_prompt.children]
     weights = [x.weight for x in flattened_prompt.children]
     embeddings, tokens = model.get_learned_conditioning([fragments], return_tokens=True, fragment_weights=[weights])
-    if log_tokens or Globals.log_tokenization:
+    if log_tokens or getattr(Globals, "log_tokenization", False):
         text = " ".join(fragments)
         log_tokenization(text, model, display_label=log_display_label)
@@ -296,4 +296,4 @@ def log_tokenization(text, model, display_label=None):
     if discarded != "":
         print(f'\n>> [TOKENLOG] Tokens Discarded ({totalTokens - usedTokens}):')
-        print(f'{discarded}\x1b[0m')
+        print(f'{discarded}\x1b[0m')
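The getattr(Globals, "log_tokenization", False) form above is a defensive read: it falls back to False instead of raising AttributeError when the flag has not been defined on Globals yet. A tiny illustrative sketch (the Namespace stand-in is hypothetical, not the real ldm.invoke.globals.Globals):

from argparse import Namespace

Globals = Namespace()  # stand-in object without a log_tokenization attribute
log_tokens = False

if log_tokens or getattr(Globals, "log_tokenization", False):
    print("token logging enabled")
else:
    print("token logging disabled")  # printed: the missing attribute reads as False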

View File

@ -40,7 +40,6 @@ from ldm.invoke.globals import Globals, global_cache_dir, global_config_dir
from ldm.invoke.readline import generic_completer
warnings.filterwarnings("ignore")
import torch
transformers.logging.set_verbosity_error()
@@ -764,7 +763,7 @@ def download_weights(opt: dict) -> Union[str, None]:
     precision = (
         "float32"
         if opt.full_precision
-        else choose_precision(torch.device(choose_torch_device()))
+        else choose_precision(choose_torch_device())
     )
     if opt.yes_to_all:

View File

@@ -1,19 +1,25 @@
+from __future__ import annotations
+from contextlib import nullcontext
 import torch
 from torch import autocast
-from contextlib import nullcontext
 from ldm.invoke.globals import Globals
-def choose_torch_device() -> str:
+CPU_DEVICE = torch.device("cpu")
+def choose_torch_device() -> torch.device:
     '''Convenience routine for guessing which GPU device to run model on'''
     if Globals.always_use_cpu:
-        return "cpu"
+        return CPU_DEVICE
     if torch.cuda.is_available():
-        return 'cuda'
+        return torch.device('cuda')
     if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
-        return 'mps'
-    return 'cpu'
+        return torch.device('mps')
+    return CPU_DEVICE
-def choose_precision(device) -> str:
+def choose_precision(device: torch.device) -> str:
     '''Returns an appropriate precision for the given torch device'''
     if device.type == 'cuda':
         device_name = torch.cuda.get_device_name(device)
@@ -21,7 +27,7 @@ def choose_precision(device: torch.device) -> str:
             return 'float16'
     return 'float32'
-def torch_dtype(device) -> torch.dtype:
+def torch_dtype(device: torch.device) -> torch.dtype:
     if Globals.full_precision:
         return torch.float32
     if choose_precision(device) == 'float16':
@@ -36,3 +42,13 @@ def choose_autocast(precision):
     if precision == 'autocast' or precision == 'float16':
         return autocast
     return nullcontext
+def normalize_device(device: str | torch.device) -> torch.device:
+    """Ensure device has a device index defined, if appropriate."""
+    device = torch.device(device)
+    if device.index is None:
+        # cuda might be the only torch backend that currently uses the device index?
+        # I don't see anything like `current_device` for cpu or mps.
+        if device.type == 'cuda':
+            device = torch.device(device.type, torch.cuda.current_device())
+    return device
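A short usage sketch of the reworked helpers, assuming they are imported from ldm.invoke.devices as the other files in this commit do; the printed values depend on the host hardware:

from ldm.invoke.devices import (CPU_DEVICE, choose_precision, choose_torch_device,
                                normalize_device, torch_dtype)

device = choose_torch_device()        # now a torch.device, no longer a plain string
precision = choose_precision(device)  # 'float16' on most CUDA GPUs, otherwise 'float32'
dtype = torch_dtype(device)           # honors Globals.full_precision

print(device, precision, dtype)
print(normalize_device(device))       # e.g. cuda -> cuda:0; cpu and mps pass through
assert normalize_device(CPU_DEVICE) == CPU_DEVICE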

View File

@@ -28,6 +28,7 @@ from typing_extensions import ParamSpec
 from ldm.invoke.globals import Globals
 from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent, PostprocessingSettings
 from ldm.modules.textual_inversion_manager import TextualInversionManager
+from ..devices import normalize_device, CPU_DEVICE
 from ..offloading import LazilyLoadedModelGroup, FullyLoadedModelGroup, ModelGroup
 from ...models.diffusion.cross_attention_map_saving import AttentionMapSaver
 from ...modules.prompt_to_embeddings_converter import WeightedPromptFragmentsToEmbeddingsConverter
@@ -319,7 +320,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         if self.device.type == 'cpu' or self.device.type == 'mps':
             mem_free = psutil.virtual_memory().free
         elif self.device.type == 'cuda':
-            mem_free, _ = torch.cuda.mem_get_info(self.device)
+            mem_free, _ = torch.cuda.mem_get_info(normalize_device(self.device))
         else:
             raise ValueError(f"unrecognized device {self.device}")
         # input tensor of [1, 4, h/8, w/8]
@@ -380,9 +381,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         self._model_group.ready()
     def to(self, torch_device: Optional[Union[str, torch.device]] = None):
+        # overridden method; types match the superclass.
         if torch_device is None:
             return self
-        self._model_group.set_device(torch_device)
+        self._model_group.set_device(torch.device(torch_device))
         self._model_group.ready()
     @property
@@ -689,8 +691,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         if device.type == 'mps':
             # workaround for torch MPS bug that has been fixed in https://github.com/kulinseth/pytorch/pull/222
             # TODO remove this workaround once kulinseth#222 is merged to pytorch mainline
-            self.vae.to('cpu')
-            init_image = init_image.to('cpu')
+            self.vae.to(CPU_DEVICE)
+            init_image = init_image.to(CPU_DEVICE)
         else:
             self._model_group.load(self.vae)
         init_latent_dist = self.vae.encode(init_image).latent_dist
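The normalize_device() call above matters because self.device may be a bare torch.device("cuda") with no index, while torch.cuda.mem_get_info reports free and total memory for one specific GPU. A hedged sketch of the same free-memory probe outside the pipeline (guarded so it also runs on CUDA-less hosts):

import psutil
import torch
from ldm.invoke.devices import normalize_device

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

if device.type == "cuda":
    # normalize_device fills in the index (cuda -> cuda:0) before querying the GPU
    mem_free, mem_total = torch.cuda.mem_get_info(normalize_device(device))
else:
    mem_free = psutil.virtual_memory().free

print(f"free memory: {mem_free / 2**30:.1f} GiB")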

View File

@@ -54,6 +54,9 @@ Globals.full_precision = False
 # whether we should convert ckpt files into diffusers models on the fly
 Globals.ckpt_convert = False
+# logging tokenization everywhere
+Globals.log_tokenization = False
 def global_config_file()->Path:
     return Path(Globals.root, Globals.config_dir, Globals.models_file)

View File

@@ -79,8 +79,8 @@ def merge_diffusion_models_and_commit(
     merged_model_name = name for new model
     alpha - The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
             would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2
-    interp - The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_difference" and None.
-            Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_difference" is supported.
+    interp - The interpolation method to use for the merging. Supports "weighted_average", "sigmoid", "inv_sigmoid", "add_difference" and None.
+            Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_difference" is supported. Add_difference is A+(B-C).
     force - Whether to ignore mismatch in model_config.json for the current models. Defaults to False.
     **kwargs - the default DiffusionPipeline.get_config_dict kwargs:
@@ -173,7 +173,6 @@ def _parse_args() -> Namespace:
 # ------------------------- GUI HERE -------------------------
 class FloatSlider(npyscreen.Slider):
-    # this is supposed to adjust display precision, but doesn't
     def translate_value(self):
         stri = "%3.2f / %3.2f" % (self.value, self.out_of)
         l = (len(str(self.out_of))) * 2 + 4
@@ -186,7 +185,7 @@ class FloatTitleSlider(npyscreen.TitleText):
 class mergeModelsForm(npyscreen.FormMultiPageAction):
-    interpolations = ["weighted_sum", "sigmoid", "inv_sigmoid", "add_difference"]
+    interpolations = ["weighted_sum", "sigmoid", "inv_sigmoid"]
     def __init__(self, parentApp, name):
         self.parentApp = parentApp
@@ -305,8 +304,8 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
         self.alpha = self.add_widget_intelligent(
             FloatTitleSlider,
             name="Weight (alpha) to assign to second and third models:",
-            out_of=1,
-            step=0.05,
+            out_of=1.0,
+            step=0.01,
             lowest=0,
             value=0.5,
             labelColor="CONTROL",
@@ -323,7 +322,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
         self.merged_model_name.value = merged_model_name
         if selected_model3 > 0:
-            self.merge_method.values = (["add_difference"],)
+            self.merge_method.values = ['add_difference ( A+(B-C) )']
             self.merged_model_name.value += f"+{models[selected_model3]}"
         else:
             self.merge_method.values = self.interpolations
@@ -349,11 +348,14 @@
         ]
         if self.model3.value[0] > 0:
             models.append(model_names[self.model3.value[0] - 1])
+            interp='add_difference'
+        else:
+            interp=self.interpolations[self.merge_method.value[0]]
         args = dict(
             models=models,
             alpha=self.alpha.value,
-            interp=self.interpolations[self.merge_method.value[0]],
+            interp=interp,
             force=self.force.value,
             merged_model_name=self.merged_model_name.value,
         )
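For reference, the per-tensor arithmetic behind the interpolation names used in the docstring and the GUI above, written as a hedged sketch; the helper name and the alpha scaling on the three-model case are illustrative, not the diffusers checkpoint-merger API:

from typing import Optional

import torch

def merge_weights(a: torch.Tensor, b: torch.Tensor, alpha: float,
                  c: Optional[torch.Tensor] = None) -> torch.Tensor:
    """Blend corresponding checkpoint tensors from two or three models."""
    if c is None:
        # weighted_sum: plain linear interpolation between two models
        return (1 - alpha) * a + alpha * b
    # add_difference: A + (B - C), with alpha scaling the injected difference
    return a + alpha * (b - c)

a = torch.ones(2)
b = torch.full((2,), 3.0)
c = torch.full((2,), 2.0)
print(merge_weights(a, b, alpha=0.5))         # tensor([2., 2.])
print(merge_weights(a, b, alpha=0.5, c=c))    # tensor([1.5000, 1.5000])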

View File

@@ -30,6 +30,7 @@ from omegaconf import OmegaConf
 from omegaconf.dictconfig import DictConfig
 from picklescan.scanner import scan_file_path
+from ldm.invoke.devices import CPU_DEVICE
 from ldm.invoke.generator.diffusers_pipeline import \
     StableDiffusionGeneratorPipeline
 from ldm.invoke.globals import (Globals, global_autoscan_dir, global_cache_dir,
@@ -47,7 +48,7 @@ class ModelManager(object):
     def __init__(
         self,
         config: OmegaConf,
-        device_type: str | torch.device = "cpu",
+        device_type: torch.device = CPU_DEVICE,
         precision: str = "float16",
         max_loaded_models=DEFAULT_MAX_MODELS,
         sequential_offload = False
@ -675,7 +676,7 @@ class ModelManager(object):
"""
if str(weights).startswith(("http:", "https:")):
model_name = model_name or url_attachment_name(weights)
weights_path = self._resolve_path(weights, "models/ldm/stable-diffusion-v1")
config_path = self._resolve_path(config, "configs/stable-diffusion")
@@ -996,25 +997,25 @@
         self.models.pop(model_name, None)
     def _model_to_cpu(self, model):
-        if self.device == "cpu":
+        if self.device == CPU_DEVICE:
             return model
         if isinstance(model, StableDiffusionGeneratorPipeline):
             model.offload_all()
             return model
-        model.cond_stage_model.device = "cpu"
-        model.to("cpu")
+        model.cond_stage_model.device = CPU_DEVICE
+        model.to(CPU_DEVICE)
         for submodel in ("first_stage_model", "cond_stage_model", "model"):
             try:
-                getattr(model, submodel).to("cpu")
+                getattr(model, submodel).to(CPU_DEVICE)
             except AttributeError:
                 pass
         return model
     def _model_from_cpu(self, model):
-        if self.device == "cpu":
+        if self.device == CPU_DEVICE:
             return model
         if isinstance(model, StableDiffusionGeneratorPipeline):