Merge branch 'main' into feat/onnx
@@ -8,9 +8,9 @@ from invokeai.app.services.config import (
 def check_invokeai_root(config: InvokeAIAppConfig):
     try:
-        assert config.model_conf_path.exists()
-        assert config.db_path.exists()
-        assert config.models_path.exists()
+        assert config.model_conf_path.exists(), f'{config.model_conf_path} not found'
+        assert config.db_path.parent.exists(), f'{config.db_path.parent} not found'
+        assert config.models_path.exists(), f'{config.models_path} not found'
         for model in [
                 'CLIP-ViT-bigG-14-laion2B-39B-b160k',
                 'bert-base-uncased',
@@ -18,9 +18,11 @@ def check_invokeai_root(config: InvokeAIAppConfig):
                 'sd-vae-ft-mse',
                 'stable-diffusion-2-clip',
                 'stable-diffusion-safety-checker']:
-            assert (config.models_path / f'core/convert/{model}').exists()
-    except:
+            path = config.models_path / f'core/convert/{model}'
+            assert path.exists(), f'{path} is missing'
+    except Exception as e:
         print()
+        print(f'An exception has occurred: {str(e)}')
         print('== STARTUP ABORTED ==')
         print('** One or more necessary files is missing from your InvokeAI root directory **')
         print('** Please rerun the configuration script to fix this problem. **')
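
Note: the point of this hunk is diagnosability. Binding the candidate path to a variable lets both the assertion message and the new except Exception as e handler name the exact file that is missing, where the old bare except: swallowed the detail. A minimal, self-contained sketch of the resulting behavior (the root path is hypothetical; the real one comes from InvokeAIAppConfig):

from pathlib import Path

models_path = Path('/opt/invokeai/models')  # hypothetical root
try:
    for model in ['bert-base-uncased', 'sd-vae-ft-mse']:
        path = models_path / f'core/convert/{model}'
        assert path.exists(), f'{path} is missing'
except Exception as e:
    # Prints e.g. "/opt/invokeai/models/core/convert/bert-base-uncased is missing"
    print(f'An exception has occurred: {str(e)}')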

@@ -13,8 +13,8 @@ import os
 import shutil
 import textwrap
 import traceback
-import warnings
 import yaml
+import warnings
 from argparse import Namespace
 from pathlib import Path
 from shutil import get_terminal_size
@@ -45,6 +45,7 @@ from invokeai.app.services.config import (
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.frontend.install.model_install import addModelsForm, process_and_execute
 from invokeai.frontend.install.widgets import (
+    SingleSelectColumns,
     CenteredButtonPress,
     FileBox,
     IntTitleSlider,
@@ -56,7 +57,6 @@ from invokeai.frontend.install.widgets import (
 from invokeai.backend.install.legacy_arg_parsing import legacy_parser
 from invokeai.backend.install.model_install_backend import (
     hf_download_from_pretrained,
-    hf_download_with_resume,
     InstallSelections,
     ModelInstall,
 )
@@ -330,34 +330,49 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
             npyscreen.Checkbox,
             name="Free GPU memory after each generation",
             value=old_opts.free_gpu_mem,
+            max_width=45,
             relx=5,
             scroll_exit=True,
         )
+        self.nextrely -= 1
         self.xformers_enabled = self.add_widget_intelligent(
             npyscreen.Checkbox,
-            name="Enable xformers support if available",
+            name="Enable xformers support",
             value=old_opts.xformers_enabled,
-            relx=5,
+            max_width=30,
+            relx=50,
             scroll_exit=True,
         )
+        self.nextrely -= 1
         self.always_use_cpu = self.add_widget_intelligent(
             npyscreen.Checkbox,
             name="Force CPU to be used on GPU systems",
             value=old_opts.always_use_cpu,
-            relx=5,
+            relx=80,
             scroll_exit=True,
         )
         precision = old_opts.precision or (
             "float32" if program_opts.full_precision else "auto"
         )
+        self.nextrely += 1
+        self.add_widget_intelligent(
+            npyscreen.TitleFixedText,
+            name="Floating Point Precision",
+            begin_entry_at=0,
+            editable=False,
+            color="CONTROL",
+            scroll_exit=True,
+        )
+        self.nextrely -= 1
         self.precision = self.add_widget_intelligent(
-            npyscreen.TitleSelectOne,
-            columns=2,
+            SingleSelectColumns,
+            columns=3,
             name="Precision",
             values=PRECISION_CHOICES,
             value=PRECISION_CHOICES.index(precision),
             begin_entry_at=3,
-            max_height=len(PRECISION_CHOICES) + 1,
+            max_height=2,
+            max_width=80,
             scroll_exit=True,
         )
         self.max_cache_size = self.add_widget_intelligent(
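
Note: add_widget_intelligent is InvokeAI's own wrapper, but the row-packing trick in this hunk is stock npyscreen: adding a widget advances the form's nextrely (the next y position), so decrementing it by one and giving the next widget a larger relx places widgets side by side. Here three checkboxes land on one row at relx 5, 50 and 80, with max_width keeping them from overlapping. A minimal sketch using plain npyscreen (the widget names are illustrative):

import npyscreen

class DemoForm(npyscreen.FormBaseNew):
    def create(self):
        # First checkbox starts at column 5, capped at 45 columns wide.
        self.add(npyscreen.Checkbox, name="Free GPU memory", relx=5, max_width=45)
        # Rewind one row so the next widget lands beside the first.
        self.nextrely -= 1
        self.add(npyscreen.Checkbox, name="Enable xformers", relx=50, max_width=30)

class DemoApp(npyscreen.NPSAppManaged):
    def onStart(self):
        self.addForm("MAIN", DemoForm, name="Row-packing demo")

if __name__ == "__main__":
    DemoApp().run()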
@@ -370,12 +385,6 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
             scroll_exit=True,
         )
         self.nextrely += 1
-        self.add_widget_intelligent(
-            npyscreen.FixedText,
-            value="Folder to recursively scan for new checkpoints, ControlNets, LoRAs and TI models (<tab> autocompletes, ctrl-N advances):",
-            editable=False,
-            color="CONTROL",
-        )
         self.outdir = self.add_widget_intelligent(
             FileBox,
             name="Output directory for images (<tab> autocompletes, ctrl-N advances):",
@@ -391,7 +400,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
         self.autoimport_dirs = {}
         self.autoimport_dirs['autoimport_dir'] = self.add_widget_intelligent(
             FileBox,
-            name=f'Autoimport Folder',
+            name=f'Folder to recursively scan for new checkpoints, ControlNets, LoRAs and TI models',
             value=str(config.root_path / config.autoimport_dir),
             select_dir=True,
             must_exist=False,
@@ -402,18 +411,10 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
             scroll_exit=True
         )
         self.nextrely += 1
-        self.add_widget_intelligent(
-            npyscreen.TitleFixedText,
-            name="== LICENSE ==",
-            begin_entry_at=0,
-            editable=False,
-            color="CONTROL",
-            scroll_exit=True,
-        )
-        self.nextrely -= 1
         label = """BY DOWNLOADING THE STABLE DIFFUSION WEIGHT FILES, YOU AGREE TO HAVE READ
-AND ACCEPTED THE CREATIVEML RESPONSIBLE AI LICENSE LOCATED AT
-https://huggingface.co/spaces/CompVis/stable-diffusion-license
+AND ACCEPTED THE CREATIVEML RESPONSIBLE AI LICENSES LOCATED AT
+https://huggingface.co/spaces/CompVis/stable-diffusion-license and
+https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md
 """
         for i in textwrap.wrap(label, width=window_width - 6):
             self.add_widget_intelligent(
@@ -424,7 +425,7 @@ https://huggingface.co/spaces/CompVis/stable-diffusion-license
         )
         self.license_acceptance = self.add_widget_intelligent(
             npyscreen.Checkbox,
-            name="I accept the CreativeML Responsible AI License",
+            name="I accept the CreativeML Responsible AI Licenses",
             value=not first_time,
             relx=2,
             scroll_exit=True,
@@ -439,7 +440,6 @@ https://huggingface.co/spaces/CompVis/stable-diffusion-license
             CenteredButtonPress,
             name=label,
             relx=(window_width - len(label)) // 2,
-            rely=-3,
             when_pressed_function=self.on_ok,
         )

@@ -558,7 +558,7 @@ def default_user_selections(program_opts: Namespace) -> InstallSelections:

 # -------------------------------------
 def initialize_rootdir(root: Path, yes_to_all: bool = False):
-    logger.info("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **")
+    logger.info("Initializing InvokeAI runtime directory")
     for name in (
         "models",
         "databases",
@@ -583,7 +583,18 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False):
     path = dest / 'core'
     path.mkdir(parents=True, exist_ok=True)

-    with open(root / 'configs' / 'models.yaml', 'w') as yaml_file:
+    maybe_create_models_yaml(root)
+
+def maybe_create_models_yaml(root: Path):
+    models_yaml = root / 'configs' / 'models.yaml'
+    if models_yaml.exists():
+        if OmegaConf.load(models_yaml).get('__metadata__'):  # up to date
+            return
+        else:
+            logger.info('Creating new models.yaml, original saved as models.yaml.orig')
+            models_yaml.rename(models_yaml.parent / 'models.yaml.orig')
+
+    with open(models_yaml, 'w') as yaml_file:
         yaml_file.write(yaml.dump({'__metadata__':
                                    {'version': '3.0.0'}
                                    }
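
Note: the new maybe_create_models_yaml treats the presence of a __metadata__ key (read via OmegaConf) as the marker of an up-to-date models.yaml, backs up any legacy file, and writes a versioned stub. The stub reduces to a single yaml.dump call; a runnable sketch of exactly what lands on disk:

import yaml

# Reproduces the stub written by maybe_create_models_yaml.
print(yaml.dump({'__metadata__': {'version': '3.0.0'}}))
# Output:
# __metadata__:
#   version: 3.0.0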
@@ -767,8 +778,8 @@ def main():
         if migrate_if_needed(opt, config.root_path):
             sys.exit(0)

-        if not config.model_conf_path.exists():
-            initialize_rootdir(config.root_path, opt.yes_to_all)
+        # run this unconditionally in case new directories need to be added
+        initialize_rootdir(config.root_path, opt.yes_to_all)

         models_to_download = default_user_selections(opt)
         new_init_file = config.root_path / 'invokeai.yaml'
@@ -788,15 +799,14 @@ def main():
             sys.exit(0)

         if opt.skip_support_models:
-            logger.info("SKIPPING SUPPORT MODEL DOWNLOADS PER USER REQUEST")
+            logger.info("Skipping support models at user's request")
         else:
-            logger.info("CHECKING/UPDATING SUPPORT MODELS")
+            logger.info("Installing support models")
             download_support_models()

         if opt.skip_sd_weights:
-            logger.warning("SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST")
+            logger.warning("Skipping diffusion weights download per user request")
         elif models_to_download:
-            logger.info("DOWNLOADING DIFFUSION WEIGHTS")
             process_and_execute(opt, models_to_download)

         postscript(errors=errors)

@@ -149,16 +149,17 @@ class ModelInstall(object):
         for i in installed:
             print(f"{i['model_name']}\t{i['base_model']}\t{i['path']}")

-    def starter_models(self) -> Set[str]:
+    # logic here a little reversed to maintain backward compatibility
+    def starter_models(self, all_models: bool = False) -> Set[str]:
         models = set()
         for key, value in self.datasets.items():
             name, base, model_type = ModelManager.parse_key(key)
-            if model_type == ModelType.Main:
+            if all_models or model_type in [ModelType.Main, ModelType.Vae]:
                 models.add(key)
         return models

     def recommended_models(self) -> Set[str]:
-        starters = self.starter_models()
+        starters = self.starter_models(all_models=True)
         return set([x for x in starters if self.datasets[x].get('recommended', False)])

     def default_model(self) -> str:
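
Note: the "reversed" comment is the key to this hunk. The default starter_models() call now returns Main and VAE models, while recommended_models() asks for all_models=True and then filters on the 'recommended' flag, so recommendations are no longer limited to Main checkpoints. A self-contained sketch of the selection behavior (the dataset keys and parse logic are simplified stand-ins for ModelManager's):

from typing import Set

# Hypothetical stand-in for ModelInstall.datasets (key -> metadata).
DATASETS = {
    'sd-1/main/stable-diffusion-v1-5': {'recommended': True},
    'sd-1/vae/sd-vae-ft-mse': {'recommended': True},
    'sd-1/controlnet/canny': {'recommended': True},
}

def starter_models(all_models: bool = False) -> Set[str]:
    models = set()
    for key in DATASETS:
        model_type = key.split('/')[1]  # stand-in for ModelManager.parse_key
        if all_models or model_type in ['main', 'vae']:
            models.add(key)
    return models

def recommended_models() -> Set[str]:
    starters = starter_models(all_models=True)  # full set, then filter
    return {x for x in starters if DATASETS[x].get('recommended', False)}

print(sorted(starter_models()))      # main + vae entries only
print(sorted(recommended_models()))  # includes the recommended controlnet too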
@@ -754,7 +754,7 @@ class ModelManager(object):
         # We are taking advantage of a side effect of get_model() that converts checkpoints
         # into cached diffusers directories stored at `location`. It doesn't matter
         # what submodeltype we request here, so we get the smallest.
-        submodel = {"submodel_type": SubModelType.Tokenizer} if model_type == ModelType.Main else {}
+        submodel = {"submodel_type": SubModelType.Scheduler} if model_type == ModelType.Main else {}
         model = self.get_model(model_name,
                                base_model,
                                model_type,

@@ -416,7 +416,14 @@ class PipelineFolderProbe(FolderProbeBase):

 class VaeFolderProbe(FolderProbeBase):
     def get_base_type(self) -> BaseModelType:
-        return BaseModelType.StableDiffusion1
+        config_file = self.folder_path / 'config.json'
+        if not config_file.exists():
+            raise InvalidModelException(f"Cannot determine base type for {self.folder_path}")
+        with open(config_file, 'r') as file:
+            config = json.load(file)
+        return BaseModelType.StableDiffusionXL \
+            if config.get('scaling_factor', 0) == 0.13025 and config.get('sample_size') in [512, 1024] \
+            else BaseModelType.StableDiffusion1

 class TextualInversionFolderProbe(FolderProbeBase):
     def get_format(self) -> str:
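
Note: instead of assuming every VAE folder is SD-1, the probe now keys off two values in the VAE's diffusers config.json: the SDXL VAE ships with scaling_factor 0.13025 (SD-1's VAE uses 0.18215) and a sample_size of 512 or 1024. A runnable sketch of the same test against a trimmed, illustrative config:

import json

# Trimmed, illustrative contents of an SDXL VAE folder's config.json.
config = json.loads('{"scaling_factor": 0.13025, "sample_size": 1024}')

base = (
    'StableDiffusionXL'
    if config.get('scaling_factor', 0) == 0.13025 and config.get('sample_size') in [512, 1024]
    else 'StableDiffusion1'
)
print(base)  # StableDiffusionXL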
@@ -112,16 +112,12 @@ class StableDiffusionXLModel(DiffusersModel):
         # The convert script adapted from the diffusers package uses
         # strings for the base model type. To avoid making too many
         # source code changes, we simply translate here
-        model_base_to_model_type = {BaseModelType.StableDiffusionXL: 'SDXL',
-                                    BaseModelType.StableDiffusionXLRefiner: 'SDXL-Refiner',
-                                    }
         if isinstance(config, cls.CheckpointConfig):
+            from invokeai.backend.model_management.models.stable_diffusion import _convert_ckpt_and_cache
             return _convert_ckpt_and_cache(
                 version=base_model,
                 model_config=config,
                 output_path=output_path,
-                model_type=model_base_to_model_type[base_model],
+                use_safetensors=False,  # corrupts sdxl models for some reason
             )
         else:
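
Note: the import of _convert_ckpt_and_cache moves into the method body because stable_diffusion.py in turn imports from sdxl.py (see the next hunk's from .sdxl import), which would otherwise create a circular import at module-load time. A function-local import defers resolution until the first call, by which point both modules are fully loaded. A trivial runnable illustration of the pattern:

def convert():
    # Resolved on first call, not when this module is imported; the same
    # trick the hunk uses to dodge the sdxl/stable_diffusion import cycle.
    import json
    return json.dumps({'ok': True})

print(convert())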
@@ -14,6 +14,7 @@ from .base import (
     read_checkpoint_meta,
     classproperty,
     InvalidModelException,
+    ModelNotFoundException,
 )
 from .sdxl import StableDiffusionXLModel
 import invokeai.backend.util.logging as logger
@@ -268,12 +269,18 @@ def _convert_ckpt_and_cache(
     # to avoid circular import errors
     from ..convert_ckpt_to_diffusers import convert_ckpt_to_diffusers
     from ...util.devices import choose_torch_device, torch_dtype

+    model_base_to_model_type = {BaseModelType.StableDiffusion1: 'FrozenCLIPEmbedder',
+                                BaseModelType.StableDiffusion2: 'FrozenOpenCLIPEmbedder',
+                                BaseModelType.StableDiffusionXL: 'SDXL',
+                                BaseModelType.StableDiffusionXLRefiner: 'SDXL-Refiner',
+                                }
+    logger.info(f'Converting {weights} to diffusers format')
     with SilenceWarnings():
         convert_ckpt_to_diffusers(
             weights,
             output_path,
+            model_type=model_base_to_model_type[version],
             model_version=version,
             model_variant=model_config.variant,
             original_config_file=config_file,
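
Note: the model_base_to_model_type table now lives inside _convert_ckpt_and_cache and gains the two SDXL entries, translating InvokeAI's BaseModelType enum into the strings the diffusers-derived convert script expects. A self-contained sketch of the lookup (the enum below is an illustrative subset, not InvokeAI's full definition):

from enum import Enum

class BaseModelType(str, Enum):  # illustrative subset
    StableDiffusion1 = 'sd-1'
    StableDiffusionXL = 'sdxl'

model_base_to_model_type = {
    BaseModelType.StableDiffusion1: 'FrozenCLIPEmbedder',
    BaseModelType.StableDiffusionXL: 'SDXL',
}

version = BaseModelType.StableDiffusionXL
print(model_base_to_model_type[version])  # SDXL, as convert_ckpt_to_diffusers expects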