mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00

reformat with black and isort
parent 4878c7a2d5
commit 5a4967582e
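Editor's note: the diff below is a mechanical formatting pass over the touched Python modules. isort regroups and alphabetizes the imports, and black reflows the code (double quotes, PEP 8 spacing around keyword arguments, 88-column wrapping with trailing commas). A rough sketch of the same pass driven from the tools' Python APIs — assuming black and isort (>= 5) are importable; the exact versions and flags used for this commit are not recorded in the diff:

import black
import isort

src = "import torch\nfrom pathlib import Path\n\n\ndef f(precision: str='float16'):\n    return dict(revision='fp16')\n"

# isort first, then black, mirroring the commit title "black and isort".
sorted_src = isort.code(src)
formatted = black.format_str(sorted_src, mode=black.FileMode())
print(formatted)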
@@ -177,6 +177,7 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
         print(f"Error downloading {label} model", file=sys.stderr)
         print(traceback.format_exc(), file=sys.stderr)
 
+
 # ---------------------------------------------
 # this will preload the Bert tokenizer fles
 def download_bert():
@@ -284,37 +285,36 @@ def download_safety_checker():
     download_from_hf(StableDiffusionSafetyChecker, safety_model_id)
     print("...success", file=sys.stderr)
 
+
 # -------------------------------------
 def download_vaes(precision: str):
     print("Installing stabilityai VAE...", file=sys.stderr)
     try:
         # first the diffusers version
-        repo_id = 'stabilityai/sd-vae-ft-mse'
+        repo_id = "stabilityai/sd-vae-ft-mse"
         args = dict(
-            cache_dir=global_cache_dir('diffusers'),
+            cache_dir=global_cache_dir("diffusers"),
         )
-        if precision=='float16':
-            args.update(
-                torch_dtype=torch.float16,
-                revision='fp16'
-            )
+        if precision == "float16":
+            args.update(torch_dtype=torch.float16, revision="fp16")
         if not AutoencoderKL.from_pretrained(repo_id, **args):
-            raise Exception(f'download of {repo_id} failed')
+            raise Exception(f"download of {repo_id} failed")
 
-        repo_id = 'stabilityai/sd-vae-ft-mse-original'
-        model_name = 'vae-ft-mse-840000-ema-pruned.ckpt'
+        repo_id = "stabilityai/sd-vae-ft-mse-original"
+        model_name = "vae-ft-mse-840000-ema-pruned.ckpt"
         # next the legacy checkpoint version
         if not hf_download_with_resume(
-            repo_id = repo_id,
-            model_name = model_name,
-            model_dir = str(Globals.root / Model_dir / Weights_dir)
+            repo_id=repo_id,
+            model_name=model_name,
+            model_dir=str(Globals.root / Model_dir / Weights_dir),
         ):
-            raise Exception(f'download of {model_name} failed')
+            raise Exception(f"download of {model_name} failed")
         print("...downloaded successfully", file=sys.stderr)
     except Exception as e:
         print(f"Error downloading StabilityAI standard VAE: {str(e)}", file=sys.stderr)
         print(traceback.format_exc(), file=sys.stderr)
 
+
 # -------------------------------------
 def get_root(root: str = None) -> str:
     if root:
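Editor's note: the args.update() change above is black's core layout rule in miniature. A call whose arguments fit within the 88-column limit and that carries no trailing ("magic") comma is collapsed onto one line; a call that does not fit is exploded to one argument per line with a trailing comma added, as in the hf_download_with_resume call in the same hunk. The before/after pair, taken from the hunk:

# before: hand-wrapped, no trailing comma after 'fp16'
args.update(
    torch_dtype=torch.float16,
    revision='fp16'
)
# after black: the joined line fits in 88 columns, so it is collapsed
args.update(torch_dtype=torch.float16, revision="fp16")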
@@ -329,7 +329,7 @@ class editOptsForm(npyscreen.FormMultiPage):
     def create(self):
         program_opts = self.parentApp.program_opts
         old_opts = self.parentApp.invokeai_opts
-        with open('log.txt','w') as f:
+        with open("log.txt", "w") as f:
             f.write(str(old_opts))
         first_time = not (Globals.root / Globals.initfile).exists()
         access_token = HfFolder.get_token()
@@ -576,14 +576,14 @@ class editOptsForm(npyscreen.FormMultiPage):
         new_opts = Namespace()
 
         for attr in [
             "outdir",
             "safety_checker",
             "free_gpu_mem",
             "max_loaded_models",
             "xformers",
             "always_use_cpu",
             "embedding_path",
             "ckpt_convert",
         ]:
             setattr(new_opts, attr, getattr(self, attr).value)
 
@@ -672,7 +672,9 @@ def initialize_rootdir(root: str, yes_to_all: bool = False):
 
 
 # -------------------------------------
-def run_console_ui(program_opts: Namespace, initfile: Path=None) -> (Namespace, Namespace):
+def run_console_ui(
+    program_opts: Namespace, initfile: Path = None
+) -> (Namespace, Namespace):
     # parse_args() will read from init file if present
     invokeai_opts = default_startup_options(initfile)
     editApp = EditOptApplication(program_opts, invokeai_opts)
@@ -747,6 +749,7 @@ def write_default_options(program_opts: Namespace, initfile: Path):
     opt.hf_token = HfFolder.get_token()
     write_opts(opt, initfile)
 
+
 # -------------------------------------
 def main():
     parser = argparse.ArgumentParser(description="InvokeAI model downloader")
@@ -816,7 +819,9 @@ def main():
 
     if opt.yes_to_all:
         write_default_options(opt, init_file)
-        init_options = Namespace(precision='float32' if opt.full_precision else 'float16')
+        init_options = Namespace(
+            precision="float32" if opt.full_precision else "float16"
+        )
     else:
         init_options, models_to_download = run_console_ui(opt, init_file)
         if init_options:
@@ -4,10 +4,10 @@
 # run this script from one with internet connectivity. The
 # two machines must share a common .cache directory.
 
-'''
+"""
 This is the npyscreen frontend to the model installation application.
 The work is actually done in backend code in model_install_backend.py.
-'''
+"""
 
 import argparse
 import curses
@@ -15,25 +15,26 @@ import os
 import sys
 import traceback
 from argparse import Namespace
+from pathlib import Path
 from typing import List
 
 import npyscreen
 import torch
-from pathlib import Path
 from npyscreen import widget
 from omegaconf import OmegaConf
 
 from ..devices import choose_precision, choose_torch_device
 from ..globals import Globals, global_config_dir
-from .widgets import MultiSelectColumns, TextBox
 from .model_install_backend import (Dataset_path, default_config_file,
+                                    default_dataset, get_root,
                                     install_requested_models,
-                                    default_dataset, recommended_datasets, get_root
-                                    )
+                                    recommended_datasets)
+from .widgets import MultiSelectColumns, TextBox
 
+
 class addModelsForm(npyscreen.FormMultiPage):
     def __init__(self, parentApp, name, multipage=False, *args, **keywords):
-        self.multipage=multipage
+        self.multipage = multipage
         self.initial_models = OmegaConf.load(Dataset_path)
         try:
             self.existing_models = OmegaConf.load(default_config_file())
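Editor's note: the import hunk above is isort's contribution. Imports are split into sections (standard library, third-party, then local), alphabetized within each section, and from pathlib import Path moves out of the third-party block into the standard-library group. The parenthesized local import keeps isort's grid-aligned wrapping rather than black's one-name-per-line style, which suggests isort ran with its defaults rather than its black-compatible profile. A sketch of the difference, with a hypothetical over-long import and assuming isort >= 5:

import isort

src = (
    "from .model_install_backend import (Dataset_path, default_config_file, "
    "default_dataset, get_root, install_requested_models, recommended_datasets)\n"
)
print(isort.code(src))                   # default: grid-aligned continuation, as seen above
print(isort.code(src, profile="black"))  # black profile: one name per line plus trailing comma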
@@ -42,7 +43,7 @@ class addModelsForm(npyscreen.FormMultiPage):
         self.starter_model_list = [
             x for x in list(self.initial_models.keys()) if x not in self.existing_models
         ]
-        self.installed_models=dict()
+        self.installed_models = dict()
         super().__init__(parentApp=parentApp, name=name, *args, **keywords)
 
     def create(self):
@@ -54,19 +55,17 @@ class addModelsForm(npyscreen.FormMultiPage):
             if self.initial_models[x].get("recommended", False)
         ]
         self.installed_models = sorted(
-            [
-                x for x in list(self.initial_models.keys()) if x in self.existing_models
-            ]
+            [x for x in list(self.initial_models.keys()) if x in self.existing_models]
         )
         self.nextrely -= 1
         self.add_widget_intelligent(
             npyscreen.FixedText,
-            value='Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields,',
+            value="Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields,",
             editable=False,
         )
         self.add_widget_intelligent(
             npyscreen.FixedText,
-            value='cursor arrows to make a selection, and space to toggle checkboxes.',
+            value="cursor arrows to make a selection, and space to toggle checkboxes.",
             editable=False,
         )
         self.nextrely += 1
@@ -84,17 +83,17 @@ class addModelsForm(npyscreen.FormMultiPage):
             MultiSelectColumns,
             columns=columns,
             values=self.installed_models,
-            value=[x for x in range(0,len(self.installed_models))],
-            max_height=1+len(self.installed_models) // columns,
-            relx = 4,
+            value=[x for x in range(0, len(self.installed_models))],
+            max_height=1 + len(self.installed_models) // columns,
+            relx=4,
             slow_scroll=True,
-            scroll_exit = True,
+            scroll_exit=True,
         )
         self.purge_deleted = self.add_widget_intelligent(
             npyscreen.Checkbox,
-            name='Purge deleted models from disk',
+            name="Purge deleted models from disk",
             value=False,
-            scroll_exit=True
+            scroll_exit=True,
         )
         self.nextrely += 1
         self.add_widget_intelligent(
@@ -119,13 +118,13 @@ class addModelsForm(npyscreen.FormMultiPage):
                 if show_recommended and x in recommended_models
             ],
             max_height=len(starter_model_labels) + 1,
-            relx = 4,
+            relx=4,
             scroll_exit=True,
         )
         for line in [
-            '== IMPORT LOCAL AND REMOTE MODELS ==',
-            'Enter URLs, file paths, or HuggingFace diffusers repository IDs separated by spaces.',
-            'Use control-V or shift-control-V to paste:'
+            "== IMPORT LOCAL AND REMOTE MODELS ==",
+            "Enter URLs, file paths, or HuggingFace diffusers repository IDs separated by spaces.",
+            "Use control-V or shift-control-V to paste:",
         ]:
             self.add_widget_intelligent(
                 npyscreen.TitleText,
@@ -135,40 +134,36 @@ class addModelsForm(npyscreen.FormMultiPage):
             )
         self.nextrely -= 1
         self.import_model_paths = self.add_widget_intelligent(
-            TextBox,
-            max_height=5,
-            scroll_exit=True,
-            editable=True,
-            relx=4
+            TextBox, max_height=5, scroll_exit=True, editable=True, relx=4
         )
         self.nextrely += 1
-        self.show_directory_fields= self.add_widget_intelligent(
+        self.show_directory_fields = self.add_widget_intelligent(
             npyscreen.FormControlCheckbox,
-            name='Select a directory for models to import',
+            name="Select a directory for models to import",
             value=False,
         )
         self.autoload_directory = self.add_widget_intelligent(
             npyscreen.TitleFilename,
-            name='Directory (<tab> autocompletes):',
+            name="Directory (<tab> autocompletes):",
             select_dir=True,
             must_exist=True,
             use_two_lines=False,
-            labelColor='DANGER',
+            labelColor="DANGER",
             begin_entry_at=34,
             scroll_exit=True,
         )
         self.autoscan_on_startup = self.add_widget_intelligent(
             npyscreen.Checkbox,
-            name='Scan this directory each time InvokeAI starts for new models to import',
+            name="Scan this directory each time InvokeAI starts for new models to import",
             value=False,
-            relx = 4,
+            relx=4,
             scroll_exit=True,
         )
         self.nextrely += 1
         self.convert_models = self.add_widget_intelligent(
             npyscreen.TitleSelectOne,
-            name='== CONVERT IMPORTED MODELS INTO DIFFUSERS==',
-            values=['Keep original format','Convert to diffusers'],
+            name="== CONVERT IMPORTED MODELS INTO DIFFUSERS==",
+            values=["Keep original format", "Convert to diffusers"],
             value=0,
             begin_entry_at=4,
             max_height=4,
@@ -177,33 +172,33 @@ class addModelsForm(npyscreen.FormMultiPage):
         )
         self.cancel = self.add_widget_intelligent(
             npyscreen.ButtonPress,
-            name='CANCEL',
-            rely = -3,
+            name="CANCEL",
+            rely=-3,
             when_pressed_function=self.on_cancel,
         )
-        done_label = 'DONE'
-        back_label = 'BACK'
+        done_label = "DONE"
+        back_label = "BACK"
         button_length = len(done_label)
         button_offset = 0
         if self.multipage:
-            button_length += len(back_label)+1
-            button_offset += len(back_label)+1
+            button_length += len(back_label) + 1
+            button_offset += len(back_label) + 1
             self.back_button = self.add_widget_intelligent(
                 npyscreen.ButtonPress,
                 name=back_label,
-                relx= (window_width-button_length)//2,
-                rely= -3,
-                when_pressed_function=self.on_back
+                relx=(window_width - button_length) // 2,
+                rely=-3,
+                when_pressed_function=self.on_back,
             )
         self.ok_button = self.add_widget_intelligent(
             npyscreen.ButtonPress,
             name=done_label,
-            relx= button_offset + 1 + (window_width-button_length)//2,
-            rely= -3,
-            when_pressed_function=self.on_ok
+            relx=button_offset + 1 + (window_width - button_length) // 2,
+            rely=-3,
+            when_pressed_function=self.on_ok,
         )
 
-        for i in [self.autoload_directory,self.autoscan_on_startup]:
+        for i in [self.autoload_directory, self.autoscan_on_startup]:
             self.show_directory_fields.addVisibleWhenSelected(i)
 
         self.show_directory_fields.when_value_edited = self._clear_scan_directory
@@ -216,14 +211,16 @@ class addModelsForm(npyscreen.FormMultiPage):
 
     def _clear_scan_directory(self):
         if not self.show_directory_fields.value:
-            self.autoload_directory.value = ''
+            self.autoload_directory.value = ""
 
     def _show_hide_convert(self):
-        model_paths = self.import_model_paths.value or ''
-        autoload_directory = self.autoload_directory.value or ''
-        self.convert_models.hidden = len(model_paths)==0 and len(autoload_directory)==0
+        model_paths = self.import_model_paths.value or ""
+        autoload_directory = self.autoload_directory.value or ""
+        self.convert_models.hidden = (
+            len(model_paths) == 0 and len(autoload_directory) == 0
+        )
 
-    def _get_starter_model_labels(self)->List[str]:
+    def _get_starter_model_labels(self) -> List[str]:
         window_height, window_width = curses.initscr().getmaxyx()
         label_width = 25
         checkbox_width = 4
@@ -231,18 +228,29 @@ class addModelsForm(npyscreen.FormMultiPage):
         description_width = window_width - label_width - checkbox_width - spacing_width
         im = self.initial_models
         names = self.starter_model_list
-        descriptions = [im[x].description [0:description_width-3]+'...'
-                        if len(im[x].description) > description_width
-                        else im[x].description
-                        for x in names]
+        descriptions = [
+            im[x].description[0 : description_width - 3] + "..."
+            if len(im[x].description) > description_width
+            else im[x].description
+            for x in names
+        ]
         return [
-            f"%-{label_width}s %s" % (names[x], descriptions[x]) for x in range(0,len(names))
+            f"%-{label_width}s %s" % (names[x], descriptions[x])
+            for x in range(0, len(names))
         ]
 
-    def _get_columns(self)->int:
+    def _get_columns(self) -> int:
         window_height, window_width = curses.initscr().getmaxyx()
-        cols = 4 if window_width > 240 else 3 if window_width>160 else 2 if window_width>80 else 1
-        return min(cols,len(self.installed_models))
+        cols = (
+            4
+            if window_width > 240
+            else 3
+            if window_width > 160
+            else 2
+            if window_width > 80
+            else 1
+        )
+        return min(cols, len(self.installed_models))
 
     def on_ok(self):
         self.parentApp.setNextForm(None)
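Editor's note: a sweep like this rewrites hundreds of lines at once (the _get_columns conditional above goes from one line to ten), and what makes such a commit reviewable is black's built-in safety property: it refuses to emit output whose AST differs from the input. The same check can be scripted; a minimal sketch, assuming black is importable:

import ast
import black

before = "cols = 4 if w > 240 else 3 if w > 160 else 2 if w > 80 else 1\n"
after = black.format_str(before, mode=black.FileMode())

# Reformatting may move tokens across lines, but the parsed module
# must stay equivalent.
assert ast.dump(ast.parse(before)) == ast.dump(ast.parse(after))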
@@ -256,14 +264,14 @@ class addModelsForm(npyscreen.FormMultiPage):
 
     def on_cancel(self):
         if npyscreen.notify_yes_no(
-            'Are you sure you want to cancel?\nYou may re-run this script later using the invoke.sh or invoke.bat command.\n'
+            "Are you sure you want to cancel?\nYou may re-run this script later using the invoke.sh or invoke.bat command.\n"
         ):
             self.parentApp.setNextForm(None)
             self.parentApp.user_cancelled = True
             self.editing = False
 
     def marshall_arguments(self):
-        '''
+        """
         Assemble arguments and store as attributes of the application:
         .starter_models: dict of model names to install from INITIAL_CONFIGURE.yaml
                          True  => Install
@@ -272,25 +280,27 @@ class addModelsForm(npyscreen.FormMultiPage):
         .autoscan_on_startup: True if invokeai should scan and import at startup time
         .import_model_paths: list of URLs, repo_ids and file paths to import
         .convert_to_diffusers: if True, convert legacy checkpoints into diffusers
-        '''
+        """
         # we're using a global here rather than storing the result in the parentapp
         # due to some bug in npyscreen that is causing attributes to be lost
         selections = self.parentApp.user_selections
 
         # starter models to install/remove
-        starter_models = dict(map(lambda x: (self.starter_model_list[x], True), self.models_selected.value))
-        selections.purge_deleted_models=False
-        if hasattr(self,'previously_installed_models'):
+        starter_models = dict(
+            map(
+                lambda x: (self.starter_model_list[x], True), self.models_selected.value
+            )
+        )
+        selections.purge_deleted_models = False
+        if hasattr(self, "previously_installed_models"):
             unchecked = [
                 self.previously_installed_models.values[x]
-                for x in range(0,len(self.previously_installed_models.values))
+                for x in range(0, len(self.previously_installed_models.values))
                 if x not in self.previously_installed_models.value
             ]
-            starter_models.update(
-                map(lambda x: (x, False), unchecked)
-            )
+            starter_models.update(map(lambda x: (x, False), unchecked))
             selections.purge_deleted_models = self.purge_deleted.value
-            selections.starter_models=starter_models
+            selections.starter_models = starter_models
 
         # load directory and whether to scan on startup
         if self.show_directory_fields.value:
@@ -303,61 +313,72 @@ class addModelsForm(npyscreen.FormMultiPage):
         # URLs and the like
         selections.import_model_paths = self.import_model_paths.value.split()
         selections.convert_to_diffusers = self.convert_models.value[0] == 1
 
+
 class AddModelApplication(npyscreen.NPSAppManaged):
     def __init__(self):
         super().__init__()
         self.user_cancelled = False
         self.user_selections = Namespace(
-            starter_models = None,
-            purge_deleted_models = False,
-            scan_directory = None,
-            autoscan_on_startup = None,
-            import_model_paths = None,
-            convert_to_diffusers = None
+            starter_models=None,
+            purge_deleted_models=False,
+            scan_directory=None,
+            autoscan_on_startup=None,
+            import_model_paths=None,
+            convert_to_diffusers=None,
         )
 
     def onStart(self):
         npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
         self.main_form = self.addForm(
-            "MAIN",
-            addModelsForm,
-            name="Install Stable Diffusion Models"
+            "MAIN", addModelsForm, name="Install Stable Diffusion Models"
         )
 
+
 # --------------------------------------------------------
 def process_and_execute(opt: Namespace, selections: Namespace):
-    models_to_remove = [x for x in selections.starter_models if not selections.starter_models[x]]
-    models_to_install = [x for x in selections.starter_models if selections.starter_models[x]]
+    models_to_remove = [
+        x for x in selections.starter_models if not selections.starter_models[x]
+    ]
+    models_to_install = [
+        x for x in selections.starter_models if selections.starter_models[x]
+    ]
     directory_to_scan = selections.scan_directory
     scan_at_startup = selections.autoscan_on_startup
     potential_models_to_install = selections.import_model_paths
    convert_to_diffusers = selections.convert_to_diffusers
 
     install_requested_models(
-        install_initial_models = models_to_install,
-        remove_models = models_to_remove,
-        scan_directory = Path(directory_to_scan) if directory_to_scan else None,
-        external_models = potential_models_to_install,
-        scan_at_startup = scan_at_startup,
-        convert_to_diffusers = convert_to_diffusers,
-        precision = 'float32' if opt.full_precision else choose_precision(torch.device(choose_torch_device())),
-        purge_deleted = selections.purge_deleted_models,
-        config_file_path = Path(opt.config_file) if opt.config_file else None,
+        install_initial_models=models_to_install,
+        remove_models=models_to_remove,
+        scan_directory=Path(directory_to_scan) if directory_to_scan else None,
+        external_models=potential_models_to_install,
+        scan_at_startup=scan_at_startup,
+        convert_to_diffusers=convert_to_diffusers,
+        precision="float32"
+        if opt.full_precision
+        else choose_precision(torch.device(choose_torch_device())),
+        purge_deleted=selections.purge_deleted_models,
+        config_file_path=Path(opt.config_file) if opt.config_file else None,
    )
 
+
 # --------------------------------------------------------
 def select_and_download_models(opt: Namespace):
-    precision= 'float32' if opt.full_precision else choose_precision(torch.device(choose_torch_device()))
+    precision = (
+        "float32"
+        if opt.full_precision
+        else choose_precision(torch.device(choose_torch_device()))
+    )
     if opt.default_only:
         install_requested_models(
-            install_initial_models = default_dataset(),
-            precision = precision,
+            install_initial_models=default_dataset(),
+            precision=precision,
         )
     elif opt.yes_to_all:
         install_requested_models(
-            install_initial_models = recommended_datasets(),
-            precision = precision,
+            install_initial_models=recommended_datasets(),
+            precision=precision,
         )
     else:
         installApp = AddModelApplication()
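Editor's note: the install_requested_models() call above also shows the PEP 8 spacing rule black enforces: no spaces around = for a plain keyword argument (scan_at_startup = scan_at_startup becomes scan_at_startup=scan_at_startup), while spaces are kept when the parameter carries a type annotation, as in the run_console_ui signature earlier in the diff (initfile: Path = None). Both rules in one hypothetical snippet:

def install(scan_at_startup: bool = False):  # annotated default: spaces around =
    return scan_at_startup

install(scan_at_startup=True)  # plain keyword argument: no spaces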
@@ -366,6 +387,7 @@ def select_and_download_models(opt: Namespace):
     if not installApp.user_cancelled:
         process_and_execute(opt, installApp.user_selections)
 
+
 # -------------------------------------
 def main():
     parser = argparse.ArgumentParser(description="InvokeAI model downloader")
@@ -410,8 +432,11 @@ def main():
     Globals.root = os.path.expanduser(get_root(opt.root) or "")
 
     if not global_config_dir().exists():
-        print('>> Your InvokeAI root directory is not set up. Calling invokeai-configure.')
+        print(
+            ">> Your InvokeAI root directory is not set up. Calling invokeai-configure."
+        )
         import ldm.invoke.config.invokeai_configure
+
         ldm.invoke.config.invokeai_configure.main()
         sys.exit(0)
 
@@ -427,15 +452,16 @@ def main():
             print(
                 "** Insufficient vertical space for the interface. Please make your window taller and try again"
             )
-        elif str(e).startswith('addwstr'):
+        elif str(e).startswith("addwstr"):
             print(
-                '** Insufficient horizontal space for the interface. Please make your window wider and try again.'
+                "** Insufficient horizontal space for the interface. Please make your window wider and try again."
             )
         else:
             print(f"** An error has occurred: {str(e)}")
             traceback.print_exc()
         sys.exit(-1)
 
+
 # -------------------------------------
 if __name__ == "__main__":
     main()
@@ -33,17 +33,22 @@ from omegaconf.dictconfig import DictConfig
 from picklescan.scanner import scan_file_path
 
 from ldm.invoke.devices import CPU_DEVICE
-from ldm.invoke.generator.diffusers_pipeline import \
-    StableDiffusionGeneratorPipeline
-from ldm.invoke.globals import (Globals, global_cache_dir)
-from ldm.util import (ask_user, download_with_resume,
-                      url_attachment_name, instantiate_from_config)
+from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
+from ldm.invoke.globals import Globals, global_cache_dir
+from ldm.util import (
+    ask_user,
+    download_with_resume,
+    instantiate_from_config,
+    url_attachment_name,
+)
 
+
 class SDLegacyType(Enum):
     V1 = 1
     V1_INPAINT = 2
     V2 = 3
     UNKNOWN = 99
 
+
 DEFAULT_MAX_MODELS = 2
 VAE_TO_REPO_ID = {  # hack, see note in convert_and_import()
@@ -58,7 +63,7 @@ class ModelManager(object):
         device_type: torch.device = CPU_DEVICE,
         precision: str = "float16",
         max_loaded_models=DEFAULT_MAX_MODELS,
-        sequential_offload = False
+        sequential_offload=False,
     ):
         """
         Initialize with the path to the models.yaml config file,
@@ -386,6 +391,7 @@ class ModelManager(object):
         from ldm.invoke.ckpt_to_diffuser import (
             load_pipeline_from_original_stable_diffusion_ckpt,
         )
+
         self.offload_model(self.current_model)
         if vae_config := self._choose_diffusers_vae(model_name):
             vae = self._load_vae(vae_config)
@@ -396,13 +402,15 @@ class ModelManager(object):
             original_config_file=config,
             vae=vae,
             return_generator_pipeline=True,
-            precision=torch.float16 if self.precision=='float16' else torch.float32,
+            precision=torch.float16
+            if self.precision == "float16"
+            else torch.float32,
         )
         if self.sequential_offload:
             pipeline.enable_offload_submodels(self.device)
         else:
             pipeline.to(self.device)
 
         return (
             pipeline,
             width,
@@ -615,12 +623,12 @@ class ModelManager(object):
         print(">> Model scanned ok")
 
     def import_diffuser_model(
         self,
         repo_or_path: Union[str, Path],
         model_name: str = None,
         model_description: str = None,
         vae: dict = None,
         commit_to_conf: Path = None,
     ) -> bool:
         """
         Attempts to install the indicated diffuser model and returns True if successful.
@@ -640,15 +648,15 @@ class ModelManager(object):
             vae=vae,
             format="diffusers",
         )
-        print(f'DEBUG: here i am 1')
+        print(f"DEBUG: here i am 1")
         if isinstance(repo_or_path, Path) and repo_or_path.exists():
             new_config.update(path=str(repo_or_path))
         else:
             new_config.update(repo_id=repo_or_path)
-        print(f'DEBUG: here i am 2')
+        print(f"DEBUG: here i am 2")
 
         self.add_model(model_name, new_config, True)
-        print(f'DEBUG: config = {self.config}')
+        print(f"DEBUG: config = {self.config}")
         if commit_to_conf:
             self.commit(commit_to_conf)
         return model_name
@@ -685,14 +693,16 @@ class ModelManager(object):
             model_name = model_name or url_attachment_name(weights)
 
         weights_path = self._resolve_path(weights, "models/ldm/stable-diffusion-v1")
         config_path = self._resolve_path(config, "configs/stable-diffusion")
 
         if weights_path is None or not weights_path.exists():
             return
         if config_path is None or not config_path.exists():
             return
 
-        model_name = model_name or Path(weights).stem # note this gives ugly pathnames if used on a URL without a Content-Disposition header
+        model_name = (
+            model_name or Path(weights).stem
+        )  # note this gives ugly pathnames if used on a URL without a Content-Disposition header
         model_description = (
             model_description or f"imported stable diffusion weights file {model_name}"
         )
@@ -712,8 +722,8 @@ class ModelManager(object):
         return model_name
 
     @classmethod
-    def probe_model_type(self, checkpoint: dict)->SDLegacyType:
-        '''
+    def probe_model_type(self, checkpoint: dict) -> SDLegacyType:
+        """
         Given a pickle or safetensors model object, probes contents
         of the object and returns an SDLegacyType indicating its
         format. Valid return values include:
@@ -721,14 +731,16 @@ class ModelManager(object):
         SDLegacyType.V1_INPAINT
         SDLegacyType.V2
         UNKNOWN
-        '''
+        """
         key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
         if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024:
             return SDLegacyType.V2
 
         try:
-            state_dict = checkpoint.get('state_dict') or checkpoint
-            in_channels = state_dict['model.diffusion_model.input_blocks.0.0.weight'].shape[1]
+            state_dict = checkpoint.get("state_dict") or checkpoint
+            in_channels = state_dict[
+                "model.diffusion_model.input_blocks.0.0.weight"
+            ].shape[1]
             if in_channels == 9:
                 return SDLegacyType.V1_INPAINT
             elif in_channels == 4:
@@ -739,15 +751,15 @@ class ModelManager(object):
             return SDLegacyType.UNKNOWN
 
     def heuristic_import(
         self,
         path_url_or_repo: str,
-        convert: bool= False,
+        convert: bool = False,
         model_name: str = None,
         description: str = None,
-        commit_to_conf: Path=None,
-    )->str:
-        '''
+        commit_to_conf: Path = None,
+    ) -> str:
+        """
         Accept a string which could be:
            - a HF diffusers repo_id
            - a URL pointing to a legacy .ckpt or .safetensors file
           - a local path pointing to a legacy .ckpt or .safetensors file
@@ -771,88 +783,119 @@ class ModelManager(object):
         The (potentially derived) name of the model is returned on success, or None
         on failure. When multiple models are added from a directory, only the last
         imported one is returned.
-        '''
+        """
         model_path: Path = None
         thing = path_url_or_repo  # to save typing
 
-        print(f'>> Probing {thing} for import')
+        print(f">> Probing {thing} for import")
 
-        if thing.startswith(('http:','https:','ftp:')):
-            print(f' | {thing} appears to be a URL')
-            model_path = self._resolve_path(thing, 'models/ldm/stable-diffusion-v1') # _resolve_path does a download if needed
+        if thing.startswith(("http:", "https:", "ftp:")):
+            print(f" | {thing} appears to be a URL")
+            model_path = self._resolve_path(
+                thing, "models/ldm/stable-diffusion-v1"
+            )  # _resolve_path does a download if needed
 
-        elif Path(thing).is_file() and thing.endswith(('.ckpt','.safetensors')):
-            if Path(thing).stem in ['model','diffusion_pytorch_model']:
-                print(f' | {Path(thing).name} appears to be part of a diffusers model. Skipping import')
+        elif Path(thing).is_file() and thing.endswith((".ckpt", ".safetensors")):
+            if Path(thing).stem in ["model", "diffusion_pytorch_model"]:
+                print(
+                    f" | {Path(thing).name} appears to be part of a diffusers model. Skipping import"
+                )
                 return
             else:
-                print(f' | {thing} appears to be a checkpoint file on disk')
-                model_path = self._resolve_path(thing, 'models/ldm/stable-diffusion-v1')
+                print(f" | {thing} appears to be a checkpoint file on disk")
+                model_path = self._resolve_path(thing, "models/ldm/stable-diffusion-v1")
 
-        elif Path(thing).is_dir() and Path(thing, 'model_index.json').exists():
-            print(f' | {thing} appears to be a diffusers file on disk')
+        elif Path(thing).is_dir() and Path(thing, "model_index.json").exists():
+            print(f" | {thing} appears to be a diffusers file on disk")
             model_name = self.import_diffuser_model(
                 thing,
-                vae=dict(repo_id='stabilityai/sd-vae-ft-mse'),
+                vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
                 model_name=model_name,
                 description=description,
-                commit_to_conf=commit_to_conf
+                commit_to_conf=commit_to_conf,
             )
 
         elif Path(thing).is_dir():
-            if (Path(thing) / 'model_index.json').exists():
-                print(f'>> {thing} appears to be a diffusers model.')
-                model_name = self.import_diffuser_model(thing, commit_to_conf=commit_to_conf)
+            if (Path(thing) / "model_index.json").exists():
+                print(f">> {thing} appears to be a diffusers model.")
+                model_name = self.import_diffuser_model(
+                    thing, commit_to_conf=commit_to_conf
+                )
             else:
-                print(f'>> {thing} appears to be a directory. Will scan for models to import')
-                for m in list(Path(thing).rglob('*.ckpt')) + list(Path(thing).rglob('*.safetensors')):
-                    if model_name := self.heuristic_import(str(m), convert, commit_to_conf=commit_to_conf):
-                        print(f' >> {model_name} successfully imported')
+                print(
+                    f">> {thing} appears to be a directory. Will scan for models to import"
+                )
+                for m in list(Path(thing).rglob("*.ckpt")) + list(
+                    Path(thing).rglob("*.safetensors")
+                ):
+                    if model_name := self.heuristic_import(
+                        str(m), convert, commit_to_conf=commit_to_conf
+                    ):
+                        print(f" >> {model_name} successfully imported")
                 return model_name
 
-        elif re.match(r'^[\w.+-]+/[\w.+-]+$', thing):
-            print(f' | {thing} appears to be a HuggingFace diffusers repo_id')
-            model_name = self.import_diffuser_model(thing, commit_to_conf=commit_to_conf)
-            pipeline,_,_,_ = self._load_diffusers_model(self.config[model_name])
+        elif re.match(r"^[\w.+-]+/[\w.+-]+$", thing):
+            print(f" | {thing} appears to be a HuggingFace diffusers repo_id")
+            model_name = self.import_diffuser_model(
+                thing, commit_to_conf=commit_to_conf
+            )
+            pipeline, _, _, _ = self._load_diffusers_model(self.config[model_name])
 
         else:
-            print(f"** {thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id")
+            print(
+                f"** {thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id"
+            )
 
         # Model_path is set in the event of a legacy checkpoint file.
         # If not set, we're all done
         if not model_path:
             return
 
-        if model_path.stem in self.config: #already imported
-            print(' | Already imported. Skipping')
+        if model_path.stem in self.config:  # already imported
+            print(" | Already imported. Skipping")
             return
 
         # another round of heuristics to guess the correct config file.
-        checkpoint = safetensors.torch.load_file(model_path) if model_path.suffix == '.safetensors' else torch.load(model_path)
+        checkpoint = (
+            safetensors.torch.load_file(model_path)
+            if model_path.suffix == ".safetensors"
+            else torch.load(model_path)
+        )
         model_type = self.probe_model_type(checkpoint)
 
         model_config_file = None
         if model_type == SDLegacyType.V1:
-            print(' | SD-v1 model detected')
-            model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
+            print(" | SD-v1 model detected")
+            model_config_file = Path(
+                Globals.root, "configs/stable-diffusion/v1-inference.yaml"
+            )
         elif model_type == SDLegacyType.V1_INPAINT:
-            print(' | SD-v1 inpainting model detected')
-            model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml')
+            print(" | SD-v1 inpainting model detected")
+            model_config_file = Path(
+                Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
+            )
         elif model_type == SDLegacyType.V2:
-            print(' | SD-v2 model detected; model will be converted to diffusers format')
-            model_config_file = Path(Globals.root,'configs/stable-diffusion/v2-inference-v.yaml')
+            print(
+                " | SD-v2 model detected; model will be converted to diffusers format"
+            )
+            model_config_file = Path(
+                Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
+            )
             convert = True
         else:
-            print(f'** {thing} is a legacy checkpoint file but not in a known Stable Diffusion model. Skipping import')
+            print(
+                f"** {thing} is a legacy checkpoint file but not in a known Stable Diffusion model. Skipping import"
+            )
             return
 
         if convert:
-            diffuser_path = Path(Globals.root, 'models',Globals.converted_ckpts_dir, model_path.stem)
+            diffuser_path = Path(
+                Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem
+            )
             model_name = self.convert_and_import(
                 model_path,
                 diffusers_path=diffuser_path,
-                vae=dict(repo_id='stabilityai/sd-vae-ft-mse'),
+                vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
                 model_name=model_name,
                 model_description=description,
                 original_config_file=model_config_file,
@@ -864,7 +907,12 @@ class ModelManager(object):
                 config=model_config_file,
                 model_name=model_name,
                 model_description=description,
-                vae=str(Path(Globals.root,'models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt')),
+                vae=str(
+                    Path(
+                        Globals.root,
+                        "models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt",
+                    )
+                ),
                 commit_to_conf=commit_to_conf,
             )
         if commit_to_conf:
@@ -872,23 +920,25 @@ class ModelManager(object):
         return model_name
 
     def convert_and_import(
         self,
         ckpt_path: Path,
         diffusers_path: Path,
         model_name=None,
         model_description=None,
         vae=None,
         original_config_file: Path = None,
         commit_to_conf: Path = None,
     ) -> dict:
         """
         Convert a legacy ckpt weights file to diffuser model and import
         into models.yaml.
         """
-        ckpt_path = self._resolve_path(ckpt_path, 'models/ldm/stable-diffusion-v1')
+        ckpt_path = self._resolve_path(ckpt_path, "models/ldm/stable-diffusion-v1")
         if original_config_file:
-            original_config_file = self._resolve_path(original_config_file, 'configs/stable-diffusion')
+            original_config_file = self._resolve_path(
+                original_config_file, "configs/stable-diffusion"
+            )
 
         new_config = None
 
         from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffuser
@@ -949,10 +999,11 @@ class ModelManager(object):
         found_models = []
         for file in files:
             location = str(file.resolve()).replace("\\", "/")
-            if 'model.safetensors' not in location and 'diffusion_pytorch_model.safetensors' not in location:
-                found_models.append(
-                    {"name": file.stem, "location": location}
-                )
+            if (
+                "model.safetensors" not in location
+                and "diffusion_pytorch_model.safetensors" not in location
+            ):
+                found_models.append({"name": file.stem, "location": location})
 
         return search_folder, found_models
 
@@ -1112,7 +1163,7 @@ class ModelManager(object):
         print("** Migration is done. Continuing...")
 
     def _resolve_path(
         self, source: Union[str, Path], dest_directory: str
     ) -> Optional[Path]:
         resolved_path = None
         if str(source).startswith(("http:", "https:", "ftp:")):