mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)

commit e4196bbe5b (parent 15ffb53e59)

    adjust non-app modules to use new config system
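
The diff below retires the legacy `Globals` module-level state and per-module `InvokeAIAppConfig()` construction in favor of a shared accessor, `get_invokeai_config()`. A minimal sketch of the accessor pattern, assuming a cached module global (this is a hypothetical illustration, not InvokeAI's actual implementation; the `InvokeAIAppConfig` stand-in carries only the one field the sketch needs):

    from typing import Optional, Type

    class InvokeAIAppConfig:
        """Stand-in config; the real class parses argv, environment and an init file."""
        def __init__(self, argv: Optional[list] = None) -> None:
            self.root = "~/invokeai"   # runtime root directory

    _config: Optional[InvokeAIAppConfig] = None

    def get_invokeai_config(cls: Type[InvokeAIAppConfig] = InvokeAIAppConfig, **kwargs) -> InvokeAIAppConfig:
        """Build the config once, then hand the same instance to every caller."""
        global _config
        if _config is None:
            _config = cls(**kwargs)
        return _config

The class argument matters because the web API requests a subclass (first hunk below), while backend modules take the default.
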
@@ -17,7 +17,7 @@ from .api.dependencies import ApiDependencies
 from .api.routers import images, sessions, models
 from .api.sockets import SocketIO
 from .invocations.baseinvocation import BaseInvocation
-from .services.config import InvokeAIWebConfig
+from .services.config import InvokeAIWebConfig, get_invokeai_config

 # Create the app
 # TODO: create this all in a method so configuration/etc. can be passed in?
@@ -133,7 +133,7 @@ def invoke_api():
     # parse command-line settings, environment and the init file
     # (this is a module global)
     global web_config
-    web_config = InvokeAIWebConfig()
+    web_config = get_invokeai_config(InvokeAIWebConfig)
     app.add_middleware(
         CORSMiddleware,
         allow_origins=web_config.allow_origins,
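
In the API server the accessor is called with a config class, `get_invokeai_config(InvokeAIWebConfig)`. Continuing the sketch above, a hypothetical `InvokeAIWebConfig` subclass (`allow_origins` is taken from the hunk above; everything else is assumption):

    class InvokeAIWebConfig(InvokeAIAppConfig):
        """Stand-in web config layering CORS settings on top of the app config."""
        def __init__(self, argv: Optional[list] = None) -> None:
            super().__init__(argv)
            self.allow_origins: list = []   # origins allowed by the CORS middleware

    web_config = get_invokeai_config(InvokeAIWebConfig)
    assert web_config is get_invokeai_config()   # later no-arg calls see the same instance
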
@@ -33,7 +33,7 @@ from .services.invocation_services import InvocationServices
 from .services.invoker import Invoker
 from .services.processor import DefaultInvocationProcessor
 from .services.sqlite import SqliteItemStorage
-from .services.config import InvokeAIAppConfig
+from .services.config import get_invokeai_config

 class CliCommand(BaseModel):
     command: Union[BaseCommand.get_commands() + BaseInvocation.get_invocations()] = Field(discriminator="type")  # type: ignore
@@ -188,7 +188,7 @@ def invoke_all(context: CliContext):


 def invoke_cli():
-    config = InvokeAIAppConfig()
+    config = get_invokeai_config()
     model_manager = get_model_manager(config, logger=logger)

     events = EventServiceBase()
@@ -44,28 +44,29 @@ from ...frontend.install.widgets import (
     IntTitleSlider,
     set_min_terminal_size,
 )
-from ..args import PRECISION_CHOICES, Args
-from ..globals import Globals, global_cache_dir, global_config_dir, global_config_file
 from .model_install_backend import (
     default_dataset,
     download_from_hf,
     hf_download_with_resume,
     recommended_datasets,
 )
+from invokeai.app.services.config import get_invokeai_config

 warnings.filterwarnings("ignore")

 transformers.logging.set_verbosity_error()

 # --------------------------globals-----------------------
+config = get_invokeai_config()

 Model_dir = "models"
 Weights_dir = "ldm/stable-diffusion-v1/"

 # the initial "configs" dir is now bundled in the `invokeai.configs` package
 Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml"

-Default_config_file = Path(global_config_dir()) / "models.yaml"
-SD_Configs = Path(global_config_dir()) / "stable-diffusion"
+Default_config_file = config.model_conf_path
+SD_Configs = config.legacy_conf_path

 Datasets = OmegaConf.load(Dataset_path)
@@ -73,6 +74,8 @@ Datasets = OmegaConf.load(Dataset_path)
 MIN_COLS = 135
 MIN_LINES = 45

+PRECISION_CHOICES = ['auto','float16','float32','autocast']
+
 INIT_FILE_PREAMBLE = """# InvokeAI initialization file
 # This is the InvokeAI initialization file, which contains command-line default values.
 # Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
@@ -103,7 +106,7 @@ Command-line interface:
    invokeai

 If you installed using an installation script, run:
-{Globals.root}/invoke.{"bat" if sys.platform == "win32" else "sh"}
+{config.root}/invoke.{"bat" if sys.platform == "win32" else "sh"}

 Add the '--help' argument to see all of the command-line switches available for use.
 """
@@ -216,11 +219,11 @@ def download_realesrgan():
     wdn_model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth"

     model_dest = os.path.join(
-        Globals.root, "models/realesrgan/realesr-general-x4v3.pth"
+        config.root, "models/realesrgan/realesr-general-x4v3.pth"
     )

     wdn_model_dest = os.path.join(
-        Globals.root, "models/realesrgan/realesr-general-wdn-x4v3.pth"
+        config.root, "models/realesrgan/realesr-general-wdn-x4v3.pth"
    )

     download_with_progress_bar(model_url, model_dest, "RealESRGAN")
@@ -243,7 +246,7 @@ def download_gfpgan():
             "./models/gfpgan/weights/parsing_parsenet.pth",
         ],
     ):
-        model_url, model_dest = model[0], os.path.join(Globals.root, model[1])
+        model_url, model_dest = model[0], os.path.join(config.root, model[1])
         download_with_progress_bar(model_url, model_dest, "GFPGAN weights")

@@ -253,7 +256,7 @@ def download_codeformer():
     model_url = (
         "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth"
     )
-    model_dest = os.path.join(Globals.root, "models/codeformer/codeformer.pth")
+    model_dest = os.path.join(config.root, "models/codeformer/codeformer.pth")
     download_with_progress_bar(model_url, model_dest, "CodeFormer")

@@ -306,7 +309,7 @@ def download_vaes():
         if not hf_download_with_resume(
             repo_id=repo_id,
             model_name=model_name,
-            model_dir=str(Globals.root / Model_dir / Weights_dir),
+            model_dir=str(config.root / Model_dir / Weights_dir),
         ):
             raise Exception(f"download of {model_name} failed")
     except Exception as e:
@@ -321,8 +324,7 @@ def get_root(root: str = None) -> str:
     elif os.environ.get("INVOKEAI_ROOT"):
         return os.environ.get("INVOKEAI_ROOT")
     else:
-        return Globals.root
+        return config.root
-

 # -------------------------------------
 class editOptsForm(npyscreen.FormMultiPage):
@@ -332,7 +334,7 @@ class editOptsForm(npyscreen.FormMultiPage):
     def create(self):
         program_opts = self.parentApp.program_opts
         old_opts = self.parentApp.invokeai_opts
-        first_time = not (Globals.root / Globals.initfile).exists()
+        first_time = not (config.root / 'invokeai.init').exists()
         access_token = HfFolder.get_token()
         window_width, window_height = get_terminal_size()
         for i in [
@@ -384,7 +386,7 @@ class editOptsForm(npyscreen.FormMultiPage):
         self.safety_checker = self.add_widget_intelligent(
             npyscreen.Checkbox,
             name="NSFW checker",
-            value=old_opts.safety_checker,
+            value=old_opts.nsfw_checker,
             relx=5,
             scroll_exit=True,
         )
@@ -438,14 +440,7 @@ class editOptsForm(npyscreen.FormMultiPage):
         self.xformers = self.add_widget_intelligent(
             npyscreen.Checkbox,
             name="Enable xformers support if available",
-            value=old_opts.xformers,
-            relx=5,
-            scroll_exit=True,
-        )
-        self.ckpt_convert = self.add_widget_intelligent(
-            npyscreen.Checkbox,
-            name="Load legacy checkpoint models into memory as diffusers models",
-            value=old_opts.ckpt_convert,
+            value=old_opts.xformers_enabled,
             relx=5,
             scroll_exit=True,
         )
@@ -583,7 +578,6 @@ class editOptsForm(npyscreen.FormMultiPage):
             "xformers",
             "always_use_cpu",
             "embedding_path",
-            "ckpt_convert",
         ]:
             setattr(new_opts, attr, getattr(self, attr).value)

@@ -628,15 +622,14 @@ def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Namespace:


 def default_startup_options(init_file: Path) -> Namespace:
-    opts = Args().parse_args([])
+    opts = InvokeAIAppConfig(argv=[])
     outdir = Path(opts.outdir)
     if not outdir.is_absolute():
-        opts.outdir = str(Globals.root / opts.outdir)
+        opts.outdir = str(config.root / opts.outdir)
     if not init_file.exists():
-        opts.safety_checker = True
+        opts.nsfw_checker = True
     return opts
-

 def default_user_selections(program_opts: Namespace) -> Namespace:
     return Namespace(
         starter_models=default_dataset()
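
Note the `argv=[]` idiom in `default_startup_options` above: parsing an empty argument list yields pure defaults, untouched by whatever happens to be on the real command line. A small self-contained illustration of the same idiom with argparse (the flag name is invented for the example):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--outdir", default="outputs")

    defaults = parser.parse_args([])   # parse nothing: every option takes its default
    print(defaults.outdir)             # -> "outputs", regardless of sys.argv
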
@@ -724,7 +717,6 @@ def write_opts(opts: Namespace, init_file: Path):
 --max_loaded_models={int(opts.max_loaded_models)}
 --{'no-' if not opts.safety_checker else ''}nsfw_checker
 --{'no-' if not opts.xformers else ''}xformers
---{'no-' if not opts.ckpt_convert else ''}ckpt_convert
 {'--free_gpu_mem' if opts.free_gpu_mem else ''}
 {'--always_use_cpu' if opts.always_use_cpu else ''}
 """
@@ -740,13 +732,11 @@ def write_opts(opts: Namespace, init_file: Path):

 # -------------------------------------
 def default_output_dir() -> Path:
-    return Globals.root / "outputs"
+    return config.root / "outputs"
-

 # -------------------------------------
 def default_embedding_dir() -> Path:
-    return Globals.root / "embeddings"
+    return config.root / "embeddings"
-

 # -------------------------------------
 def write_default_options(program_opts: Namespace, initfile: Path):
@@ -810,7 +800,7 @@ def main():
     opt = parser.parse_args()

     # setting a global here
-    Globals.root = Path(os.path.expanduser(get_root(opt.root) or ""))
+    config.root = Path(os.path.expanduser(get_root(opt.root) or ""))

     errors = set()

@@ -818,9 +808,10 @@ def main():
     models_to_download = default_user_selections(opt)

     # We check to see if the runtime directory is correctly initialized.
-    init_file = Path(Globals.root, Globals.initfile)
-    if not init_file.exists() or not global_config_file().exists():
-        initialize_rootdir(Globals.root, opt.yes_to_all)
+    print('** invokeai.init init file is no longer supported. Migrate this code to invokeai.yaml **')
+    init_file = Path(config.root, 'invokeai.init')
+    if not init_file.exists() or not config.model_conf_path.exists():
+        initialize_rootdir(config.root, opt.yes_to_all)

     if opt.yes_to_all:
         write_default_options(opt, init_file)
@@ -19,13 +19,15 @@ from tqdm import tqdm

 import invokeai.configs as configs

-from ..globals import Globals, global_cache_dir, global_config_dir
+from invokeai.app.services.config import get_invokeai_config
 from ..model_management import ModelManager
 from ..stable_diffusion import StableDiffusionGeneratorPipeline

 warnings.filterwarnings("ignore")

 # --------------------------globals-----------------------
+config = get_invokeai_config()
+
 Model_dir = "models"
 Weights_dir = "ldm/stable-diffusion-v1/"

@@ -47,12 +49,11 @@ Config_preamble = """


 def default_config_file():
-    return Path(global_config_dir()) / "models.yaml"
+    return config.model_conf_path


 def sd_configs():
-    return Path(global_config_dir()) / "stable-diffusion"
+    return config.legacy_conf_path
-

 def initial_models():
     global Datasets
@@ -121,8 +122,9 @@ def install_requested_models(

     if scan_at_startup and scan_directory.is_dir():
         argument = "--autoconvert"
-        initfile = Path(Globals.root, Globals.initfile)
-        replacement = Path(Globals.root, f"{Globals.initfile}.new")
+        print('** The global initfile is no longer supported; rewrite to support new yaml format **')
+        initfile = Path(config.root, 'invokeai.init')
+        replacement = Path(config.root, 'invokeai.init.new')
         directory = str(scan_directory).replace("\\", "/")
         with open(initfile, "r") as input:
             with open(replacement, "w") as output:
@@ -150,7 +152,7 @@ def get_root(root: str = None) -> str:
     elif os.environ.get("INVOKEAI_ROOT"):
         return os.environ.get("INVOKEAI_ROOT")
     else:
-        return Globals.root
+        return config.root


 # ---------------------------------------------
@@ -183,7 +185,7 @@ def all_datasets() -> dict:
 # look for legacy model.ckpt in models directory and offer to
 # normalize its name
 def migrate_models_ckpt():
-    model_path = os.path.join(Globals.root, Model_dir, Weights_dir)
+    model_path = os.path.join(config.root, Model_dir, Weights_dir)
     if not os.path.exists(os.path.join(model_path, "model.ckpt")):
         return
     new_name = initial_models()["stable-diffusion-1.4"]["file"]
@@ -228,7 +230,7 @@ def _download_repo_or_file(
 def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path:
     repo_id = mconfig["repo_id"]
     filename = mconfig["file"]
-    cache_dir = os.path.join(Globals.root, Model_dir, Weights_dir)
+    cache_dir = os.path.join(config.root, Model_dir, Weights_dir)
     return hf_download_with_resume(
         repo_id=repo_id,
         model_dir=cache_dir,
@@ -239,9 +241,9 @@ def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path:

 # ---------------------------------------------
 def download_from_hf(
-    model_class: object, model_name: str, cache_subdir: Path = Path("hub"), **kwargs
+    model_class: object, model_name: str, **kwargs
 ):
-    path = global_cache_dir(cache_subdir)
+    path = config.cache_dir
     model = model_class.from_pretrained(
         model_name,
         cache_dir=path,
@@ -417,7 +419,7 @@ def new_config_file_contents(
             stanza["height"] = mod["height"]
         if "file" in mod:
             stanza["weights"] = os.path.relpath(
-                successfully_downloaded[model], start=Globals.root
+                successfully_downloaded[model], start=config.root
             )
             stanza["config"] = os.path.normpath(
                 os.path.join(sd_configs(), mod["config"])
@@ -456,7 +458,7 @@ def delete_weights(model_name: str, conf_stanza: dict):

     weights = Path(weights)
     if not weights.is_absolute():
-        weights = Path(Globals.root) / weights
+        weights = Path(config.root) / weights
     try:
         weights.unlink()
     except OSError as e:
@@ -6,9 +6,9 @@ be suppressed or deferred
 """
 import numpy as np
 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config import get_invokeai_config

-config = InvokeAIAppConfig()
+config = get_invokeai_config()

 class PatchMatch:
     """
@@ -33,11 +33,11 @@ from PIL import Image, ImageOps
 from transformers import AutoProcessor, CLIPSegForImageSegmentation

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config import get_invokeai_config

 CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
 CLIPSEG_SIZE = 352
-config = InvokeAIAppConfig()
+config = get_invokeai_config()

 class SegmentedGrayscale(object):
     def __init__(self, image: Image, heatmap: torch.Tensor):
@@ -26,7 +26,7 @@ import torch
 from safetensors.torch import load_file

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config import get_invokeai_config

 from .model_manager import ModelManager, SDLegacyType

@@ -73,7 +73,7 @@ from transformers import (

 from ..stable_diffusion import StableDiffusionGeneratorPipeline

-config = InvokeAIAppConfig()
+config = get_invokeai_config()

 def shave_segments(path, n_shave_prefix_segments=1):
     """
@@ -47,7 +47,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import (
 from ..stable_diffusion import (
     StableDiffusionGeneratorPipeline,
 )
-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config import get_invokeai_config
 from ..util import CUDA_DEVICE, ask_user, download_with_resume

 class SDLegacyType(Enum):
@@ -68,7 +68,7 @@ class SDModelComponent(Enum):
     feature_extractor="feature_extractor"

 DEFAULT_MAX_MODELS = 2
-config = InvokeAIAppConfig()
+config = get_invokeai_config()

 class ModelManager(object):
     """
@@ -20,11 +20,11 @@ from compel.prompt_parser import (

 import invokeai.backend.util.logging as logger

-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config import get_invokeai_config
 from ..stable_diffusion import InvokeAIDiffuserComponent
 from ..util import torch_dtype

-config = InvokeAIAppConfig()
+config = get_invokeai_config()

 def get_uc_and_c_and_ec(
     prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False
@@ -6,8 +6,8 @@ import numpy as np
 import torch

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import InvokeAIAppConfig
-config = InvokeAIAppConfig()
+from invokeai.app.services.config import get_invokeai_config
+config = get_invokeai_config()

 pretrained_model_url = (
     "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth"
@@ -7,8 +7,8 @@ import torch
 from PIL import Image

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import InvokeAIAppConfig
-config = InvokeAIAppConfig()
+from invokeai.app.services.config import get_invokeai_config
+config = get_invokeai_config()

 class GFPGAN:
     def __init__(self, gfpgan_model_path="models/gfpgan/GFPGANv1.4.pth") -> None:
@@ -15,10 +15,10 @@ from transformers import AutoFeatureExtractor

 import invokeai.assets.web as web_assets
 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config import get_invokeai_config
 from .util import CPU_DEVICE

-config = InvokeAIAppConfig()
+config = get_invokeai_config()

 class SafetyChecker(object):
     CAUTION_IMG = "caution.png"
@@ -18,8 +18,8 @@ from huggingface_hub import (
 )

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import InvokeAIAppConfig
-config = InvokeAIAppConfig()
+from invokeai.app.services.config import get_invokeai_config
+config = get_invokeai_config()

 class HuggingFaceConceptsLibrary(object):
     def __init__(self, root=None):
@@ -33,7 +33,7 @@ from torchvision.transforms.functional import resize as tv_resize
 from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
 from typing_extensions import ParamSpec

-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config import get_invokeai_config
 from ..util import CPU_DEVICE, normalize_device
 from .diffusion import (
     AttentionMapSaver,
@@ -43,7 +43,7 @@ from .diffusion import (
 from .offloading import FullyLoadedModelGroup, LazilyLoadedModelGroup, ModelGroup
 from .textual_inversion_manager import TextualInversionManager

-config = InvokeAIAppConfig()
+config = get_invokeai_config()

 @dataclass
 class PipelineIntermediateState:
@@ -9,7 +9,7 @@ from diffusers.models.attention_processor import AttentionProcessor
 from typing_extensions import TypeAlias

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config import get_invokeai_config

 from .cross_attention_control import (
     Arguments,
@@ -31,7 +31,7 @@ ModelForwardCallback: TypeAlias = Union[
     Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor],
 ]

-config = InvokeAIAppConfig()
+config = get_invokeai_config()

 @dataclass(frozen=True)
 class PostprocessingSettings:
@@ -4,12 +4,12 @@ from contextlib import nullcontext

 import torch
 from torch import autocast
-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config import get_invokeai_config

 CPU_DEVICE = torch.device("cpu")
 CUDA_DEVICE = torch.device("cuda")
 MPS_DEVICE = torch.device("mps")
-config = InvokeAIAppConfig()
+config = get_invokeai_config()

 def choose_torch_device() -> torch.device:
     """Convenience routine for guessing which GPU device to run model on"""
@@ -23,7 +23,6 @@ from npyscreen import widget
 from omegaconf import OmegaConf

 import invokeai.backend.util.logging as logger
-from invokeai.backend.globals import Globals, global_config_dir

 from ...backend.config.model_install_backend import (
     Dataset_path,
@@ -41,11 +40,13 @@ from .widgets import (
     TextBox,
     set_min_terminal_size,
 )
+from invokeai.app.services.config import get_invokeai_config

 # minimum size for the UI
 MIN_COLS = 120
 MIN_LINES = 45

+config = get_invokeai_config()

 class addModelsForm(npyscreen.FormMultiPage):
     # for responsive resizing - disabled
@@ -453,9 +454,9 @@ def main():
     opt = parser.parse_args()

     # setting a global here
-    Globals.root = os.path.expanduser(get_root(opt.root) or "")
+    config.root = os.path.expanduser(get_root(opt.root) or "")

-    if not global_config_dir().exists():
+    if not (config.conf_path / '..').exists():
         logger.info(
             "Your InvokeAI root directory is not set up. Calling invokeai-configure."
         )
@@ -8,7 +8,6 @@ import argparse
 import curses
 import os
 import sys
-import traceback
 import warnings
 from argparse import Namespace
 from pathlib import Path
@@ -20,20 +19,13 @@ from diffusers import logging as dlogging
 from npyscreen import widget
 from omegaconf import OmegaConf

-from ...backend.globals import (
-    Globals,
-    global_cache_dir,
-    global_config_file,
-    global_models_dir,
-    global_set_root,
-)

 import invokeai.backend.util.logging as logger
+from invokeai.app.services.config import get_invokeai_config
 from ...backend.model_management import ModelManager
 from ...frontend.install.widgets import FloatTitleSlider

 DEST_MERGED_MODEL_DIR = "merged_models"
+config = get_invokeai_config()

 def merge_diffusion_models(
     model_ids_or_paths: List[Union[str, Path]],
@@ -60,7 +52,7 @@ def merge_diffusion_models(

     pipe = DiffusionPipeline.from_pretrained(
         model_ids_or_paths[0],
-        cache_dir=kwargs.get("cache_dir", global_cache_dir()),
+        cache_dir=kwargs.get("cache_dir", config.cache_dir),
         custom_pipeline="checkpoint_merger",
     )
     merged_pipe = pipe.merge(
@@ -94,7 +86,7 @@ def merge_diffusion_models_and_commit(
    **kwargs - the default DiffusionPipeline.get_config_dict kwargs:
         cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map
    """
-    config_file = global_config_file()
+    config_file = config.model_conf_path
     model_manager = ModelManager(OmegaConf.load(config_file))
     for mod in models:
         assert mod in model_manager.model_names(), f'** Unknown model "{mod}"'
@@ -106,7 +98,7 @@ def merge_diffusion_models_and_commit(
     merged_pipe = merge_diffusion_models(
         model_ids_or_paths, alpha, interp, force, **kwargs
     )
-    dump_path = global_models_dir() / DEST_MERGED_MODEL_DIR
+    dump_path = config.models_dir / DEST_MERGED_MODEL_DIR

     os.makedirs(dump_path, exist_ok=True)
     dump_path = dump_path / merged_model_name
@@ -126,7 +118,7 @@ def _parse_args() -> Namespace:
     parser.add_argument(
         "--root_dir",
         type=Path,
-        default=Globals.root,
+        default=config.root,
         help="Path to the invokeai runtime directory",
     )
     parser.add_argument(
@@ -398,7 +390,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
 class Mergeapp(npyscreen.NPSAppManaged):
     def __init__(self):
         super().__init__()
-        conf = OmegaConf.load(global_config_file())
+        conf = OmegaConf.load(config.model_conf_path)
         self.model_manager = ModelManager(
             conf, "cpu", "float16"
         )  # precision doesn't really matter here
@@ -429,7 +421,7 @@ def run_cli(args: Namespace):
            f'No --merged_model_name provided. Defaulting to "{args.merged_model_name}"'
        )

-    model_manager = ModelManager(OmegaConf.load(global_config_file()))
+    model_manager = ModelManager(OmegaConf.load(config.model_conf_path))
     assert (
         args.clobber or args.merged_model_name not in model_manager.model_names()
     ), f'A model named "{args.merged_model_name}" already exists. Use --clobber to overwrite.'
@@ -440,9 +432,9 @@ def run_cli(args: Namespace):

 def main():
     args = _parse_args()
-    global_set_root(args.root_dir)
+    config.root = args.root_dir

-    cache_dir = str(global_cache_dir("hub"))
+    cache_dir = str(config.cache_dir)
     os.environ[
         "HF_HOME"
     ] = cache_dir  # because not clear the merge pipeline is honoring cache_dir
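
A caution on the final hunk: the merge script exports HF_HOME because, per the comment in the diff itself, it is unclear the merge pipeline honors the cache_dir kwarg. Since `os.environ` values must be strings, a `Path`-valued `cache_dir` needs an explicit `str()`. A short self-contained illustration (the cache location is an assumed layout, for the example only):

    import os
    from pathlib import Path

    cache_dir = Path("~/invokeai/models/hub").expanduser()   # assumed layout
    os.environ["HF_HOME"] = str(cache_dir)   # assigning a raw Path here raises TypeError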