From 1f89cf3343a7dc3492247af0890708ec14104b1b Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 31 Mar 2023 04:27:03 -0400 Subject: [PATCH 1/5] remove vestiges of non-functional autoimport code for legacy checkpoints - Closes #3075 --- invokeai/backend/args.py | 2 +- .../backend/config/model_install_backend.py | 4 +-- invokeai/frontend/CLI/CLI.py | 7 +----- invokeai/frontend/install/model_install.py | 25 ------------------- 4 files changed, 3 insertions(+), 35 deletions(-) diff --git a/invokeai/backend/args.py b/invokeai/backend/args.py index 952b799d70..b6c2608b20 100644 --- a/invokeai/backend/args.py +++ b/invokeai/backend/args.py @@ -561,7 +561,7 @@ class Args(object): "--autoimport", default=None, type=str, - help="Check the indicated directory for .ckpt/.safetensors weights files at startup and import directly", + help="(DEPRECATED - NONFUNCTIONAL). Check the indicated directory for .ckpt/.safetensors weights files at startup and import directly", ) model_group.add_argument( "--autoconvert", diff --git a/invokeai/backend/config/model_install_backend.py b/invokeai/backend/config/model_install_backend.py index b5110c53ea..2018cd42af 100644 --- a/invokeai/backend/config/model_install_backend.py +++ b/invokeai/backend/config/model_install_backend.py @@ -67,7 +67,6 @@ def install_requested_models( scan_directory: Path = None, external_models: List[str] = None, scan_at_startup: bool = False, - convert_to_diffusers: bool = False, precision: str = "float16", purge_deleted: bool = False, config_file_path: Path = None, @@ -113,7 +112,6 @@ def install_requested_models( try: model_manager.heuristic_import( path_url_or_repo, - convert=convert_to_diffusers, commit_to_conf=config_file_path, ) except KeyboardInterrupt: @@ -122,7 +120,7 @@ def install_requested_models( pass if scan_at_startup and scan_directory.is_dir(): - argument = "--autoconvert" if convert_to_diffusers else "--autoimport" + argument = "--autoconvert" initfile = Path(Globals.root, Globals.initfile) replacement = Path(Globals.root, f"{Globals.initfile}.new") directory = str(scan_directory).replace("\\", "/") diff --git a/invokeai/frontend/CLI/CLI.py b/invokeai/frontend/CLI/CLI.py index 22e1bbd49d..44d0312954 100644 --- a/invokeai/frontend/CLI/CLI.py +++ b/invokeai/frontend/CLI/CLI.py @@ -158,14 +158,9 @@ def main(): report_model_error(opt, e) # try to autoconvert new models - if path := opt.autoimport: - gen.model_manager.heuristic_import( - str(path), convert=False, commit_to_conf=opt.conf - ) - if path := opt.autoconvert: gen.model_manager.heuristic_import( - str(path), convert=True, commit_to_conf=opt.conf + str(path), commit_to_conf=opt.conf ) # web server loops forever diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index e7b10c34e1..18ec6d55df 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -199,17 +199,6 @@ class addModelsForm(npyscreen.FormMultiPage): relx=4, scroll_exit=True, ) - self.nextrely += 1 - self.convert_models = self.add_widget_intelligent( - npyscreen.TitleSelectOne, - name="== CONVERT IMPORTED MODELS INTO DIFFUSERS==", - values=["Keep original format", "Convert to diffusers"], - value=0, - begin_entry_at=4, - max_height=4, - hidden=True, # will appear when imported models box is edited - scroll_exit=True, - ) self.cancel = self.add_widget_intelligent( npyscreen.ButtonPress, name="CANCEL", @@ -244,8 +233,6 @@ class addModelsForm(npyscreen.FormMultiPage): 
self.show_directory_fields.addVisibleWhenSelected(i) self.show_directory_fields.when_value_edited = self._clear_scan_directory - self.import_model_paths.when_value_edited = self._show_hide_convert - self.autoload_directory.when_value_edited = self._show_hide_convert def resize(self): super().resize() @@ -256,13 +243,6 @@ class addModelsForm(npyscreen.FormMultiPage): if not self.show_directory_fields.value: self.autoload_directory.value = "" - def _show_hide_convert(self): - model_paths = self.import_model_paths.value or "" - autoload_directory = self.autoload_directory.value or "" - self.convert_models.hidden = ( - len(model_paths) == 0 and len(autoload_directory) == 0 - ) - def _get_starter_model_labels(self) -> List[str]: window_width, window_height = get_terminal_size() label_width = 25 @@ -322,7 +302,6 @@ class addModelsForm(npyscreen.FormMultiPage): .scan_directory: Path to a directory of models to scan and import .autoscan_on_startup: True if invokeai should scan and import at startup time .import_model_paths: list of URLs, repo_ids and file paths to import - .convert_to_diffusers: if True, convert legacy checkpoints into diffusers """ # we're using a global here rather than storing the result in the parentapp # due to some bug in npyscreen that is causing attributes to be lost @@ -359,7 +338,6 @@ class addModelsForm(npyscreen.FormMultiPage): # URLs and the like selections.import_model_paths = self.import_model_paths.value.split() - selections.convert_to_diffusers = self.convert_models.value[0] == 1 class AddModelApplication(npyscreen.NPSAppManaged): @@ -372,7 +350,6 @@ class AddModelApplication(npyscreen.NPSAppManaged): scan_directory=None, autoscan_on_startup=None, import_model_paths=None, - convert_to_diffusers=None, ) def onStart(self): @@ -393,7 +370,6 @@ def process_and_execute(opt: Namespace, selections: Namespace): directory_to_scan = selections.scan_directory scan_at_startup = selections.autoscan_on_startup potential_models_to_install = selections.import_model_paths - convert_to_diffusers = selections.convert_to_diffusers install_requested_models( install_initial_models=models_to_install, @@ -401,7 +377,6 @@ def process_and_execute(opt: Namespace, selections: Namespace): scan_directory=Path(directory_to_scan) if directory_to_scan else None, external_models=potential_models_to_install, scan_at_startup=scan_at_startup, - convert_to_diffusers=convert_to_diffusers, precision="float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device())), From 7268131f576c639ce556d04afa28d674d53f81ed Mon Sep 17 00:00:00 2001 From: Thomas Date: Thu, 6 Apr 2023 08:14:11 -0400 Subject: [PATCH 2/5] change where !replay looks for its infile !fetch puts its output file into the output directory; it may be beneficial to have !replay look in the output directory as well. 
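
For illustration, a minimal sketch of the path resolution this change relies
on (Python standard-library semantics only; "prompts.txt" and the directory
names below are placeholders):

    import os

    outdir = "outputs"                        # stand-in for opt.outdir
    # A bare file name now resolves inside the output directory:
    os.path.join(outdir, "prompts.txt")       # -> "outputs/prompts.txt"
    # An absolute path still works, because os.path.join discards earlier
    # components when a later component is absolute (POSIX behavior):
    os.path.join(outdir, "/tmp/prompts.txt")  # -> "/tmp/prompts.txt"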
---
 invokeai/frontend/CLI/CLI.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/invokeai/frontend/CLI/CLI.py b/invokeai/frontend/CLI/CLI.py
index 22e1bbd49d..cebfe54391 100644
--- a/invokeai/frontend/CLI/CLI.py
+++ b/invokeai/frontend/CLI/CLI.py
@@ -581,6 +581,7 @@ def do_command(command: str, gen, opt: Args, completer) -> tuple:
 
     elif command.startswith("!replay"):
         file_path = command.replace("!replay", "", 1).strip()
+        file_path = os.path.join(opt.outdir, file_path)
         if infile is None and os.path.isfile(file_path):
             infile = open(file_path, "r", encoding="utf-8")
         completer.add_history(command)

From 4c339dd4b044be65a3aaf5f949d2409d774d9858 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 6 Apr 2023 17:08:23 -0400
Subject: [PATCH 3/5] refactor get_sub_model() into individual methods

---
 invokeai/backend/__init__.py                  |  2 +-
 invokeai/backend/model_management/__init__.py |  2 +-
 .../backend/model_management/model_manager.py | 55 ++++++++++++++++++-
 3 files changed, 54 insertions(+), 5 deletions(-)

diff --git a/invokeai/backend/__init__.py b/invokeai/backend/__init__.py
index dd126a322d..06066dd6b1 100644
--- a/invokeai/backend/__init__.py
+++ b/invokeai/backend/__init__.py
@@ -10,7 +10,7 @@ from .generator import (
     Img2Img,
     Inpaint
 )
-from .model_management import ModelManager, SDModelComponent
+from .model_management import ModelManager
 from .safety_checker import SafetyChecker
 from .args import Args
 from .globals import Globals
diff --git a/invokeai/backend/model_management/__init__.py b/invokeai/backend/model_management/__init__.py
index 4a82da849a..1d290050d4 100644
--- a/invokeai/backend/model_management/__init__.py
+++ b/invokeai/backend/model_management/__init__.py
@@ -5,6 +5,6 @@ from .convert_ckpt_to_diffusers import (
     convert_ckpt_to_diffusers,
     load_pipeline_from_original_stable_diffusion_ckpt,
 )
-from .model_manager import ModelManager,SDModelComponent
+from .model_manager import ModelManager
 
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index c76be93e8f..a51a2fec22 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -27,6 +27,7 @@ import transformers
 from diffusers import (
     AutoencoderKL,
     UNet2DConditionModel,
+    SchedulerMixin,
     logging as dlogging,
 )
 from huggingface_hub import scan_cache_dir
@@ -169,7 +170,55 @@ class ModelManager(object):
             "hash": hash,
         }
 
-    def get_sub_model(
+    def get_model_vae(self, model_name: str=None)->AutoencoderKL:
+        """Given a model name identified in models.yaml, load the model into
+        GPU if necessary and return its assigned VAE as an
+        AutoencoderKL object. If no model name is provided, return the
+        vae from the model currently in the GPU.
+        """
+        return self._get_sub_model(model_name, SDModelComponent.vae)
+
+    def get_model_tokenizer(self, model_name: str=None)->CLIPTokenizer:
+        """Given a model name identified in models.yaml, load the model into
+        GPU if necessary and return its assigned CLIPTokenizer. If no
+        model name is provided, return the tokenizer from the model
+        currently in the GPU.
+        """
+        return self._get_sub_model(model_name, SDModelComponent.tokenizer)
+
+    def get_model_unet(self, model_name: str=None)->UNet2DConditionModel:
+        """Given a model name identified in models.yaml, load the model into
+        GPU if necessary and return its assigned UNet2DConditionModel. If no model
+        name is provided, return the UNet from the model
+        currently in the GPU.
+        """
+        return self._get_sub_model(model_name, SDModelComponent.unet)
+
+    def get_model_text_encoder(self, model_name: str=None)->CLIPTextModel:
+        """Given a model name identified in models.yaml, load the model into
+        GPU if necessary and return its assigned CLIPTextModel. If no
+        model name is provided, return the text encoder from the model
+        currently in the GPU.
+        """
+        return self._get_sub_model(model_name, SDModelComponent.text_encoder)
+
+    def get_model_feature_extractor(self, model_name: str=None)->CLIPFeatureExtractor:
+        """Given a model name identified in models.yaml, load the model into
+        GPU if necessary and return its assigned CLIPFeatureExtractor. If no
+        model name is provided, return the feature extractor from the model
+        currently in the GPU.
+        """
+        return self._get_sub_model(model_name, SDModelComponent.feature_extractor)
+
+    def get_model_scheduler(self, model_name: str=None)->SchedulerMixin:
+        """Given a model name identified in models.yaml, load the model into
+        GPU if necessary and return its assigned scheduler. If no
+        model name is provided, return the scheduler from the model
+        currently in the GPU.
+        """
+        return self._get_sub_model(model_name, SDModelComponent.scheduler)
+
+    def _get_sub_model(
         self,
         model_name: str=None,
         model_part: SDModelComponent=SDModelComponent.vae,
@@ -181,7 +230,7 @@ class ModelManager(object):
         CLIPTextModel,
         StableDiffusionSafetyChecker,
     ]:
-        """Given a model named identified in models.yaml, and the part of the
+        """Given a model name identified in models.yaml, and the part of the
         model you wish to retrieve, return that part. Parts are in an Enum
         class named SDModelComponent, and consist of:
         SDModelComponent.vae
         SDModelComponent.text_encoder
         SDModelComponent.tokenizer
         SDModelComponent.unet
         SDModelComponent.scheduler
         SDModelComponent.safety_checker
-        SDModelComponent.feature_etractor
+        SDModelComponent.feature_extractor
         """
         model_dict = self.get_model(model_name)
         model = model_dict["model"]

From 50f5e1bc832d805e965c4a5dca16f4acd14ffcc5 Mon Sep 17 00:00:00 2001
From: Steven Frank
Date: Thu, 6 Apr 2023 16:47:57 -0700
Subject: [PATCH 4/5] Fix typo 'hotdot' to 'hotdog'; the world's least important PR :)

---
 docs/features/PROMPTS.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/features/PROMPTS.md b/docs/features/PROMPTS.md
index 85919a5b29..045e0d658a 100644
--- a/docs/features/PROMPTS.md
+++ b/docs/features/PROMPTS.md
@@ -268,7 +268,7 @@ model is so good at inpainting, a good substitute is to use the `clipseg` text
 masking option:
 
 ```bash
-invoke> a fluffy cat eating a hotdot
+invoke> a fluffy cat eating a hotdog
 Outputs:
 [1010] outputs/000025.2182095108.png: a fluffy cat eating a hotdog
 
 invoke> a smiling dog eating a hotdog -I 000025.2182095108.png -tm cat

From e356f2511b77bb91e980eb23d0c6b2f78f0e8934 Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Fri, 7 Apr 2023 14:36:46 +1000
Subject: [PATCH 5/5] chore: configure stale bot

---
 .github/stale.yaml | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
 create mode 100644 .github/stale.yaml

diff --git a/.github/stale.yaml b/.github/stale.yaml
new file mode 100644
index 0000000000..b9150235fc
--- /dev/null
+++ b/.github/stale.yaml
@@ -0,0 +1,19 @@
+# Number of days of inactivity before an issue becomes stale
+daysUntilStale: 28
+# Number of days of inactivity before a stale issue is closed
+daysUntilClose: 14
+# Issues with these labels will never be considered stale
+exemptLabels:
+  - pinned
+  - security
+# Label to use when marking an issue as stale
+staleLabel: stale
+# Comment to post when marking an issue as stale. Set to `false` to disable
+markComment: >
+  This issue has been automatically marked as stale because it has not had
+  recent activity. It will be closed if no further activity occurs. Please
+  update the ticket if this is still a problem on the latest release.
+# Comment to post when closing a stale issue. Set to `false` to disable
+closeComment: >
+  Due to inactivity, this issue has been automatically closed. If this is
+  still a problem on the latest release, please recreate the issue.
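
For reference, a minimal usage sketch of the typed accessors introduced in
PATCH 3/5. The model name and the ModelManager construction below are
hypothetical placeholders; actual initialization arguments depend on the
local models.yaml configuration:

    from invokeai.backend.model_management import ModelManager

    # Hypothetical setup -- constructor arguments vary by installation.
    manager = ModelManager("configs/models.yaml")

    # Each accessor loads the named model into the GPU if necessary and
    # returns that component; with no name, the currently loaded model is used.
    vae = manager.get_model_vae("stable-diffusion-1.5")    # AutoencoderKL
    tokenizer = manager.get_model_tokenizer("stable-diffusion-1.5")
    text_encoder = manager.get_model_text_encoder("stable-diffusion-1.5")
    unet = manager.get_model_unet("stable-diffusion-1.5")  # UNet2DConditionModel
    scheduler = manager.get_model_scheduler("stable-diffusion-1.5")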