mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)

Merge branch 'main' into lstein-improve-ti-frontend

This commit is contained in: commit ab675af264

.github/workflows/test-invoke-pip.yml (vendored)
@@ -119,6 +119,7 @@ jobs:
       run: >
         configure_invokeai.py
         --yes
+        --default_only
         --full-precision # can't use fp16 weights without a GPU

     - name: Run the tests
@@ -1,18 +1,32 @@
+stable-diffusion-2.1-768:
+  description: Stable Diffusion version 2.1 diffusers model, trained on 768x768 images (5.21 GB)
+  repo_id: stabilityai/stable-diffusion-2-1
+  format: diffusers
+  recommended: True
+stable-diffusion-2.1-base:
+  description: Stable Diffusion version 2.1 diffusers base model, trained on 512x512 images (5.21 GB)
+  repo_id: stabilityai/stable-diffusion-2-1-base
+  format: diffusers
+  recommended: False
 stable-diffusion-1.5:
   description: Stable Diffusion version 1.5 weight file (4.27 GB)
   repo_id: runwayml/stable-diffusion-v1-5
   format: diffusers
   recommended: True
+  default: True
   vae:
     repo_id: stabilityai/sd-vae-ft-mse
-  default: True
-stable-diffusion-2.1:
-  description: Stable Diffusion version 2.1 diffusers model (5.21 GB)
-  repo_id: stabilityai/stable-diffusion-2-1
-  format: diffusers
-  recommended: True
+stable-diffusion-1.4:
+  description: The original Stable Diffusion version 1.4 weight file (4.27 GB)
+  repo_id: CompVis/stable-diffusion-v1-4
+  recommended: False
+  format: diffusers
+  vae:
+    repo_id: stabilityai/sd-vae-ft-mse
+  width: 512
+  height: 512
 inpainting-1.5:
-  description: RunwayML SD 1.5 model optimized for inpainting (4.27 GB)
+  description: RunwayML SD 1.5 model optimized for inpainting (ckpt version) (4.27 GB)
   repo_id: runwayml/stable-diffusion-inpainting
   config: v1-inpainting-inference.yaml
   file: sd-v1-5-inpainting.ckpt
@@ -23,19 +37,13 @@ inpainting-1.5:
   recommended: True
   width: 512
   height: 512
-stable-diffusion-1.4:
-  description: The original Stable Diffusion version 1.4 weight file (4.27 GB)
-  repo_id: CompVis/stable-diffusion-v1-4
-  recommended: False
-  format: diffusers
-  vae:
-    repo_id: stabilityai/sd-vae-ft-mse
 waifu-diffusion-1.4:
-  description: Waifu diffusion 1.4
+  description: Latest waifu diffusion 1.4 (diffusers version)
   format: diffusers
   repo_id: hakurei/waifu-diffusion
+  recommended: True
 waifu-diffusion-1.3:
-  description: Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
+  description: Stable Diffusion 1.4 fine tuned on anime-styled images (ckpt version) (4.27 GB)
   repo_id: hakurei/waifu-diffusion-v1-3
   config: v1-inference.yaml
   file: model-epoch09-float32.ckpt
@@ -53,8 +61,8 @@ trinart-2.0:
   recommended: False
   vae:
     repo_id: stabilityai/sd-vae-ft-mse
-trinart_characters-2.0:
-  description: An SD model finetuned with 19.2M anime/manga style images (4.27 GB)
+trinart_characters-2_0:
+  description: An SD model finetuned with 19.2M anime/manga style images (ckpt version) (4.27 GB)
   repo_id: naclbit/trinart_derrida_characters_v2_stable_diffusion
   config: v1-inference.yaml
   file: derrida_final.ckpt
@@ -65,6 +73,11 @@ trinart_characters-2.0:
   recommended: False
   width: 512
   height: 512
+anything-4.0:
+  description: High-quality, highly detailed anime style images with just a few prompts
+  format: diffusers
+  repo_id: andite/anything-v4.0
+  recommended: False
 papercut-1.0:
   description: SD 1.5 fine-tuned for papercut art (use "PaperCut" in your prompts) (2.13 GB)
   repo_id: Fictiverse/Stable_Diffusion_PaperCut_Model
@@ -72,8 +85,6 @@ papercut-1.0:
   vae:
     repo_id: stabilityai/sd-vae-ft-mse
   recommended: False
-  width: 512
-  height: 512
 voxel_art-1.0:
   description: Stable Diffusion trained on voxel art (use "VoxelArt" in your prompts) (4.27 GB)
   repo_id: Fictiverse/Stable_Diffusion_VoxelArt_Model
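The stanzas above are plain YAML, so the effect of the `recommended` and new `default` flags can be illustrated with a small reader. The following is a sketch only, assuming a PyYAML-based loader, an assumed config path, and a hypothetical `pick_models` helper; it is not the loader that configure_invokeai.py actually uses.

```python
# Illustrative only: a minimal reader for the model stanzas above, not code from this commit.
# The config path and the pick_models() helper are assumptions made for the example.
import yaml  # PyYAML

def pick_models(config_path: str, default_only: bool = False) -> dict:
    """Return stanzas flagged 'default: True' (or, otherwise, 'recommended: True')."""
    with open(config_path) as f:
        datasets = yaml.safe_load(f)
    flag = 'default' if default_only else 'recommended'
    return {name: attrs for name, attrs in datasets.items() if attrs.get(flag, False)}

if __name__ == '__main__':
    chosen = pick_models('configs/INITIAL_MODELS.yaml', default_only=True)
    print(list(chosen))  # with the stanzas above: ['stable-diffusion-1.5']
```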
@@ -52,7 +52,7 @@ version of InvokeAI with the option to upgrade to experimental versions later.
    find python, then open the Python installer again and choose
    "Modify" existing installation.

-- Installation requires an up to date version of the Microsoft Visual C libraries. Please install the 2015-2022 libraries available here: https://learn.microsoft.com/en-us/cpp/windows/deploying-native-desktop-applications-visual-cpp?view=msvc-170
+- Installation requires an up to date version of the Microsoft Visual C libraries. Please install the 2015-2022 libraries available here: https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170

 === "Mac users"

@@ -12,17 +12,18 @@ title: Installing Manually

 ## Introduction

-You have two choices for manual installation, the [first
-one](#PIP_method) uses basic Python virtual environment (`venv`)
-commands and the PIP package manager. The [second one](#Conda_method)
-based on the Anaconda3 package manager (`conda`). Both methods require
-you to enter commands on the terminal, also known as the "console".
+You have two choices for manual installation.
+The [first one](#pip-Install) uses basic Python virtual environment (`venv`)
+command and `pip` package manager.
+The [second one](#Conda-method) uses Anaconda3 package manager (`conda`).
+Both methods require you to enter commands on the terminal, also known as the
+"console".

-Note that the conda install method is currently deprecated and will not
-be supported at some point in the future.
+Note that the `conda` installation method is currently deprecated and will
+not be supported at some point in the future.

-On Windows systems you are encouraged to install and use the
-[Powershell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
+On Windows systems, you are encouraged to install and use the
+[PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
 which provides compatibility with Linux and Mac shells and nice
 features such as command-line completion.

@@ -52,15 +53,15 @@ manager, please follow these steps:
    environment named `invokeai`:

    ```bash
-   python -mvenv invokeai
+   python -m venv invokeai
    source invokeai/bin/activate
    ```

 4. Make sure that pip is installed in your virtual environment an up to date:

    ```bash
-   python -mensurepip --upgrade
-   python -mpip install --upgrade pip
+   python -m ensurepip --upgrade
+   python -m pip install --upgrade pip
    ```

 5. Pick the correct `requirements*.txt` file for your hardware and operating
@@ -199,24 +200,24 @@ manager, please follow these steps:

     You can permanently set the location of the runtime directory by setting the environment variable INVOKEAI_ROOT to the path of the directory.

 9. Render away!

    Browse the [features](../features/CLI.md) section to learn about all the things you
    can do with InvokeAI.

    Note that some GPUs are slow to warm up. In particular, when using an AMD
    card with the ROCm driver, you may have to wait for over a minute the first
-   time you try to generate an image. Fortunately, after the warm up period
+   time you try to generate an image. Fortunately, after the warm-up period
    rendering will be fast.

-10. Subsequently, to relaunch the script, be sure to run "conda activate
-    invokeai", enter the `InvokeAI` directory, and then launch the invoke
-    script. If you forget to activate the 'invokeai' environment, the script
-    will fail with multiple `ModuleNotFound` errors.
+10. Subsequently, to relaunch the script, be sure to enter `InvokeAI` directory,
+    activate the virtual environment, and then launch `invoke.py` script.
+    If you forget to activate the virtual environment,
+    the script will fail with multiple `ModuleNotFound` errors.

 !!! tip

     Do not move the source code repository after installation. The virtual environment directory has absolute paths in it that get confused if the directory is moved.

 ---

@@ -1,6 +1,6 @@
 -r environments-and-requirements/requirements-base.txt
 # Get hardware-appropriate torch/torchvision
---extra-index-url https://download.pytorch.org/whl/rocm5.1.1 --trusted-host https://download.pytorch.org
+--extra-index-url https://download.pytorch.org/whl/rocm5.2 --trusted-host https://download.pytorch.org
 torch>=1.13.1
 torchvision>=0.14.1
 -e .

@@ -1,4 +1,4 @@
---extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
+--trusted-host https://download.pytorch.org
 -r environments-and-requirements/requirements-base.txt
 torch>=1.13.1
 torchvision>=0.14.1

@@ -1,6 +1,6 @@
 -r environments-and-requirements/requirements-base.txt
 # Get hardware-appropriate torch/torchvision
---extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
+--extra-index-url https://download.pytorch.org/whl/cu117 --trusted-host https://download.pytorch.org
 torch==1.13.1
 torchvision==0.14.1
 -e .
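The wheel-index bumps above (rocm5.1.1 to rocm5.2, cu116 to cu117) only matter if pip actually resolves torch from the new index. A quick, hedged way to confirm which accelerator build ended up installed; this is an illustrative check using standard torch attributes, not part of the commit:

```python
# Illustrative check (not part of the commit): report which accelerator build of
# torch got installed after changing the --extra-index-url wheels.
import torch

print("torch version:", torch.__version__)   # e.g. '1.13.1+cu117' or '1.13.1+rocm5.2'
print("CUDA/ROCm runtime:", torch.version.cuda or getattr(torch.version, "hip", None))
print("GPU available:", torch.cuda.is_available())
print("MPS available:", getattr(torch.backends, "mps", None) is not None
      and torch.backends.mps.is_available())
```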
@@ -29,7 +29,7 @@ from ldm.invoke.concepts_lib import HuggingFaceConceptsLibrary
 from ldm.invoke.conditioning import get_uc_and_c_and_ec
 from ldm.invoke.devices import choose_torch_device, choose_precision
 from ldm.invoke.generator.inpaint import infill_methods
-from ldm.invoke.globals import global_cache_dir
+from ldm.invoke.globals import global_cache_dir, Globals
 from ldm.invoke.image_util import InitImageResizer
 from ldm.invoke.model_manager import ModelManager
 from ldm.invoke.pngwriter import PngWriter

@@ -201,6 +201,7 @@ class Generate:
             self.precision = 'float32'
         if self.precision == 'auto':
             self.precision = choose_precision(self.device)
+        Globals.full_precision = self.precision=='float32'

         # model caching system for fast switching
         self.model_manager = ModelManager(mconfig,self.device,self.precision,max_loaded_models=max_loaded_models)
@@ -613,8 +613,6 @@ def import_diffuser_model(path_or_repo:str, gen, opt, completer)->str:
                            description = model_description):
         print('** model failed to import')
         return None
-    if input('Make this the default model? [n] ').startswith(('y','Y')):
-        manager.set_default_model(model_name)
     return model_name

 def import_ckpt_model(path_or_url:str, gen, opt, completer)->str:

@@ -647,8 +645,6 @@ def import_ckpt_model(path_or_url:str, gen, opt, completer)->str:
         print('** model failed to import')
         return None

-    if input('Make this the default model? [n] ').startswith(('y','Y')):
-        manager.set_model_default(model_name)
     return model_name

 def _verify_load(model_name:str, gen)->bool:
@@ -726,6 +722,9 @@ def del_config(model_name:str, gen, opt, completer):
     if model_name == current_model:
         print("** Can't delete active model. !switch to another model first. **")
         return
+    if model_name not in gen.model_manager.config:
+        print(f"** Unknown model {model_name}")
+        return
     gen.model_manager.del_model(model_name)
     gen.model_manager.commit(opt.conf)
     print(f'** {model_name} deleted')
@@ -335,4 +335,5 @@ class CkptGenerator():
             os.makedirs(dirname, exist_ok=True)
         image.save(filepath,'PNG')

+    def torch_dtype(self)->torch.dtype:
+        return torch.float16 if self.precision == 'float16' else torch.float32
@@ -72,16 +72,18 @@ class CkptTxt2Img(CkptGenerator):
         device = self.model.device
         if self.use_mps_noise or device.type == 'mps':
             x = torch.randn([1,
                             self.latent_channels,
                             height // self.downsampling_factor,
                             width // self.downsampling_factor],
-                            device='cpu').to(device)
+                            dtype=self.torch_dtype(),
+                            device='cpu').to(device)
         else:
             x = torch.randn([1,
                             self.latent_channels,
                             height // self.downsampling_factor,
                             width // self.downsampling_factor],
-                            device=device)
+                            dtype=self.torch_dtype(),
+                            device=device)
         if self.perlin > 0.0:
             x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor)
         return x
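The pattern above threads the generator's dtype into torch.randn so that freshly drawn latent noise matches a half-precision model instead of defaulting to float32. A minimal, self-contained illustration of why that matters; the latent shape is a hypothetical stand-in, not the InvokeAI call site:

```python
# Sketch: torch.randn defaults to float32, so mixing it with float16 latents
# silently promotes the result back to float32. Passing dtype avoids that.
import torch

latent_shape = (1, 4, 64, 64)           # hypothetical latent dimensions
half_latents = torch.zeros(latent_shape, dtype=torch.float16)

default_noise = torch.randn(latent_shape)                        # float32
typed_noise   = torch.randn(latent_shape, dtype=torch.float16)   # matches the model

print((half_latents + default_noise).dtype)  # torch.float32 (promoted)
print((half_latents + typed_noise).dtype)    # torch.float16
```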
@@ -21,10 +21,19 @@ def choose_precision(device) -> str:
             return 'float16'
     return 'float32'

+def torch_dtype(device) -> torch.dtype:
+    if Globals.full_precision:
+        return torch.float32
+    if choose_precision(device) == 'float16':
+        return torch.float16
+    else:
+        return torch.float32
+
 def choose_autocast(precision):
     '''Returns an autocast context or nullcontext for the given precision string'''
     # float16 currently requires autocast to avoid errors like:
     # 'expected scalar type Half but found Float'
+    print(f'DEBUG: choose_autocast() called')
     if precision == 'autocast' or precision == 'float16':
         return autocast
     return nullcontext
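Together with the new Globals.full_precision flag set in Generate above, the helper added here resolves a torch dtype from the runtime precision choice. A condensed, stand-alone sketch of that decision follows; the Globals object and the simplified choose_precision are stand-ins for this example, not the real ldm.invoke modules:

```python
# Sketch of the dtype resolution added in this merge; `Globals` is a stand-in
# namespace and choose_precision() is simplified for the example.
import torch
from types import SimpleNamespace

Globals = SimpleNamespace(full_precision=False)

def choose_precision(device: torch.device) -> str:
    # Simplified: the real helper also special-cases older NVIDIA cards and MPS.
    return 'float16' if device.type == 'cuda' else 'float32'

def torch_dtype(device: torch.device) -> torch.dtype:
    if Globals.full_precision:
        return torch.float32             # full precision overrides everything
    return torch.float16 if choose_precision(device) == 'float16' else torch.float32

print(torch_dtype(torch.device('cpu')))   # torch.float32
Globals.full_precision = True
print(torch_dtype(torch.device('cuda')))  # torch.float32, forced by the flag
```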
@@ -8,6 +8,7 @@ import os
 import os.path as osp
 import random
 import traceback
+from contextlib import nullcontext

 import cv2
 import numpy as np

@@ -18,8 +19,6 @@ from einops import rearrange
 from pytorch_lightning import seed_everything
 from tqdm import trange

-from ldm.invoke.devices import choose_autocast
-from ldm.models.diffusion.cross_attention_map_saving import AttentionMapSaver
 from ldm.models.diffusion.ddpm import DiffusionWrapper
 from ldm.util import rand_perlin_2d

@@ -64,7 +63,7 @@ class Generator:
                  image_callback=None, step_callback=None, threshold=0.0, perlin=0.0,
                  safety_checker:dict=None,
                  **kwargs):
-        scope = choose_autocast(self.precision)
+        scope = nullcontext
         self.safety_checker = safety_checker
         attention_maps_images = []
         attention_maps_callback = lambda saver: attention_maps_images.append(saver.get_stacked_maps_image())

@@ -236,7 +235,8 @@ class Generator:

     def get_perlin_noise(self,width,height):
         fixdevice = 'cpu' if (self.model.device.type == 'mps') else self.model.device
-        return torch.stack([rand_perlin_2d((height, width), (8, 8), device = self.model.device).to(fixdevice) for _ in range(self.latent_channels)], dim=0).to(self.model.device)
+        noise = torch.stack([rand_perlin_2d((height, width), (8, 8), device = self.model.device).to(fixdevice) for _ in range(self.latent_channels)], dim=0).to(self.model.device)
+        return noise

     def new_seed(self):
         self.seed = random.randrange(0, np.iinfo(np.uint32).max)

@@ -341,3 +341,6 @@ class Generator:
         image.save(filepath,'PNG')

+    def torch_dtype(self)->torch.dtype:
+        return torch.float16 if self.precision == 'float16' else torch.float32
+
@@ -391,7 +391,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         for i, t in enumerate(self.progress_bar(timesteps)):
             batched_t.fill_(t)
             step_output = self.step(batched_t, latents, conditioning_data,
-                                    i, additional_guidance=additional_guidance)
+                                    step_index=i,
+                                    total_step_count=len(timesteps),
+                                    additional_guidance=additional_guidance)
             latents = step_output.prev_sample
             predicted_original = getattr(step_output, 'pred_original_sample', None)

@@ -410,7 +412,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
     @torch.inference_mode()
     def step(self, t: torch.Tensor, latents: torch.Tensor,
              conditioning_data: ConditioningData,
-             step_index:int | None = None, additional_guidance: List[Callable] = None):
+             step_index:int, total_step_count:int,
+             additional_guidance: List[Callable] = None):
         # invokeai_diffuser has batched timesteps, but diffusers schedulers expect a single value
         timestep = t[0]

@@ -427,6 +430,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
             conditioning_data.unconditioned_embeddings, conditioning_data.text_embeddings,
             conditioning_data.guidance_scale,
             step_index=step_index,
+            total_step_count=total_step_count,
             threshold=conditioning_data.threshold
         )

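These hunks thread total_step_count from the sampling loop down into the diffusion component, so that cross-attention-control scheduling can be expressed as a fraction of the denoising run. A tiny stand-alone sketch of the fraction the diffusers codepath computes further down in this diff; the step count is an arbitrary example value:

```python
# Sketch: percent_through = step_index / total_step_count, as used by the
# diffusers codepath later in this merge. It deliberately never reaches 1.0.
total_step_count = 10

for step_index in range(total_step_count):
    percent_through = step_index / total_step_count
    print(f"step {step_index}: {percent_through:.1f}")
# prints 0.0, 0.1, ..., 0.9 -- the final step reports 0.9, not 1.0
```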
@@ -36,10 +36,9 @@ class Txt2Img(Generator):
                              threshold = ThresholdSettings(threshold, warmup=0.2) if threshold else None)
             .add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta))

-
         def make_image(x_T) -> PIL.Image.Image:
             pipeline_output = pipeline.image_from_embeddings(
-                latents=torch.zeros_like(x_T),
+                latents=torch.zeros_like(x_T,dtype=self.torch_dtype()),
                 noise=x_T,
                 num_inference_steps=steps,
                 conditioning_data=conditioning_data,

@@ -59,16 +58,18 @@ class Txt2Img(Generator):
         input_channels = min(self.latent_channels, 4)
         if self.use_mps_noise or device.type == 'mps':
             x = torch.randn([1,
                             input_channels,
                             height // self.downsampling_factor,
                             width // self.downsampling_factor],
-                            device='cpu').to(device)
+                            dtype=self.torch_dtype(),
+                            device='cpu').to(device)
         else:
             x = torch.randn([1,
                             input_channels,
                             height // self.downsampling_factor,
                             width // self.downsampling_factor],
-                            device=device)
+                            dtype=self.torch_dtype(),
+                            device=device)
         if self.perlin > 0.0:
             x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor)
         return x
@@ -90,9 +90,9 @@ class Txt2Img2Img(Generator):
     def get_noise_like(self, like: torch.Tensor):
         device = like.device
         if device.type == 'mps':
-            x = torch.randn_like(like, device='cpu').to(device)
+            x = torch.randn_like(like, device='cpu', dtype=self.torch_dtype()).to(device)
         else:
-            x = torch.randn_like(like, device=device)
+            x = torch.randn_like(like, device=device, dtype=self.torch_dtype())
         if self.perlin > 0.0:
             shape = like.shape
             x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(shape[3], shape[2])

@@ -117,10 +117,12 @@ class Txt2Img2Img(Generator):
                                 self.latent_channels,
                                 scaled_height // self.downsampling_factor,
                                 scaled_width // self.downsampling_factor],
-                               device='cpu').to(device)
+                               dtype=self.torch_dtype(),
+                               device='cpu').to(device)
         else:
             return torch.randn([1,
                                 self.latent_channels,
                                 scaled_height // self.downsampling_factor,
                                 scaled_width // self.downsampling_factor],
-                               device=device)
+                               dtype=self.torch_dtype(),
+                               device=device)
@@ -43,6 +43,9 @@ Globals.always_use_cpu = False
 # The CLI will test connectivity at startup time.
 Globals.internet_available = True

+# whether we are forcing full precision
+Globals.full_precision = False
+
 def global_config_dir()->Path:
     return Path(Globals.root, Globals.config_dir)

@@ -63,7 +66,17 @@ def global_cache_dir(subdir:Union[str,Path]='')->Path:
     global_cache_dir('diffusers')
     global_cache_dir('transformers')
     '''
-    if (home := os.environ.get('HF_HOME')):
+    home: str = os.getenv('HF_HOME')
+
+    if home is None:
+        home = os.getenv('XDG_CACHE_HOME')
+
+        if home is not None:
+            # Set `home` to $XDG_CACHE_HOME/huggingface, which is the default location mentioned in HuggingFace Hub Client Library.
+            # See: https://huggingface.co/docs/huggingface_hub/main/en/package_reference/environment_variables#xdgcachehome
+            home += os.sep + 'huggingface'
+
+    if home is not None:
         return Path(home,subdir)
     else:
         return Path(Globals.root,'models',subdir)
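The rewritten lookup above prefers HF_HOME, then falls back to $XDG_CACHE_HOME/huggingface, then to the InvokeAI models directory. A stand-alone sketch of that resolution order; the fallback root passed as models_root is a placeholder, not the real Globals.root value:

```python
# Sketch of the HF_HOME / XDG_CACHE_HOME fallback order used by global_cache_dir
# above; 'models_root' stands in for Globals.root / 'models'.
import os
from pathlib import Path

def cache_dir(subdir: str = '', models_root: str = '~/invokeai/models') -> Path:
    home = os.getenv('HF_HOME')
    if home is None:
        xdg = os.getenv('XDG_CACHE_HOME')
        if xdg is not None:
            # HuggingFace Hub's documented default location under XDG_CACHE_HOME
            home = os.path.join(xdg, 'huggingface')
    if home is not None:
        return Path(home, subdir)
    return Path(os.path.expanduser(models_root), subdir)

print(cache_dir('diffusers'))
```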
@@ -230,6 +230,9 @@ class ModelManager(object):
         Delete the named model.
         '''
         omega = self.config
+        if model_name not in omega:
+            print(f'** Unknown model {model_name}')
+            return
         del omega[model_name]
         if model_name in self.stack:
             self.stack.remove(model_name)

@@ -253,9 +256,8 @@ class ModelManager(object):

         assert (clobber or model_name not in omega), f'attempt to overwrite existing model definition "{model_name}"'

-        if model_name not in omega:
-            omega[model_name] = dict()
-        OmegaConf.update(omega,model_name,model_attributes,merge=False)
+        omega[model_name] = model_attributes
+
         if 'weights' in omega[model_name]:
             omega[model_name]['weights'].replace('\\','/')

@@ -349,7 +351,7 @@ class ModelManager(object):

         if self.precision == 'float16':
             print(' | Using faster float16 precision')
-            model.to(torch.float16)
+            model = model.to(torch.float16)
         else:
             print(' | Using more accurate float32 precision')

@@ -753,19 +755,31 @@ class ModelManager(object):

         print('** Legacy version <= 2.2.5 model directory layout detected. Reorganizing.')
         print('** This is a quick one-time operation.')
-        from shutil import move
+        from shutil import move, rmtree

         # transformer files get moved into the hub directory
-        hub = models_dir / 'hub'
+        if cls._is_huggingface_hub_directory_present():
+            hub = global_cache_dir('hub')
+        else:
+            hub = models_dir / 'hub'
+
         os.makedirs(hub, exist_ok=True)
         for model in legacy_locations:
-            source = models_dir /model
+            source = models_dir / model
+            dest = hub / model.stem
+            print(f'** {source} => {dest}')
             if source.exists():
-                print(f'DEBUG: Moving {models_dir / model} into hub')
-                move(models_dir / model, hub)
+                if dest.exists():
+                    rmtree(source)
+                else:
+                    move(source, dest)

         # anything else gets moved into the diffusers directory
-        diffusers = models_dir / 'diffusers'
+        if cls._is_huggingface_hub_directory_present():
+            diffusers = global_cache_dir('diffusers')
+        else:
+            diffusers = models_dir / 'diffusers'
+
         os.makedirs(diffusers, exist_ok=True)
         for root, dirs, _ in os.walk(models_dir, topdown=False):
             for dir in dirs:
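The migration above now resolves the destination first and, when a copy already exists there, discards the legacy source instead of calling shutil.move into an occupied path. A generic sketch of that idempotent move-or-discard pattern; the paths are placeholders rather than InvokeAI's real layout, and the source is assumed to be a directory as in the hunk above:

```python
# Sketch of the move-or-discard pattern used by the reorganisation code above.
# Source/destination here are placeholders, not InvokeAI's real directory names.
from pathlib import Path
from shutil import move, rmtree

def migrate(source: Path, dest: Path) -> None:
    if not source.exists():
        return                      # nothing to migrate
    if dest.exists():
        rmtree(source)              # already migrated once; drop the stale copy
    else:
        dest.parent.mkdir(parents=True, exist_ok=True)
        move(str(source), str(dest))
```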
@@ -773,7 +787,12 @@ class ModelManager(object):
                 if full_path.is_relative_to(hub) or full_path.is_relative_to(diffusers):
                     continue
                 if Path(dir).match('models--*--*'):
-                    move(full_path,diffusers)
+                    dest = diffusers / dir
+                    print(f'** {full_path} => {dest}')
+                    if dest.exists():
+                        rmtree(full_path)
+                    else:
+                        move(full_path,dest)

         # now clean up by removing any empty directories
         empty = [root for root, dirs, files, in os.walk(models_dir) if not len(dirs) and not len(files)]

@@ -951,3 +970,7 @@ class ModelManager(object):
             print(f'** Could not load VAE {name_or_path}: {str(deferred_error)}')

         return vae
+
+    @staticmethod
+    def _is_huggingface_hub_directory_present() -> bool:
+        return os.getenv('HF_HOME') is not None or os.getenv('XDG_CACHE_HOME') is not None
@@ -7,6 +7,7 @@ import torch
 import diffusers
 from torch import nn
 from diffusers.models.unet_2d_condition import UNet2DConditionModel
+from ldm.invoke.devices import torch_dtype

 # adapted from bloc97's CrossAttentionControl colab
 # https://github.com/bloc97/CrossAttentionControl

@@ -383,7 +384,7 @@ def inject_attention_function(unet, context: Context):
                 remapped_saved_attention_slice = torch.index_select(saved_attention_slice, -1, index_map)
                 this_attention_slice = suggested_attention_slice

-                mask = context.cross_attention_mask
+                mask = context.cross_attention_mask.to(torch_dtype(suggested_attention_slice.device))
                 saved_mask = mask
                 this_mask = 1 - mask
                 attention_slice = remapped_saved_attention_slice * saved_mask + \
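Casting the cross-attention mask to the working dtype before it is multiplied into half-precision attention slices avoids an implicit promotion back to float32. A small, self-contained illustration of the cast; the tensors and shapes are hypothetical, not the attention code above:

```python
# Sketch: blending two float16 attention slices through a float32 mask promotes
# the result to float32; casting the mask first keeps everything in float16.
import torch

saved = torch.rand(2, 8, 8, dtype=torch.float16)
this  = torch.rand(2, 8, 8, dtype=torch.float16)
mask  = torch.ones(8, 8)                      # float32 by default

blended = saved * mask + this * (1 - mask)
print(blended.dtype)                          # torch.float32 (promoted)

mask16 = mask.to(saved.dtype)                 # analogous to .to(torch_dtype(device))
blended = saved * mask16 + this * (1 - mask16)
print(blended.dtype)                          # torch.float16
```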
@@ -89,6 +89,7 @@ class InvokeAIDiffuserComponent:
                                  conditioning: Union[torch.Tensor,dict],
                                  unconditional_guidance_scale: float,
                                  step_index: Optional[int]=None,
+                                 total_step_count: Optional[int]=None,
                                  threshold: Optional[ThresholdSettings]=None,
                                  ):
         """

@@ -106,7 +107,15 @@ class InvokeAIDiffuserComponent:
         cross_attention_control_types_to_do = []
         context: Context = self.cross_attention_control_context
         if self.cross_attention_control_context is not None:
-            percent_through = self.estimate_percent_through(step_index, sigma)
+            if step_index is not None and total_step_count is not None:
+                # 🧨diffusers codepath
+                percent_through = step_index / total_step_count  # will never reach 1.0 - this is deliberate
+            else:
+                # legacy compvis codepath
+                # TODO remove when compvis codepath support is dropped
+                if step_index is None and sigma is None:
+                    raise ValueError(f"Either step_index or sigma is required when doing cross attention control, but both are None.")
+                percent_through = self.estimate_percent_through(step_index, sigma)
             cross_attention_control_types_to_do = context.get_active_cross_attention_control_types_for_step(percent_through)

         wants_cross_attention_control = (len(cross_attention_control_types_to_do) > 0)
@@ -4,7 +4,7 @@ import torch
 from transformers import CLIPTokenizer, CLIPTextModel

 from ldm.modules.textual_inversion_manager import TextualInversionManager
+from ldm.invoke.devices import torch_dtype

 class WeightedPromptFragmentsToEmbeddingsConverter():

@@ -207,7 +207,7 @@ class WeightedPromptFragmentsToEmbeddingsConverter():
             per_token_weights += [1.0] * pad_length

         all_token_ids_tensor = torch.tensor(all_token_ids, dtype=torch.long, device=device)
-        per_token_weights_tensor = torch.tensor(per_token_weights, dtype=torch.float32, device=device)
+        per_token_weights_tensor = torch.tensor(per_token_weights, dtype=torch_dtype(self.text_encoder.device), device=device)
         #print(f"assembled all_token_ids_tensor with shape {all_token_ids_tensor.shape}")
         return all_token_ids_tensor, per_token_weights_tensor

@@ -111,7 +111,6 @@ class TextualInversionManager():
         if ti.trigger_token_id is not None:
             raise ValueError(f"Tokens already injected for textual inversion with trigger '{ti.trigger_string}'")

-        print(f'DEBUG: Injecting token {ti.trigger_string}')
         trigger_token_id = self._get_or_create_token_id_and_assign_embedding(ti.trigger_string, ti.embedding[0])

         if ti.embedding_vector_length > 1:
@@ -8,6 +8,7 @@ from threading import Thread
 from urllib import request
 from tqdm import tqdm
 from pathlib import Path
+from ldm.invoke.devices import torch_dtype

 import numpy as np
 import torch

@@ -235,7 +236,8 @@ def rand_perlin_2d(shape, res, device, fade = lambda t: 6*t**5 - 15*t**4 + 10*t**3):
     n01 = dot(tile_grads([0, -1],[1, None]), [0, -1]).to(device)
     n11 = dot(tile_grads([1, None], [1, None]), [-1,-1]).to(device)
     t = fade(grid[:shape[0], :shape[1]])
-    return math.sqrt(2) * torch.lerp(torch.lerp(n00, n10, t[..., 0]), torch.lerp(n01, n11, t[..., 0]), t[..., 1]).to(device)
+    noise = math.sqrt(2) * torch.lerp(torch.lerp(n00, n10, t[..., 0]), torch.lerp(n01, n11, t[..., 0]), t[..., 1]).to(device)
+    return noise.to(dtype=torch_dtype(device))

 def ask_user(question: str, answers: list):
     from itertools import chain, repeat
@@ -197,6 +197,14 @@ def recommended_datasets()->dict:
             datasets[ds]=True
     return datasets

+#---------------------------------------------
+def default_dataset()->dict:
+    datasets = dict()
+    for ds in Datasets.keys():
+        if Datasets[ds].get('default',False):
+            datasets[ds]=True
+    return datasets
+
 #---------------------------------------------
 def all_datasets()->dict:
     datasets = dict()
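For reference, against the initial-models configuration near the top of this diff, where only stable-diffusion-1.5 carries default: True, the new helper reduces to a one-entry dict. A toy reproduction with an inline stand-in for the Datasets mapping; the inlined dict is illustrative, not the parsed YAML:

```python
# Illustrative only: a stand-in Datasets mapping mirroring the stanzas shown earlier.
Datasets = {
    'stable-diffusion-1.5':     {'recommended': True, 'default': True},
    'stable-diffusion-2.1-768': {'recommended': True},
    'waifu-diffusion-1.3':      {'recommended': False},
}

def default_dataset() -> dict:
    return {ds: True for ds in Datasets if Datasets[ds].get('default', False)}

print(default_dataset())   # {'stable-diffusion-1.5': True}
```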
@@ -646,7 +654,7 @@ def download_weights(opt:dict) -> Union[str, None]:
     precision = 'float32' if opt.full_precision else choose_precision(torch.device(choose_torch_device()))

     if opt.yes_to_all:
-        models = recommended_datasets()
+        models = default_dataset() if opt.default_only else recommended_datasets()
         access_token = authenticate(opt.yes_to_all)
         if len(models)>0:
             successfully_downloaded = download_weight_datasets(models, access_token, precision=precision)
@@ -808,6 +816,9 @@ def main():
                         dest='yes_to_all',
                         action='store_true',
                         help='answer "yes" to all prompts')
+    parser.add_argument('--default_only',
+                        action='store_true',
+                        help='when --yes specified, only install the default model')
     parser.add_argument('--config_file',
                         '-c',
                         dest='config_file',
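This is the flag that the updated test-invoke-pip workflow at the top of the diff now passes (configure_invokeai.py --yes --default_only), so CI downloads only the default model rather than every recommended one. A compressed sketch of how the two options interact, wiring only; the dataset helpers below are stubs standing in for configure_invokeai.py's real functions:

```python
# Sketch of the --yes / --default_only interaction added in this merge; the
# dataset helpers are stubs, not configure_invokeai.py's real implementations.
import argparse

def default_dataset():      return {'stable-diffusion-1.5': True}
def recommended_datasets(): return {'stable-diffusion-1.5': True, 'stable-diffusion-2.1-768': True}

parser = argparse.ArgumentParser()
parser.add_argument('--yes', '-y', dest='yes_to_all', action='store_true')
parser.add_argument('--default_only', action='store_true')
opt = parser.parse_args(['--yes', '--default_only'])

if opt.yes_to_all:
    models = default_dataset() if opt.default_only else recommended_datasets()
    print(models)   # {'stable-diffusion-1.5': True}
```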