Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)

Commit be305588d3: merged and rebuilt
Changed paths:

  .github/workflows
  docs/installation
  invokeai/app
  invokeai/backend
  invokeai/frontend/web
    .eslintrc.js
    dist/
      App-d3a51b95.mjs
      ThemeLocaleProvider-e939cd27.mjs
      component-6919b100.mjs
      assets/
        App-08e5c546.css, App-b40e839f.js, ThemeLocaleProvider-bd116415.js,
        favicon-0d253ced.ico, index-5483945c.css, index-e1f916bd.js,
        logo-13003d72.png, storeHooks-548a355c.js, plus the rebuilt Inter
        webfont set (inter-{all,cyrillic,cyrillic-ext,greek,greek-ext,latin,latin-ext}-{100..900}-normal-*.woff/.woff2)
.github/workflows/close-inactive-issues.yml (vendored, 1 change)

@@ -24,3 +24,4 @@ jobs:
           days-before-pr-stale: -1
           days-before-pr-close: -1
           repo-token: ${{ secrets.GITHUB_TOKEN }}
+          operations-per-run: 500
.github/workflows/test-invoke-pip-skip.yml (vendored, 12 changes)

@@ -1,12 +1,12 @@
 name: Test invoke.py pip
 on:
   pull_request:
-    paths-ignore:
-      - 'pyproject.toml'
-      - 'invokeai/**'
-      - 'invokeai/backend/**'
-      - 'invokeai/configs/**'
-      - 'invokeai/frontend/web/dist/**'
+    paths:
+      - '**'
+      - '!pyproject.toml'
+      - '!invokeai/**'
+      - 'invokeai/frontend/web/**'
+      - '!invokeai/frontend/web/dist/**'
   merge_group:
   workflow_dispatch:
.github/workflows/test-invoke-pip.yml (vendored, 6 changes)

@@ -6,15 +6,13 @@ on:
     paths:
       - 'pyproject.toml'
+      - 'invokeai/**'
-      - 'invokeai/backend/**'
-      - 'invokeai/configs/**'
       - '!invokeai/frontend/web/**'
       - 'invokeai/frontend/web/dist/**'
   pull_request:
     paths:
       - 'pyproject.toml'
+      - 'invokeai/**'
-      - 'invokeai/backend/**'
-      - 'invokeai/configs/**'
       - '!invokeai/frontend/web/**'
       - 'invokeai/frontend/web/dist/**'
     types:
       - 'ready_for_review'
docs/installation:

@@ -148,7 +148,7 @@ manager, please follow these steps:
     === "CUDA (NVidia)"

         ```bash
-        pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
+        pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
         ```

     === "ROCm (AMD)"
@@ -4,7 +4,8 @@ import os
 from argparse import Namespace

 from ...backend import Globals
-from ..services.generate_initializer import get_generate
+from ..services.model_manager_initializer import get_model_manager
+from ..services.restoration_services import RestorationServices
 from ..services.graph import GraphExecutionState
 from ..services.image_storage import DiskImageStorage
 from ..services.invocation_queue import MemoryInvocationQueue
@@ -37,18 +38,16 @@ class ApiDependencies:
     invoker: Invoker = None

     @staticmethod
-    def initialize(args, config, event_handler_id: int):
-        Globals.try_patchmatch = args.patchmatch
-        Globals.always_use_cpu = args.always_use_cpu
-        Globals.internet_available = args.internet_available and check_internet()
-        Globals.disable_xformers = not args.xformers
-        Globals.ckpt_convert = args.ckpt_convert
+    def initialize(config, event_handler_id: int):
+        Globals.try_patchmatch = config.patchmatch
+        Globals.always_use_cpu = config.always_use_cpu
+        Globals.internet_available = config.internet_available and check_internet()
+        Globals.disable_xformers = not config.xformers
+        Globals.ckpt_convert = config.ckpt_convert

         # TODO: Use a logger
         print(f">> Internet connectivity is {Globals.internet_available}")

-        generate = get_generate(args, config)
-
         events = FastAPIEventService(event_handler_id)

         output_folder = os.path.abspath(
@@ -61,7 +60,7 @@ class ApiDependencies:
         db_location = os.path.join(output_folder, "invokeai.db")

         services = InvocationServices(
-            generate=generate,
+            model_manager=get_model_manager(config),
             events=events,
             images=images,
             queue=MemoryInvocationQueue(),
@@ -69,6 +68,7 @@ class ApiDependencies:
                 filename=db_location, table_name="graph_executions"
             ),
             processor=DefaultInvocationProcessor(),
+            restoration=RestorationServices(config),
         )

         ApiDependencies.invoker = Invoker(services)
@@ -1,5 +1,4 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
-
 import asyncio
 from inspect import signature

@@ -53,11 +52,11 @@ config = {}
 # Add startup event to load dependencies
 @app.on_event("startup")
 async def startup_event():
-    args = Args()
-    config = args.parse_args()
+    config = Args()
+    config.parse_args()

     ApiDependencies.initialize(
-        args=args, config=config, event_handler_id=event_handler_id
+        config=config, event_handler_id=event_handler_id
     )

@@ -113,10 +112,8 @@ def custom_openapi():
         output_type_title = output_type_titles[output_type.__name__]
         invoker_schema = openapi_schema["components"]["schemas"][invoker_name]
         outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"}
-        if "additionalProperties" not in invoker_schema:
-            invoker_schema["additionalProperties"] = {}
-
-        invoker_schema["additionalProperties"]["outputs"] = outputs_ref
+        invoker_schema["output"] = outputs_ref

     app.openapi_schema = openapi_schema
     return app.openapi_schema
@@ -17,7 +17,8 @@ from .cli.commands import BaseCommand, CliContext, ExitCli, add_parsers, get_gra
 from .invocations import *
 from .invocations.baseinvocation import BaseInvocation
 from .services.events import EventServiceBase
-from .services.generate_initializer import get_generate
+from .services.model_manager_initializer import get_model_manager
+from .services.restoration_services import RestorationServices
 from .services.graph import EdgeConnection, GraphExecutionState
 from .services.image_storage import DiskImageStorage
 from .services.invocation_queue import MemoryInvocationQueue
@@ -126,14 +127,9 @@ def invoke_all(context: CliContext):


 def invoke_cli():
-    args = Args()
-    config = args.parse_args()
-
-    generate = get_generate(args, config)
-
-    # NOTE: load model on first use, uncomment to load at startup
-    # TODO: Make this a config option?
-    # generate.load_model()
+    config = Args()
+    config.parse_args()
+    model_manager = get_model_manager(config)

     events = EventServiceBase()

@@ -145,7 +141,7 @@ def invoke_cli():
     db_location = os.path.join(output_folder, "invokeai.db")

     services = InvocationServices(
-        generate=generate,
+        model_manager=model_manager,
         events=events,
         images=DiskImageStorage(output_folder),
         queue=MemoryInvocationQueue(),
@@ -153,6 +149,7 @@ def invoke_cli():
             filename=db_location, table_name="graph_executions"
         ),
         processor=DefaultInvocationProcessor(),
+        restoration=RestorationServices(config),
     )

     invoker = Invoker(services)
@@ -12,12 +12,12 @@ from ..services.image_storage import ImageType
 from ..services.invocation_services import InvocationServices
 from .baseinvocation import BaseInvocation, InvocationContext
 from .image import ImageField, ImageOutput
+from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator

 SAMPLER_NAME_VALUES = Literal[
-    "ddim", "plms", "k_lms", "k_dpm_2", "k_dpm_2_a", "k_euler", "k_euler_a", "k_heun"
+    tuple(InvokeAIGenerator.schedulers())
 ]


 # Text to image
 class TextToImageInvocation(BaseInvocation):
     """Generates an image using text2img."""
@@ -57,19 +57,18 @@ class TextToImageInvocation(BaseInvocation):
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
-        if self.model is None or self.model == "":
-            self.model = context.services.generate.model_name
-
-        # Set the model (if already cached, this does nothing)
-        context.services.generate.set_model(self.model)
-
-        results = context.services.generate.prompt2image(
+        # TODO: How to get the default model name now?
+        # (right now uses whatever current model is set in model manager)
+        model = context.services.model_manager.get_model()
+        outputs = Txt2Img(model).generate(
             prompt=self.prompt,
             step_callback=step_callback,
             **self.dict(
                 exclude={"prompt"}
             ),  # Shorthand for passing all of the parameters above manually
         )
+        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
+        # each time it is called. We only need the first one.
+        generate_output = next(outputs)

         # Results are image and seed, unwrap for now and ignore the seed
         # TODO: pre-seed?
@@ -78,7 +77,7 @@ class TextToImageInvocation(BaseInvocation):
         image_name = context.services.images.create_name(
             context.graph_execution_state_id, self.id
         )
-        context.services.images.save(image_type, image_name, results[0][0])
+        context.services.images.save(image_type, image_name, generate_output.image)
         return ImageOutput(
             image=ImageField(image_type=image_type, image_name=image_name)
         )
@@ -115,23 +114,20 @@ class ImageToImageInvocation(TextToImageInvocation):
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
-        if self.model is None or self.model == "":
-            self.model = context.services.generate.model_name
-
-        # Set the model (if already cached, this does nothing)
-        context.services.generate.set_model(self.model)
-
-        results = context.services.generate.prompt2image(
-            prompt=self.prompt,
-            init_img=image,
-            init_mask=mask,
-            step_callback=step_callback,
-            **self.dict(
-                exclude={"prompt", "image", "mask"}
-            ),  # Shorthand for passing all of the parameters above manually
-        )
+        # TODO: How to get the default model name now?
+        model = context.services.model_manager.get_model()
+        generator_output = next(
+            Img2Img(model).generate(
+                prompt=self.prompt,
+                init_image=image,
+                init_mask=mask,
+                step_callback=step_callback,
+                **self.dict(
+                    exclude={"prompt", "image", "mask"}
+                ),  # Shorthand for passing all of the parameters above manually
+            )
+        )

-        result_image = results[0][0]
+        result_image = generator_output.image

         # Results are image and seed, unwrap for now and ignore the seed
         # TODO: pre-seed?
@@ -145,7 +141,6 @@ class ImageToImageInvocation(TextToImageInvocation):
             image=ImageField(image_type=image_type, image_name=image_name)
         )

-
 class InpaintInvocation(ImageToImageInvocation):
     """Generates an image using inpaint."""

@@ -180,23 +175,20 @@ class InpaintInvocation(ImageToImageInvocation):
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
-        if self.model is None or self.model == "":
-            self.model = context.services.generate.model_name
-
-        # Set the model (if already cached, this does nothing)
-        context.services.generate.set_model(self.model)
-
-        results = context.services.generate.prompt2image(
-            prompt=self.prompt,
-            init_img=image,
-            init_mask=mask,
-            step_callback=step_callback,
-            **self.dict(
-                exclude={"prompt", "image", "mask"}
-            ),  # Shorthand for passing all of the parameters above manually
-        )
+        # TODO: How to get the default model name now?
+        model = context.services.model_manager.get_model()
+        generator_output = next(
+            Inpaint(model).generate(
+                prompt=self.prompt,
+                init_image=image,
+                mask_image=mask,
+                step_callback=step_callback,
+                **self.dict(
+                    exclude={"prompt", "image", "mask"}
+                ),  # Shorthand for passing all of the parameters above manually
+            )
+        )

-        result_image = results[0][0]
+        result_image = generator_output.image

         # Results are image and seed, unwrap for now and ignore the seed
         # TODO: pre-seed?
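For orientation, here is a minimal sketch (not part of the commit) of the generator-wrapper call pattern these hunks move to. It assumes `services` is an `InvocationServices` instance wired as elsewhere in this commit:

```python
# Hedged sketch of the new wrapper API; `services` is an assumed
# InvocationServices instance built as in api_dependencies/cli_app above.
from invokeai.backend.generator import Txt2Img

model_info = services.model_manager.get_model()  # dict: model_name, model, width, height, hash
outputs = Txt2Img(model_info).generate(prompt="banana sushi")  # infinite iterator
result = next(outputs)                           # one InvokeAIGeneratorOutput
image, seed = result.image, result.seed
```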
@@ -8,7 +8,6 @@ from ..services.invocation_services import InvocationServices
 from .baseinvocation import BaseInvocation, InvocationContext
 from .image import ImageField, ImageOutput

-
 class RestoreFaceInvocation(BaseInvocation):
     """Restores faces in an image."""
     #fmt: off
@@ -23,7 +22,7 @@ class RestoreFaceInvocation(BaseInvocation):
         image = context.services.images.get(
             self.image.image_type, self.image.image_name
         )
-        results = context.services.generate.upscale_and_reconstruct(
+        results = context.services.restoration.upscale_and_reconstruct(
             image_list=[[image, 0]],
             upscale=None,
             strength=self.strength,  # GFPGAN strength
@@ -26,7 +26,7 @@ class UpscaleInvocation(BaseInvocation):
         image = context.services.images.get(
             self.image.image_type, self.image.image_name
         )
-        results = context.services.generate.upscale_and_reconstruct(
+        results = context.services.restoration.upscale_and_reconstruct(
             image_list=[[image, 0]],
             upscale=(self.level, self.strength),
             strength=0.0,  # GFPGAN strength
invokeai/app/services/generate_initializer.py (file deleted)

@@ -1,255 +0,0 @@
-import os
-import sys
-import traceback
-from argparse import Namespace
-
-import invokeai.version
-from invokeai.backend import Generate, ModelManager
-
-from ...backend import Globals
-
-
-# TODO: most of this code should be split into individual services as the Generate.py code is deprecated
-def get_generate(args, config) -> Generate:
-    if not args.conf:
-        config_file = os.path.join(Globals.root, "configs", "models.yaml")
-        if not os.path.exists(config_file):
-            report_model_error(
-                args, FileNotFoundError(f"The file {config_file} could not be found.")
-            )
-
-    print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}")
-    print(f'>> InvokeAI runtime directory is "{Globals.root}"')
-
-    # these two lines prevent a horrible warning message from appearing
-    # when the frozen CLIP tokenizer is imported
-    import transformers  # type: ignore
-
-    transformers.logging.set_verbosity_error()
-    import diffusers
-
-    diffusers.logging.set_verbosity_error()
-
-    # Loading Face Restoration and ESRGAN Modules
-    gfpgan, codeformer, esrgan = load_face_restoration(args)
-
-    # normalize the config directory relative to root
-    if not os.path.isabs(args.conf):
-        args.conf = os.path.normpath(os.path.join(Globals.root, args.conf))
-
-    if args.embeddings:
-        if not os.path.isabs(args.embedding_path):
-            embedding_path = os.path.normpath(
-                os.path.join(Globals.root, args.embedding_path)
-            )
-        else:
-            embedding_path = args.embedding_path
-    else:
-        embedding_path = None
-
-    # migrate legacy models
-    ModelManager.migrate_models()
-
-    # load the infile as a list of lines
-    if args.infile:
-        try:
-            if os.path.isfile(args.infile):
-                infile = open(args.infile, "r", encoding="utf-8")
-            elif args.infile == "-":  # stdin
-                infile = sys.stdin
-            else:
-                raise FileNotFoundError(f"{args.infile} not found.")
-        except (FileNotFoundError, IOError) as e:
-            print(f"{e}. Aborting.")
-            sys.exit(-1)
-
-    # creating a Generate object:
-    try:
-        gen = Generate(
-            conf=args.conf,
-            model=args.model,
-            sampler_name=args.sampler_name,
-            embedding_path=embedding_path,
-            full_precision=args.full_precision,
-            precision=args.precision,
-            gfpgan=gfpgan,
-            codeformer=codeformer,
-            esrgan=esrgan,
-            free_gpu_mem=args.free_gpu_mem,
-            safety_checker=args.safety_checker,
-            max_loaded_models=args.max_loaded_models,
-        )
-    except (FileNotFoundError, TypeError, AssertionError) as e:
-        report_model_error(opt, e)
-    except (IOError, KeyError) as e:
-        print(f"{e}. Aborting.")
-        sys.exit(-1)
-
-    if args.seamless:
-        print(">> changed to seamless tiling mode")
-
-    # preload the model
-    try:
-        gen.load_model()
-    except KeyError:
-        pass
-    except Exception as e:
-        report_model_error(args, e)
-
-    # try to autoconvert new models
-    # autoimport new .ckpt files
-    if path := args.autoconvert:
-        gen.model_manager.autoconvert_weights(
-            conf_path=args.conf,
-            weights_directory=path,
-        )
-
-    return gen
-
-
-def load_face_restoration(opt):
-    try:
-        gfpgan, codeformer, esrgan = None, None, None
-        if opt.restore or opt.esrgan:
-            from invokeai.backend.restoration import Restoration
-
-            restoration = Restoration()
-            if opt.restore:
-                gfpgan, codeformer = restoration.load_face_restore_models(
-                    opt.gfpgan_model_path
-                )
-            else:
-                print(">> Face restoration disabled")
-            if opt.esrgan:
-                esrgan = restoration.load_esrgan(opt.esrgan_bg_tile)
-            else:
-                print(">> Upscaling disabled")
-        else:
-            print(">> Face restoration and upscaling disabled")
-    except (ModuleNotFoundError, ImportError):
-        print(traceback.format_exc(), file=sys.stderr)
-        print(">> You may need to install the ESRGAN and/or GFPGAN modules")
-    return gfpgan, codeformer, esrgan
-
-
-def report_model_error(opt: Namespace, e: Exception):
-    print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
-    print(
-        "** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
-    )
-    yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
-    if yes_to_all:
-        print(
-            "** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
-        )
-    else:
-        response = input(
-            "Do you want to run invokeai-configure script to select and/or reinstall models? [y] "
-        )
-        if response.startswith(("n", "N")):
-            return
-
-    print("invokeai-configure is launching....\n")
-
-    # Match arguments that were set on the CLI
-    # only the arguments accepted by the configuration script are parsed
-    root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
-    config = ["--config", opt.conf] if opt.conf is not None else []
-    previous_args = sys.argv
-    sys.argv = ["invokeai-configure"]
-    sys.argv.extend(root_dir)
-    sys.argv.extend(config)
-    if yes_to_all is not None:
-        for arg in yes_to_all.split():
-            sys.argv.append(arg)
-
-    from invokeai.frontend.install import invokeai_configure
-
-    invokeai_configure()
-    # TODO: Figure out how to restart
-    # print('** InvokeAI will now restart')
-    # sys.argv = previous_args
-    # main()  # would rather do a os.exec(), but doesn't exist?
-    # sys.exit(0)
-
-
-# Temporary initializer for Generate until we migrate off of it
-def old_get_generate(args, config) -> Generate:
-    # TODO: Remove the need for globals
-    from invokeai.backend.globals import Globals
-
-    # alert - setting globals here
-    Globals.root = os.path.expanduser(
-        args.root_dir or os.environ.get("INVOKEAI_ROOT") or os.path.abspath(".")
-    )
-    Globals.try_patchmatch = args.patchmatch
-
-    print(f'>> InvokeAI runtime directory is "{Globals.root}"')
-
-    # these two lines prevent a horrible warning message from appearing
-    # when the frozen CLIP tokenizer is imported
-    import transformers
-
-    transformers.logging.set_verbosity_error()
-
-    # Loading Face Restoration and ESRGAN Modules
-    gfpgan, codeformer, esrgan = None, None, None
-    try:
-        if config.restore or config.esrgan:
-            from ldm.invoke.restoration import Restoration
-
-            restoration = Restoration()
-            if config.restore:
-                gfpgan, codeformer = restoration.load_face_restore_models(
-                    config.gfpgan_model_path
-                )
-            else:
-                print(">> Face restoration disabled")
-            if config.esrgan:
-                esrgan = restoration.load_esrgan(config.esrgan_bg_tile)
-            else:
-                print(">> Upscaling disabled")
-        else:
-            print(">> Face restoration and upscaling disabled")
-    except (ModuleNotFoundError, ImportError):
-        print(traceback.format_exc(), file=sys.stderr)
-        print(">> You may need to install the ESRGAN and/or GFPGAN modules")
-
-    # normalize the config directory relative to root
-    if not os.path.isabs(config.conf):
-        config.conf = os.path.normpath(os.path.join(Globals.root, config.conf))
-
-    if config.embeddings:
-        if not os.path.isabs(config.embedding_path):
-            embedding_path = os.path.normpath(
-                os.path.join(Globals.root, config.embedding_path)
-            )
-    else:
-        embedding_path = None
-
-    # TODO: lazy-initialize this by wrapping it
-    try:
-        generate = Generate(
-            conf=config.conf,
-            model=config.model,
-            sampler_name=config.sampler_name,
-            embedding_path=embedding_path,
-            full_precision=config.full_precision,
-            precision=config.precision,
-            gfpgan=gfpgan,
-            codeformer=codeformer,
-            esrgan=esrgan,
-            free_gpu_mem=config.free_gpu_mem,
-            safety_checker=config.safety_checker,
-            max_loaded_models=config.max_loaded_models,
-        )
-    except (FileNotFoundError, TypeError, AssertionError):
-        # emergency_model_reconfigure()  # TODO?
-        sys.exit(-1)
-    except (IOError, KeyError) as e:
-        print(f"{e}. Aborting.")
-        sys.exit(-1)
-
-    generate.free_gpu_mem = config.free_gpu_mem
-
-    return generate
@@ -1,36 +1,39 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
-from invokeai.backend import Generate
+from invokeai.backend import ModelManager

 from .events import EventServiceBase
 from .image_storage import ImageStorageBase
+from .restoration_services import RestorationServices
 from .invocation_queue import InvocationQueueABC
 from .item_storage import ItemStorageABC


 class InvocationServices:
     """Services that can be used by invocations"""

-    generate: Generate  # TODO: wrap Generate, or split it up from model?
     events: EventServiceBase
     images: ImageStorageBase
     queue: InvocationQueueABC
+    model_manager: ModelManager
+    restoration: RestorationServices

     # NOTE: we must forward-declare any types that include invocations, since invocations can use services
     graph_execution_manager: ItemStorageABC["GraphExecutionState"]
     processor: "InvocationProcessorABC"

     def __init__(
-        self,
-        generate: Generate,
-        events: EventServiceBase,
-        images: ImageStorageBase,
-        queue: InvocationQueueABC,
-        graph_execution_manager: ItemStorageABC["GraphExecutionState"],
-        processor: "InvocationProcessorABC",
+        self,
+        model_manager: ModelManager,
+        events: EventServiceBase,
+        images: ImageStorageBase,
+        queue: InvocationQueueABC,
+        graph_execution_manager: ItemStorageABC["GraphExecutionState"],
+        processor: "InvocationProcessorABC",
+        restoration: RestorationServices,
     ):
-        self.generate = generate
+        self.model_manager = model_manager
         self.events = events
         self.images = images
         self.queue = queue
         self.graph_execution_manager = graph_execution_manager
         self.processor = processor
+        self.restoration = restoration
invokeai/app/services/model_manager_initializer.py (new file, 120 lines)

@@ -0,0 +1,120 @@
+import os
+import sys
+import torch
+from argparse import Namespace
+from invokeai.backend import Args
+from omegaconf import OmegaConf
+from pathlib import Path
+
+import invokeai.version
+from ...backend import ModelManager
+from ...backend.util import choose_precision, choose_torch_device
+from ...backend import Globals
+
+# TODO: Replace with an abstract class base ModelManagerBase
+def get_model_manager(config: Args) -> ModelManager:
+    if not config.conf:
+        config_file = os.path.join(Globals.root, "configs", "models.yaml")
+        if not os.path.exists(config_file):
+            report_model_error(
+                config, FileNotFoundError(f"The file {config_file} could not be found.")
+            )
+
+    print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}")
+    print(f'>> InvokeAI runtime directory is "{Globals.root}"')
+
+    # these two lines prevent a horrible warning message from appearing
+    # when the frozen CLIP tokenizer is imported
+    import transformers  # type: ignore
+
+    transformers.logging.set_verbosity_error()
+    import diffusers
+
+    diffusers.logging.set_verbosity_error()
+
+    # normalize the config directory relative to root
+    if not os.path.isabs(config.conf):
+        config.conf = os.path.normpath(os.path.join(Globals.root, config.conf))
+
+    if config.embeddings:
+        if not os.path.isabs(config.embedding_path):
+            embedding_path = os.path.normpath(
+                os.path.join(Globals.root, config.embedding_path)
+            )
+        else:
+            embedding_path = config.embedding_path
+    else:
+        embedding_path = None
+
+    # migrate legacy models
+    ModelManager.migrate_models()
+
+    # creating the model manager
+    try:
+        device = torch.device(choose_torch_device())
+        precision = 'float16' if config.precision=='float16' \
+            else 'float32' if config.precision=='float32' \
+            else choose_precision(device)
+
+        model_manager = ModelManager(
+            OmegaConf.load(config.conf),
+            precision=precision,
+            device_type=device,
+            max_loaded_models=config.max_loaded_models,
+            embedding_path = Path(embedding_path),
+        )
+    except (FileNotFoundError, TypeError, AssertionError) as e:
+        report_model_error(config, e)
+    except (IOError, KeyError) as e:
+        print(f"{e}. Aborting.")
+        sys.exit(-1)
+
+    # try to autoconvert new models
+    # autoimport new .ckpt files
+    if path := config.autoconvert:
+        model_manager.autoconvert_weights(
+            conf_path=config.conf,
+            weights_directory=path,
+        )
+
+    return model_manager
+
+def report_model_error(opt: Namespace, e: Exception):
+    print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
+    print(
+        "** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
+    )
+    yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
+    if yes_to_all:
+        print(
+            "** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
+        )
+    else:
+        response = input(
+            "Do you want to run invokeai-configure script to select and/or reinstall models? [y] "
+        )
+        if response.startswith(("n", "N")):
+            return
+
+    print("invokeai-configure is launching....\n")
+
+    # Match arguments that were set on the CLI
+    # only the arguments accepted by the configuration script are parsed
+    root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
+    config = ["--config", opt.conf] if opt.conf is not None else []
+    previous_config = sys.argv
+    sys.argv = ["invokeai-configure"]
+    sys.argv.extend(root_dir)
+    sys.argv.extend(config.to_dict())
+    if yes_to_all is not None:
+        for arg in yes_to_all.split():
+            sys.argv.append(arg)
+
+    from invokeai.frontend.install import invokeai_configure
+
+    invokeai_configure()
+    # TODO: Figure out how to restart
+    # print('** InvokeAI will now restart')
+    # sys.argv = previous_args
+    # main()  # would rather do a os.exec(), but doesn't exist?
+    # sys.exit(0)
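A hedged usage sketch for the new initializer, assuming a CLI-parsed `Args` config as in the api/cli wiring elsewhere in this commit:

```python
# Sketch only: construct the model manager the same way api_app.py and
# cli_app.py do in this commit.
from invokeai.backend import Args
from invokeai.app.services.model_manager_initializer import get_model_manager

config = Args()
config.parse_args()                      # reads sys.argv, as in the startup hunks
model_manager = get_model_manager(config)
model_info = model_manager.get_model()   # current/default model as a dict
```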
invokeai/app/services/restoration_services.py (new file, 109 lines)

@@ -0,0 +1,109 @@
+import sys
+import traceback
+import torch
+from ...backend.restoration import Restoration
+from ...backend.util import choose_torch_device, CPU_DEVICE, MPS_DEVICE
+
+# This should be a real base class for postprocessing functions,
+# but right now we just instantiate the existing gfpgan, esrgan
+# and codeformer functions.
+class RestorationServices:
+    '''Face restoration and upscaling'''
+
+    def __init__(self, args):
+        try:
+            gfpgan, codeformer, esrgan = None, None, None
+            if args.restore or args.esrgan:
+                restoration = Restoration()
+                if args.restore:
+                    gfpgan, codeformer = restoration.load_face_restore_models(
+                        args.gfpgan_model_path
+                    )
+                else:
+                    print(">> Face restoration disabled")
+                if args.esrgan:
+                    esrgan = restoration.load_esrgan(args.esrgan_bg_tile)
+                else:
+                    print(">> Upscaling disabled")
+            else:
+                print(">> Face restoration and upscaling disabled")
+        except (ModuleNotFoundError, ImportError):
+            print(traceback.format_exc(), file=sys.stderr)
+            print(">> You may need to install the ESRGAN and/or GFPGAN modules")
+        self.device = torch.device(choose_torch_device())
+        self.gfpgan = gfpgan
+        self.codeformer = codeformer
+        self.esrgan = esrgan
+
+    # note that this one method does gfpgan and codepath reconstruction, as well as
+    # esrgan upscaling
+    # TO DO: refactor into separate methods
+    def upscale_and_reconstruct(
+        self,
+        image_list,
+        facetool="gfpgan",
+        upscale=None,
+        upscale_denoise_str=0.75,
+        strength=0.0,
+        codeformer_fidelity=0.75,
+        save_original=False,
+        image_callback=None,
+        prefix=None,
+    ):
+        results = []
+        for r in image_list:
+            image, seed = r
+            try:
+                if strength > 0:
+                    if self.gfpgan is not None or self.codeformer is not None:
+                        if facetool == "gfpgan":
+                            if self.gfpgan is None:
+                                print(
+                                    ">> GFPGAN not found. Face restoration is disabled."
+                                )
+                            else:
+                                image = self.gfpgan.process(image, strength, seed)
+                        if facetool == "codeformer":
+                            if self.codeformer is None:
+                                print(
+                                    ">> CodeFormer not found. Face restoration is disabled."
+                                )
+                            else:
+                                cf_device = (
+                                    CPU_DEVICE if self.device == MPS_DEVICE else self.device
+                                )
+                                image = self.codeformer.process(
+                                    image=image,
+                                    strength=strength,
+                                    device=cf_device,
+                                    seed=seed,
+                                    fidelity=codeformer_fidelity,
+                                )
+                    else:
+                        print(">> Face Restoration is disabled.")
+                if upscale is not None:
+                    if self.esrgan is not None:
+                        if len(upscale) < 2:
+                            upscale.append(0.75)
+                        image = self.esrgan.process(
+                            image,
+                            upscale[1],
+                            seed,
+                            int(upscale[0]),
+                            denoise_str=upscale_denoise_str,
+                        )
+                    else:
+                        print(">> ESRGAN is disabled. Image not upscaled.")
+            except Exception as e:
+                print(
+                    f">> Error running RealESRGAN or GFPGAN. Your image was not upscaled.\n{e}"
+                )
+
+            if image_callback is not None:
+                image_callback(image, seed, upscaled=True, use_prefix=prefix)
+            else:
+                r[0] = image
+
+            results.append([image, seed])
+
+        return results
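A usage sketch for the new service, assuming `config` parsed via `Args` as above; `image_list` takes `[image, seed]` pairs and `upscale` is an ESRGAN `(level, strength)` tuple, mirroring the calls in the upscale/restore invocations:

```python
# Sketch only: mirrors how the invocations above call this service.
from PIL import Image

restoration = RestorationServices(config)  # `config` assumed parsed via Args
image = Image.open("sample.png")           # hypothetical input image
results = restoration.upscale_and_reconstruct(
    image_list=[[image, 0]],   # [image, seed] pairs
    upscale=(2, 0.75),         # 2x ESRGAN at strength 0.75
    strength=0.5,              # face-restoration strength; 0.0 disables
)
restored, seed = results[0]
```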
@@ -2,6 +2,15 @@
 """
 Initialization file for invokeai.backend
 """
 from .generate import Generate
+from .generator import (
+    InvokeAIGeneratorBasicParams,
+    InvokeAIGenerator,
+    InvokeAIGeneratorOutput,
+    Txt2Img,
+    Img2Img,
+    Inpaint
+)
 from .model_management import ModelManager
+from .safety_checker import SafetyChecker
 from .args import Args
 from .globals import Globals
@@ -25,18 +25,19 @@ from accelerate.utils import set_seed
 from diffusers.pipeline_utils import DiffusionPipeline
 from diffusers.utils.import_utils import is_xformers_available
 from omegaconf import OmegaConf
+from pathlib import Path

 from .args import metadata_from_png
 from .generator import infill_methods
 from .globals import Globals, global_cache_dir
 from .image_util import InitImageResizer, PngWriter, Txt2Mask, configure_model_padding
 from .model_management import ModelManager
+from .safety_checker import SafetyChecker
 from .prompting import get_uc_and_c_and_ec
 from .prompting.conditioning import log_tokenization
 from .stable_diffusion import HuggingFaceConceptsLibrary
 from .util import choose_precision, choose_torch_device


 def fix_func(orig):
     if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():

@@ -222,6 +223,7 @@ class Generate:
             self.precision,
             max_loaded_models=max_loaded_models,
             sequential_offload=self.free_gpu_mem,
+            embedding_path=Path(self.embedding_path),
         )
         # don't accept invalid models
         fallback = self.model_manager.default_model() or FALLBACK_MODEL_NAME
@@ -244,31 +246,8 @@ class Generate:

         # load safety checker if requested
         if safety_checker:
-            try:
-                print(">> Initializing NSFW checker")
-                from diffusers.pipelines.stable_diffusion.safety_checker import (
-                    StableDiffusionSafetyChecker,
-                )
-                from transformers import AutoFeatureExtractor
-
-                safety_model_id = "CompVis/stable-diffusion-safety-checker"
-                safety_model_path = global_cache_dir("hub")
-                self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
-                    safety_model_id,
-                    local_files_only=True,
-                    cache_dir=safety_model_path,
-                )
-                self.safety_feature_extractor = AutoFeatureExtractor.from_pretrained(
-                    safety_model_id,
-                    local_files_only=True,
-                    cache_dir=safety_model_path,
-                )
-                self.safety_checker.to(self.device)
-            except Exception:
-                print(
-                    "** An error was encountered while installing the safety checker:"
-                )
-                print(traceback.format_exc())
+            print(">> Initializing NSFW checker")
+            self.safety_checker = SafetyChecker(self.device)
         else:
             print(">> NSFW checker is disabled")

@@ -495,18 +474,6 @@ class Generate:
             torch.cuda.reset_peak_memory_stats()

         results = list()
         init_image = None
         mask_image = None

-        try:
-            if (
-                self.free_gpu_mem
-                and self.model.cond_stage_model.device != self.model.device
-            ):
-                self.model.cond_stage_model.device = self.model.device
-                self.model.cond_stage_model.to(self.model.device)
-        except AttributeError:
-            pass
-
         try:
             uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
@@ -535,15 +502,6 @@ class Generate:
             generator.set_variation(self.seed, variation_amount, with_variations)
             generator.use_mps_noise = use_mps_noise

-            checker = (
-                {
-                    "checker": self.safety_checker,
-                    "extractor": self.safety_feature_extractor,
-                }
-                if self.safety_checker
-                else None
-            )
-
             results = generator.generate(
                 prompt,
                 iterations=iterations,
@@ -570,7 +528,7 @@ class Generate:
                 embiggen_strength=embiggen_strength,
                 inpaint_replace=inpaint_replace,
                 mask_blur_radius=mask_blur_radius,
-                safety_checker=checker,
+                safety_checker=self.safety_checker,
                 seam_size=seam_size,
                 seam_blur=seam_blur,
                 seam_strength=seam_strength,
@@ -952,18 +910,6 @@ class Generate:
             self.generators = {}

         set_seed(random.randrange(0, np.iinfo(np.uint32).max))
-        if self.embedding_path is not None:
-            print(f">> Loading embeddings from {self.embedding_path}")
-            for root, _, files in os.walk(self.embedding_path):
-                for name in files:
-                    ti_path = os.path.join(root, name)
-                    self.model.textual_inversion_manager.load_textual_inversion(
-                        ti_path, defer_injecting_tokens=True
-                    )
-            print(
-                f'>> Textual inversion triggers: {", ".join(sorted(self.model.textual_inversion_manager.get_all_trigger_strings()))}'
-            )

         self.model_name = model_name
         self._set_scheduler()  # requires self.model_name to be set first
         return self.model
@@ -1010,7 +956,7 @@ class Generate:
     ):
         results = []
         for r in image_list:
-            image, seed = r
+            image, seed, _ = r
             try:
                 if strength > 0:
                     if self.gfpgan is not None or self.codeformer is not None:
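The net effect of the safety-checker hunks above is that the ad-hoc checker/extractor dict is replaced by the new `SafetyChecker` object. A minimal sketch of the simplified path, with the device choice as an assumption:

```python
# Sketch only: NSFW checking after this refactor (names taken from the hunks;
# SafetyChecker is re-exported from invokeai.backend above).
import torch
from PIL import Image
from invokeai.backend import SafetyChecker

checker = SafetyChecker(torch.device("cuda"))  # device choice is an assumption
image = Image.open("sample.png")               # hypothetical input
image = checker.check(image)                   # returns the image, blurred if flagged
```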
@@ -1,5 +1,13 @@
 """
 Initialization file for the invokeai.generator package
 """
-from .base import Generator
+from .base import (
+    InvokeAIGenerator,
+    InvokeAIGeneratorBasicParams,
+    InvokeAIGeneratorOutput,
+    Txt2Img,
+    Img2Img,
+    Inpaint,
+    Generator,
+)
 from .inpaint import infill_methods
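With these re-exports, callers can import the whole wrapper API from one place, for example:

```python
# Example import enabled by the re-exports above.
from invokeai.backend.generator import Txt2Img, InvokeAIGeneratorBasicParams, infill_methods
```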
@@ -4,11 +4,15 @@ including img2img, txt2img, and inpaint
 """
 from __future__ import annotations

+import itertools
+import dataclasses
+import diffusers
 import os
 import random
 import traceback
+from abc import ABCMeta
+from argparse import Namespace
 from contextlib import nullcontext
 from pathlib import Path

 import cv2
 import numpy as np
@@ -17,13 +21,258 @@ from PIL import Image, ImageChops, ImageFilter
 from accelerate.utils import set_seed
 from diffusers import DiffusionPipeline
 from tqdm import trange
 from typing import List, Iterator, Type
+from dataclasses import dataclass, field
+from diffusers.schedulers import SchedulerMixin as Scheduler

 import invokeai.assets.web as web_assets
 from ..image_util import configure_model_padding
 from ..util.util import rand_perlin_2d
+from ..safety_checker import SafetyChecker
 from ..prompting.conditioning import get_uc_and_c_and_ec
 from ..stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeline

 downsampling = 8
 CAUTION_IMG = "caution.png"

+@dataclass
+class InvokeAIGeneratorBasicParams:
+    seed: int=None
+    width: int=512
+    height: int=512
+    cfg_scale: int=7.5
+    steps: int=20
+    ddim_eta: float=0.0
+    scheduler: int='ddim'
+    precision: str='float16'
+    perlin: float=0.0
+    threshold: int=0.0
+    seamless: bool=False
+    seamless_axes: List[str]=field(default_factory=lambda: ['x', 'y'])
+    h_symmetry_time_pct: float=None
+    v_symmetry_time_pct: float=None
+    variation_amount: float = 0.0
+    with_variations: list=field(default_factory=list)
+    safety_checker: SafetyChecker=None
+
+@dataclass
+class InvokeAIGeneratorOutput:
+    '''
+    InvokeAIGeneratorOutput is a dataclass that contains the outputs of a generation
+    operation, including the image, its seed, the model name used to generate the image
+    and the model hash, as well as all the generate() parameters that went into
+    generating the image (in .params, also available as attributes)
+    '''
+    image: Image
+    seed: int
+    model_hash: str
+    attention_maps_images: List[Image]
+    params: Namespace
+
+# we are interposing a wrapper around the original Generator classes so that
+# old code that calls Generate will continue to work.
+class InvokeAIGenerator(metaclass=ABCMeta):
+    scheduler_map = dict(
+        ddim=diffusers.DDIMScheduler,
+        dpmpp_2=diffusers.DPMSolverMultistepScheduler,
+        k_dpm_2=diffusers.KDPM2DiscreteScheduler,
+        k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
+        k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
+        k_euler=diffusers.EulerDiscreteScheduler,
+        k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
+        k_heun=diffusers.HeunDiscreteScheduler,
+        k_lms=diffusers.LMSDiscreteScheduler,
+        plms=diffusers.PNDMScheduler,
+    )
+
+    def __init__(self,
+                 model_info: dict,
+                 params: InvokeAIGeneratorBasicParams=InvokeAIGeneratorBasicParams(),
+                 ):
+        self.model_info=model_info
+        self.params=params
+
+    def generate(self,
+                 prompt: str='',
+                 callback: callable=None,
+                 step_callback: callable=None,
+                 iterations: int=1,
+                 **keyword_args,
+                 )->Iterator[InvokeAIGeneratorOutput]:
+        '''
+        Return an iterator across the indicated number of generations.
+        Each time the iterator is called it will return an InvokeAIGeneratorOutput
+        object. Use like this:
+
+            outputs = txt2img.generate(prompt='banana sushi', iterations=5)
+            for result in outputs:
+                print(result.image, result.seed)
+
+        In the typical case of wanting to get just a single image, iterations
+        defaults to 1 and do:
+
+            output = next(txt2img.generate(prompt='banana sushi'))
+
+        Pass None to get an infinite iterator.
+
+            outputs = txt2img.generate(prompt='banana sushi', iterations=None)
+            for o in outputs:
+                print(o.image, o.seed)
+
+        '''
+        generator_args = dataclasses.asdict(self.params)
+        generator_args.update(keyword_args)
+
+        model_info = self.model_info
+        model_name = model_info['model_name']
+        model: StableDiffusionGeneratorPipeline = model_info['model']
+        model_hash = model_info['hash']
+        scheduler: Scheduler = self.get_scheduler(
+            model=model,
+            scheduler_name=generator_args.get('scheduler')
+        )
+        uc, c, extra_conditioning_info = get_uc_and_c_and_ec(prompt, model=model)
+        gen_class = self._generator_class()
+        generator = gen_class(model, self.params.precision)
+        if self.params.variation_amount > 0:
+            generator.set_variation(generator_args.get('seed'),
+                                    generator_args.get('variation_amount'),
+                                    generator_args.get('with_variations')
+                                    )
+
+        if isinstance(model, DiffusionPipeline):
+            for component in [model.unet, model.vae]:
+                configure_model_padding(component,
+                                        generator_args.get('seamless', False),
+                                        generator_args.get('seamless_axes')
+                                        )
+        else:
+            configure_model_padding(model,
+                                    generator_args.get('seamless', False),
+                                    generator_args.get('seamless_axes')
+                                    )
+
+        iteration_count = range(iterations) if iterations else itertools.count(start=0, step=1)
+        for i in iteration_count:
+            results = generator.generate(prompt,
+                                         conditioning=(uc, c, extra_conditioning_info),
+                                         sampler=scheduler,
+                                         **generator_args,
+                                         )
+            output = InvokeAIGeneratorOutput(
+                image=results[0][0],
+                seed=results[0][1],
+                attention_maps_images=results[0][2],
+                model_hash=model_hash,
+                params=Namespace(model_name=model_name, **generator_args),
+            )
+            if callback:
+                callback(output)
+            yield output
+
+    @classmethod
+    def schedulers(self)->List[str]:
+        '''
+        Return list of all the schedulers that we currently handle.
+        '''
+        return list(self.scheduler_map.keys())
+
+    def load_generator(self, model: StableDiffusionGeneratorPipeline, generator_class: Type[Generator]):
+        return generator_class(model, self.params.precision)
+
+    def get_scheduler(self, scheduler_name: str, model: StableDiffusionGeneratorPipeline)->Scheduler:
+        scheduler_class = self.scheduler_map.get(scheduler_name, 'ddim')
+        scheduler = scheduler_class.from_config(model.scheduler.config)
+        # hack copied over from generate.py
+        if not hasattr(scheduler, 'uses_inpainting_model'):
+            scheduler.uses_inpainting_model = lambda: False
+        return scheduler
+
+    @classmethod
+    def _generator_class(cls)->Type[Generator]:
+        '''
+        In derived classes return the name of the generator to apply.
+        If you don't override will return the name of the derived
+        class, which nicely parallels the generator class names.
+        '''
+        return Generator
+
+# ------------------------------------
+class Txt2Img(InvokeAIGenerator):
+    @classmethod
+    def _generator_class(cls):
+        from .txt2img import Txt2Img
+        return Txt2Img
+
+# ------------------------------------
+class Img2Img(InvokeAIGenerator):
+    def generate(self,
+                 init_image: Image | torch.FloatTensor,
+                 strength: float=0.75,
+                 **keyword_args
+                 )->List[InvokeAIGeneratorOutput]:
+        return super().generate(init_image=init_image,
+                                strength=strength,
+                                **keyword_args
+                                )
+
+    @classmethod
+    def _generator_class(cls):
+        from .img2img import Img2Img
+        return Img2Img
+
+# ------------------------------------
+# Takes all the arguments of Img2Img and adds the mask image and the seam/infill stuff
+class Inpaint(Img2Img):
+    def generate(self,
+                 mask_image: Image | torch.FloatTensor,
+                 # Seam settings - when 0, doesn't fill seam
+                 seam_size: int = 0,
+                 seam_blur: int = 0,
+                 seam_strength: float = 0.7,
+                 seam_steps: int = 10,
+                 tile_size: int = 32,
+                 inpaint_replace=False,
+                 infill_method=None,
+                 inpaint_width=None,
+                 inpaint_height=None,
+                 inpaint_fill: tuple(int) = (0x7F, 0x7F, 0x7F, 0xFF),
+                 **keyword_args
+                 )->List[InvokeAIGeneratorOutput]:
+        return super().generate(
+            mask_image=mask_image,
+            seam_size=seam_size,
+            seam_blur=seam_blur,
+            seam_strength=seam_strength,
+            seam_steps=seam_steps,
+            tile_size=tile_size,
+            inpaint_replace=inpaint_replace,
+            infill_method=infill_method,
+            inpaint_width=inpaint_width,
+            inpaint_height=inpaint_height,
+            inpaint_fill=inpaint_fill,
+            **keyword_args
+        )
+
+    @classmethod
+    def _generator_class(cls):
+        from .inpaint import Inpaint
+        return Inpaint
+
+# ------------------------------------
+class Embiggen(Txt2Img):
+    def generate(
+            self,
+            embiggen: list=None,
+            embiggen_tiles: list = None,
+            strength: float=0.75,
+            **kwargs)->List[InvokeAIGeneratorOutput]:
+        return super().generate(embiggen=embiggen,
+                                embiggen_tiles=embiggen_tiles,
+                                strength=strength,
+                                **kwargs)
+
+    @classmethod
+    def _generator_class(cls):
+        from .embiggen import Embiggen
+        return Embiggen
+
+
 class Generator:
     downsampling_factor: int
@@ -44,7 +293,6 @@ class Generator:
         self.with_variations = []
         self.use_mps_noise = False
         self.free_gpu_mem = None
-        self.caution_img = None

     # this is going to be overridden in img2img.py, txt2img.py and inpaint.py
     def get_make_image(self, prompt, **kwargs):
@@ -64,10 +312,10 @@ class Generator:
     def generate(
         self,
         prompt,
-        init_image,
         width,
         height,
         sampler,
+        init_image=None,
         iterations=1,
         seed=None,
         image_callback=None,
@@ -76,7 +324,7 @@ class Generator:
         perlin=0.0,
         h_symmetry_time_pct=None,
         v_symmetry_time_pct=None,
-        safety_checker: dict = None,
+        safety_checker: SafetyChecker=None,
         free_gpu_mem: bool = False,
         **kwargs,
     ):
@@ -130,9 +378,9 @@ class Generator:
                 image = make_image(x_T)

                 if self.safety_checker is not None:
-                    image = self.safety_check(image)
+                    image = self.safety_checker.check(image)

-                results.append([image, seed])
+                results.append([image, seed, attention_maps_images])

                 if image_callback is not None:
                     attention_maps_image = (
@@ -292,16 +540,6 @@ class Generator:
             seed = random.randrange(0, np.iinfo(np.uint32).max)
         return (seed, initial_noise)

-    # returns a tensor filled with random numbers from a normal distribution
-    def get_noise(self, width, height):
-        """
-        Returns a tensor filled with random numbers, either form a normal distribution
-        (txt2img) or from the latent image (img2img, inpaint)
-        """
-        raise NotImplementedError(
-            "get_noise() must be implemented in a descendent class"
-        )
-
     def get_perlin_noise(self, width, height):
         fixdevice = "cpu" if (self.model.device.type == "mps") else self.model.device
         # limit noise to only the diffusion image channels, not the mask channels
@@ -361,53 +599,6 @@ class Generator:

         return v2

-    def safety_check(self, image: Image.Image):
-        """
-        If the CompViz safety checker flags an NSFW image, we
-        blur it out.
-        """
-        import diffusers
-
-        checker = self.safety_checker["checker"]
-        extractor = self.safety_checker["extractor"]
-        features = extractor([image], return_tensors="pt")
-        features.to(self.model.device)
-
-        # unfortunately checker requires the numpy version, so we have to convert back
-        x_image = np.array(image).astype(np.float32) / 255.0
-        x_image = x_image[None].transpose(0, 3, 1, 2)
-
-        diffusers.logging.set_verbosity_error()
-        checked_image, has_nsfw_concept = checker(
-            images=x_image, clip_input=features.pixel_values
-        )
-        if has_nsfw_concept[0]:
-            print(
-                "** An image with potential non-safe content has been detected. A blurred image will be returned. **"
-            )
-            return self.blur(image)
-        else:
-            return image
-
-    def blur(self, input):
-        blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32))
-        try:
-            caution = self.get_caution_img()
-            if caution:
-                blurry.paste(caution, (0, 0), caution)
-        except FileNotFoundError:
-            pass
-        return blurry
-
-    def get_caution_img(self):
-        path = None
-        if self.caution_img:
-            return self.caution_img
-        path = Path(web_assets.__path__[0]) / CAUTION_IMG
-        caution = Image.open(path)
-        self.caution_img = caution.resize((caution.width // 2, caution.height // 2))
-        return self.caution_img
-
     # this is a handy routine for debugging use. Given a generated sample,
     # convert it into a PNG image and store it at the indicated path
     def save_sample(self, sample, filepath):
@@ -1274,7 +1274,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
             tokenizer=tokenizer,
             unet=unet.to(precision),
             scheduler=scheduler,
-            safety_checker=safety_checker.to(precision),
+            safety_checker=None if return_generator_pipeline else safety_checker.to(precision),
             feature_extractor=feature_extractor,
         )
     else:
@ -34,8 +34,7 @@ from picklescan.scanner import scan_file_path
|
||||
from invokeai.backend.globals import Globals, global_cache_dir
|
||||
|
||||
from ..stable_diffusion import StableDiffusionGeneratorPipeline
|
||||
from ..util import CPU_DEVICE, ask_user, download_with_resume
|
||||
|
||||
from ..util import CUDA_DEVICE, CPU_DEVICE, ask_user, download_with_resume
|
||||
|
||||
class SDLegacyType(Enum):
|
||||
V1 = 1
|
||||
@ -51,23 +50,29 @@ VAE_TO_REPO_ID = { # hack, see note in convert_and_import()
}

class ModelManager(object):
    '''
    Model manager handles loading, caching, importing, deleting, converting, and editing models.
    '''
    def __init__(
        self,
        config: OmegaConf,
        device_type: torch.device = CPU_DEVICE,
        precision: str = "float16",
        max_loaded_models=DEFAULT_MAX_MODELS,
        sequential_offload=False,
        self,
        config: OmegaConf | Path,
        device_type: torch.device = CUDA_DEVICE,
        precision: str = "float16",
        max_loaded_models=DEFAULT_MAX_MODELS,
        sequential_offload=False,
        embedding_path: Path = None,
    ):
        """
        Initialize with the path to the models.yaml config file,
        the torch device type, and precision. The optional
        min_avail_mem argument specifies how much unused system
        (CPU) memory to preserve. The cache of models in RAM will
        grow until this value is approached. Default is 2G.
        Initialize with the path to the models.yaml config file or
        an initialized OmegaConf dictionary. Optional parameters
        are the torch device type, precision, max_loaded_models,
        and sequential_offload boolean. Note that the default device
        type and precision are set up for a CUDA system running at half precision.
        """
        # prevent nasty-looking CLIP log message
        transformers.logging.set_verbosity_error()
        if not isinstance(config, DictConfig):
            config = OmegaConf.load(config)
        self.config = config
        self.precision = precision
        self.device = torch.device(device_type)
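With the revised signature, config may be either a Path to models.yaml or an already-initialized OmegaConf dictionary, and the defaults now assume a CUDA system at half precision. A hedged construction sketch (the import path and file locations are illustrative assumptions, not confirmed by this diff):

    from pathlib import Path
    from invokeai.backend.model_management import ModelManager  # assumed import path

    manager = ModelManager(
        config=Path("~/invokeai/configs/models.yaml").expanduser(),  # or an OmegaConf object
        precision="float16",  # matches the CUDA-oriented default
        embedding_path=Path("~/invokeai/embeddings").expanduser(),  # new optional parameter
    )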
@ -76,6 +81,7 @@ class ModelManager(object):
        self.stack = []  # this is an LRU FIFO
        self.current_model = None
        self.sequential_offload = sequential_offload
        self.embedding_path = embedding_path

    def valid_model(self, model_name: str) -> bool:
        """
@ -84,12 +90,15 @@ class ModelManager(object):
        """
        return model_name in self.config

    def get_model(self, model_name: str):
    def get_model(self, model_name: str = None) -> dict:
        """
        Given a model name identified in models.yaml, return
        the model object. If in RAM will load into GPU VRAM.
        If on disk, will load from there.
        """
        if not model_name:
            return self.get_model(self.current_model) if self.current_model else self.get_model(self.default_model())

        if not self.valid_model(model_name):
            print(
                f'** "{model_name}" is not a known model name. Please check your models.yaml file'
@ -104,7 +113,7 @@ class ModelManager(object):
        if model_name in self.models:
            requested_model = self.models[model_name]["model"]
            print(f">> Retrieving model {model_name} from system RAM cache")
            self.models[model_name]["model"] = self._model_from_cpu(requested_model)
            requested_model.ready()
            width = self.models[model_name]["width"]
            height = self.models[model_name]["height"]
            hash = self.models[model_name]["hash"]
@ -112,6 +121,7 @@ class ModelManager(object):
        else:  # we're about to load a new model, so potentially offload the least recently used one
            requested_model, width, height, hash = self._load_model(model_name)
            self.models[model_name] = {
                "model_name": model_name,
                "model": requested_model,
                "width": width,
                "height": height,
@ -121,6 +131,7 @@ class ModelManager(object):
        self.current_model = model_name
        self._push_newest_model(model_name)
        return {
            "model_name": model_name,
            "model": requested_model,
            "width": width,
            "height": height,
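Callers of get_model() now receive a dict rather than a bare model object, and may omit the model name entirely to get the current (or default) model. A sketch of the calling convention implied by the hunks above (the manager variable is assumed from the construction example earlier):

    record = manager.get_model()  # no argument: current or default model
    pipeline = record["model"]
    width, height = record["width"], record["height"]
    print(f'Loaded {record["model_name"]} at {width}x{height}')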
@ -425,6 +436,7 @@ class ModelManager(object):
            height = width

        print(f" | Default image dimensions = {width} x {height}")
        self._add_embeddings_to_model(pipeline)

        return pipeline, width, height, model_hash
@ -499,7 +511,7 @@ class ModelManager(object):

        print(f">> Offloading {model_name} to CPU")
        model = self.models[model_name]["model"]
        self.models[model_name]["model"] = self._model_to_cpu(model)
        model.offload_all()

        gc.collect()
        if self._has_cuda():
@ -557,7 +569,7 @@ class ModelManager(object):
        """
        model_name = model_name or Path(repo_or_path).stem
        model_description = (
            model_description or f"Imported diffusers model {model_name}"
            description or f"Imported diffusers model {model_name}"
        )
        new_config = dict(
            description=model_description,
@ -1044,43 +1056,6 @@ class ModelManager(object):
            self.stack.remove(model_name)
        self.models.pop(model_name, None)

    def _model_to_cpu(self, model):
        if self.device == CPU_DEVICE:
            return model

        if isinstance(model, StableDiffusionGeneratorPipeline):
            model.offload_all()
            return model

        model.cond_stage_model.device = CPU_DEVICE
        model.to(CPU_DEVICE)

        for submodel in ("first_stage_model", "cond_stage_model", "model"):
            try:
                getattr(model, submodel).to(CPU_DEVICE)
            except AttributeError:
                pass
        return model

    def _model_from_cpu(self, model):
        if self.device == CPU_DEVICE:
            return model

        if isinstance(model, StableDiffusionGeneratorPipeline):
            model.ready()
            return model

        model.to(self.device)
        model.cond_stage_model.device = self.device

        for submodel in ("first_stage_model", "cond_stage_model", "model"):
            try:
                getattr(model, submodel).to(self.device)
            except AttributeError:
                pass

        return model

    def _pop_oldest_model(self):
        """
        Remove the first element of the FIFO, which ought
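The deleted _model_to_cpu()/_model_from_cpu() helpers carried legacy ldm fallbacks; their call sites now use offload_all() and ready() directly on the StableDiffusionGeneratorPipeline, which delegates device placement to its internal model group. A minimal sketch of the resulting lifecycle (the model name is illustrative):

    record = manager.get_model("stable-diffusion-1.5")
    pipe = record["model"]

    pipe.ready()        # pre-load the pipeline's models onto the GPU
    # ... run generation ...
    pipe.offload_all()  # move everything back to CPU to free VRAM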
@ -1098,6 +1073,19 @@ class ModelManager(object):
            self.stack.remove(model_name)
        self.stack.append(model_name)

    def _add_embeddings_to_model(self, model: StableDiffusionGeneratorPipeline):
        if self.embedding_path is not None:
            print(f">> Loading embeddings from {self.embedding_path}")
            for root, _, files in os.walk(self.embedding_path):
                for name in files:
                    ti_path = os.path.join(root, name)
                    model.textual_inversion_manager.load_textual_inversion(
                        ti_path, defer_injecting_tokens=True
                    )
            print(
                f'>> Textual inversion triggers: {", ".join(sorted(model.textual_inversion_manager.get_all_trigger_strings()))}'
            )

    def _has_cuda(self) -> bool:
        return self.device.type == "cuda"
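self.stack is the LRU bookkeeping referenced throughout these hunks: _push_newest_model() removes a name if present and re-appends it, so index 0 is always the least recently used entry and _pop_oldest_model() evicts from the front. A tiny self-contained sketch of that discipline (not the class itself):

    # Eviction discipline mirroring the diff's stack logic.
    stack: list[str] = []

    def push_newest(name: str) -> None:
        if name in stack:
            stack.remove(name)
        stack.append(name)  # most recently used lives at the end

    def pop_oldest() -> str:
        return stack.pop(0)  # least recently used lives at index 0

    push_newest("sd-1.5"); push_newest("sd-2.1"); push_newest("sd-1.5")
    assert stack == ["sd-2.1", "sd-1.5"]
    assert pop_oldest() == "sd-2.1"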
@ -3,7 +3,6 @@ Initialization file for invokeai.backend.prompting
"""
from .conditioning import (
    get_prompt_structure,
    get_tokenizer,
    get_tokens_for_prompt_object,
    get_uc_and_c_and_ec,
    split_weighted_subprompts,
@ -7,7 +7,7 @@ get_uc_and_c_and_ec() get the conditioned and unconditioned latent, an

"""
import re
from typing import Any, Optional, Union
from typing import Optional, Union

from compel import Compel
from compel.prompt_parser import (
@ -17,7 +17,6 @@ from compel.prompt_parser import (
    Fragment,
    PromptParser,
)
from transformers import CLIPTokenizer

from invokeai.backend.globals import Globals

@ -25,36 +24,6 @@ from ..stable_diffusion import InvokeAIDiffuserComponent
from ..util import torch_dtype


def get_tokenizer(model) -> CLIPTokenizer:
    # TODO remove legacy ckpt fallback handling
    return (
        getattr(model, "tokenizer", None)  # diffusers
        or model.cond_stage_model.tokenizer
    )  # ldm


def get_text_encoder(model) -> Any:
    # TODO remove legacy ckpt fallback handling
    return getattr(
        model, "text_encoder", None
    ) or UnsqueezingLDMTransformer(  # diffusers
        model.cond_stage_model.transformer
    )  # ldm


class UnsqueezingLDMTransformer:
    def __init__(self, ldm_transformer):
        self.ldm_transformer = ldm_transformer

    @property
    def device(self):
        return self.ldm_transformer.device

    def __call__(self, *args, **kwargs):
        insufficiently_unsqueezed_tensor = self.ldm_transformer(*args, **kwargs)
        return insufficiently_unsqueezed_tensor.unsqueeze(0)


def get_uc_and_c_and_ec(
    prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False
):
@ -64,11 +33,10 @@ def get_uc_and_c_and_ec(
        prompt_string
    )

    tokenizer = get_tokenizer(model)
    text_encoder = get_text_encoder(model)
    tokenizer = model.tokenizer
    compel = Compel(
        tokenizer=tokenizer,
        text_encoder=text_encoder,
        text_encoder=model.text_encoder,
        textual_inversion_manager=model.textual_inversion_manager,
        dtype_for_device_getter=torch_dtype,
        truncate_long_prompts=False
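With the legacy ckpt fallbacks removed, conditioning reads the tokenizer and text encoder straight off the diffusers pipeline. A hedged sketch of the Compel setup this hunk leaves in place (model and the torch_dtype helper are assumed from the surrounding context):

    from compel import Compel

    # `model` is assumed to be a StableDiffusionGeneratorPipeline.
    compel = Compel(
        tokenizer=model.tokenizer,
        text_encoder=model.text_encoder,
        textual_inversion_manager=model.textual_inversion_manager,
        dtype_for_device_getter=torch_dtype,
        truncate_long_prompts=False,
    )
    conditioning = compel.build_conditioning_tensor("a photograph of an astronaut")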
82  invokeai/backend/safety_checker.py (new file)
@ -0,0 +1,82 @@
'''
SafetyChecker class - checks images against the StabilityAI NSFW filter
and blurs images that contain potential NSFW content.
'''
import diffusers
import numpy as np
import torch
import traceback
from diffusers.pipelines.stable_diffusion.safety_checker import (
    StableDiffusionSafetyChecker,
)
from pathlib import Path
from PIL import Image, ImageFilter
from transformers import AutoFeatureExtractor

import invokeai.assets.web as web_assets
from .globals import global_cache_dir
from .util import CPU_DEVICE

class SafetyChecker(object):
    CAUTION_IMG = "caution.png"

    def __init__(self, device: torch.device):
        path = Path(web_assets.__path__[0]) / self.CAUTION_IMG
        caution = Image.open(path)
        self.caution_img = caution.resize((caution.width // 2, caution.height // 2))
        self.device = device

        try:
            safety_model_id = "CompVis/stable-diffusion-safety-checker"
            safety_model_path = global_cache_dir("hub")
            self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
                safety_model_id,
                local_files_only=True,
                cache_dir=safety_model_path,
            )
            self.safety_feature_extractor = AutoFeatureExtractor.from_pretrained(
                safety_model_id,
                local_files_only=True,
                cache_dir=safety_model_path,
            )
        except Exception:
            print(
                "** An error was encountered while installing the safety checker:"
            )
            print(traceback.format_exc())

    def check(self, image: Image.Image):
        """
        Check provided image against the StabilityAI safety checker and return
        the image, or a blurred copy if potential NSFW content is detected.
        """
        self.safety_checker.to(self.device)
        features = self.safety_feature_extractor([image], return_tensors="pt")
        features.to(self.device)

        # unfortunately checker requires the numpy version, so we have to convert back
        x_image = np.array(image).astype(np.float32) / 255.0
        x_image = x_image[None].transpose(0, 3, 1, 2)

        diffusers.logging.set_verbosity_error()
        checked_image, has_nsfw_concept = self.safety_checker(
            images=x_image, clip_input=features.pixel_values
        )
        self.safety_checker.to(CPU_DEVICE)  # offload
        if has_nsfw_concept[0]:
            print(
                "** An image with potential non-safe content has been detected. A blurred image will be returned. **"
            )
            return self.blur(image)
        else:
            return image

    def blur(self, input):
        blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32))
        try:
            if caution := self.caution_img:
                blurry.paste(caution, (0, 0), caution)
        except FileNotFoundError:
            pass
        return blurry
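A short usage sketch for the new module (the device and file names are illustrative):

    import torch
    from PIL import Image
    from invokeai.backend.safety_checker import SafetyChecker

    # check() returns the original image, or a blurred copy overlaid
    # with the caution graphic when potential NSFW content is detected.
    checker = SafetyChecker(device=torch.device("cuda"))
    result = checker.check(Image.open("generated.png"))
    result.save("checked.png")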
@ -54,16 +54,6 @@ class PipelineIntermediateState:
    attention_map_saver: Optional[AttentionMapSaver] = None


# copied from configs/stable-diffusion/v1-inference.yaml
_default_personalization_config_params = dict(
    placeholder_strings=["*"],
    initializer_wods=["sculpture"],
    per_image_tokens=False,
    num_vectors_per_token=1,
    progressive_words=False,
)


@dataclass
class AddsMaskLatents:
    """Add the channels required for inpainting model input.
@ -175,7 +165,7 @@ def image_resized_to_grid_as_tensor(
    :param normalize: scale the range to [-1, 1] instead of [0, 1]
    :param multiple_of: resize the input so both dimensions are a multiple of this
    """
    w, h = trim_to_multiple_of(*image.size)
    w, h = trim_to_multiple_of(*image.size, multiple_of=multiple_of)
    transformation = T.Compose(
        [
            T.Resize((h, w), T.InterpolationMode.LANCZOS),
@ -290,10 +280,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offsensive or harmful.
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
@ -436,11 +426,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
        """
        Ready this pipeline's models.

        i.e. pre-load them to the GPU if appropriate.
        i.e. preload them to the GPU if appropriate.
        """
        self._model_group.ready()

    def to(self, torch_device: Optional[Union[str, torch.device]] = None):
    def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings=False):
        # overridden method; types match the superclass.
        if torch_device is None:
            return self
@ -917,20 +907,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
            device=self._model_group.device_for(self.unet),
        )

    @property
    def cond_stage_model(self):
        return self.embeddings_provider

    @torch.inference_mode()
    def _tokenize(self, prompt: Union[str, List[str]]):
        return self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )

    @property
    def channels(self) -> int:
        """Compatible with DiffusionWrapper"""
@ -942,11 +918,10 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
        return super().decode_latents(latents)

    def debug_latents(self, latents, msg):
        from invokeai.backend.image_util import debug_image
        with torch.inference_mode():
            from ldm.util import debug_image

            decoded = self.numpy_to_pil(self.decode_latents(latents))
            for i, img in enumerate(decoded):
                debug_image(
                    img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True
                )
        for i, img in enumerate(decoded):
            debug_image(
                img, f"latents {msg} {i+1}/{len(decoded)}", debug_status=True
            )
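The one-line fix in image_resized_to_grid_as_tensor() matters because trim_to_multiple_of() was previously called without forwarding multiple_of, so every image was snapped to the default grid regardless of what the caller asked for. A hedged reconstruction of the helper to show the effect (the body is inferred from its name and call site, not copied from the source):

    # Inferred behavior: floor each dimension down to the nearest multiple.
    def trim_to_multiple_of(*args: int, multiple_of: int = 8) -> tuple[int, ...]:
        return tuple((x // multiple_of) * multiple_of for x in args)

    assert trim_to_multiple_of(520, 776) == (520, 776)                  # default grid of 8
    assert trim_to_multiple_of(520, 776, multiple_of=64) == (512, 768)  # now honored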
@ -29,7 +29,6 @@ from ..image_util import PngWriter, retrieve_metadata
from ...frontend.merge.merge_diffusers import merge_diffusion_models
from ..prompting import (
    get_prompt_structure,
    get_tokenizer,
    get_tokens_for_prompt_object,
)
from ..stable_diffusion import PipelineIntermediateState
@ -1274,7 +1273,7 @@ class InvokeAIWebServer:
                    None
                    if type(parsed_prompt) is Blend
                    else get_tokens_for_prompt_object(
                        get_tokenizer(self.generate.model), parsed_prompt
                        self.generate.model.tokenizer, parsed_prompt
                    )
                )
                attention_maps_image_base64_url = (
@ -35,6 +35,7 @@ module.exports = {
      { varsIgnorePattern: '^_', argsIgnorePattern: '^_' },
    ],
    'prettier/prettier': ['error', { endOfLine: 'auto' }],
    '@typescript-eslint/ban-ts-comment': 'warn',
  },
  settings: {
    react: {
40610  invokeai/frontend/web/dist/App-d3a51b95.mjs (vendored; diff suppressed: file too large)

1  invokeai/frontend/web/dist/assets/App-08e5c546.css (vendored, new file)
@ -0,0 +1 @@
.ltr-image-gallery-css-transition-enter{transform:translate(150%)}.ltr-image-gallery-css-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.ltr-image-gallery-css-transition-exit{transform:translate(0)}.ltr-image-gallery-css-transition-exit-active{transform:translate(150%);transition:all .12s ease-out}.rtl-image-gallery-css-transition-enter{transform:translate(-150%)}.rtl-image-gallery-css-transition-enter-active{transform:translate(0);transition:all .12s ease-out}.rtl-image-gallery-css-transition-exit{transform:translate(0)}.rtl-image-gallery-css-transition-exit-active{transform:translate(-150%);transition:all .12s ease-out}
188    invokeai/frontend/web/dist/assets/App-b40e839f.js (vendored, new file; diff suppressed: lines too long)
315    invokeai/frontend/web/dist/assets/ThemeLocaleProvider-bd116415.js (vendored, new file; diff suppressed: lines too long)
BIN    invokeai/frontend/web/dist/assets/favicon-0d253ced.ico (vendored, new file; binary, 116 KiB)
1      invokeai/frontend/web/dist/assets/index-5483945c.css (vendored, new file; diff suppressed: lines too long)
115    invokeai/frontend/web/dist/assets/index-e1f916bd.js (vendored, new file; diff suppressed: lines too long)
BIN    invokeai/frontend/web/dist/assets/inter-all-100-normal-2596a8cd.woff (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-all-200-normal-34e907e6.woff (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-all-300-normal-e375e256.woff (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-all-400-normal-f824029b.woff (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-all-500-normal-94e08ad8.woff (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-all-600-normal-ba29c057.woff (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-all-700-normal-9d318ccb.woff (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-all-800-normal-ab496fbe.woff (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-all-900-normal-50daf4f1.woff (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-100-normal-9747741a.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-200-normal-87d2e1ba.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-300-normal-cff08766.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-400-normal-e9493683.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-500-normal-f6bd191e.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-600-normal-9bc492f5.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-700-normal-f6c6dcaf.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-800-normal-82994ee8.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-900-normal-768011c3.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-ext-100-normal-a1f4d02d.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-ext-200-normal-82562199.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-ext-300-normal-66b2369e.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-ext-400-normal-f7666a51.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-ext-500-normal-8b5f6999.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-ext-600-normal-2ea11f8c.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-ext-700-normal-b7bb121f.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-ext-800-normal-f15d8f83.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-cyrillic-ext-900-normal-b1c13874.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-100-normal-a44b9fc9.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-200-normal-9575e0f8.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-300-normal-d0749e19.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-400-normal-2f2d421a.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-500-normal-ddbf6a70.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-600-normal-4591e350.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-700-normal-9e078f49.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-800-normal-fb5de277.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-900-normal-ffa82654.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-ext-100-normal-71976b96.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-ext-200-normal-45dafb12.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-ext-300-normal-09d21325.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-ext-400-normal-d3e30cde.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-ext-500-normal-528b79aa.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-ext-600-normal-c37a11b3.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-ext-700-normal-22174f43.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-ext-800-normal-bddb6f8e.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-greek-ext-900-normal-bebcb6fc.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-100-normal-61cac109.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-200-normal-74885a0c.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-300-normal-6b2cee46.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-400-normal-0364d368.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-500-normal-d5333670.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-600-normal-048d136d.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-700-normal-ced2d8e0.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-800-normal-a51ac27d.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-900-normal-f2db7f82.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-ext-100-normal-d3be20b3.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-ext-200-normal-4336e69d.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-ext-300-normal-34623012.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-ext-400-normal-64a98f58.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-ext-500-normal-4fba9ae6.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-ext-600-normal-cc23fe6f.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-ext-700-normal-1cc47d25.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-ext-800-normal-b6167428.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/inter-latin-ext-900-normal-3cff82a5.woff2 (vendored, new file; binary)
BIN    invokeai/frontend/web/dist/assets/logo-13003d72.png (vendored, new file; binary, 43 KiB)
9      invokeai/frontend/web/dist/assets/storeHooks-548a355c.js (vendored, new file; diff suppressed: lines too long)
25336  invokeai/frontend/web/dist/component-6919b100.mjs (vendored; diff suppressed: lines too long)
Some files were not shown because too many files have changed in this diff.