Merge branch 'main' into refactor/model-manager-3

Lincoln Stein 2023-12-04 17:06:36 -05:00 committed by GitHub
commit f73b678aae
12 changed files with 198 additions and 82 deletions

View File

@@ -120,7 +120,7 @@ Generate an image with a given prompt, record the seed of the image, and then
use the `prompt2prompt` syntax to substitute words in the original prompt for
words in a new prompt. This works for `img2img` as well.
-For example, consider the prompt `a cat.swap(dog) playing with a ball in the forest`. Normally, because of the word words interact with each other when doing a stable diffusion image generation, these two prompts would generate different compositions:
+For example, consider the prompt `a cat.swap(dog) playing with a ball in the forest`. Normally, because the words interact with each other when doing a stable diffusion image generation, these two prompts would generate different compositions:
- `a cat playing with a ball in the forest`
- `a dog playing with a ball in the forest`

View File

@@ -1,7 +1,11 @@
import typing
from enum import Enum
from importlib.metadata import PackageNotFoundError, version
from pathlib import Path
from platform import python_version
from typing import Optional
import torch
from fastapi import Body
from fastapi.routing import APIRouter
from pydantic import BaseModel, Field
@@ -40,6 +44,24 @@ class AppVersion(BaseModel):
version: str = Field(description="App version")
class AppDependencyVersions(BaseModel):
"""App depencency Versions Response"""
accelerate: str = Field(description="accelerate version")
compel: str = Field(description="compel version")
cuda: Optional[str] = Field(description="CUDA version")
diffusers: str = Field(description="diffusers version")
numpy: str = Field(description="Numpy version")
opencv: str = Field(description="OpenCV version")
onnx: str = Field(description="ONNX version")
pillow: str = Field(description="Pillow (PIL) version")
python: str = Field(description="Python version")
torch: str = Field(description="PyTorch version")
torchvision: str = Field(description="PyTorch Vision version")
transformers: str = Field(description="transformers version")
xformers: Optional[str] = Field(description="xformers version")
class AppConfig(BaseModel):
"""App Config Response"""
@@ -54,6 +76,29 @@ async def get_version() -> AppVersion:
return AppVersion(version=__version__)
@app_router.get("/app_deps", operation_id="get_app_deps", status_code=200, response_model=AppDependencyVersions)
async def get_app_deps() -> AppDependencyVersions:
try:
xformers = version("xformers")
except PackageNotFoundError:
xformers = None
return AppDependencyVersions(
accelerate=version("accelerate"),
compel=version("compel"),
cuda=torch.version.cuda,
diffusers=version("diffusers"),
numpy=version("numpy"),
opencv=version("opencv-python"),
onnx=version("onnx"),
pillow=version("pillow"),
python=python_version(),
torch=torch.version.__version__,
torchvision=version("torchvision"),
transformers=version("transformers"),
xformers=xformers,
)
@app_router.get("/config", operation_id="get_config", status_code=200, response_model=AppConfig)
async def get_config() -> AppConfig:
infill_methods = ["tile", "lama", "cv2"]
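
A minimal client-side sketch of querying the new dependency-versions endpoint. The host, port, and `/api/v1/app` route prefix are assumptions for illustration, not something this diff confirms:

import requests

# Hypothetical: assumes InvokeAI serves its API at localhost:9090 with
# the app router mounted under /api/v1/app.
resp = requests.get("http://localhost:9090/api/v1/app/app_deps")
resp.raise_for_status()
deps = resp.json()
print(deps["torch"], deps.get("xformers"))  # xformers may be null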

View File

@@ -5,6 +5,8 @@ from typing import Union
import torch
from invokeai.app.services.invoker import Invoker
from .latents_storage_base import LatentsStorageBase
@@ -17,6 +19,10 @@ class DiskLatentsStorage(LatentsStorageBase):
self.__output_folder = output_folder if isinstance(output_folder, Path) else Path(output_folder)
self.__output_folder.mkdir(parents=True, exist_ok=True)
def start(self, invoker: Invoker) -> None:
self._invoker = invoker
self._delete_all_latents()
def get(self, name: str) -> torch.Tensor:
latent_path = self.get_path(name)
return torch.load(latent_path)
@@ -32,3 +38,21 @@ class DiskLatentsStorage(LatentsStorageBase):
def get_path(self, name: str) -> Path:
return self.__output_folder / name
def _delete_all_latents(self) -> None:
"""
Deletes all latents from disk.
Must be called after we have access to `self._invoker` (e.g. in `start()`).
"""
deleted_latents_count = 0
freed_space = 0
for latents_file in Path(self.__output_folder).glob("*"):
if latents_file.is_file():
freed_space += latents_file.stat().st_size
deleted_latents_count += 1
latents_file.unlink()
if deleted_latents_count > 0:
freed_space_in_mb = round(freed_space / 1024 / 1024, 2)
self._invoker.services.logger.info(
f"Deleted {deleted_latents_count} latents files (freed {freed_space_in_mb}MB)"
)
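
The cleanup above pairs deletion with size accounting. A standalone sketch of the same pattern, assuming a hypothetical folder of temporary files:

from pathlib import Path

def clear_folder(folder: Path) -> tuple[int, float]:
    # Delete every regular file in `folder`; report (count, MB freed).
    count, freed = 0, 0
    for f in folder.glob("*"):
        if f.is_file():
            freed += f.stat().st_size
            count += 1
            f.unlink()
    return count, round(freed / 1024 / 1024, 2)

count, mb = clear_folder(Path("tmp/latents"))  # path is an assumption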

View File

@@ -5,6 +5,8 @@ from typing import Dict, Optional
import torch
from invokeai.app.services.invoker import Invoker
from .latents_storage_base import LatentsStorageBase
@@ -23,6 +25,18 @@ class ForwardCacheLatentsStorage(LatentsStorageBase):
self.__cache_ids = Queue()
self.__max_cache_size = max_cache_size
def start(self, invoker: Invoker) -> None:
self._invoker = invoker
start_op = getattr(self.__underlying_storage, "start", None)
if callable(start_op):
start_op(invoker)
def stop(self, invoker: Invoker) -> None:
self._invoker = invoker
stop_op = getattr(self.__underlying_storage, "stop", None)
if callable(stop_op):
stop_op(invoker)
def get(self, name: str) -> torch.Tensor:
cache_item = self.__get_cache(name)
if cache_item is not None:
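
The `getattr`/`callable` guard above lets the cache delegate lifecycle calls without assuming the wrapped storage implements them. A minimal standalone sketch of that pattern, with hypothetical names:

class CachingWrapper:
    # Minimal sketch of defensive lifecycle delegation; names are hypothetical.
    def __init__(self, inner) -> None:
        self._inner = inner

    def start(self, invoker) -> None:
        # Delegate only if the wrapped object defines a callable start().
        start_op = getattr(self._inner, "start", None)
        if callable(start_op):
            start_op(invoker)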

View File

@@ -42,7 +42,8 @@ class SqliteSessionQueue(SessionQueueBase):
self._set_in_progress_to_canceled()
prune_result = self.prune(DEFAULT_QUEUE_ID)
local_handler.register(event_name=EventServiceBase.queue_event, _func=self._on_session_event)
-self.__invoker.services.logger.info(f"Pruned {prune_result.deleted} finished queue items")
+if prune_result.deleted > 0:
+self.__invoker.services.logger.info(f"Pruned {prune_result.deleted} finished queue items")
def __init__(self, db: SqliteDatabase) -> None:
super().__init__()

View File

@@ -207,10 +207,12 @@ class IterateInvocationOutput(BaseInvocationOutput):
item: Any = OutputField(
description="The item being iterated over", title="Collection Item", ui_type=UIType._CollectionItem
)
index: int = OutputField(description="The index of the item", title="Index")
total: int = OutputField(description="The total number of items", title="Total")
# TODO: Fill this out and move to invocations
@invocation("iterate", version="1.0.0")
@invocation("iterate", version="1.1.0")
class IterateInvocation(BaseInvocation):
"""Iterates over a list of items"""
@@ -221,7 +223,7 @@ class IterateInvocation(BaseInvocation):
def invoke(self, context: InvocationContext) -> IterateInvocationOutput:
"""Produces the outputs as values"""
-return IterateInvocationOutput(item=self.collection[self.index])
+return IterateInvocationOutput(item=self.collection[self.index], index=self.index, total=len(self.collection))
@invocation_output("collect_output")
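
The new `index` and `total` outputs make each iteration step self-describing, so downstream nodes can tell where they are in the collection. A plain-Python analogue of that contract (illustrative only, not the actual node API):

# Plain-Python analogue of the updated output: every step carries the
# item plus its position and the collection's size.
collection = ["a", "b", "c"]
for index, item in enumerate(collection):
    output = {"item": item, "index": index, "total": len(collection)}
    print(output)  # e.g. {'item': 'a', 'index': 0, 'total': 3}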

View File

@@ -1,6 +1,7 @@
import sqlite3
import threading
from logging import Logger
from pathlib import Path
from invokeai.app.services.config import InvokeAIAppConfig
@@ -8,25 +9,20 @@ sqlite_memory = ":memory:"
class SqliteDatabase:
conn: sqlite3.Connection
lock: threading.RLock
_logger: Logger
_config: InvokeAIAppConfig
def __init__(self, config: InvokeAIAppConfig, logger: Logger):
self._logger = logger
self._config = config
if self._config.use_memory_db:
-location = sqlite_memory
+self.db_path = sqlite_memory
+logger.info("Using in-memory database")
else:
db_path = self._config.db_path
db_path.parent.mkdir(parents=True, exist_ok=True)
-location = str(db_path)
-self._logger.info(f"Using database at {location}")
+self.db_path = str(db_path)
+self._logger.info(f"Using database at {self.db_path}")
-self.conn = sqlite3.connect(location, check_same_thread=False)
+self.conn = sqlite3.connect(self.db_path, check_same_thread=False)
self.lock = threading.RLock()
self.conn.row_factory = sqlite3.Row
@@ -37,10 +33,16 @@ class SqliteDatabase:
def clean(self) -> None:
try:
+if self.db_path == sqlite_memory:
+return
+initial_db_size = Path(self.db_path).stat().st_size
self.lock.acquire()
self.conn.execute("VACUUM;")
self.conn.commit()
-self._logger.info("Cleaned database")
+final_db_size = Path(self.db_path).stat().st_size
+freed_space_in_mb = round((initial_db_size - final_db_size) / 1024 / 1024, 2)
+if freed_space_in_mb > 0:
+self._logger.info(f"Cleaned database (freed {freed_space_in_mb}MB)")
except Exception as e:
self._logger.error(f"Error cleaning database: {e}")
raise e
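
The before/after size check brackets SQLite's VACUUM, which rewrites the database file and can reclaim space. A standalone sketch of the same measurement, with a hypothetical database path:

# Standalone sketch: measure the space reclaimed by VACUUM on an
# on-disk SQLite database. The "app.db" path is hypothetical.
import sqlite3
from pathlib import Path

db_path = Path("app.db")
before = db_path.stat().st_size
conn = sqlite3.connect(str(db_path))
conn.execute("VACUUM;")
conn.commit()
conn.close()
freed_mb = round((before - db_path.stat().st_size) / 1024 / 1024, 2)
print(f"freed {freed_mb}MB")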

View File

@@ -91,7 +91,19 @@
"controlNet": "ControlNet",
"auto": "Automatico",
"simple": "Semplice",
"details": "Dettagli"
"details": "Dettagli",
"format": "formato",
"unknown": "Sconosciuto",
"folder": "Cartella",
"error": "Errore",
"installed": "Installato",
"template": "Schema",
"outputs": "Uscite",
"data": "Dati",
"somethingWentWrong": "Qualcosa è andato storto",
"copyError": "$t(gallery.copy) Errore",
"input": "Ingresso",
"notInstalled": "Non $t(common.installed)"
},
"gallery": {
"generations": "Generazioni",
@@ -122,7 +134,14 @@
"preparingDownload": "Preparazione del download",
"preparingDownloadFailed": "Problema durante la preparazione del download",
"downloadSelection": "Scarica gli elementi selezionati",
"noImageSelected": "Nessuna immagine selezionata"
"noImageSelected": "Nessuna immagine selezionata",
"deleteSelection": "Elimina la selezione",
"image": "immagine",
"drop": "Rilascia",
"unstarImage": "Rimuovi preferenza immagine",
"dropOrUpload": "$t(gallery.drop) o carica",
"starImage": "Immagine preferita",
"dropToUpload": "$t(gallery.drop) per aggiornare"
},
"hotkeys": {
"keyboardShortcuts": "Tasti rapidi",
@@ -477,7 +496,8 @@
"modelType": "Tipo di modello",
"customConfigFileLocation": "Posizione del file di configurazione personalizzato",
"vaePrecision": "Precisione VAE",
"noModelSelected": "Nessun modello selezionato"
"noModelSelected": "Nessun modello selezionato",
"conversionNotSupported": "Conversione non supportata"
},
"parameters": {
"images": "Immagini",
@@ -838,7 +858,8 @@
"menu": "Menu",
"showGalleryPanel": "Mostra il pannello Galleria",
"loadMore": "Carica altro",
"mode": "Modalità"
"mode": "Modalità",
"resetUI": "$t(accessibility.reset) l'Interfaccia Utente"
},
"ui": {
"hideProgressImages": "Nascondi avanzamento immagini",
@@ -1040,7 +1061,15 @@
"updateAllNodes": "Aggiorna tutti i nodi",
"unableToUpdateNodes_one": "Impossibile aggiornare {{count}} nodo",
"unableToUpdateNodes_many": "Impossibile aggiornare {{count}} nodi",
"unableToUpdateNodes_other": "Impossibile aggiornare {{count}} nodi"
"unableToUpdateNodes_other": "Impossibile aggiornare {{count}} nodi",
"addLinearView": "Aggiungi alla vista Lineare",
"outputFieldInInput": "Campo di uscita in ingresso",
"unableToMigrateWorkflow": "Impossibile migrare il flusso di lavoro",
"unableToUpdateNode": "Impossibile aggiornare nodo",
"unknownErrorValidatingWorkflow": "Errore sconosciuto durante la convalida del flusso di lavoro",
"collectionFieldType": "{{name}} Raccolta",
"collectionOrScalarFieldType": "{{name}} Raccolta|Scalare",
"nodeVersion": "Versione Nodo"
},
"boards": {
"autoAddBoard": "Aggiungi automaticamente bacheca",
@@ -1062,7 +1091,10 @@
"deleteBoardOnly": "Elimina solo la Bacheca",
"deleteBoard": "Elimina Bacheca",
"deleteBoardAndImages": "Elimina Bacheca e Immagini",
"deletedBoardsCannotbeRestored": "Le bacheche eliminate non possono essere ripristinate"
"deletedBoardsCannotbeRestored": "Le bacheche eliminate non possono essere ripristinate",
"movingImagesToBoard_one": "Spostare {{count}} immagine nella bacheca:",
"movingImagesToBoard_many": "Spostare {{count}} immagini nella bacheca:",
"movingImagesToBoard_other": "Spostare {{count}} immagini nella bacheca:"
},
"controlnet": {
"contentShuffleDescription": "Rimescola il contenuto di un'immagine",
@@ -1136,7 +1168,8 @@
"megaControl": "Mega ControlNet",
"minConfidence": "Confidenza minima",
"scribble": "Scribble",
"amult": "Angolo di illuminazione"
"amult": "Angolo di illuminazione",
"coarse": "Approssimativo"
},
"queue": {
"queueFront": "Aggiungi all'inizio della coda",
@@ -1204,7 +1237,8 @@
"embedding": {
"noMatchingEmbedding": "Nessun Incorporamento corrispondente",
"addEmbedding": "Aggiungi Incorporamento",
"incompatibleModel": "Modello base incompatibile:"
"incompatibleModel": "Modello base incompatibile:",
"noEmbeddingsLoaded": "Nessun incorporamento caricato"
},
"models": {
"noMatchingModels": "Nessun modello corrispondente",
@@ -1217,7 +1251,8 @@
"noRefinerModelsInstalled": "Nessun modello SDXL Refiner installato",
"noLoRAsInstalled": "Nessun LoRA installato",
"esrganModel": "Modello ESRGAN",
"addLora": "Aggiungi LoRA"
"addLora": "Aggiungi LoRA",
"noLoRAsLoaded": "Nessuna LoRA caricata"
},
"invocationCache": {
"disable": "Disabilita",
@@ -1233,7 +1268,8 @@
"enable": "Abilita",
"clear": "Svuota",
"maxCacheSize": "Dimensione max cache",
"cacheSize": "Dimensione cache"
"cacheSize": "Dimensione cache",
"useCache": "Usa Cache"
},
"dynamicPrompts": {
"seedBehaviour": {

View File

@@ -3,8 +3,8 @@ import { $authToken } from 'app/store/nanostores/authToken';
import { $baseUrl } from 'app/store/nanostores/baseUrl';
import { $isDebugging } from 'app/store/nanostores/isDebugging';
import { useAppDispatch } from 'app/store/storeHooks';
-import { MapStore, WritableAtom, atom, map } from 'nanostores';
-import { useEffect } from 'react';
+import { MapStore, atom, map } from 'nanostores';
+import { useEffect, useMemo } from 'react';
import {
ClientToServerEvents,
ServerToClientEvents,
@@ -16,57 +16,10 @@ import { ManagerOptions, Socket, SocketOptions, io } from 'socket.io-client';
declare global {
interface Window {
$socketOptions?: MapStore<Partial<ManagerOptions & SocketOptions>>;
-$socketUrl?: WritableAtom<string>;
}
}
-const makeSocketOptions = (): Partial<ManagerOptions & SocketOptions> => {
-const socketOptions: Parameters<typeof io>[0] = {
-timeout: 60000,
-path: '/ws/socket.io',
-autoConnect: false, // achtung! removing this breaks the dynamic middleware
-forceNew: true,
-};
-// if building in package mode, replace socket url with open api base url minus the http protocol
-if (['nodes', 'package'].includes(import.meta.env.MODE)) {
-const authToken = $authToken.get();
-if (authToken) {
-// TODO: handle providing jwt to socket.io
-socketOptions.auth = { token: authToken };
-}
-socketOptions.transports = ['websocket', 'polling'];
-}
-return socketOptions;
-};
-const makeSocketUrl = (): string => {
-const wsProtocol = window.location.protocol === 'https:' ? 'wss' : 'ws';
-let socketUrl = `${wsProtocol}://${window.location.host}`;
-if (['nodes', 'package'].includes(import.meta.env.MODE)) {
-const baseUrl = $baseUrl.get();
-if (baseUrl) {
-//eslint-disable-next-line
-socketUrl = baseUrl.replace(/^https?\:\/\//i, '');
-}
-}
-return socketUrl;
-};
-const makeSocket = (): Socket<ServerToClientEvents, ClientToServerEvents> => {
-const socketOptions = makeSocketOptions();
-const socketUrl = $socketUrl.get();
-const socket: Socket<ServerToClientEvents, ClientToServerEvents> = io(
-socketUrl,
-{ ...socketOptions, ...$socketOptions.get() }
-);
-return socket;
-};
export const $socketOptions = map<Partial<ManagerOptions & SocketOptions>>({});
-export const $socketUrl = atom<string>(makeSocketUrl());
export const $isSocketInitialized = atom<boolean>(false);
/**
@@ -74,23 +27,50 @@ export const $isSocketInitialized = atom<boolean>(false);
*/
export const useSocketIO = () => {
const dispatch = useAppDispatch();
-const socketOptions = useStore($socketOptions);
-const socketUrl = useStore($socketUrl);
const baseUrl = useStore($baseUrl);
const authToken = useStore($authToken);
+const addlSocketOptions = useStore($socketOptions);
+const socketUrl = useMemo(() => {
+const wsProtocol = window.location.protocol === 'https:' ? 'wss' : 'ws';
+if (baseUrl) {
+return baseUrl.replace(/^https?:\/\//i, '');
+}
+return `${wsProtocol}://${window.location.host}`;
+}, [baseUrl]);
+const socketOptions = useMemo(() => {
+const options: Parameters<typeof io>[0] = {
+timeout: 60000,
+path: '/ws/socket.io',
+autoConnect: false, // achtung! removing this breaks the dynamic middleware
+forceNew: true,
+};
+if (authToken) {
+options.auth = { token: authToken };
+options.transports = ['websocket', 'polling'];
+}
+return { ...options, ...addlSocketOptions };
+}, [authToken, addlSocketOptions]);
useEffect(() => {
if ($isSocketInitialized.get()) {
// Singleton!
return;
}
-const socket = makeSocket();
+const socket: Socket<ServerToClientEvents, ClientToServerEvents> = io(
+socketUrl,
+socketOptions
+);
setEventListeners({ dispatch, socket });
socket.connect();
if ($isDebugging.get()) {
window.$socketOptions = $socketOptions;
-window.$socketUrl = $socketUrl;
console.log('Socket initialized', socket);
}
@@ -99,11 +79,10 @@ export const useSocketIO = () => {
return () => {
if ($isDebugging.get()) {
window.$socketOptions = undefined;
-window.$socketUrl = undefined;
console.log('Socket teardown', socket);
}
socket.disconnect();
$isSocketInitialized.set(false);
};
-}, [dispatch, socketOptions, socketUrl, baseUrl, authToken]);
+}, [dispatch, socketOptions, socketUrl]);
};
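
For reference, a sketch of how a non-browser client might connect to the same socket endpoint, using the python-socketio package; the host, port, and token are assumptions, and this library is not part of the diff:

# Hypothetical non-browser client using the python-socketio package.
import socketio

sio = socketio.Client()
sio.connect(
    "http://localhost:9090",          # host/port are assumptions
    socketio_path="/ws/socket.io",    # mirrors the path option above
    transports=["websocket", "polling"],
    auth={"token": "example-token"},  # only needed when auth is enabled
)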

View File

@@ -73,7 +73,13 @@ const BoardContextMenu = ({
addToast({
title: t('gallery.preparingDownload'),
status: 'success',
-...(response.response ? { description: response.response } : {}),
+...(response.response
+? {
+description: response.response,
+duration: null,
+isClosable: true,
+}
+: {}),
})
);
} catch {

View File

@@ -59,7 +59,13 @@ const MultipleSelectionMenuItems = () => {
addToast({
title: t('gallery.preparingDownload'),
status: 'success',
-...(response.response ? { description: response.response } : {}),
+...(response.response
+? {
+description: response.response,
+duration: null,
+isClosable: true,
+}
+: {}),
})
);
} catch {

View File

@@ -54,7 +54,8 @@ dependencies = [
"invisible-watermark~=0.2.0", # needed to install SDXL base and refiner using their repo_ids
"matplotlib", # needed for plotting of Penner easing functions
"mediapipe", # needed for "mediapipeface" controlnet model
"numpy",
# Minimum numpy version of 1.24.0 is needed to use the 'strict' argument to np.testing.assert_array_equal().
"numpy>=1.24.0",
"npyscreen",
"omegaconf",
"onnx",