Merge branch 'main' into feat/use-custom-vaes

commit 9ceec40b76
Lincoln Stein, 2023-03-24 17:45:02 -04:00 (committed via GitHub)
29 changed files with 219 additions and 95 deletions

@@ -1,6 +0,0 @@
-[run]
-omit='.env/*'
-source='.'
-[report]
-show_missing = true

@@ -6,7 +6,6 @@ on:
      - '!pyproject.toml'
      - '!invokeai/**'
      - 'invokeai/frontend/web/**'
-     - '!invokeai/frontend/web/dist/**'
  merge_group:
  workflow_dispatch:

@@ -7,13 +7,11 @@ on:
      - 'pyproject.toml'
      - 'invokeai/**'
      - '!invokeai/frontend/web/**'
-     - 'invokeai/frontend/web/dist/**'
  pull_request:
    paths:
      - 'pyproject.toml'
      - 'invokeai/**'
      - '!invokeai/frontend/web/**'
-     - 'invokeai/frontend/web/dist/**'
    types:
      - 'ready_for_review'
      - 'opened'

.gitignore

@@ -63,6 +63,7 @@ pip-delete-this-directory.txt
htmlcov/
.tox/
.nox/
+.coveragerc
.coverage
.coverage.*
.cache
@@ -73,6 +74,7 @@ cov.xml
*.py,cover
.hypothesis/
.pytest_cache/
+.pytest.ini
cover/
junit/

@@ -1,5 +0,0 @@
-[pytest]
-DJANGO_SETTINGS_MODULE = webtas.settings
-; python_files = tests.py test_*.py *_tests.py
-addopts = --cov=. --cov-config=.coveragerc --cov-report xml:cov.xml

coverage/.gitignore

@@ -0,0 +1,4 @@
+# Ignore everything in this directory
+*
+# Except this file
+!.gitignore

[Two new binary image files added (470 KiB and 457 KiB): the HTML coverage report screenshots referenced in the documentation below.]

@@ -0,0 +1,83 @@
# Local Development

If you are looking to contribute you will need to have a local development
environment. See the
[Developer Install](../installation/020_INSTALL_MANUAL.md#developer-install) for
full details.

Broadly this involves cloning the repository, installing the prerequisites, and
installing InvokeAI in editable form (a sketch of these steps follows). Once this
is working, choose your area of focus.
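The sketch below assumes you are cloning the upstream repository over HTTPS into
a fresh virtual environment; adjust the URL if you work from a fork.

```zsh
# Clone the repository and enter it
git clone https://github.com/invoke-ai/InvokeAI.git
cd InvokeAI

# Create and activate an isolated environment
python -m venv .venv
source .venv/bin/activate

# Install InvokeAI in editable form so code changes take effect immediately
pip install -e .
```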
## Documentation

We use [mkdocs](https://www.mkdocs.org) for our documentation with the
[material theme](https://squidfunk.github.io/mkdocs-material/). Documentation is
written in markdown files under the `./docs` folder and then built into a static
website for hosting with GitHub Pages at
[invoke-ai.github.io/InvokeAI](https://invoke-ai.github.io/InvokeAI).

To contribute to the documentation you'll need to install the documentation
dependencies. Note the use of `"`, which zsh requires around the extras
specifier.

```zsh
pip install ".[docs]"
```

Now you can serve the documentation locally, with hot-reloading of any changes
you make:

```zsh
mkdocs serve
```

The terminal will print a local address, `http://127.0.0.1:8080`, where you can
view the documentation.
## Backend

The backend is contained within the `./invokeai/backend` folder structure. To
get started, first install the development dependencies.

From the root of the repository run the following command. Again, note the use
of `"`.

```zsh
pip install ".[test]"
```

This is an optional group of packages defined within the `pyproject.toml` and
required for testing the changes you make to the code.

### Running Tests

We use [pytest](https://docs.pytest.org/en/7.2.x/) for our test suite. Tests can
be found under the `./tests` folder and can be run with a single `pytest`
command. Optionally, to review test coverage you can append `--cov`.

```zsh
pytest --cov
```

Test outcomes and coverage will be reported in the terminal. In addition, a more
detailed report is created in both XML and HTML format in the `./coverage`
folder. The HTML report in particular can help identify missing statements that
still need tests; view it by opening `./coverage/html/index.html`.

For example:

```zsh
pytest --cov; open ./coverage/html/index.html
```
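During development you will often want to run only part of the suite; pytest's
standard selection flags work here too. The file name below is only an
illustrative placeholder.

```zsh
# Run a single test module (hypothetical path) and stop at the first failure
pytest tests/test_model_manager.py -x

# Run only tests whose names match a keyword expression
pytest -k "concepts and not slow"
```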
??? info "HTML coverage report output"

    ![html-overview](../assets/contributing/html-overview.png)

    ![html-detail](../assets/contributing/html-detail.png)

## Front End

<!-- #TODO: get input from blessedcoolant here; for the moment the frontend README is inserted via the snippets extension. -->

--8<-- "invokeai/frontend/web/README.md"
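If your focus is the web UI itself, the sources live under
`invokeai/frontend/web`. The commands below are only a rough sketch; they assume
a Yarn-based toolchain with a `dev` script, so treat the frontend README as the
authoritative reference.

```zsh
cd invokeai/frontend/web

# Install the JavaScript dependencies (assumes Yarn is the package manager)
yarn install

# Start the hot-reloading development server (assumed script name)
yarn dev
```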

@@ -154,6 +154,7 @@ class InvokeAIGenerator(metaclass=ABCMeta):
        for i in iteration_count:
            results = generator.generate(prompt,
                                         conditioning=(uc, c, extra_conditioning_info),
+                                        step_callback=step_callback,
                                         sampler=scheduler,
                                         **generator_args,
                                         )

@@ -378,16 +378,26 @@ def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False
        for key in keys:
            if key.startswith("model.diffusion_model"):
                flat_ema_key = "model_ema." + "".join(key.split(".")[1:])
+               flat_ema_key_alt = "model_ema." + "".join(key.split(".")[2:])
+               if flat_ema_key in checkpoint:
                    unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(
                        flat_ema_key
                    )
+               elif flat_ema_key_alt in checkpoint:
+                   unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(
+                       flat_ema_key_alt
+                   )
+               else:
+                   unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(
+                       key
+                   )
    else:
        print(
            " | Extracting only the non-EMA weights (usually better for fine-tuning)"
        )
        for key in keys:
-           if key.startswith(unet_key):
+           if key.startswith("model.diffusion_model") and key in checkpoint:
                unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key)

    new_checkpoint = {}
@@ -1050,6 +1060,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
    vae_path: str = None,
    precision: torch.dtype = torch.float32,
    return_generator_pipeline: bool = False,
+   scan_needed:bool=True,
) -> Union[StableDiffusionPipeline, StableDiffusionGeneratorPipeline]:
    """
    Load a Stable Diffusion pipeline object from a CompVis-style `.ckpt`/`.safetensors` file and (ideally) a `.yaml`
@@ -1086,12 +1097,13 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
        verbosity = dlogging.get_verbosity()
        dlogging.set_verbosity_error()
-       checkpoint = (
-           torch.load(checkpoint_path)
-           if Path(checkpoint_path).suffix == ".ckpt"
-           else load_file(checkpoint_path)
-       )
+       if Path(checkpoint_path).suffix == '.ckpt':
+           if scan_needed:
+               ModelManager.scan_model(checkpoint_path,checkpoint_path)
+           checkpoint = torch.load(checkpoint_path)
+       else:
+           checkpoint = load_file(checkpoint_path)
        cache_dir = global_cache_dir("hub")
        pipeline_class = (
            StableDiffusionGeneratorPipeline

@@ -34,7 +34,7 @@ from picklescan.scanner import scan_file_path
from invokeai.backend.globals import Globals, global_cache_dir
from ..stable_diffusion import StableDiffusionGeneratorPipeline
-from ..util import CUDA_DEVICE, CPU_DEVICE, ask_user, download_with_resume
+from ..util import CUDA_DEVICE, ask_user, download_with_resume

class SDLegacyType(Enum):
    V1 = 1
@@ -282,13 +282,13 @@ class ModelManager(object):
            self.stack.remove(model_name)
        if delete_files:
            if weights:
-               print(f"** deleting file {weights}")
+               print(f"** Deleting file {weights}")
                Path(weights).unlink(missing_ok=True)
            elif path:
-               print(f"** deleting directory {path}")
+               print(f"** Deleting directory {path}")
                rmtree(path, ignore_errors=True)
            elif repo_id:
-               print(f"** deleting the cached model directory for {repo_id}")
+               print(f"** Deleting the cached model directory for {repo_id}")
                self._delete_model_from_cache(repo_id)

    def add_model(
@@ -359,6 +359,7 @@ class ModelManager(object):
            raise NotImplementedError(
                f"Unknown model format {model_name}: {model_format}"
            )
+       self._add_embeddings_to_model(model)

        # usage statistics
        toc = time.time()
@@ -431,10 +432,9 @@ class ModelManager(object):
        # square images???
        width = pipeline.unet.config.sample_size * pipeline.vae_scale_factor
        height = width
        print(f" | Default image dimensions = {width} x {height}")
-
-       self._add_embeddings_to_model(pipeline)
+       self._add_embeddings_to_model(pipeline)

        return pipeline, width, height, model_hash

    def _load_ckpt_model(self, model_name, mconfig):
@@ -521,13 +521,14 @@ class ModelManager(object):
        if self._has_cuda():
            torch.cuda.empty_cache()

+   @classmethod
    def scan_model(self, model_name, checkpoint):
        """
-       v Apply picklescanner to the indicated checkpoint and issue a warning
+       Apply picklescanner to the indicated checkpoint and issue a warning
        and option to exit if an infected file is identified.
        """
        # scan model
-       print(f">> Scanning Model: {model_name}")
+       print(f" | Scanning Model: {model_name}")
        scan_result = scan_file_path(checkpoint)
        if scan_result.infected_files != 0:
            if scan_result.infected_files == 1:
@@ -550,7 +551,7 @@
            print("### Exiting InvokeAI")
            sys.exit()
        else:
-           print(">> Model scanned ok")
+           print(" | Model scanned ok")

    def import_diffuser_model(
        self,
@@ -735,11 +736,12 @@
            return model_path.stem

        # another round of heuristics to guess the correct config file.
-       checkpoint = (
-           torch.load(model_path)
-           if model_path.suffix == ".ckpt"
-           else safetensors.torch.load_file(model_path)
-       )
+       checkpoint = None
+       if model_path.suffix.endswith((".ckpt",".pt")):
+           self.scan_model(model_path,model_path)
+           checkpoint = torch.load(model_path)
+       else:
+           checkpoint = safetensors.torch.load_file(model_path)

        # additional probing needed if no config file provided
        if model_config_file is None:
@@ -792,6 +794,7 @@
            model_description=description,
            original_config_file=model_config_file,
            commit_to_conf=commit_to_conf,
+           scan_needed=False,
        )
        return model_name
@@ -805,6 +808,7 @@
        vae_path:Path=None,
        original_config_file: Path = None,
        commit_to_conf: Path = None,
+       scan_needed: bool=True,
    ) -> str:
        """
        Convert a legacy ckpt weights file to diffuser model and import
@@ -843,6 +847,7 @@
            original_config_file=original_config_file,
            vae=vae_model,
            vae_path=vae_path,
+           scan_needed=scan_needed,
        )
        print(
            f" | Success. Optimized model is now located at {str(diffusers_path)}"
@@ -858,7 +863,7 @@
        self.add_model(model_name, new_config, True)
        if commit_to_conf:
            self.commit(commit_to_conf)
-           print(">> Conversion succeeded")
+           print(" | Conversion succeeded")
        except Exception as e:
            print(f"** Conversion failed: {str(e)}")
            print(
@@ -1187,7 +1192,7 @@
            hashes_to_delete.add(revision.commit_hash)
        strategy = cache_info.delete_revisions(*hashes_to_delete)
        print(
-           f"** deletion of this model is expected to free {strategy.expected_freed_size_str}"
+           f"** Deletion of this model is expected to free {strategy.expected_freed_size_str}"
        )
        strategy.execute()

@@ -6,7 +6,6 @@ The interface is through the Concepts() object.
"""
import os
import re
-import traceback
from typing import Callable
from urllib import error as ul_error
from urllib import request
@@ -15,7 +14,6 @@ from huggingface_hub import (
    HfApi,
    HfFolder,
    ModelFilter,
-   ModelSearchArguments,
    hf_hub_url,
)
@@ -84,7 +82,7 @@
        """
        if not concept_name in self.list_concepts():
            print(
-               f"This concept is not a local embedding trigger, nor is it a HuggingFace concept. Generation will continue without the concept."
+               f"{concept_name} is not a local embedding trigger, nor is it a HuggingFace concept. Generation will continue without the concept."
            )
            return None
        return self.get_concept_file(concept_name.lower(), "learned_embeds.bin")
@@ -236,7 +234,7 @@
        except ul_error.HTTPError as e:
            if e.code == 404:
                print(
-                   f"This concept is not known to the Hugging Face library. Generation will continue without the concept."
+                   f"Concept {concept_name} is not known to the Hugging Face library. Generation will continue without the concept."
                )
            else:
                print(
@@ -246,7 +244,7 @@
            return False
        except ul_error.URLError as e:
            print(
-               f"ERROR: {str(e)}. This may reflect a network issue. Generation will continue without the concept."
+               f"ERROR while downloading {concept_name}: {str(e)}. This may reflect a network issue. Generation will continue without the concept."
            )
            os.rmdir(dest)
            return False

@@ -8,7 +8,6 @@
    "darkTheme": "داكن",
    "lightTheme": "فاتح",
    "greenTheme": "أخضر",
-   "text2img": "نص إلى صورة",
    "img2img": "صورة إلى صورة",
    "unifiedCanvas": "لوحة موحدة",
    "nodes": "عقد",

@@ -7,7 +7,6 @@
    "darkTheme": "Dunkel",
    "lightTheme": "Hell",
    "greenTheme": "Grün",
-   "text2img": "Text zu Bild",
    "img2img": "Bild zu Bild",
    "nodes": "Knoten",
    "langGerman": "Deutsch",

@@ -8,7 +8,6 @@
    "darkTheme": "Oscuro",
    "lightTheme": "Claro",
    "greenTheme": "Verde",
-   "text2img": "Texto a Imagen",
    "img2img": "Imagen a Imagen",
    "unifiedCanvas": "Lienzo Unificado",
    "nodes": "Nodos",
@@ -70,7 +69,11 @@
    "langHebrew": "Hebreo",
    "pinOptionsPanel": "Pin del panel de opciones",
    "loading": "Cargando",
-   "loadingInvokeAI": "Cargando invocar a la IA"
+   "loadingInvokeAI": "Cargando invocar a la IA",
+   "postprocessing": "Tratamiento posterior",
+   "txt2img": "De texto a imagen",
+   "accept": "Aceptar",
+   "cancel": "Cancelar"
  },
  "gallery": {
    "generations": "Generaciones",
@@ -404,7 +407,8 @@
    "none": "ninguno",
    "pickModelType": "Elige el tipo de modelo",
    "v2_768": "v2 (768px)",
-   "addDifference": "Añadir una diferencia"
+   "addDifference": "Añadir una diferencia",
+   "scanForModels": "Buscar modelos"
  },
  "parameters": {
    "images": "Imágenes",
@@ -574,7 +578,7 @@
    "autoSaveToGallery": "Guardar automáticamente en galería",
    "saveBoxRegionOnly": "Guardar solo región dentro de la caja",
    "limitStrokesToBox": "Limitar trazos a la caja",
-   "showCanvasDebugInfo": "Mostrar información de depuración de lienzo",
+   "showCanvasDebugInfo": "Mostrar la información adicional del lienzo",
    "clearCanvasHistory": "Limpiar historial de lienzo",
    "clearHistory": "Limpiar historial",
    "clearCanvasHistoryMessage": "Limpiar el historial de lienzo también restablece completamente el lienzo unificado. Esto incluye todo el historial de deshacer/rehacer, las imágenes en el área de preparación y la capa base del lienzo.",

@@ -8,7 +8,6 @@
    "darkTheme": "Sombre",
    "lightTheme": "Clair",
    "greenTheme": "Vert",
-   "text2img": "Texte en image",
    "img2img": "Image en image",
    "unifiedCanvas": "Canvas unifié",
    "nodes": "Nœuds",
@@ -47,7 +46,19 @@
    "statusLoadingModel": "Chargement du modèle",
    "statusModelChanged": "Modèle changé",
    "discordLabel": "Discord",
-   "githubLabel": "Github"
+   "githubLabel": "Github",
+   "accept": "Accepter",
+   "statusMergingModels": "Mélange des modèles",
+   "loadingInvokeAI": "Chargement de Invoke AI",
+   "cancel": "Annuler",
+   "langEnglish": "Anglais",
+   "statusConvertingModel": "Conversion du modèle",
+   "statusModelConverted": "Modèle converti",
+   "loading": "Chargement",
+   "pinOptionsPanel": "Épingler la page d'options",
+   "statusMergedModels": "Modèles mélangés",
+   "txt2img": "Texte vers image",
+   "postprocessing": "Post-Traitement"
  },
  "gallery": {
    "generations": "Générations",
@@ -518,5 +529,15 @@
    "betaDarkenOutside": "Assombrir à l'extérieur",
    "betaLimitToBox": "Limiter à la boîte",
    "betaPreserveMasked": "Conserver masqué"
+ },
+ "accessibility": {
+   "uploadImage": "Charger une image",
+   "reset": "Réinitialiser",
+   "nextImage": "Image suivante",
+   "previousImage": "Image précédente",
+   "useThisParameter": "Utiliser ce paramètre",
+   "zoomIn": "Zoom avant",
+   "zoomOut": "Zoom arrière",
+   "showOptionsPanel": "Montrer la page d'options"
  }
}

@@ -125,7 +125,6 @@
    "langSimplifiedChinese": "סינית",
    "langUkranian": "אוקראינית",
    "langSpanish": "ספרדית",
-   "text2img": "טקסט לתמונה",
    "img2img": "תמונה לתמונה",
    "unifiedCanvas": "קנבס מאוחד",
    "nodes": "צמתים",

@@ -8,7 +8,6 @@
    "darkTheme": "Scuro",
    "lightTheme": "Chiaro",
    "greenTheme": "Verde",
-   "text2img": "Testo a Immagine",
    "img2img": "Immagine a Immagine",
    "unifiedCanvas": "Tela unificata",
    "nodes": "Nodi",
@@ -70,7 +69,11 @@
    "loading": "Caricamento in corso",
    "oceanTheme": "Oceano",
    "langHebrew": "Ebraico",
-   "loadingInvokeAI": "Caricamento Invoke AI"
+   "loadingInvokeAI": "Caricamento Invoke AI",
+   "postprocessing": "Post Elaborazione",
+   "txt2img": "Testo a Immagine",
+   "accept": "Accetta",
+   "cancel": "Annulla"
  },
  "gallery": {
    "generations": "Generazioni",
@@ -404,7 +407,8 @@
    "v2_768": "v2 (768px)",
    "none": "niente",
    "addDifference": "Aggiungi differenza",
-   "pickModelType": "Scegli il tipo di modello"
+   "pickModelType": "Scegli il tipo di modello",
+   "scanForModels": "Cerca modelli"
  },
  "parameters": {
    "images": "Immagini",
@@ -574,7 +578,7 @@
    "autoSaveToGallery": "Salvataggio automatico nella Galleria",
    "saveBoxRegionOnly": "Salva solo l'area di selezione",
    "limitStrokesToBox": "Limita i tratti all'area di selezione",
-   "showCanvasDebugInfo": "Mostra informazioni di debug della Tela",
+   "showCanvasDebugInfo": "Mostra ulteriori informazioni sulla Tela",
    "clearCanvasHistory": "Cancella cronologia Tela",
    "clearHistory": "Cancella la cronologia",
    "clearCanvasHistoryMessage": "La cancellazione della cronologia della tela lascia intatta la tela corrente, ma cancella in modo irreversibile la cronologia degli annullamenti e dei ripristini.",
@@ -612,7 +616,7 @@
    "copyMetadataJson": "Copia i metadati JSON",
    "exitViewer": "Esci dal visualizzatore",
    "zoomIn": "Zoom avanti",
-   "zoomOut": "Zoom Indietro",
+   "zoomOut": "Zoom indietro",
    "rotateCounterClockwise": "Ruotare in senso antiorario",
    "rotateClockwise": "Ruotare in senso orario",
    "flipHorizontally": "Capovolgi orizzontalmente",

@@ -11,7 +11,6 @@
    "langArabic": "العربية",
    "langEnglish": "English",
    "langDutch": "Nederlands",
-   "text2img": "텍스트->이미지",
    "unifiedCanvas": "통합 캔버스",
    "langFrench": "Français",
    "langGerman": "Deutsch",

@@ -8,7 +8,6 @@
    "darkTheme": "Donker",
    "lightTheme": "Licht",
    "greenTheme": "Groen",
-   "text2img": "Tekst naar afbeelding",
    "img2img": "Afbeelding naar afbeelding",
    "unifiedCanvas": "Centraal canvas",
    "nodes": "Knooppunten",

@@ -8,7 +8,6 @@
    "darkTheme": "Ciemny",
    "lightTheme": "Jasny",
    "greenTheme": "Zielony",
-   "text2img": "Tekst na obraz",
    "img2img": "Obraz na obraz",
    "unifiedCanvas": "Tryb uniwersalny",
    "nodes": "Węzły",

@@ -20,7 +20,6 @@
    "langSpanish": "Espanhol",
    "langRussian": "Русский",
    "langUkranian": "Украї́нська",
-   "text2img": "Texto para Imagem",
    "img2img": "Imagem para Imagem",
    "unifiedCanvas": "Tela Unificada",
    "nodes": "Nós",

@@ -8,7 +8,6 @@
    "darkTheme": "Noite",
    "lightTheme": "Dia",
    "greenTheme": "Verde",
-   "text2img": "Texto Para Imagem",
    "img2img": "Imagem Para Imagem",
    "unifiedCanvas": "Tela Unificada",
    "nodes": "Nódulos",

@@ -8,7 +8,6 @@
    "darkTheme": "Темная",
    "lightTheme": "Светлая",
    "greenTheme": "Зеленая",
-   "text2img": "Изображение из текста (text2img)",
    "img2img": "Изображение в изображение (img2img)",
    "unifiedCanvas": "Универсальный холст",
    "nodes": "Ноды",

@@ -8,7 +8,6 @@
    "darkTheme": "Темна",
    "lightTheme": "Світла",
    "greenTheme": "Зелена",
-   "text2img": "Зображення із тексту (text2img)",
    "img2img": "Зображення із зображення (img2img)",
    "unifiedCanvas": "Універсальне полотно",
    "nodes": "Вузли",

@@ -8,7 +8,6 @@
    "darkTheme": "暗色",
    "lightTheme": "亮色",
    "greenTheme": "绿色",
-   "text2img": "文字到图像",
    "img2img": "图像到图像",
    "unifiedCanvas": "统一画布",
    "nodes": "节点",

@@ -33,7 +33,6 @@
    "langBrPortuguese": "巴西葡萄牙語",
    "langRussian": "俄語",
    "langSpanish": "西班牙語",
-   "text2img": "文字到圖像",
    "unifiedCanvas": "統一畫布"
  }
}

@@ -38,7 +38,7 @@ dependencies = [
  "albumentations",
  "click",
  "clip_anytorch",  # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
- "compel==1.0.1",
+ "compel==1.0.4",
  "datasets",
  "diffusers[torch]~=0.14",
  "dnspython==2.2.1",
@@ -139,8 +139,24 @@ version = { attr = "invokeai.version.__version__" }
"invokeai.configs" = ["*.example", "**/*.yaml", "*.txt"]
"invokeai.frontend.web.dist" = ["**"]

+#=== Begin: PyTest and Coverage
[tool.pytest.ini_options]
-addopts = "-p pytest_cov --junitxml=junit/test-results.xml --cov-report=term:skip-covered --cov=ldm/invoke --cov=backend --cov-branch"
+addopts = "--cov-report term --cov-report html --cov-report xml"
+
+[tool.coverage.run]
+branch = true
+source = ["invokeai"]
+omit = ["*tests*", "*migrations*", ".venv/*", "*.env"]
+
+[tool.coverage.report]
+show_missing = true
+fail_under = 85  # let's set something sensible on Day 1 ...
+
+[tool.coverage.json]
+output = "coverage/coverage.json"
+pretty_print = true
+
+[tool.coverage.html]
+directory = "coverage/html"
+
+[tool.coverage.xml]
+output = "coverage/index.xml"
+#=== End: PyTest and Coverage

[flake8]
max-line-length = 120
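With the coverage settings above, a plain `pytest --cov` run writes the terminal,
HTML, and XML reports without further flags; for example (assuming the `[test]`
extra from the new developer documentation is installed):

```zsh
pytest --cov

# The HTML report lands in coverage/html/ per [tool.coverage.html]
open coverage/html/index.html   # use xdg-open on Linux
```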