Merge remote-tracking branch 'origin' into i18n-build-mode

Mary Hipp 2023-03-27 10:57:41 -04:00
commit dbc0093b31
52 changed files with 594 additions and 287 deletions


@ -1,6 +0,0 @@
[run]
omit='.env/*'
source='.'
[report]
show_missing = true

.github/CODEOWNERS

@ -1,16 +1,16 @@
# continuous integration
/.github/workflows/ @mauwii @lstein
/.github/workflows/ @mauwii @lstein @blessedcoolant
# documentation
/docs/ @lstein @mauwii @tildebyte
/mkdocs.yml @lstein @mauwii
/docs/ @lstein @mauwii @tildebyte @blessedcoolant
/mkdocs.yml @lstein @mauwii @blessedcoolant
# nodes
/invokeai/app/ @Kyle0654 @blessedcoolant
# installation and configuration
/pyproject.toml @mauwii @lstein @blessedcoolant
/docker/ @mauwii @lstein
/docker/ @mauwii @lstein @blessedcoolant
/scripts/ @ebr @lstein
/installer/ @lstein @ebr
/invokeai/assets @lstein @ebr


@ -16,6 +16,9 @@ on:
- 'v*.*.*'
workflow_dispatch:
permissions:
contents: write
jobs:
docker:
if: github.event.pull_request.draft == false


@ -5,6 +5,9 @@ on:
- 'main'
- 'development'
permissions:
contents: write
jobs:
mkdocs-material:
if: github.event.pull_request.draft == false


@ -6,7 +6,6 @@ on:
- '!pyproject.toml'
- '!invokeai/**'
- 'invokeai/frontend/web/**'
- '!invokeai/frontend/web/dist/**'
merge_group:
workflow_dispatch:


@ -7,13 +7,11 @@ on:
- 'pyproject.toml'
- 'invokeai/**'
- '!invokeai/frontend/web/**'
- 'invokeai/frontend/web/dist/**'
pull_request:
paths:
- 'pyproject.toml'
- 'invokeai/**'
- '!invokeai/frontend/web/**'
- 'invokeai/frontend/web/dist/**'
types:
- 'ready_for_review'
- 'opened'

.gitignore

@ -63,6 +63,7 @@ pip-delete-this-directory.txt
htmlcov/
.tox/
.nox/
.coveragerc
.coverage
.coverage.*
.cache
@ -73,6 +74,7 @@ cov.xml
*.py,cover
.hypothesis/
.pytest_cache/
.pytest.ini
cover/
junit/


@ -1,5 +0,0 @@
[pytest]
DJANGO_SETTINGS_MODULE = webtas.settings
; python_files = tests.py test_*.py *_tests.py
addopts = --cov=. --cov-config=.coveragerc --cov-report xml:cov.xml


@ -139,7 +139,7 @@ not supported.
_For Windows/Linux with an NVIDIA GPU:_
```terminal
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
```
_For Linux with an AMD GPU:_

coverage/.gitignore

@ -0,0 +1,4 @@
# Ignore everything in this directory
*
# Except this file
!.gitignore

(Two binary image files added, previews not shown: 470 KiB and 457 KiB.)


@ -0,0 +1,83 @@
# Local Development
If you are looking to contribute, you will need a local development
environment. See the
[Developer Install](../installation/020_INSTALL_MANUAL.md#developer-install) for
full details.
Broadly, this involves cloning the repository, installing the prerequisites, and
installing InvokeAI itself in editable form. Assuming this is working, choose
your area of focus.
## Documentation
We use [mkdocs](https://www.mkdocs.org) for our documentation with the
[material theme](https://squidfunk.github.io/mkdocs-material/). Documentation is
written in markdown files under the `./docs` folder and then built into a static
website for hosting with GitHub Pages at
[invoke-ai.github.io/InvokeAI](https://invoke-ai.github.io/InvokeAI).
To contribute to the documentation, you'll need to install the dependencies. Note
the use of quotes (`"`).
```zsh
pip install ".[docs]"
```
Now run the documentation locally, with hot-reloading of any changes you make:
```zsh
mkdocs serve
```
You can then browse to `http://127.0.0.1:8080` to view the documentation.
## Backend
The backend is contained within the `./invokeai/backend` folder structure. To
get started, however, please install the development dependencies.
From the root of the repository, run the following command. Note the use of quotes (`"`).
```zsh
pip install ".[test]"
```
This is an optional group of packages defined within `pyproject.toml` and is
required for testing the changes you make to the code.
### Running Tests
We use [pytest](https://docs.pytest.org/en/7.2.x/) for our test suite. Tests can
be found under the `./tests` folder and can be run with a single `pytest`
command. Optionally, to review test coverage you can append `--cov`.
```zsh
pytest --cov
```
Test outcomes and coverage will be reported in the terminal. In addition, a more
detailed report is created in both XML and HTML format in the `./coverage`
folder. The HTML report in particular can help identify statements that still
need tests; view it by opening `./coverage/html/index.html`.
For example:
```zsh
pytest --cov; open ./coverage/html/index.html
```
??? info "HTML coverage report output"
![html-overview](../assets/contributing/html-overview.png)
![html-detail](../assets/contributing/html-detail.png)
## Front End
<!--#TODO: get input from blessedcoolant here, for the moment inserted the frontend README via snippets extension.-->
--8<-- "invokeai/frontend/web/README.md"


@ -168,11 +168,15 @@ used by Stable Diffusion 1.4 and 1.5.
After installation, your `models.yaml` should contain an entry that looks like
this one:
inpainting-1.5: weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
description: SD inpainting v1.5 config:
configs/stable-diffusion/v1-inpainting-inference.yaml vae:
models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt width: 512
height: 512
```yml
inpainting-1.5:
weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
description: SD inpainting v1.5
config: configs/stable-diffusion/v1-inpainting-inference.yaml
vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
width: 512
height: 512
```
As shown in the example, you may include a VAE fine-tuning weights file as well.
This is strongly recommended.


@ -24,9 +24,9 @@ if [ "$(uname -s)" == "Darwin" ]; then
export PYTORCH_ENABLE_MPS_FALLBACK=1
fi
while true
do
if [ "$0" != "bash" ]; then
while true
do
echo "Do you want to generate images using the"
echo "1. command-line interface"
echo "2. browser-based UI"
@ -67,29 +67,29 @@ if [ "$0" != "bash" ]; then
;;
7)
invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
;;
8)
echo "Developer Console:"
;;
8)
echo "Developer Console:"
file_name=$(basename "${BASH_SOURCE[0]}")
bash --init-file "$file_name"
;;
9)
echo "Update:"
echo "Update:"
invokeai-update
;;
10)
invokeai --help
;;
[qQ])
[qQ])
exit 0
;;
*)
echo "Invalid selection"
exit;;
esac
done
else # in developer console
python --version
echo "Press ^D to exit"
export PS1="(InvokeAI) \u@\h \w> "
fi
done


@ -270,3 +270,18 @@ async def invoke_session(
ApiDependencies.invoker.invoke(session, invoke_all=all)
return Response(status_code=202)
@session_router.delete(
"/{session_id}/invoke",
operation_id="cancel_session_invoke",
responses={
202: {"description": "The invocation is canceled"}
},
)
async def cancel_session_invoke(
session_id: str = Path(description="The id of the session to cancel"),
) -> None:
"""Invokes a session"""
ApiDependencies.invoker.cancel(session_id)
return Response(status_code=202)
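
For reference, a minimal sketch of how a client might exercise the new cancellation endpoint above. The host, port, and `/api/v1/sessions` route prefix are assumptions not shown in this diff; only the `DELETE .../{session_id}/invoke` path, the `cancel_session_invoke` operation, and the 202 response come from the change itself.

```python
# Hedged sketch: cancel a running session via the endpoint added above.
# The base URL and route prefix are assumptions; adjust for your deployment.
import requests

session_id = "some-session-id"  # hypothetical session id
resp = requests.delete(
    f"http://127.0.0.1:9090/api/v1/sessions/{session_id}/invoke"
)
assert resp.status_code == 202  # "The invocation is canceled"
```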


@ -1,22 +1,19 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from datetime import datetime, timezone
from typing import Any, Literal, Optional, Union
from functools import partial
from typing import Literal, Optional, Union
import numpy as np
from torch import Tensor
from PIL import Image
from pydantic import Field
from skimage.exposure.histogram_matching import match_histograms
from ..services.image_storage import ImageType
from ..services.invocation_services import InvocationServices
from .baseinvocation import BaseInvocation, InvocationContext
from .image import ImageField, ImageOutput
from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator, Generator
from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator
from ...backend.stable_diffusion import PipelineIntermediateState
from ...backend.util.util import image_to_dataURL
from ..util.util import diffusers_step_callback_adapter, CanceledException
SAMPLER_NAME_VALUES = Literal[
tuple(InvokeAIGenerator.schedulers())
@ -45,32 +42,26 @@ class TextToImageInvocation(BaseInvocation):
# TODO: pass this an emitter method or something? or a session for dispatching?
def dispatch_progress(
self, context: InvocationContext, sample: Tensor, step: int
) -> None:
# TODO: only output a preview image when requested
image = Generator.sample_to_lowres_estimated_image(sample)
self, context: InvocationContext, intermediate_state: PipelineIntermediateState
) -> None:
if (context.services.queue.is_canceled(context.graph_execution_state_id)):
raise CanceledException
(width, height) = image.size
width *= 8
height *= 8
dataURL = image_to_dataURL(image, image_format="JPEG")
context.services.events.emit_generator_progress(
context.graph_execution_state_id,
self.id,
{
"width": width,
"height": height,
"dataURL": dataURL
},
step,
self.steps,
)
step = intermediate_state.step
if intermediate_state.predicted_original is not None:
# Some schedulers report not only the noisy latents at the current timestep,
# but also their estimate so far of what the de-noised latents will be.
sample = intermediate_state.predicted_original
else:
sample = intermediate_state.latents
diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
def invoke(self, context: InvocationContext) -> ImageOutput:
def step_callback(state: PipelineIntermediateState):
self.dispatch_progress(context, state.latents, state.step)
# def step_callback(state: PipelineIntermediateState):
# if (context.services.queue.is_canceled(context.graph_execution_state_id)):
# raise CanceledException
# self.dispatch_progress(context, state.latents, state.step)
# Handle invalid model parameter
# TODO: figure out if this can be done via a validator that uses the model_cache
@ -79,7 +70,7 @@ class TextToImageInvocation(BaseInvocation):
model= context.services.model_manager.get_model()
outputs = Txt2Img(model).generate(
prompt=self.prompt,
step_callback=step_callback,
step_callback=partial(self.dispatch_progress, context),
**self.dict(
exclude={"prompt"}
), # Shorthand for passing all of the parameters above manually
@ -116,6 +107,22 @@ class ImageToImageInvocation(TextToImageInvocation):
description="Whether or not the result should be fit to the aspect ratio of the input image",
)
def dispatch_progress(
self, context: InvocationContext, intermediate_state: PipelineIntermediateState
) -> None:
if (context.services.queue.is_canceled(context.graph_execution_state_id)):
raise CanceledException
step = intermediate_state.step
if intermediate_state.predicted_original is not None:
# Some schedulers report not only the noisy latents at the current timestep,
# but also their estimate so far of what the de-noised latents will be.
sample = intermediate_state.predicted_original
else:
sample = intermediate_state.latents
diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = (
None
@ -126,24 +133,23 @@ class ImageToImageInvocation(TextToImageInvocation):
)
mask = None
def step_callback(sample, step=0):
self.dispatch_progress(context, sample, step)
# Handle invalid model parameter
# TODO: figure out if this can be done via a validator that uses the model_cache
# TODO: How to get the default model name now?
model = context.services.model_manager.get_model()
generator_output = next(
Img2Img(model).generate(
outputs = Img2Img(model).generate(
prompt=self.prompt,
init_image=image,
init_mask=mask,
step_callback=step_callback,
step_callback=partial(self.dispatch_progress, context),
**self.dict(
exclude={"prompt", "image", "mask"}
), # Shorthand for passing all of the parameters above manually
)
)
# Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
# each time it is called. We only need the first one.
generator_output = next(outputs)
result_image = generator_output.image
@ -173,6 +179,22 @@ class InpaintInvocation(ImageToImageInvocation):
description="The amount by which to replace masked areas with latent noise",
)
def dispatch_progress(
self, context: InvocationContext, intermediate_state: PipelineIntermediateState
) -> None:
if (context.services.queue.is_canceled(context.graph_execution_state_id)):
raise CanceledException
step = intermediate_state.step
if intermediate_state.predicted_original is not None:
# Some schedulers report not only the noisy latents at the current timestep,
# but also their estimate so far of what the de-noised latents will be.
sample = intermediate_state.predicted_original
else:
sample = intermediate_state.latents
diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = (
None
@ -187,24 +209,23 @@ class InpaintInvocation(ImageToImageInvocation):
else context.services.images.get(self.mask.image_type, self.mask.image_name)
)
def step_callback(sample, step=0):
self.dispatch_progress(context, sample, step)
# Handle invalid model parameter
# TODO: figure out if this can be done via a validator that uses the model_cache
# TODO: How to get the default model name now?
manager = context.services.model_manager.get_model()
generator_output = next(
Inpaint(model).generate(
model = context.services.model_manager.get_model()
outputs = Inpaint(model).generate(
prompt=self.prompt,
init_image=image,
mask_image=mask,
step_callback=step_callback,
init_img=image,
init_mask=mask,
step_callback=partial(self.dispatch_progress, context),
**self.dict(
exclude={"prompt", "image", "mask"}
), # Shorthand for passing all of the parameters above manually
)
)
# Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
# each time it is called. We only need the first one.
generator_output = next(outputs)
result_image = generator_output.image


@ -28,12 +28,28 @@ class ImageOutput(BaseInvocationOutput):
image: ImageField = Field(default=None, description="The output image")
#fmt: on
class Config:
schema_extra = {
'required': [
'type',
'image',
]
}
class MaskOutput(BaseInvocationOutput):
"""Base class for invocations that output a mask"""
#fmt: off
type: Literal["mask"] = "mask"
mask: ImageField = Field(default=None, description="The output mask")
#fomt: on
#fmt: on
class Config:
schema_extra = {
'required': [
'type',
'mask',
]
}
# TODO: this isn't really necessary anymore
class LoadImageInvocation(BaseInvocation):


@ -12,3 +12,11 @@ class PromptOutput(BaseInvocationOutput):
prompt: str = Field(default=None, description="The output prompt")
#fmt: on
class Config:
schema_extra = {
'required': [
'type',
'prompt',
]
}


@ -127,6 +127,13 @@ class NodeAlreadyExecutedError(Exception):
class GraphInvocationOutput(BaseInvocationOutput):
type: Literal["graph_output"] = "graph_output"
class Config:
schema_extra = {
'required': [
'type',
'image',
]
}
# TODO: Fill this out and move to invocations
class GraphInvocation(BaseInvocation):
@ -147,6 +154,13 @@ class IterateInvocationOutput(BaseInvocationOutput):
item: Any = Field(description="The item being iterated over")
class Config:
schema_extra = {
'required': [
'type',
'item',
]
}
# TODO: Fill this out and move to invocations
class IterateInvocation(BaseInvocation):
@ -169,6 +183,13 @@ class CollectInvocationOutput(BaseInvocationOutput):
collection: list[Any] = Field(description="The collection of input items")
class Config:
schema_extra = {
'required': [
'type',
'collection',
]
}
class CollectInvocation(BaseInvocation):
"""Collects values into a collection"""


@ -2,6 +2,7 @@
from abc import ABC, abstractmethod
from queue import Queue
import time
# TODO: make this serializable
@ -10,6 +11,7 @@ class InvocationQueueItem:
graph_execution_state_id: str
invocation_id: str
invoke_all: bool
timestamp: float
def __init__(
self,
@ -22,6 +24,7 @@ class InvocationQueueItem:
self.graph_execution_state_id = graph_execution_state_id
self.invocation_id = invocation_id
self.invoke_all = invoke_all
self.timestamp = time.time()
class InvocationQueueABC(ABC):
@ -35,15 +38,44 @@ class InvocationQueueABC(ABC):
def put(self, item: InvocationQueueItem | None) -> None:
pass
@abstractmethod
def cancel(self, graph_execution_state_id: str) -> None:
pass
@abstractmethod
def is_canceled(self, graph_execution_state_id: str) -> bool:
pass
class MemoryInvocationQueue(InvocationQueueABC):
__queue: Queue
__cancellations: dict[str, float]
def __init__(self):
self.__queue = Queue()
self.__cancellations = dict()
def get(self) -> InvocationQueueItem:
return self.__queue.get()
item = self.__queue.get()
while isinstance(item, InvocationQueueItem) \
and item.graph_execution_state_id in self.__cancellations \
and self.__cancellations[item.graph_execution_state_id] > item.timestamp:
item = self.__queue.get()
# Clear old items
for graph_execution_state_id in list(self.__cancellations.keys()):
if self.__cancellations[graph_execution_state_id] < item.timestamp:
del self.__cancellations[graph_execution_state_id]
return item
def put(self, item: InvocationQueueItem | None) -> None:
self.__queue.put(item)
def cancel(self, graph_execution_state_id: str) -> None:
if graph_execution_state_id not in self.__cancellations:
self.__cancellations[graph_execution_state_id] = time.time()
def is_canceled(self, graph_execution_state_id: str) -> bool:
return graph_execution_state_id in self.__cancellations
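
As a brief illustration of the queue-level cancellation added above: `cancel()` records a timestamp for a graph execution state, `is_canceled()` reports it, and `get()` skips any item for that state that was enqueued before the cancellation. A minimal sketch, assuming the import path follows the `invokeai/app/services` layout seen elsewhere in this commit:

```python
# Hedged sketch of the new cancellation API; the import path is an assumption
# based on the invokeai/app/services layout shown in this commit.
from invokeai.app.services.invocation_queue import MemoryInvocationQueue

queue = MemoryInvocationQueue()

# Record a cancellation for a graph execution state. Any InvocationQueueItem
# with this id that was enqueued before this call will be skipped by get().
queue.cancel("some-graph-execution-state-id")
assert queue.is_canceled("some-graph-execution-state-id")
```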


@ -50,6 +50,10 @@ class Invoker:
new_state = GraphExecutionState(graph=Graph() if graph is None else graph)
self.services.graph_execution_manager.set(new_state)
return new_state
def cancel(self, graph_execution_state_id: str) -> None:
"""Cancels the given execution state"""
self.services.queue.cancel(graph_execution_state_id)
def __start_service(self, service) -> None:
# Call start() method on any services that have it


@ -4,7 +4,7 @@ from threading import Event, Thread
from ..invocations.baseinvocation import InvocationContext
from .invocation_queue import InvocationQueueItem
from .invoker import InvocationProcessorABC, Invoker
from ..util.util import CanceledException
class DefaultInvocationProcessor(InvocationProcessorABC):
__invoker_thread: Thread
@ -58,6 +58,12 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
)
)
# Check queue to see if this is canceled, and skip if so
if self.__invoker.services.queue.is_canceled(
graph_execution_state.id
):
continue
# Save outputs and history
graph_execution_state.complete(invocation.id, outputs)
@ -76,6 +82,9 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
except KeyboardInterrupt:
pass
except CanceledException:
pass
except Exception as e:
error = traceback.format_exc()
@ -95,6 +104,12 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
)
pass
# Check queue to see if this is canceled, and skip if so
if self.__invoker.services.queue.is_canceled(
graph_execution_state.id
):
continue
# Queue any further commands if invoking all
is_complete = graph_execution_state.is_complete()

invokeai/app/util/util.py

@ -0,0 +1,42 @@
import torch
from PIL import Image
from ..invocations.baseinvocation import InvocationContext
from ...backend.util.util import image_to_dataURL
from ...backend.generator.base import Generator
from ...backend.stable_diffusion import PipelineIntermediateState
class CanceledException(Exception):
pass
def fast_latents_step_callback(sample: torch.Tensor, step: int, steps: int, id: str, context: InvocationContext, ):
# TODO: only output a preview image when requested
image = Generator.sample_to_lowres_estimated_image(sample)
(width, height) = image.size
width *= 8
height *= 8
dataURL = image_to_dataURL(image, image_format="JPEG")
context.services.events.emit_generator_progress(
context.graph_execution_state_id,
id,
{
"width": width,
"height": height,
"dataURL": dataURL
},
step,
steps,
)
def diffusers_step_callback_adapter(*cb_args, **kwargs):
"""
txt2img gives us a Tensor in the step_callback, while img2img gives us a PipelineIntermediateState.
This adapter grabs the needed data and passes it along to the callback function.
"""
if isinstance(cb_args[0], PipelineIntermediateState):
progress_state: PipelineIntermediateState = cb_args[0]
return fast_latents_step_callback(progress_state.latents, progress_state.step, **kwargs)
else:
return fast_latents_step_callback(*cb_args, **kwargs)


@ -21,7 +21,7 @@ from PIL import Image, ImageChops, ImageFilter
from accelerate.utils import set_seed
from diffusers import DiffusionPipeline
from tqdm import trange
from typing import List, Iterator, Type
from typing import Callable, List, Iterator, Optional, Type
from dataclasses import dataclass, field
from diffusers.schedulers import SchedulerMixin as Scheduler
@ -35,23 +35,23 @@ downsampling = 8
@dataclass
class InvokeAIGeneratorBasicParams:
seed: int=None
seed: Optional[int]=None
width: int=512
height: int=512
cfg_scale: int=7.5
cfg_scale: float=7.5
steps: int=20
ddim_eta: float=0.0
scheduler: int='ddim'
scheduler: str='ddim'
precision: str='float16'
perlin: float=0.0
threshold: int=0.0
threshold: float=0.0
seamless: bool=False
seamless_axes: List[str]=field(default_factory=lambda: ['x', 'y'])
h_symmetry_time_pct: float=None
v_symmetry_time_pct: float=None
h_symmetry_time_pct: Optional[float]=None
v_symmetry_time_pct: Optional[float]=None
variation_amount: float = 0.0
with_variations: list=field(default_factory=list)
safety_checker: SafetyChecker=None
safety_checker: Optional[SafetyChecker]=None
@dataclass
class InvokeAIGeneratorOutput:
@ -61,10 +61,10 @@ class InvokeAIGeneratorOutput:
and the model hash, as well as all the generate() parameters that went into
generating the image (in .params, also available as attributes)
'''
image: Image
image: Image.Image
seed: int
model_hash: str
attention_maps_images: List[Image]
attention_maps_images: List[Image.Image]
params: Namespace
# we are interposing a wrapper around the original Generator classes so that
@ -92,8 +92,8 @@ class InvokeAIGenerator(metaclass=ABCMeta):
def generate(self,
prompt: str='',
callback: callable=None,
step_callback: callable=None,
callback: Optional[Callable]=None,
step_callback: Optional[Callable]=None,
iterations: int=1,
**keyword_args,
)->Iterator[InvokeAIGeneratorOutput]:
@ -154,6 +154,7 @@ class InvokeAIGenerator(metaclass=ABCMeta):
for i in iteration_count:
results = generator.generate(prompt,
conditioning=(uc, c, extra_conditioning_info),
step_callback=step_callback,
sampler=scheduler,
**generator_args,
)
@ -205,10 +206,10 @@ class Txt2Img(InvokeAIGenerator):
# ------------------------------------
class Img2Img(InvokeAIGenerator):
def generate(self,
init_image: Image | torch.FloatTensor,
init_image: Image.Image | torch.FloatTensor,
strength: float=0.75,
**keyword_args
)->List[InvokeAIGeneratorOutput]:
)->Iterator[InvokeAIGeneratorOutput]:
return super().generate(init_image=init_image,
strength=strength,
**keyword_args
@ -222,7 +223,7 @@ class Img2Img(InvokeAIGenerator):
# Takes all the arguments of Img2Img and adds the mask image and the seam/infill stuff
class Inpaint(Img2Img):
def generate(self,
mask_image: Image | torch.FloatTensor,
mask_image: Image.Image | torch.FloatTensor,
# Seam settings - when 0, doesn't fill seam
seam_size: int = 0,
seam_blur: int = 0,
@ -235,7 +236,7 @@ class Inpaint(Img2Img):
inpaint_height=None,
inpaint_fill: tuple(int) = (0x7F, 0x7F, 0x7F, 0xFF),
**keyword_args
)->List[InvokeAIGeneratorOutput]:
)->Iterator[InvokeAIGeneratorOutput]:
return super().generate(
mask_image=mask_image,
seam_size=seam_size,
@ -262,7 +263,7 @@ class Embiggen(Txt2Img):
embiggen: list=None,
embiggen_tiles: list = None,
strength: float=0.75,
**kwargs)->List[InvokeAIGeneratorOutput]:
**kwargs)->Iterator[InvokeAIGeneratorOutput]:
return super().generate(embiggen=embiggen,
embiggen_tiles=embiggen_tiles,
strength=strength,


@ -372,22 +372,32 @@ def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False
unet_key = "model.diffusion_model."
# at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA
if sum(k.startswith("model_ema") for k in keys) > 100:
print(f" | Checkpoint {path} has both EMA and non-EMA weights.")
print(f" | Checkpoint {path} has both EMA and non-EMA weights.")
if extract_ema:
print(" | Extracting EMA weights (usually better for inference)")
print(" | Extracting EMA weights (usually better for inference)")
for key in keys:
if key.startswith("model.diffusion_model"):
flat_ema_key = "model_ema." + "".join(key.split(".")[1:])
unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(
flat_ema_key
)
flat_ema_key_alt = "model_ema." + "".join(key.split(".")[2:])
if flat_ema_key in checkpoint:
unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(
flat_ema_key
)
elif flat_ema_key_alt in checkpoint:
unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(
flat_ema_key_alt
)
else:
unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(
key
)
else:
print(
" | Extracting only the non-EMA weights (usually better for fine-tuning)"
" | Extracting only the non-EMA weights (usually better for fine-tuning)"
)
for key in keys:
if key.startswith(unet_key):
if key.startswith("model.diffusion_model") and key in checkpoint:
unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key)
new_checkpoint = {}
@ -1026,6 +1036,15 @@ def convert_open_clip_checkpoint(checkpoint):
return text_model
def replace_checkpoint_vae(checkpoint, vae_path:str):
if vae_path.endswith(".safetensors"):
vae_ckpt = load_file(vae_path)
else:
vae_ckpt = torch.load(vae_path, map_location="cpu")
state_dict = vae_ckpt['state_dict'] if "state_dict" in vae_ckpt else vae_ckpt
for vae_key in state_dict:
new_key = f'first_stage_model.{vae_key}'
checkpoint[new_key] = state_dict[vae_key]
def load_pipeline_from_original_stable_diffusion_ckpt(
checkpoint_path: str,
@ -1038,8 +1057,10 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
extract_ema: bool = True,
upcast_attn: bool = False,
vae: AutoencoderKL = None,
vae_path: str = None,
precision: torch.dtype = torch.float32,
return_generator_pipeline: bool = False,
scan_needed:bool=True,
) -> Union[StableDiffusionPipeline, StableDiffusionGeneratorPipeline]:
"""
Load a Stable Diffusion pipeline object from a CompVis-style `.ckpt`/`.safetensors` file and (ideally) a `.yaml`
@ -1067,6 +1088,8 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
:param precision: precision to use - torch.float16, torch.float32 or torch.autocast
:param upcast_attention: Whether the attention computation should always be upcasted. This is necessary when
running stable diffusion 2.1.
:param vae: A diffusers VAE to load into the pipeline.
:param vae_path: Path to a checkpoint VAE that will be converted into diffusers and loaded into the pipeline.
"""
with warnings.catch_warnings():
@ -1074,12 +1097,13 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
verbosity = dlogging.get_verbosity()
dlogging.set_verbosity_error()
checkpoint = (
torch.load(checkpoint_path)
if Path(checkpoint_path).suffix == ".ckpt"
else load_file(checkpoint_path)
)
if Path(checkpoint_path).suffix == '.ckpt':
if scan_needed:
ModelManager.scan_model(checkpoint_path,checkpoint_path)
checkpoint = torch.load(checkpoint_path)
else:
checkpoint = load_file(checkpoint_path)
cache_dir = global_cache_dir("hub")
pipeline_class = (
StableDiffusionGeneratorPipeline
@ -1091,7 +1115,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
if "global_step" in checkpoint:
global_step = checkpoint["global_step"]
else:
print(" | global_step key not found in model")
print(" | global_step key not found in model")
global_step = None
# sometimes there is a state_dict key and sometimes not
@ -1202,9 +1226,19 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
unet.load_state_dict(converted_unet_checkpoint)
# Convert the VAE model, or use the one passed
if not vae:
print(" | Using checkpoint model's original VAE")
# If a replacement VAE path was specified, we'll incorporate that into
# the checkpoint model and then convert it
if vae_path:
print(f" | Converting VAE {vae_path}")
replace_checkpoint_vae(checkpoint,vae_path)
# otherwise we use the original VAE, provided that
# an externally loaded diffusers VAE was not passed
elif not vae:
print(" | Using checkpoint model's original VAE")
if vae:
print(" | Using replacement diffusers VAE")
else: # convert the original or replacement VAE
vae_config = create_vae_diffusers_config(
original_config, image_size=image_size
)
@ -1214,8 +1248,6 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
vae = AutoencoderKL(**vae_config)
vae.load_state_dict(converted_vae_checkpoint)
else:
print(" | Using external VAE specified in config")
# Convert the text model.
model_type = pipeline_type


@ -34,7 +34,7 @@ from picklescan.scanner import scan_file_path
from invokeai.backend.globals import Globals, global_cache_dir
from ..stable_diffusion import StableDiffusionGeneratorPipeline
from ..util import CUDA_DEVICE, CPU_DEVICE, ask_user, download_with_resume
from ..util import CUDA_DEVICE, ask_user, download_with_resume
class SDLegacyType(Enum):
V1 = 1
@ -45,9 +45,6 @@ class SDLegacyType(Enum):
UNKNOWN = 99
DEFAULT_MAX_MODELS = 2
VAE_TO_REPO_ID = { # hack, see note in convert_and_import()
"vae-ft-mse-840000-ema-pruned": "stabilityai/sd-vae-ft-mse",
}
class ModelManager(object):
'''
@ -285,13 +282,13 @@ class ModelManager(object):
self.stack.remove(model_name)
if delete_files:
if weights:
print(f"** deleting file {weights}")
print(f"** Deleting file {weights}")
Path(weights).unlink(missing_ok=True)
elif path:
print(f"** deleting directory {path}")
print(f"** Deleting directory {path}")
rmtree(path, ignore_errors=True)
elif repo_id:
print(f"** deleting the cached model directory for {repo_id}")
print(f"** Deleting the cached model directory for {repo_id}")
self._delete_model_from_cache(repo_id)
def add_model(
@ -362,6 +359,7 @@ class ModelManager(object):
raise NotImplementedError(
f"Unknown model format {model_name}: {model_format}"
)
self._add_embeddings_to_model(model)
# usage statistics
toc = time.time()
@ -381,9 +379,9 @@ class ModelManager(object):
print(f">> Loading diffusers model from {name_or_path}")
if using_fp16:
print(" | Using faster float16 precision")
print(" | Using faster float16 precision")
else:
print(" | Using more accurate float32 precision")
print(" | Using more accurate float32 precision")
# TODO: scan weights maybe?
pipeline_args: dict[str, Any] = dict(
@ -434,10 +432,8 @@ class ModelManager(object):
# square images???
width = pipeline.unet.config.sample_size * pipeline.vae_scale_factor
height = width
print(f" | Default image dimensions = {width} x {height}")
self._add_embeddings_to_model(pipeline)
print(f" | Default image dimensions = {width} x {height}")
return pipeline, width, height, model_hash
def _load_ckpt_model(self, model_name, mconfig):
@ -457,15 +453,21 @@ class ModelManager(object):
from . import load_pipeline_from_original_stable_diffusion_ckpt
self.offload_model(self.current_model)
if vae_config := self._choose_diffusers_vae(model_name):
vae = self._load_vae(vae_config)
try:
if self.list_models()[self.current_model]['status'] == 'active':
self.offload_model(self.current_model)
except Exception as e:
pass
vae_path = None
if vae:
vae_path = vae if os.path.isabs(vae) else os.path.normpath(os.path.join(Globals.root, vae))
if self._has_cuda():
torch.cuda.empty_cache()
pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
checkpoint_path=weights,
original_config_file=config,
vae=vae,
vae_path=vae_path,
return_generator_pipeline=True,
precision=torch.float16 if self.precision == "float16" else torch.float32,
)
@ -473,7 +475,6 @@ class ModelManager(object):
pipeline.enable_offload_submodels(self.device)
else:
pipeline.to(self.device)
return (
pipeline,
width,
@ -512,18 +513,20 @@ class ModelManager(object):
print(f">> Offloading {model_name} to CPU")
model = self.models[model_name]["model"]
model.offload_all()
self.current_model = None
gc.collect()
if self._has_cuda():
torch.cuda.empty_cache()
@classmethod
def scan_model(self, model_name, checkpoint):
"""
Apply picklescanner to the indicated checkpoint and issue a warning
and option to exit if an infected file is identified.
"""
# scan model
print(f">> Scanning Model: {model_name}")
print(f" | Scanning Model: {model_name}")
scan_result = scan_file_path(checkpoint)
if scan_result.infected_files != 0:
if scan_result.infected_files == 1:
@ -546,7 +549,7 @@ class ModelManager(object):
print("### Exiting InvokeAI")
sys.exit()
else:
print(">> Model scanned ok")
print(" | Model scanned ok")
def import_diffuser_model(
self,
@ -665,7 +668,7 @@ class ModelManager(object):
print(f">> Probing {thing} for import")
if thing.startswith(("http:", "https:", "ftp:")):
print(f" | {thing} appears to be a URL")
print(f" | {thing} appears to be a URL")
model_path = self._resolve_path(
thing, "models/ldm/stable-diffusion-v1"
) # _resolve_path does a download if needed
@ -673,15 +676,15 @@ class ModelManager(object):
elif Path(thing).is_file() and thing.endswith((".ckpt", ".safetensors")):
if Path(thing).stem in ["model", "diffusion_pytorch_model"]:
print(
f" | {Path(thing).name} appears to be part of a diffusers model. Skipping import"
f" | {Path(thing).name} appears to be part of a diffusers model. Skipping import"
)
return
else:
print(f" | {thing} appears to be a checkpoint file on disk")
print(f" | {thing} appears to be a checkpoint file on disk")
model_path = self._resolve_path(thing, "models/ldm/stable-diffusion-v1")
elif Path(thing).is_dir() and Path(thing, "model_index.json").exists():
print(f" | {thing} appears to be a diffusers file on disk")
print(f" | {thing} appears to be a diffusers file on disk")
model_name = self.import_diffuser_model(
thing,
vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
@ -692,13 +695,13 @@ class ModelManager(object):
elif Path(thing).is_dir():
if (Path(thing) / "model_index.json").exists():
print(f" | {thing} appears to be a diffusers model.")
print(f" | {thing} appears to be a diffusers model.")
model_name = self.import_diffuser_model(
thing, commit_to_conf=commit_to_conf
)
else:
print(
f" |{thing} appears to be a directory. Will scan for models to import"
f" |{thing} appears to be a directory. Will scan for models to import"
)
for m in list(Path(thing).rglob("*.ckpt")) + list(
Path(thing).rglob("*.safetensors")
@ -710,7 +713,7 @@ class ModelManager(object):
return model_name
elif re.match(r"^[\w.+-]+/[\w.+-]+$", thing):
print(f" | {thing} appears to be a HuggingFace diffusers repo_id")
print(f" | {thing} appears to be a HuggingFace diffusers repo_id")
model_name = self.import_diffuser_model(
thing, commit_to_conf=commit_to_conf
)
@ -727,32 +730,33 @@ class ModelManager(object):
return
if model_path.stem in self.config: # already imported
print(" | Already imported. Skipping")
print(" | Already imported. Skipping")
return model_path.stem
# another round of heuristics to guess the correct config file.
checkpoint = (
torch.load(model_path)
if model_path.suffix == ".ckpt"
else safetensors.torch.load_file(model_path)
)
checkpoint = None
if model_path.suffix.endswith((".ckpt",".pt")):
self.scan_model(model_path,model_path)
checkpoint = torch.load(model_path)
else:
checkpoint = safetensors.torch.load_file(model_path)
# additional probing needed if no config file provided
if model_config_file is None:
model_type = self.probe_model_type(checkpoint)
if model_type == SDLegacyType.V1:
print(" | SD-v1 model detected")
print(" | SD-v1 model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v1-inference.yaml"
)
elif model_type == SDLegacyType.V1_INPAINT:
print(" | SD-v1 inpainting model detected")
print(" | SD-v1 inpainting model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
)
elif model_type == SDLegacyType.V2_v:
print(
" | SD-v2-v model detected; model will be converted to diffusers format"
" | SD-v2-v model detected; model will be converted to diffusers format"
)
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
@ -760,7 +764,7 @@ class ModelManager(object):
convert = True
elif model_type == SDLegacyType.V2_e:
print(
" | SD-v2-e model detected; model will be converted to diffusers format"
" | SD-v2-e model detected; model will be converted to diffusers format"
)
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v2-inference.yaml"
@ -788,18 +792,21 @@ class ModelManager(object):
model_description=description,
original_config_file=model_config_file,
commit_to_conf=commit_to_conf,
scan_needed=False,
)
return model_name
def convert_and_import(
self,
ckpt_path: Path,
diffusers_path: Path,
model_name=None,
model_description=None,
vae=None,
original_config_file: Path = None,
commit_to_conf: Path = None,
self,
ckpt_path: Path,
diffusers_path: Path,
model_name=None,
model_description=None,
vae:dict=None,
vae_path:Path=None,
original_config_file: Path = None,
commit_to_conf: Path = None,
scan_needed: bool=True,
) -> str:
"""
Convert a legacy ckpt weights file to diffuser model and import
@ -827,18 +834,23 @@ class ModelManager(object):
try:
# By passing the specified VAE to the conversion function, the autoencoder
# will be built into the model rather than tacked on afterward via the config file
vae_model = self._load_vae(vae) if vae else None
vae_model=None
if vae:
vae_model=self._load_vae(vae)
vae_path=None
convert_ckpt_to_diffusers(
ckpt_path,
diffusers_path,
extract_ema=True,
original_config_file=original_config_file,
vae=vae_model,
vae_path=vae_path,
scan_needed=scan_needed,
)
print(
f" | Success. Optimized model is now located at {str(diffusers_path)}"
f" | Success. Optimized model is now located at {str(diffusers_path)}"
)
print(f" | Writing new config file entry for {model_name}")
print(f" | Writing new config file entry for {model_name}")
new_config = dict(
path=str(diffusers_path),
description=model_description,
@ -849,7 +861,7 @@ class ModelManager(object):
self.add_model(model_name, new_config, True)
if commit_to_conf:
self.commit(commit_to_conf)
print(">> Conversion succeeded")
print(" | Conversion succeeded")
except Exception as e:
print(f"** Conversion failed: {str(e)}")
print(
@ -879,36 +891,6 @@ class ModelManager(object):
return search_folder, found_models
def _choose_diffusers_vae(
self, model_name: str, vae: str = None
) -> Union[dict, str]:
# In the event that the original entry is using a custom ckpt VAE, we try to
# map that VAE onto a diffuser VAE using a hard-coded dictionary.
# I would prefer to do this differently: We load the ckpt model into memory, swap the
# VAE in memory, and then pass that to convert_ckpt_to_diffuser() so that the swapped
# VAE is built into the model. However, when I tried this I got obscure key errors.
if vae:
return vae
if model_name in self.config and (
vae_ckpt_path := self.model_info(model_name).get("vae", None)
):
vae_basename = Path(vae_ckpt_path).stem
diffusers_vae = None
if diffusers_vae := VAE_TO_REPO_ID.get(vae_basename, None):
print(
f">> {vae_basename} VAE corresponds to known {diffusers_vae} diffusers version"
)
vae = {"repo_id": diffusers_vae}
else:
print(
f'** Custom VAE "{vae_basename}" found, but corresponding diffusers model unknown'
)
print(
'** Using "stabilityai/sd-vae-ft-mse"; If this isn\'t right, please edit the model config'
)
vae = {"repo_id": "stabilityai/sd-vae-ft-mse"}
return vae
def _make_cache_room(self) -> None:
num_loaded_models = len(self.models)
if num_loaded_models >= self.max_loaded_models:
@ -1105,7 +1087,7 @@ class ModelManager(object):
with open(hashpath) as f:
hash = f.read()
return hash
print(" | Calculating sha256 hash of model files")
print(" | Calculating sha256 hash of model files")
tic = time.time()
sha = hashlib.sha256()
count = 0
@ -1117,7 +1099,7 @@ class ModelManager(object):
sha.update(chunk)
hash = sha.hexdigest()
toc = time.time()
print(f" | sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic))
print(f" | sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic))
with open(hashpath, "w") as f:
f.write(hash)
return hash
@ -1162,12 +1144,12 @@ class ModelManager(object):
local_files_only=not Globals.internet_available,
)
print(f" | Loading diffusers VAE from {name_or_path}")
print(f" | Loading diffusers VAE from {name_or_path}")
if using_fp16:
vae_args.update(torch_dtype=torch.float16)
fp_args_list = [{"revision": "fp16"}, {}]
else:
print(" | Using more accurate float32 precision")
print(" | Using more accurate float32 precision")
fp_args_list = [{}]
vae = None
@ -1208,7 +1190,7 @@ class ModelManager(object):
hashes_to_delete.add(revision.commit_hash)
strategy = cache_info.delete_revisions(*hashes_to_delete)
print(
f"** deletion of this model is expected to free {strategy.expected_freed_size_str}"
f"** Deletion of this model is expected to free {strategy.expected_freed_size_str}"
)
strategy.execute()


@ -6,7 +6,6 @@ The interface is through the Concepts() object.
"""
import os
import re
import traceback
from typing import Callable
from urllib import error as ul_error
from urllib import request
@ -15,7 +14,6 @@ from huggingface_hub import (
HfApi,
HfFolder,
ModelFilter,
ModelSearchArguments,
hf_hub_url,
)
@ -84,7 +82,7 @@ class HuggingFaceConceptsLibrary(object):
"""
if not concept_name in self.list_concepts():
print(
f"This concept is not a local embedding trigger, nor is it a HuggingFace concept. Generation will continue without the concept."
f"{concept_name} is not a local embedding trigger, nor is it a HuggingFace concept. Generation will continue without the concept."
)
return None
return self.get_concept_file(concept_name.lower(), "learned_embeds.bin")
@ -236,7 +234,7 @@ class HuggingFaceConceptsLibrary(object):
except ul_error.HTTPError as e:
if e.code == 404:
print(
f"This concept is not known to the Hugging Face library. Generation will continue without the concept."
f"Concept {concept_name} is not known to the Hugging Face library. Generation will continue without the concept."
)
else:
print(
@ -246,7 +244,7 @@ class HuggingFaceConceptsLibrary(object):
return False
except ul_error.URLError as e:
print(
f"ERROR: {str(e)}. This may reflect a network issue. Generation will continue without the concept."
f"ERROR while downloading {concept_name}: {str(e)}. This may reflect a network issue. Generation will continue without the concept."
)
os.rmdir(dest)
return False


@ -1022,7 +1022,7 @@ class InvokeAIWebServer:
"RGB"
)
def image_progress(sample, step):
def image_progress(intermediate_state: PipelineIntermediateState):
if self.canceled.is_set():
raise CanceledException
@ -1030,6 +1030,14 @@ class InvokeAIWebServer:
nonlocal generation_parameters
nonlocal progress
step = intermediate_state.step
if intermediate_state.predicted_original is not None:
# Some schedulers report not only the noisy latents at the current timestep,
# but also their estimate so far of what the de-noised latents will be.
sample = intermediate_state.predicted_original
else:
sample = intermediate_state.latents
generation_messages = {
"txt2img": "common.statusGeneratingTextToImage",
"img2img": "common.statusGeneratingImageToImage",
@ -1302,16 +1310,9 @@ class InvokeAIWebServer:
progress.set_current_iteration(progress.current_iteration + 1)
def diffusers_step_callback_adapter(*cb_args, **kwargs):
if isinstance(cb_args[0], PipelineIntermediateState):
progress_state: PipelineIntermediateState = cb_args[0]
return image_progress(progress_state.latents, progress_state.step)
else:
return image_progress(*cb_args, **kwargs)
self.generate.prompt2image(
**generation_parameters,
step_callback=diffusers_step_callback_adapter,
step_callback=image_progress,
image_callback=image_done,
)


@ -772,16 +772,10 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
original_config_file = Path(model_info["config"])
model_name = model_name_or_path
model_description = model_info["description"]
vae = model_info["vae"]
vae_path = model_info.get("vae")
else:
print(f"** {model_name_or_path} is not a legacy .ckpt weights file")
return
if vae_repo := invokeai.backend.model_management.model_manager.VAE_TO_REPO_ID.get(
Path(vae).stem
):
vae_repo = dict(repo_id=vae_repo)
else:
vae_repo = None
model_name = manager.convert_and_import(
ckpt_path,
diffusers_path=Path(
@ -790,7 +784,7 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
model_name=model_name,
model_description=model_description,
original_config_file=original_config_file,
vae=vae_repo,
vae_path=vae_path,
)
else:
try:


@ -1,4 +1,6 @@
import React, { PropsWithChildren } from 'react';
import { IAIPopoverProps } from '../web/src/common/components/IAIPopover';
import { IAIIconButtonProps } from '../web/src/common/components/IAIIconButton';
export {};
@ -50,9 +52,27 @@ declare module '@invoke-ai/invoke-ai-ui' {
declare class InvokeAiLogoComponent extends React.Component<InvokeAILogoComponentProps> {
public constructor(props: InvokeAILogoComponentProps);
}
declare class IAIPopover extends React.Component<IAIPopoverProps> {
public constructor(props: IAIPopoverProps);
}
declare class IAIIconButton extends React.Component<IAIIconButtonProps> {
public constructor(props: IAIIconButtonProps);
}
declare class SettingsModal extends React.Component<SettingsModalProps> {
public constructor(props: SettingsModalProps);
}
}
declare function Invoke(props: PropsWithChildren): JSX.Element;
export { ThemeChanger, InvokeAiLogoComponent };
export {
ThemeChanger,
InvokeAiLogoComponent,
IAIPopover,
IAIIconButton,
SettingsModal,
};
export = Invoke;


@ -6,7 +6,6 @@
"prepare": "cd ../../../ && husky install invokeai/frontend/web/.husky",
"dev": "concurrently \"vite dev\" \"yarn run theme:watch\"",
"build": "yarn run lint && vite build",
"build:package": "vite build --mode=package",
"preview": "vite preview",
"lint:madge": "madge --circular src/main.tsx",
"lint:eslint": "eslint --max-warnings=0 .",


@ -8,7 +8,6 @@
"darkTheme": "داكن",
"lightTheme": "فاتح",
"greenTheme": "أخضر",
"text2img": "نص إلى صورة",
"img2img": "صورة إلى صورة",
"unifiedCanvas": "لوحة موحدة",
"nodes": "عقد",


@ -7,7 +7,6 @@
"darkTheme": "Dunkel",
"lightTheme": "Hell",
"greenTheme": "Grün",
"text2img": "Text zu Bild",
"img2img": "Bild zu Bild",
"nodes": "Knoten",
"langGerman": "Deutsch",


@ -8,7 +8,6 @@
"darkTheme": "Oscuro",
"lightTheme": "Claro",
"greenTheme": "Verde",
"text2img": "Texto a Imagen",
"img2img": "Imagen a Imagen",
"unifiedCanvas": "Lienzo Unificado",
"nodes": "Nodos",
@ -70,7 +69,11 @@
"langHebrew": "Hebreo",
"pinOptionsPanel": "Pin del panel de opciones",
"loading": "Cargando",
"loadingInvokeAI": "Cargando invocar a la IA"
"loadingInvokeAI": "Cargando invocar a la IA",
"postprocessing": "Tratamiento posterior",
"txt2img": "De texto a imagen",
"accept": "Aceptar",
"cancel": "Cancelar"
},
"gallery": {
"generations": "Generaciones",
@ -404,7 +407,8 @@
"none": "ninguno",
"pickModelType": "Elige el tipo de modelo",
"v2_768": "v2 (768px)",
"addDifference": "Añadir una diferencia"
"addDifference": "Añadir una diferencia",
"scanForModels": "Buscar modelos"
},
"parameters": {
"images": "Imágenes",
@ -574,7 +578,7 @@
"autoSaveToGallery": "Guardar automáticamente en galería",
"saveBoxRegionOnly": "Guardar solo región dentro de la caja",
"limitStrokesToBox": "Limitar trazos a la caja",
"showCanvasDebugInfo": "Mostrar información de depuración de lienzo",
"showCanvasDebugInfo": "Mostrar la información adicional del lienzo",
"clearCanvasHistory": "Limpiar historial de lienzo",
"clearHistory": "Limpiar historial",
"clearCanvasHistoryMessage": "Limpiar el historial de lienzo también restablece completamente el lienzo unificado. Esto incluye todo el historial de deshacer/rehacer, las imágenes en el área de preparación y la capa base del lienzo.",


@ -8,7 +8,6 @@
"darkTheme": "Sombre",
"lightTheme": "Clair",
"greenTheme": "Vert",
"text2img": "Texte en image",
"img2img": "Image en image",
"unifiedCanvas": "Canvas unifié",
"nodes": "Nœuds",
@ -47,7 +46,19 @@
"statusLoadingModel": "Chargement du modèle",
"statusModelChanged": "Modèle changé",
"discordLabel": "Discord",
"githubLabel": "Github"
"githubLabel": "Github",
"accept": "Accepter",
"statusMergingModels": "Mélange des modèles",
"loadingInvokeAI": "Chargement de Invoke AI",
"cancel": "Annuler",
"langEnglish": "Anglais",
"statusConvertingModel": "Conversion du modèle",
"statusModelConverted": "Modèle converti",
"loading": "Chargement",
"pinOptionsPanel": "Épingler la page d'options",
"statusMergedModels": "Modèles mélangés",
"txt2img": "Texte vers image",
"postprocessing": "Post-Traitement"
},
"gallery": {
"generations": "Générations",
@ -518,5 +529,15 @@
"betaDarkenOutside": "Assombrir à l'extérieur",
"betaLimitToBox": "Limiter à la boîte",
"betaPreserveMasked": "Conserver masqué"
},
"accessibility": {
"uploadImage": "Charger une image",
"reset": "Réinitialiser",
"nextImage": "Image suivante",
"previousImage": "Image précédente",
"useThisParameter": "Utiliser ce paramètre",
"zoomIn": "Zoom avant",
"zoomOut": "Zoom arrière",
"showOptionsPanel": "Montrer la page d'options"
}
}


@ -125,7 +125,6 @@
"langSimplifiedChinese": "סינית",
"langUkranian": "אוקראינית",
"langSpanish": "ספרדית",
"text2img": "טקסט לתמונה",
"img2img": "תמונה לתמונה",
"unifiedCanvas": "קנבס מאוחד",
"nodes": "צמתים",


@ -8,7 +8,6 @@
"darkTheme": "Scuro",
"lightTheme": "Chiaro",
"greenTheme": "Verde",
"text2img": "Testo a Immagine",
"img2img": "Immagine a Immagine",
"unifiedCanvas": "Tela unificata",
"nodes": "Nodi",
@ -70,7 +69,11 @@
"loading": "Caricamento in corso",
"oceanTheme": "Oceano",
"langHebrew": "Ebraico",
"loadingInvokeAI": "Caricamento Invoke AI"
"loadingInvokeAI": "Caricamento Invoke AI",
"postprocessing": "Post Elaborazione",
"txt2img": "Testo a Immagine",
"accept": "Accetta",
"cancel": "Annulla"
},
"gallery": {
"generations": "Generazioni",
@ -404,7 +407,8 @@
"v2_768": "v2 (768px)",
"none": "niente",
"addDifference": "Aggiungi differenza",
"pickModelType": "Scegli il tipo di modello"
"pickModelType": "Scegli il tipo di modello",
"scanForModels": "Cerca modelli"
},
"parameters": {
"images": "Immagini",
@ -574,7 +578,7 @@
"autoSaveToGallery": "Salvataggio automatico nella Galleria",
"saveBoxRegionOnly": "Salva solo l'area di selezione",
"limitStrokesToBox": "Limita i tratti all'area di selezione",
"showCanvasDebugInfo": "Mostra informazioni di debug della Tela",
"showCanvasDebugInfo": "Mostra ulteriori informazioni sulla Tela",
"clearCanvasHistory": "Cancella cronologia Tela",
"clearHistory": "Cancella la cronologia",
"clearCanvasHistoryMessage": "La cancellazione della cronologia della tela lascia intatta la tela corrente, ma cancella in modo irreversibile la cronologia degli annullamenti e dei ripristini.",
@ -612,7 +616,7 @@
"copyMetadataJson": "Copia i metadati JSON",
"exitViewer": "Esci dal visualizzatore",
"zoomIn": "Zoom avanti",
"zoomOut": "Zoom Indietro",
"zoomOut": "Zoom indietro",
"rotateCounterClockwise": "Ruotare in senso antiorario",
"rotateClockwise": "Ruotare in senso orario",
"flipHorizontally": "Capovolgi orizzontalmente",


@ -11,7 +11,6 @@
"langArabic": "العربية",
"langEnglish": "English",
"langDutch": "Nederlands",
"text2img": "텍스트->이미지",
"unifiedCanvas": "통합 캔버스",
"langFrench": "Français",
"langGerman": "Deutsch",


@ -8,7 +8,6 @@
"darkTheme": "Donker",
"lightTheme": "Licht",
"greenTheme": "Groen",
"text2img": "Tekst naar afbeelding",
"img2img": "Afbeelding naar afbeelding",
"unifiedCanvas": "Centraal canvas",
"nodes": "Knooppunten",


@ -8,7 +8,6 @@
"darkTheme": "Ciemny",
"lightTheme": "Jasny",
"greenTheme": "Zielony",
"text2img": "Tekst na obraz",
"img2img": "Obraz na obraz",
"unifiedCanvas": "Tryb uniwersalny",
"nodes": "Węzły",


@ -20,7 +20,6 @@
"langSpanish": "Espanhol",
"langRussian": "Русский",
"langUkranian": "Украї́нська",
"text2img": "Texto para Imagem",
"img2img": "Imagem para Imagem",
"unifiedCanvas": "Tela Unificada",
"nodes": "Nós",


@ -8,7 +8,6 @@
"darkTheme": "Noite",
"lightTheme": "Dia",
"greenTheme": "Verde",
"text2img": "Texto Para Imagem",
"img2img": "Imagem Para Imagem",
"unifiedCanvas": "Tela Unificada",
"nodes": "Nódulos",


@ -8,7 +8,6 @@
"darkTheme": "Темная",
"lightTheme": "Светлая",
"greenTheme": "Зеленая",
"text2img": "Изображение из текста (text2img)",
"img2img": "Изображение в изображение (img2img)",
"unifiedCanvas": "Универсальный холст",
"nodes": "Ноды",


@ -8,7 +8,6 @@
"darkTheme": "Темна",
"lightTheme": "Світла",
"greenTheme": "Зелена",
"text2img": "Зображення із тексту (text2img)",
"img2img": "Зображення із зображення (img2img)",
"unifiedCanvas": "Універсальне полотно",
"nodes": "Вузли",


@ -8,7 +8,6 @@
"darkTheme": "暗色",
"lightTheme": "亮色",
"greenTheme": "绿色",
"text2img": "文字到图像",
"img2img": "图像到图像",
"unifiedCanvas": "统一画布",
"nodes": "节点",


@ -33,7 +33,6 @@
"langBrPortuguese": "巴西葡萄牙語",
"langRussian": "俄語",
"langSpanish": "西班牙語",
"text2img": "文字到圖像",
"unifiedCanvas": "統一畫布"
}
}


@ -8,7 +8,7 @@ import {
} from '@chakra-ui/react';
import { memo, ReactNode } from 'react';
type IAIPopoverProps = PopoverProps & {
export type IAIPopoverProps = PopoverProps & {
triggerComponent: ReactNode;
triggerContainerProps?: BoxProps;
children: ReactNode;


@ -2,6 +2,15 @@ import Component from './component';
import InvokeAiLogoComponent from './features/system/components/InvokeAILogoComponent';
import ThemeChanger from './features/system/components/ThemeChanger';
import IAIPopover from './common/components/IAIPopover';
import IAIIconButton from './common/components/IAIIconButton';
import SettingsModal from './features/system/components/SettingsModal/SettingsModal';
export default Component;
export { InvokeAiLogoComponent, ThemeChanger };
export {
InvokeAiLogoComponent,
ThemeChanger,
IAIPopover,
IAIIconButton,
SettingsModal,
};


@ -34,7 +34,7 @@ const ReactPanZoomButtons = ({
<IAIIconButton
icon={<BiZoomIn />}
aria-label={t('accessibility.zoomIn')}
tooltip="Zoom In"
tooltip={t('accessibility.zoomIn')}
onClick={() => zoomIn()}
fontSize={20}
/>
@ -42,7 +42,7 @@ const ReactPanZoomButtons = ({
<IAIIconButton
icon={<BiZoomOut />}
aria-label={t('accessibility.zoomOut')}
tooltip="Zoom Out"
tooltip={t('accessibility.zoomOut')}
onClick={() => zoomOut()}
fontSize={20}
/>
@ -50,7 +50,7 @@ const ReactPanZoomButtons = ({
<IAIIconButton
icon={<BiRotateLeft />}
aria-label={t('accessibility.rotateCounterClockwise')}
tooltip="Rotate Counter-Clockwise"
tooltip={t('accessibility.rotateCounterClockwise')}
onClick={rotateCounterClockwise}
fontSize={20}
/>
@ -58,7 +58,7 @@ const ReactPanZoomButtons = ({
<IAIIconButton
icon={<BiRotateRight />}
aria-label={t('accessibility.rotateClockwise')}
tooltip="Rotate Clockwise"
tooltip={t('accessibility.rotateClockwise')}
onClick={rotateClockwise}
fontSize={20}
/>
@ -66,7 +66,7 @@ const ReactPanZoomButtons = ({
<IAIIconButton
icon={<MdFlip />}
aria-label={t('accessibility.flipHorizontally')}
tooltip="Flip Horizontally"
tooltip={t('accessibility.flipHorizontally')}
onClick={flipHorizontally}
fontSize={20}
/>
@ -74,7 +74,7 @@ const ReactPanZoomButtons = ({
<IAIIconButton
icon={<MdFlip style={{ transform: 'rotate(90deg)' }} />}
aria-label={t('accessibility.flipVertically')}
tooltip="Flip Vertically"
tooltip={t('accessibility.flipVertically')}
onClick={flipVertically}
fontSize={20}
/>
@ -82,7 +82,7 @@ const ReactPanZoomButtons = ({
<IAIIconButton
icon={<BiReset />}
aria-label={t('accessibility.reset')}
tooltip="Reset"
tooltip={t('accessibility.reset')}
onClick={() => {
resetTransform();
reset();


@ -1,4 +1,3 @@
import path from 'path';
import react from '@vitejs/plugin-react-swc';
import { visualizer } from 'rollup-plugin-visualizer';
import { defineConfig, PluginOption } from 'vite';
@ -58,26 +57,6 @@ export default defineConfig(({ mode }) => {
// sourcemap: true, // this can be enabled if needed, it adds over 15MB to the commit
},
};
} else if (mode === 'package') {
return {
...common,
build: {
...common.build,
lib: {
entry: path.resolve(__dirname, 'src/exports.tsx'),
name: 'InvokeAI UI',
fileName: (format) => `invoke-ai-ui.${format}.js`,
},
rollupOptions: {
external: ['react', 'react-dom'],
output: {
globals: {
react: 'React',
},
},
},
},
};
} else {
return {
...common,


@ -38,16 +38,16 @@ dependencies = [
"albumentations",
"click",
"clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
"compel==1.0.1",
"compel==1.0.4",
"datasets",
"diffusers[torch]~=0.14",
"dnspython==2.2.1",
"einops",
"eventlet",
"facexlib",
"fastapi==0.85.0",
"fastapi-events==0.6.0",
"fastapi-socketio==0.0.9",
"fastapi==0.94.1",
"fastapi-events==0.8.0",
"fastapi-socketio==0.0.10",
"flask==2.1.3",
"flask_cors==3.0.10",
"flask_socketio==5.3.0",
@ -75,7 +75,7 @@ dependencies = [
"torchvision>=0.14.1",
"torchmetrics",
"transformers~=4.26",
"uvicorn[standard]==0.20.0",
"uvicorn[standard]==0.21.1",
"windows-curses; sys_platform=='win32'",
]
@ -139,8 +139,24 @@ version = { attr = "invokeai.version.__version__" }
"invokeai.configs" = ["*.example", "**/*.yaml", "*.txt"]
"invokeai.frontend.web.dist" = ["**"]
#=== Begin: PyTest and Coverage
[tool.pytest.ini_options]
addopts = "-p pytest_cov --junitxml=junit/test-results.xml --cov-report=term:skip-covered --cov=ldm/invoke --cov=backend --cov-branch"
addopts = "--cov-report term --cov-report html --cov-report xml"
[tool.coverage.run]
branch = true
source = ["invokeai"]
omit = ["*tests*", "*migrations*", ".venv/*", "*.env"]
[tool.coverage.report]
show_missing = true
fail_under = 85 # let's set something sensible on Day 1 ...
[tool.coverage.json]
output = "coverage/coverage.json"
pretty_print = true
[tool.coverage.html]
directory = "coverage/html"
[tool.coverage.xml]
output = "coverage/index.xml"
#=== End: PyTest and Coverage
[flake8]
max-line-length = 120