Merge remote-tracking branch 'origin' into i18n-build-mode

Mary Hipp 2023-03-27 10:57:41 -04:00
commit dbc0093b31
52 changed files with 594 additions and 287 deletions

@@ -1,6 +0,0 @@
-[run]
-omit='.env/*'
-source='.'
-
-[report]
-show_missing = true

.github/CODEOWNERS (8 changed lines)

@@ -1,16 +1,16 @@
 # continuous integration
-/.github/workflows/ @mauwii @lstein
+/.github/workflows/ @mauwii @lstein @blessedcoolant

 # documentation
-/docs/ @lstein @mauwii @tildebyte
-/mkdocs.yml @lstein @mauwii
+/docs/ @lstein @mauwii @tildebyte @blessedcoolant
+/mkdocs.yml @lstein @mauwii @blessedcoolant

 # nodes
 /invokeai/app/ @Kyle0654 @blessedcoolant

 # installation and configuration
 /pyproject.toml @mauwii @lstein @blessedcoolant
-/docker/ @mauwii @lstein
+/docker/ @mauwii @lstein @blessedcoolant
 /scripts/ @ebr @lstein
 /installer/ @lstein @ebr
 /invokeai/assets @lstein @ebr

@@ -16,6 +16,9 @@ on:
       - 'v*.*.*'
   workflow_dispatch:

+permissions:
+  contents: write
+
 jobs:
   docker:
     if: github.event.pull_request.draft == false

@@ -5,6 +5,9 @@ on:
       - 'main'
       - 'development'

+permissions:
+  contents: write
+
 jobs:
   mkdocs-material:
     if: github.event.pull_request.draft == false

@@ -6,7 +6,6 @@ on:
       - '!pyproject.toml'
       - '!invokeai/**'
       - 'invokeai/frontend/web/**'
-      - '!invokeai/frontend/web/dist/**'
   merge_group:
   workflow_dispatch:

@@ -7,13 +7,11 @@ on:
       - 'pyproject.toml'
       - 'invokeai/**'
       - '!invokeai/frontend/web/**'
-      - 'invokeai/frontend/web/dist/**'
   pull_request:
     paths:
       - 'pyproject.toml'
      - 'invokeai/**'
      - '!invokeai/frontend/web/**'
-      - 'invokeai/frontend/web/dist/**'
    types:
      - 'ready_for_review'
      - 'opened'

.gitignore (2 changed lines)

@@ -63,6 +63,7 @@ pip-delete-this-directory.txt
 htmlcov/
 .tox/
 .nox/
+.coveragerc
 .coverage
 .coverage.*
 .cache
@@ -73,6 +74,7 @@ cov.xml
 *.py,cover
 .hypothesis/
 .pytest_cache/
+.pytest.ini
 cover/
 junit/

@@ -1,5 +0,0 @@
-[pytest]
-DJANGO_SETTINGS_MODULE = webtas.settings
-; python_files = tests.py test_*.py *_tests.py
-addopts = --cov=. --cov-config=.coveragerc --cov-report xml:cov.xml

@@ -139,7 +139,7 @@ not supported.
 _For Windows/Linux with an NVIDIA GPU:_

 ```terminal
-pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
+pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
 ```

 _For Linux with an AMD GPU:_

coverage/.gitignore (new file, 4 lines)

@@ -0,0 +1,4 @@
# Ignore everything in this directory
*
# Except this file
!.gitignore

(binary image file added: 470 KiB)

(binary image file added: 457 KiB)

@@ -0,0 +1,83 @@
# Local Development

If you are looking to contribute you will need to have a local development
environment. See the
[Developer Install](../installation/020_INSTALL_MANUAL.md#developer-install) for
full details.

Broadly this involves cloning the repository, installing the pre-reqs, and
installing InvokeAI in editable form. Assuming this is working, choose your
area of focus.

## Documentation

We use [mkdocs](https://www.mkdocs.org) for our documentation with the
[material theme](https://squidfunk.github.io/mkdocs-material/). Documentation is
written in markdown files under the `./docs` folder and then built into a static
website for hosting with GitHub Pages at
[invoke-ai.github.io/InvokeAI](https://invoke-ai.github.io/InvokeAI).

To contribute to the documentation you'll need to install the dependencies.
Note the quotes around the package spec.

```zsh
pip install ".[docs]"
```

Now you can serve the documentation locally with hot-reloading:

```zsh
mkdocs serve
```

The documentation will then be available at `http://127.0.0.1:8080`.

## Backend

The backend is contained within the `./invokeai/backend` folder structure. To
get started, first install the development dependencies.

From the root of the repository run the following command. Note the quotes
around the package spec.

```zsh
pip install ".[test]"
```

This is an optional group of packages defined in `pyproject.toml`; it is
required for testing the changes you make to the code.

### Running Tests

We use [pytest](https://docs.pytest.org/en/7.2.x/) for our test suite. Tests
can be found under the `./tests` folder and can be run with a single `pytest`
command. Optionally, to review test coverage you can append `--cov`.

```zsh
pytest --cov
```

Test outcomes and coverage will be reported in the terminal. In addition, a
more detailed report is created in both XML and HTML format in the `./coverage`
folder. The HTML report in particular helps identify statements that still
need test coverage; view it by opening `./coverage/html/index.html`.

For example:

```zsh
pytest --cov; open ./coverage/html/index.html
```

??? info "HTML coverage report output"

    ![html-overview](../assets/contributing/html-overview.png)

    ![html-detail](../assets/contributing/html-detail.png)

## Front End

<!--#TODO: get input from blessedcoolant here, for the moment inserted the frontend README via snippets extension.-->

--8<-- "invokeai/frontend/web/README.md"

@@ -168,11 +168,15 @@ used by Stable Diffusion 1.4 and 1.5.
 After installation, your `models.yaml` should contain an entry that looks like
 this one:

-inpainting-1.5: weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
-description: SD inpainting v1.5 config:
-configs/stable-diffusion/v1-inpainting-inference.yaml vae:
-models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt width: 512
-height: 512
+```yml
+inpainting-1.5:
+  weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
+  description: SD inpainting v1.5
+  config: configs/stable-diffusion/v1-inpainting-inference.yaml
+  vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
+  width: 512
+  height: 512
+```

 As shown in the example, you may include a VAE fine-tuning weights file as well.
 This is strongly recommended.

@@ -24,9 +24,9 @@ if [ "$(uname -s)" == "Darwin" ]; then
     export PYTORCH_ENABLE_MPS_FALLBACK=1
 fi

-while true
-do
-    if [ "$0" != "bash" ]; then
+if [ "$0" != "bash" ]; then
+    while true
+    do
         echo "Do you want to generate images using the"
         echo "1. command-line interface"
         echo "2. browser-based UI"
@@ -67,29 +67,29 @@ if [ "$0" != "bash" ]; then
             ;;
         7)
             invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
             ;;
        8)
            echo "Developer Console:"
            file_name=$(basename "${BASH_SOURCE[0]}")
            bash --init-file "$file_name"
            ;;
        9)
            echo "Update:"
            invokeai-update
            ;;
        10)
            invokeai --help
            ;;
        [qQ])
            exit 0
            ;;
        *)
            echo "Invalid selection"
            exit;;
        esac
+    done
 else # in developer console
     python --version
     echo "Press ^D to exit"
     export PS1="(InvokeAI) \u@\h \w> "
 fi
-done

@@ -270,3 +270,18 @@ async def invoke_session(
     ApiDependencies.invoker.invoke(session, invoke_all=all)
     return Response(status_code=202)
+
+
+@session_router.delete(
+    "/{session_id}/invoke",
+    operation_id="cancel_session_invoke",
+    responses={
+        202: {"description": "The invocation is canceled"}
+    },
+)
+async def cancel_session_invoke(
+    session_id: str = Path(description="The id of the session to cancel"),
+) -> None:
+    """Cancels a session"""
+    ApiDependencies.invoker.cancel(session_id)
+    return Response(status_code=202)
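Note: cancellation is exposed as an HTTP DELETE on the session's `invoke` resource. A minimal client-side sketch using `requests`; the host, port, and `/api/v1/sessions` prefix are assumptions about the server configuration, not something this diff shows:

```python
import requests

# Hypothetical base URL; adjust to wherever the InvokeAI API is served.
BASE_URL = "http://127.0.0.1:9090/api/v1/sessions"

def cancel_invoke(session_id: str) -> None:
    # DELETE /{session_id}/invoke maps to cancel_session_invoke above.
    response = requests.delete(f"{BASE_URL}/{session_id}/invoke")
    response.raise_for_status()  # the handler responds with 202 Accepted
```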

@@ -1,22 +1,19 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

-from datetime import datetime, timezone
-from typing import Any, Literal, Optional, Union
+from functools import partial
+from typing import Literal, Optional, Union

 import numpy as np
 from torch import Tensor
-from PIL import Image
 from pydantic import Field
-from skimage.exposure.histogram_matching import match_histograms

 from ..services.image_storage import ImageType
-from ..services.invocation_services import InvocationServices
 from .baseinvocation import BaseInvocation, InvocationContext
 from .image import ImageField, ImageOutput
-from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator, Generator
+from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator
 from ...backend.stable_diffusion import PipelineIntermediateState
-from ...backend.util.util import image_to_dataURL
+from ..util.util import diffusers_step_callback_adapter, CanceledException

 SAMPLER_NAME_VALUES = Literal[
     tuple(InvokeAIGenerator.schedulers())
@@ -45,32 +42,26 @@ class TextToImageInvocation(BaseInvocation):
     # TODO: pass this an emitter method or something? or a session for dispatching?
     def dispatch_progress(
-        self, context: InvocationContext, sample: Tensor, step: int
+        self, context: InvocationContext, intermediate_state: PipelineIntermediateState
     ) -> None:
-        # TODO: only output a preview image when requested
-        image = Generator.sample_to_lowres_estimated_image(sample)
-        (width, height) = image.size
-        width *= 8
-        height *= 8
-        dataURL = image_to_dataURL(image, image_format="JPEG")
-        context.services.events.emit_generator_progress(
-            context.graph_execution_state_id,
-            self.id,
-            {
-                "width": width,
-                "height": height,
-                "dataURL": dataURL
-            },
-            step,
-            self.steps,
-        )
+        if (context.services.queue.is_canceled(context.graph_execution_state_id)):
+            raise CanceledException
+        step = intermediate_state.step
+        if intermediate_state.predicted_original is not None:
+            # Some schedulers report not only the noisy latents at the current timestep,
+            # but also their estimate so far of what the de-noised latents will be.
+            sample = intermediate_state.predicted_original
+        else:
+            sample = intermediate_state.latents
+        diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)

     def invoke(self, context: InvocationContext) -> ImageOutput:
-        def step_callback(state: PipelineIntermediateState):
-            self.dispatch_progress(context, state.latents, state.step)
+        # def step_callback(state: PipelineIntermediateState):
+        #     if (context.services.queue.is_canceled(context.graph_execution_state_id)):
+        #         raise CanceledException
+        #     self.dispatch_progress(context, state.latents, state.step)

         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
@@ -79,7 +70,7 @@ class TextToImageInvocation(BaseInvocation):
         model = context.services.model_manager.get_model()
         outputs = Txt2Img(model).generate(
             prompt=self.prompt,
-            step_callback=step_callback,
+            step_callback=partial(self.dispatch_progress, context),
             **self.dict(
                 exclude={"prompt"}
             ),  # Shorthand for passing all of the parameters above manually
@@ -116,6 +107,22 @@ class ImageToImageInvocation(TextToImageInvocation):
         description="Whether or not the result should be fit to the aspect ratio of the input image",
     )

+    def dispatch_progress(
+        self, context: InvocationContext, intermediate_state: PipelineIntermediateState
+    ) -> None:
+        if (context.services.queue.is_canceled(context.graph_execution_state_id)):
+            raise CanceledException
+        step = intermediate_state.step
+        if intermediate_state.predicted_original is not None:
+            # Some schedulers report not only the noisy latents at the current timestep,
+            # but also their estimate so far of what the de-noised latents will be.
+            sample = intermediate_state.predicted_original
+        else:
+            sample = intermediate_state.latents
+        diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
+
     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = (
             None
@@ -126,24 +133,23 @@ class ImageToImageInvocation(TextToImageInvocation):
         )
         mask = None

-        def step_callback(sample, step=0):
-            self.dispatch_progress(context, sample, step)
-
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
         # TODO: How to get the default model name now?
         model = context.services.model_manager.get_model()
-        generator_output = next(
-            Img2Img(model).generate(
-                prompt=self.prompt,
-                init_image=image,
-                init_mask=mask,
-                step_callback=step_callback,
-                **self.dict(
-                    exclude={"prompt", "image", "mask"}
-                ),  # Shorthand for passing all of the parameters above manually
-            )
-        )
+        outputs = Img2Img(model).generate(
+            prompt=self.prompt,
+            init_image=image,
+            init_mask=mask,
+            step_callback=partial(self.dispatch_progress, context),
+            **self.dict(
+                exclude={"prompt", "image", "mask"}
+            ),  # Shorthand for passing all of the parameters above manually
+        )
+
+        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
+        # each time it is called. We only need the first one.
+        generator_output = next(outputs)

         result_image = generator_output.image
@@ -173,6 +179,22 @@ class InpaintInvocation(ImageToImageInvocation):
         description="The amount by which to replace masked areas with latent noise",
     )

+    def dispatch_progress(
+        self, context: InvocationContext, intermediate_state: PipelineIntermediateState
+    ) -> None:
+        if (context.services.queue.is_canceled(context.graph_execution_state_id)):
+            raise CanceledException
+        step = intermediate_state.step
+        if intermediate_state.predicted_original is not None:
+            # Some schedulers report not only the noisy latents at the current timestep,
+            # but also their estimate so far of what the de-noised latents will be.
+            sample = intermediate_state.predicted_original
+        else:
+            sample = intermediate_state.latents
+        diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)
+
     def invoke(self, context: InvocationContext) -> ImageOutput:
         image = (
             None
@@ -187,24 +209,23 @@ class InpaintInvocation(ImageToImageInvocation):
             else context.services.images.get(self.mask.image_type, self.mask.image_name)
         )

-        def step_callback(sample, step=0):
-            self.dispatch_progress(context, sample, step)
-
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
         # TODO: How to get the default model name now?
-        manager = context.services.model_manager.get_model()
-        generator_output = next(
-            Inpaint(model).generate(
-                prompt=self.prompt,
-                init_image=image,
-                mask_image=mask,
-                step_callback=step_callback,
-                **self.dict(
-                    exclude={"prompt", "image", "mask"}
-                ),  # Shorthand for passing all of the parameters above manually
-            )
-        )
+        model = context.services.model_manager.get_model()
+        outputs = Inpaint(model).generate(
+            prompt=self.prompt,
+            init_img=image,
+            init_mask=mask,
+            step_callback=partial(self.dispatch_progress, context),
+            **self.dict(
+                exclude={"prompt", "image", "mask"}
+            ),  # Shorthand for passing all of the parameters above manually
+        )
+
+        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
+        # each time it is called. We only need the first one.
+        generator_output = next(outputs)

         result_image = generator_output.image
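As the new comments note, `generate()` returns an unbounded iterator that yields a fresh `InvokeAIGeneratorOutput` on every advance, so callers take only what they need with `next()`. A schematic sketch of that consumption pattern (the generator body here is a stand-in, not the real implementation):

```python
from itertools import count

def generate(**params):
    # Stand-in for InvokeAIGenerator.generate(): yields a new output
    # object every time it is advanced, forever.
    for seed in count():
        yield {"seed": seed, **params}

outputs = generate(prompt="a castle")
generator_output = next(outputs)  # we only need the first result
print(generator_output["seed"])   # 0
```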

@@ -28,12 +28,28 @@ class ImageOutput(BaseInvocationOutput):
     image: ImageField = Field(default=None, description="The output image")
     #fmt: on

+    class Config:
+        schema_extra = {
+            'required': [
+                'type',
+                'image',
+            ]
+        }
+

 class MaskOutput(BaseInvocationOutput):
     """Base class for invocations that output a mask"""

     #fmt: off
     type: Literal["mask"] = "mask"
     mask: ImageField = Field(default=None, description="The output mask")
-    #fomt: on
+    #fmt: on
+
+    class Config:
+        schema_extra = {
+            'required': [
+                'type',
+                'mask',
+            ]
+        }

 # TODO: this isn't really necessary anymore
 class LoadImageInvocation(BaseInvocation):

@@ -12,3 +12,11 @@ class PromptOutput(BaseInvocationOutput):
     prompt: str = Field(default=None, description="The output prompt")
     #fmt: on
+
+    class Config:
+        schema_extra = {
+            'required': [
+                'type',
+                'prompt',
+            ]
+        }
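These `Config.schema_extra` blocks exist because pydantic omits fields that have defaults from the `required` list of the generated JSON/OpenAPI schema; pinning `required` explicitly keeps generated clients honest about which output fields are always present. A small self-contained demonstration of the effect, assuming pydantic v1:

```python
from typing import Literal
from pydantic import BaseModel, Field

class PromptOutput(BaseModel):
    type: Literal["prompt"] = "prompt"
    prompt: str = Field(default=None, description="The output prompt")

    class Config:
        # Merged into the generated schema; without it, fields with
        # defaults would not be listed as required.
        schema_extra = {'required': ['type', 'prompt']}

print(PromptOutput.schema()["required"])  # ['type', 'prompt']
```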

@@ -127,6 +127,13 @@ class NodeAlreadyExecutedError(Exception):
 class GraphInvocationOutput(BaseInvocationOutput):
     type: Literal["graph_output"] = "graph_output"

+    class Config:
+        schema_extra = {
+            'required': [
+                'type',
+                'image',
+            ]
+        }

 # TODO: Fill this out and move to invocations
 class GraphInvocation(BaseInvocation):
@@ -147,6 +154,13 @@ class IterateInvocationOutput(BaseInvocationOutput):
     item: Any = Field(description="The item being iterated over")

+    class Config:
+        schema_extra = {
+            'required': [
+                'type',
+                'item',
+            ]
+        }

 # TODO: Fill this out and move to invocations
 class IterateInvocation(BaseInvocation):
@@ -169,6 +183,13 @@ class CollectInvocationOutput(BaseInvocationOutput):
     collection: list[Any] = Field(description="The collection of input items")

+    class Config:
+        schema_extra = {
+            'required': [
+                'type',
+                'collection',
+            ]
+        }

 class CollectInvocation(BaseInvocation):
     """Collects values into a collection"""

@@ -2,6 +2,7 @@

 from abc import ABC, abstractmethod
 from queue import Queue
+import time


 # TODO: make this serializable
@@ -10,6 +11,7 @@ class InvocationQueueItem:
     graph_execution_state_id: str
     invocation_id: str
     invoke_all: bool
+    timestamp: float

     def __init__(
         self,
@@ -22,6 +24,7 @@ class InvocationQueueItem:
         self.graph_execution_state_id = graph_execution_state_id
         self.invocation_id = invocation_id
         self.invoke_all = invoke_all
+        self.timestamp = time.time()


 class InvocationQueueABC(ABC):
@@ -35,15 +38,44 @@ class InvocationQueueABC(ABC):
     def put(self, item: InvocationQueueItem | None) -> None:
         pass

+    @abstractmethod
+    def cancel(self, graph_execution_state_id: str) -> None:
+        pass
+
+    @abstractmethod
+    def is_canceled(self, graph_execution_state_id: str) -> bool:
+        pass
+

 class MemoryInvocationQueue(InvocationQueueABC):
     __queue: Queue
+    __cancellations: dict[str, float]

     def __init__(self):
         self.__queue = Queue()
+        self.__cancellations = dict()

     def get(self) -> InvocationQueueItem:
-        return self.__queue.get()
+        item = self.__queue.get()
+
+        while isinstance(item, InvocationQueueItem) \
+            and item.graph_execution_state_id in self.__cancellations \
+            and self.__cancellations[item.graph_execution_state_id] > item.timestamp:
+            item = self.__queue.get()
+
+        # Clear old items
+        for graph_execution_state_id in list(self.__cancellations.keys()):
+            if self.__cancellations[graph_execution_state_id] < item.timestamp:
+                del self.__cancellations[graph_execution_state_id]
+
+        return item

     def put(self, item: InvocationQueueItem | None) -> None:
         self.__queue.put(item)
+
+    def cancel(self, graph_execution_state_id: str) -> None:
+        if graph_execution_state_id not in self.__cancellations:
+            self.__cancellations[graph_execution_state_id] = time.time()
+
+    def is_canceled(self, graph_execution_state_id: str) -> bool:
+        return graph_execution_state_id in self.__cancellations
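The cancellation model is timestamp-based: `cancel()` records a moment in time, and `get()` silently drops any queued item for that graph that was enqueued before that moment, while items enqueued afterwards still run. A deterministic sketch of just that comparison (timestamps are set by hand so the behavior is reproducible):

```python
from queue import Queue

class Item:
    def __init__(self, graph_id: str, timestamp: float):
        self.graph_execution_state_id = graph_id
        self.timestamp = timestamp

q: Queue = Queue()
cancellations: dict[str, float] = {}

q.put(Item("graph-a", timestamp=1.0))  # enqueued before the cancel
cancellations["graph-a"] = 2.0         # cancel recorded at t=2
q.put(Item("graph-a", timestamp=3.0))  # enqueued after the cancel

item = q.get()
while (item.graph_execution_state_id in cancellations
       and cancellations[item.graph_execution_state_id] > item.timestamp):
    item = q.get()  # drop items older than the cancel mark

print(item.timestamp)  # 3.0 -- only the post-cancel item survives
```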

@@ -51,6 +51,10 @@ class Invoker:
         self.services.graph_execution_manager.set(new_state)
         return new_state

+    def cancel(self, graph_execution_state_id: str) -> None:
+        """Cancels the given execution state"""
+        self.services.queue.cancel(graph_execution_state_id)
+
     def __start_service(self, service) -> None:
         # Call start() method on any services that have it
         start_op = getattr(service, "start", None)

@@ -4,7 +4,7 @@ from threading import Event, Thread

 from ..invocations.baseinvocation import InvocationContext
 from .invocation_queue import InvocationQueueItem
 from .invoker import InvocationProcessorABC, Invoker
+from ..util.util import CanceledException


 class DefaultInvocationProcessor(InvocationProcessorABC):
     __invoker_thread: Thread
@@ -58,6 +58,12 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                     )
                 )

+                # Check queue to see if this is canceled, and skip if so
+                if self.__invoker.services.queue.is_canceled(
+                    graph_execution_state.id
+                ):
+                    continue
+
                 # Save outputs and history
                 graph_execution_state.complete(invocation.id, outputs)
@@ -76,6 +82,9 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
             except KeyboardInterrupt:
                 pass

+            except CanceledException:
+                pass
+
             except Exception as e:
                 error = traceback.format_exc()
@@ -96,6 +105,12 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
                 pass

+            # Check queue to see if this is canceled, and skip if so
+            if self.__invoker.services.queue.is_canceled(
+                graph_execution_state.id
+            ):
+                continue
+
             # Queue any further commands if invoking all
             is_complete = graph_execution_state.is_complete()
             if queue_item.invoke_all and not is_complete:
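On the consumer side the processor now checks `is_canceled()` twice: once after an invocation completes, so its outputs are discarded rather than saved, and once before queueing follow-on work, with `CanceledException` from the step callback swallowed in between. The control flow reduces to something like this sketch (the queue and cancellation set are stand-ins for the real services):

```python
import queue

work_q: "queue.Queue[str]" = queue.Queue()
canceled: set[str] = set()

for graph_id in ("a", "b", "a"):
    work_q.put(graph_id)
canceled.add("a")  # a cancel arrives while work is queued

while not work_q.empty():
    graph_id = work_q.get()
    # Mirror of the processor's guard: skip canceled graphs instead of
    # saving their outputs or enqueueing further invocations.
    if graph_id in canceled:
        continue
    print(f"executing invocation for graph {graph_id}")  # only "b" runs
```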

invokeai/app/util/util.py (new file, 42 lines)

@@ -0,0 +1,42 @@
import torch
from PIL import Image

from ..invocations.baseinvocation import InvocationContext
from ...backend.util.util import image_to_dataURL
from ...backend.generator.base import Generator
from ...backend.stable_diffusion import PipelineIntermediateState


class CanceledException(Exception):
    pass


def fast_latents_step_callback(sample: torch.Tensor, step: int, steps: int, id: str, context: InvocationContext):
    # TODO: only output a preview image when requested
    image = Generator.sample_to_lowres_estimated_image(sample)

    (width, height) = image.size
    width *= 8
    height *= 8

    dataURL = image_to_dataURL(image, image_format="JPEG")

    context.services.events.emit_generator_progress(
        context.graph_execution_state_id,
        id,
        {
            "width": width,
            "height": height,
            "dataURL": dataURL
        },
        step,
        steps,
    )


def diffusers_step_callback_adapter(*cb_args, **kwargs):
    """
    txt2img gives us a Tensor in the step_callback, while img2img gives us a PipelineIntermediateState.
    This adapter grabs the needed data and passes it along to the callback function.
    """
    if isinstance(cb_args[0], PipelineIntermediateState):
        progress_state: PipelineIntermediateState = cb_args[0]
        return fast_latents_step_callback(progress_state.latents, progress_state.step, **kwargs)
    else:
        return fast_latents_step_callback(*cb_args, **kwargs)
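The adapter's only job is shape dispatch: txt2img invokes the callback with `(sample, step)` while img2img-style pipelines pass a single `PipelineIntermediateState`. A tiny isolated sketch of that dispatch, with stand-ins for the state class and the downstream callback (the real one needs a running event service):

```python
from dataclasses import dataclass

@dataclass
class FakeIntermediateState:  # stand-in for PipelineIntermediateState
    latents: str
    step: int

def fake_callback(sample, step, **kwargs):
    print(f"step {step}: {sample}")

def adapter(*cb_args, **kwargs):
    # Same shape dispatch as diffusers_step_callback_adapter above.
    if isinstance(cb_args[0], FakeIntermediateState):
        state = cb_args[0]
        return fake_callback(state.latents, state.step, **kwargs)
    return fake_callback(*cb_args, **kwargs)

adapter("latents-tensor", 3)                         # txt2img-style call
adapter(FakeIntermediateState("latents-tensor", 3))  # img2img-style call
```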

@@ -21,7 +21,7 @@ from PIL import Image, ImageChops, ImageFilter
 from accelerate.utils import set_seed
 from diffusers import DiffusionPipeline
 from tqdm import trange
-from typing import List, Iterator, Type
+from typing import Callable, List, Iterator, Optional, Type
 from dataclasses import dataclass, field
 from diffusers.schedulers import SchedulerMixin as Scheduler
@@ -35,23 +35,23 @@ downsampling = 8
 @dataclass
 class InvokeAIGeneratorBasicParams:
-    seed: int=None
+    seed: Optional[int]=None
     width: int=512
     height: int=512
-    cfg_scale: int=7.5
+    cfg_scale: float=7.5
     steps: int=20
     ddim_eta: float=0.0
-    scheduler: int='ddim'
+    scheduler: str='ddim'
     precision: str='float16'
     perlin: float=0.0
-    threshold: int=0.0
+    threshold: float=0.0
     seamless: bool=False
     seamless_axes: List[str]=field(default_factory=lambda: ['x', 'y'])
-    h_symmetry_time_pct: float=None
-    v_symmetry_time_pct: float=None
+    h_symmetry_time_pct: Optional[float]=None
+    v_symmetry_time_pct: Optional[float]=None
     variation_amount: float = 0.0
     with_variations: list=field(default_factory=list)
-    safety_checker: SafetyChecker=None
+    safety_checker: Optional[SafetyChecker]=None

 @dataclass
 class InvokeAIGeneratorOutput:
@@ -61,10 +61,10 @@ class InvokeAIGeneratorOutput:
     and the model hash, as well as all the generate() parameters that went into
     generating the image (in .params, also available as attributes)
     '''
-    image: Image
+    image: Image.Image
     seed: int
     model_hash: str
-    attention_maps_images: List[Image]
+    attention_maps_images: List[Image.Image]
     params: Namespace

 # we are interposing a wrapper around the original Generator classes so that
@@ -92,8 +92,8 @@ class InvokeAIGenerator(metaclass=ABCMeta):
     def generate(self,
                  prompt: str='',
-                 callback: callable=None,
-                 step_callback: callable=None,
+                 callback: Optional[Callable]=None,
+                 step_callback: Optional[Callable]=None,
                  iterations: int=1,
                  **keyword_args,
                  )->Iterator[InvokeAIGeneratorOutput]:
@@ -154,6 +154,7 @@ class InvokeAIGenerator(metaclass=ABCMeta):
         for i in iteration_count:
             results = generator.generate(prompt,
                                          conditioning=(uc, c, extra_conditioning_info),
+                                         step_callback=step_callback,
                                          sampler=scheduler,
                                          **generator_args,
                                          )
@@ -205,10 +206,10 @@ class Txt2Img(InvokeAIGenerator):
 # ------------------------------------
 class Img2Img(InvokeAIGenerator):
     def generate(self,
-                 init_image: Image | torch.FloatTensor,
+                 init_image: Image.Image | torch.FloatTensor,
                  strength: float=0.75,
                  **keyword_args
-                 )->List[InvokeAIGeneratorOutput]:
+                 )->Iterator[InvokeAIGeneratorOutput]:
         return super().generate(init_image=init_image,
                                 strength=strength,
                                 **keyword_args
@@ -222,7 +223,7 @@ class Img2Img(InvokeAIGenerator):
 # Takes all the arguments of Img2Img and adds the mask image and the seam/infill stuff
 class Inpaint(Img2Img):
     def generate(self,
-                 mask_image: Image | torch.FloatTensor,
+                 mask_image: Image.Image | torch.FloatTensor,
                  # Seam settings - when 0, doesn't fill seam
                  seam_size: int = 0,
                  seam_blur: int = 0,
@@ -235,7 +236,7 @@ class Inpaint(Img2Img):
                  inpaint_height=None,
                  inpaint_fill: tuple(int) = (0x7F, 0x7F, 0x7F, 0xFF),
                  **keyword_args
-                 )->List[InvokeAIGeneratorOutput]:
+                 )->Iterator[InvokeAIGeneratorOutput]:
         return super().generate(
             mask_image=mask_image,
             seam_size=seam_size,
@@ -262,7 +263,7 @@ class Embiggen(Txt2Img):
                  embiggen: list=None,
                  embiggen_tiles: list = None,
                  strength: float=0.75,
-                 **kwargs)->List[InvokeAIGeneratorOutput]:
+                 **kwargs)->Iterator[InvokeAIGeneratorOutput]:
         return super().generate(embiggen=embiggen,
                                 embiggen_tiles=embiggen_tiles,
                                 strength=strength,

@@ -372,22 +372,32 @@ def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False
     unet_key = "model.diffusion_model."
     # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA
     if sum(k.startswith("model_ema") for k in keys) > 100:
         print(f"   | Checkpoint {path} has both EMA and non-EMA weights.")
         if extract_ema:
             print("   | Extracting EMA weights (usually better for inference)")
             for key in keys:
                 if key.startswith("model.diffusion_model"):
                     flat_ema_key = "model_ema." + "".join(key.split(".")[1:])
-                    unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(
-                        flat_ema_key
-                    )
+                    flat_ema_key_alt = "model_ema." + "".join(key.split(".")[2:])
+                    if flat_ema_key in checkpoint:
+                        unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(
+                            flat_ema_key
+                        )
+                    elif flat_ema_key_alt in checkpoint:
+                        unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(
+                            flat_ema_key_alt
+                        )
+                    else:
+                        unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(
+                            key
+                        )
         else:
             print(
                 "   | Extracting only the non-EMA weights (usually better for fine-tuning)"
             )
             for key in keys:
-                if key.startswith(unet_key):
+                if key.startswith("model.diffusion_model") and key in checkpoint:
                     unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key)

     new_checkpoint = {}
new_checkpoint = {} new_checkpoint = {}
@@ -1026,6 +1036,15 @@ def convert_open_clip_checkpoint(checkpoint):
     return text_model

+def replace_checkpoint_vae(checkpoint, vae_path: str):
+    if vae_path.endswith(".safetensors"):
+        vae_ckpt = load_file(vae_path)
+    else:
+        vae_ckpt = torch.load(vae_path, map_location="cpu")
+    state_dict = vae_ckpt['state_dict'] if "state_dict" in vae_ckpt else vae_ckpt
+    for vae_key in state_dict:
+        new_key = f'first_stage_model.{vae_key}'
+        checkpoint[new_key] = state_dict[vae_key]

 def load_pipeline_from_original_stable_diffusion_ckpt(
     checkpoint_path: str,
@@ -1038,8 +1057,10 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     extract_ema: bool = True,
     upcast_attn: bool = False,
     vae: AutoencoderKL = None,
+    vae_path: str = None,
     precision: torch.dtype = torch.float32,
     return_generator_pipeline: bool = False,
+    scan_needed: bool = True,
 ) -> Union[StableDiffusionPipeline, StableDiffusionGeneratorPipeline]:
     """
     Load a Stable Diffusion pipeline object from a CompVis-style `.ckpt`/`.safetensors` file and (ideally) a `.yaml`
@@ -1067,6 +1088,8 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     :param precision: precision to use - torch.float16, torch.float32 or torch.autocast
     :param upcast_attention: Whether the attention computation should always be upcasted. This is necessary when
         running stable diffusion 2.1.
+    :param vae: A diffusers VAE to load into the pipeline.
+    :param vae_path: Path to a checkpoint VAE that will be converted into diffusers and loaded into the pipeline.
     """

     with warnings.catch_warnings():
@@ -1074,12 +1097,13 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
         verbosity = dlogging.get_verbosity()
         dlogging.set_verbosity_error()

-        checkpoint = (
-            torch.load(checkpoint_path)
-            if Path(checkpoint_path).suffix == ".ckpt"
-            else load_file(checkpoint_path)
-        )
+        if Path(checkpoint_path).suffix == '.ckpt':
+            if scan_needed:
+                ModelManager.scan_model(checkpoint_path, checkpoint_path)
+            checkpoint = torch.load(checkpoint_path)
+        else:
+            checkpoint = load_file(checkpoint_path)

         cache_dir = global_cache_dir("hub")
         pipeline_class = (
             StableDiffusionGeneratorPipeline
@@ -1091,7 +1115,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     if "global_step" in checkpoint:
         global_step = checkpoint["global_step"]
     else:
         print("  | global_step key not found in model")
         global_step = None

     # sometimes there is a state_dict key and sometimes not
@@ -1202,9 +1226,19 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     unet.load_state_dict(converted_unet_checkpoint)

-    # Convert the VAE model, or use the one passed
-    if not vae:
-        print(" | Using checkpoint model's original VAE")
+    # If a replacement VAE path was specified, we'll incorporate that into
+    # the checkpoint model and then convert it
+    if vae_path:
+        print(f" | Converting VAE {vae_path}")
+        replace_checkpoint_vae(checkpoint, vae_path)
+    # otherwise we use the original VAE, provided that
+    # an externally loaded diffusers VAE was not passed
+    elif not vae:
+        print(" | Using checkpoint model's original VAE")
+
+    if vae:
+        print(" | Using replacement diffusers VAE")
+    else:  # convert the original or replacement VAE
         vae_config = create_vae_diffusers_config(
             original_config, image_size=image_size
         )
@@ -1214,8 +1248,6 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
         vae = AutoencoderKL(**vae_config)
         vae.load_state_dict(converted_vae_checkpoint)
-    else:
-        print(" | Using external VAE specified in config")

     # Convert the text model.
     model_type = pipeline_type
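The new `replace_checkpoint_vae` helper folds a standalone VAE checkpoint into the main LDM checkpoint by re-keying its weights under the `first_stage_model.` prefix, so the normal VAE conversion path then picks up the replacement weights. A toy illustration with plain dicts in place of tensors:

```python
checkpoint = {"model.diffusion_model.x": 1, "first_stage_model.encoder.w": 2}
vae_state_dict = {"encoder.w": 99, "decoder.w": 42}

# Re-key the replacement VAE under the LDM prefix, overwriting the original.
for vae_key, value in vae_state_dict.items():
    checkpoint[f"first_stage_model.{vae_key}"] = value

print(checkpoint["first_stage_model.encoder.w"])  # 99 (replaced)
print(checkpoint["first_stage_model.decoder.w"])  # 42 (added)
```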

@@ -34,7 +34,7 @@ from picklescan.scanner import scan_file_path
 from invokeai.backend.globals import Globals, global_cache_dir

 from ..stable_diffusion import StableDiffusionGeneratorPipeline
-from ..util import CUDA_DEVICE, CPU_DEVICE, ask_user, download_with_resume
+from ..util import CUDA_DEVICE, ask_user, download_with_resume

@@ -45,9 +45,6 @@ class SDLegacyType(Enum):
     UNKNOWN = 99

 DEFAULT_MAX_MODELS = 2
-VAE_TO_REPO_ID = {  # hack, see note in convert_and_import()
-    "vae-ft-mse-840000-ema-pruned": "stabilityai/sd-vae-ft-mse",
-}

 class ModelManager(object):
     '''
@@ -285,13 +282,13 @@ class ModelManager(object):
             self.stack.remove(model_name)
         if delete_files:
             if weights:
-                print(f"** deleting file {weights}")
+                print(f"** Deleting file {weights}")
                 Path(weights).unlink(missing_ok=True)
             elif path:
-                print(f"** deleting directory {path}")
+                print(f"** Deleting directory {path}")
                 rmtree(path, ignore_errors=True)
             elif repo_id:
-                print(f"** deleting the cached model directory for {repo_id}")
+                print(f"** Deleting the cached model directory for {repo_id}")
                 self._delete_model_from_cache(repo_id)

     def add_model(
@@ -362,6 +359,7 @@ class ModelManager(object):
             raise NotImplementedError(
                 f"Unknown model format {model_name}: {model_format}"
             )
+        self._add_embeddings_to_model(model)

         # usage statistics
         toc = time.time()
@@ -381,9 +379,9 @@ class ModelManager(object):
         print(f">> Loading diffusers model from {name_or_path}")
         if using_fp16:
             print("  | Using faster float16 precision")
         else:
             print("  | Using more accurate float32 precision")

         # TODO: scan weights maybe?
         pipeline_args: dict[str, Any] = dict(
@@ -434,9 +432,7 @@ class ModelManager(object):
         # square images???
         width = pipeline.unet.config.sample_size * pipeline.vae_scale_factor
         height = width
-
         print(f"  | Default image dimensions = {width} x {height}")
-        self._add_embeddings_to_model(pipeline)

         return pipeline, width, height, model_hash
@@ -457,15 +453,21 @@ class ModelManager(object):
         from . import load_pipeline_from_original_stable_diffusion_ckpt

-        self.offload_model(self.current_model)
-        if vae_config := self._choose_diffusers_vae(model_name):
-            vae = self._load_vae(vae_config)
+        try:
+            if self.list_models()[self.current_model]['status'] == 'active':
+                self.offload_model(self.current_model)
+        except Exception as e:
+            pass
+
+        vae_path = None
+        if vae:
+            vae_path = vae if os.path.isabs(vae) else os.path.normpath(os.path.join(Globals.root, vae))

         if self._has_cuda():
             torch.cuda.empty_cache()

         pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
             checkpoint_path=weights,
             original_config_file=config,
-            vae=vae,
+            vae_path=vae_path,
             return_generator_pipeline=True,
             precision=torch.float16 if self.precision == "float16" else torch.float32,
         )
@@ -473,7 +475,6 @@ class ModelManager(object):
             pipeline.enable_offload_submodels(self.device)
         else:
             pipeline.to(self.device)
-
         return (
             pipeline,
             width,
@@ -512,18 +513,20 @@ class ModelManager(object):
         print(f">> Offloading {model_name} to CPU")
         model = self.models[model_name]["model"]
         model.offload_all()
+        self.current_model = None

         gc.collect()
         if self._has_cuda():
             torch.cuda.empty_cache()

+    @classmethod
     def scan_model(self, model_name, checkpoint):
         """
         Apply picklescanner to the indicated checkpoint and issue a warning
         and option to exit if an infected file is identified.
         """
         # scan model
-        print(f">> Scanning Model: {model_name}")
+        print(f"   | Scanning Model: {model_name}")
         scan_result = scan_file_path(checkpoint)
         if scan_result.infected_files != 0:
             if scan_result.infected_files == 1:
@@ -546,7 +549,7 @@ class ModelManager(object):
                 print("### Exiting InvokeAI")
                 sys.exit()
         else:
-            print(">> Model scanned ok")
+            print("   | Model scanned ok")

     def import_diffuser_model(
         self,
@@ -665,7 +668,7 @@ class ModelManager(object):
         print(f">> Probing {thing} for import")

         if thing.startswith(("http:", "https:", "ftp:")):
             print(f"   | {thing} appears to be a URL")
             model_path = self._resolve_path(
                 thing, "models/ldm/stable-diffusion-v1"
             )  # _resolve_path does a download if needed
@@ -673,15 +676,15 @@ class ModelManager(object):
         elif Path(thing).is_file() and thing.endswith((".ckpt", ".safetensors")):
             if Path(thing).stem in ["model", "diffusion_pytorch_model"]:
                 print(
                     f"   | {Path(thing).name} appears to be part of a diffusers model. Skipping import"
                 )
                 return
             else:
                 print(f"   | {thing} appears to be a checkpoint file on disk")
                 model_path = self._resolve_path(thing, "models/ldm/stable-diffusion-v1")

         elif Path(thing).is_dir() and Path(thing, "model_index.json").exists():
             print(f"   | {thing} appears to be a diffusers file on disk")
             model_name = self.import_diffuser_model(
                 thing,
                 vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
@@ -692,13 +695,13 @@ class ModelManager(object):
         elif Path(thing).is_dir():
             if (Path(thing) / "model_index.json").exists():
                 print(f"   | {thing} appears to be a diffusers model.")
                 model_name = self.import_diffuser_model(
                     thing, commit_to_conf=commit_to_conf
                 )
             else:
                 print(
                     f"   |{thing} appears to be a directory. Will scan for models to import"
                 )
                 for m in list(Path(thing).rglob("*.ckpt")) + list(
                     Path(thing).rglob("*.safetensors")
@@ -710,7 +713,7 @@ class ModelManager(object):
             return model_name

         elif re.match(r"^[\w.+-]+/[\w.+-]+$", thing):
             print(f"   | {thing} appears to be a HuggingFace diffusers repo_id")
             model_name = self.import_diffuser_model(
                 thing, commit_to_conf=commit_to_conf
             )
@@ -727,32 +730,33 @@ class ModelManager(object):
             return

         if model_path.stem in self.config:  # already imported
             print("   | Already imported. Skipping")
             return model_path.stem

         # another round of heuristics to guess the correct config file.
-        checkpoint = (
-            torch.load(model_path)
-            if model_path.suffix == ".ckpt"
-            else safetensors.torch.load_file(model_path)
-        )
+        checkpoint = None
+        if model_path.suffix.endswith((".ckpt", ".pt")):
+            self.scan_model(model_path, model_path)
+            checkpoint = torch.load(model_path)
+        else:
+            checkpoint = safetensors.torch.load_file(model_path)

         # additional probing needed if no config file provided
         if model_config_file is None:
             model_type = self.probe_model_type(checkpoint)
             if model_type == SDLegacyType.V1:
                 print("   | SD-v1 model detected")
                 model_config_file = Path(
                     Globals.root, "configs/stable-diffusion/v1-inference.yaml"
                 )
             elif model_type == SDLegacyType.V1_INPAINT:
                 print("   | SD-v1 inpainting model detected")
                 model_config_file = Path(
                     Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
                 )
             elif model_type == SDLegacyType.V2_v:
                 print(
                     "   | SD-v2-v model detected; model will be converted to diffusers format"
                 )
                 model_config_file = Path(
                     Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
@@ -760,7 +764,7 @@ class ModelManager(object):
                 convert = True
             elif model_type == SDLegacyType.V2_e:
                 print(
                     "   | SD-v2-e model detected; model will be converted to diffusers format"
                 )
                 model_config_file = Path(
                     Globals.root, "configs/stable-diffusion/v2-inference.yaml"
@@ -788,18 +792,21 @@ class ModelManager(object):
             model_description=description,
             original_config_file=model_config_file,
             commit_to_conf=commit_to_conf,
+            scan_needed=False,
         )
         return model_name
     def convert_and_import(
         self,
         ckpt_path: Path,
         diffusers_path: Path,
         model_name=None,
         model_description=None,
-        vae=None,
+        vae: dict = None,
+        vae_path: Path = None,
         original_config_file: Path = None,
         commit_to_conf: Path = None,
+        scan_needed: bool = True,
     ) -> str:
         """
         Convert a legacy ckpt weights file to diffuser model and import
@@ -827,18 +834,23 @@ class ModelManager(object):
         try:
             # By passing the specified VAE to the conversion function, the autoencoder
             # will be built into the model rather than tacked on afterward via the config file
-            vae_model = self._load_vae(vae) if vae else None
+            vae_model = None
+            if vae:
+                vae_model = self._load_vae(vae)
+                vae_path = None
             convert_ckpt_to_diffusers(
                 ckpt_path,
                 diffusers_path,
                 extract_ema=True,
                 original_config_file=original_config_file,
                 vae=vae_model,
+                vae_path=vae_path,
+                scan_needed=scan_needed,
             )
             print(
                 f"  | Success. Optimized model is now located at {str(diffusers_path)}"
             )
             print(f"  | Writing new config file entry for {model_name}")
             new_config = dict(
                 path=str(diffusers_path),
                 description=model_description,
@@ -849,7 +861,7 @@ class ModelManager(object):
             self.add_model(model_name, new_config, True)
             if commit_to_conf:
                 self.commit(commit_to_conf)
-            print(">> Conversion succeeded")
+            print("  | Conversion succeeded")
         except Exception as e:
             print(f"** Conversion failed: {str(e)}")
             print(
@@ -879,36 +891,6 @@ class ModelManager(object):
         return search_folder, found_models

-    def _choose_diffusers_vae(
-        self, model_name: str, vae: str = None
-    ) -> Union[dict, str]:
-        # In the event that the original entry is using a custom ckpt VAE, we try to
-        # map that VAE onto a diffuser VAE using a hard-coded dictionary.
-        # I would prefer to do this differently: We load the ckpt model into memory, swap the
-        # VAE in memory, and then pass that to convert_ckpt_to_diffuser() so that the swapped
-        # VAE is built into the model. However, when I tried this I got obscure key errors.
-        if vae:
-            return vae
-        if model_name in self.config and (
-            vae_ckpt_path := self.model_info(model_name).get("vae", None)
-        ):
-            vae_basename = Path(vae_ckpt_path).stem
-            diffusers_vae = None
-            if diffusers_vae := VAE_TO_REPO_ID.get(vae_basename, None):
-                print(
-                    f">> {vae_basename} VAE corresponds to known {diffusers_vae} diffusers version"
-                )
-                vae = {"repo_id": diffusers_vae}
-            else:
-                print(
-                    f'** Custom VAE "{vae_basename}" found, but corresponding diffusers model unknown'
-                )
-                print(
-                    '** Using "stabilityai/sd-vae-ft-mse"; If this isn\'t right, please edit the model config'
-                )
-                vae = {"repo_id": "stabilityai/sd-vae-ft-mse"}
-        return vae
-
     def _make_cache_room(self) -> None:
         num_loaded_models = len(self.models)
         if num_loaded_models >= self.max_loaded_models:
@@ -1105,7 +1087,7 @@ class ModelManager(object):
             with open(hashpath) as f:
                 hash = f.read()
             return hash
         print("   | Calculating sha256 hash of model files")
         tic = time.time()
         sha = hashlib.sha256()
         count = 0
@@ -1117,7 +1099,7 @@ class ModelManager(object):
                 sha.update(chunk)
         hash = sha.hexdigest()
         toc = time.time()
         print(f"   | sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic))
         with open(hashpath, "w") as f:
             f.write(hash)
         return hash
@@ -1162,12 +1144,12 @@ class ModelManager(object):
             local_files_only=not Globals.internet_available,
         )
         print(f"  | Loading diffusers VAE from {name_or_path}")
         if using_fp16:
             vae_args.update(torch_dtype=torch.float16)
             fp_args_list = [{"revision": "fp16"}, {}]
         else:
             print("  | Using more accurate float32 precision")
             fp_args_list = [{}]

         vae = None
@@ -1208,7 +1190,7 @@ class ModelManager(object):
                 hashes_to_delete.add(revision.commit_hash)
         strategy = cache_info.delete_revisions(*hashes_to_delete)
         print(
-            f"** deletion of this model is expected to free {strategy.expected_freed_size_str}"
+            f"** Deletion of this model is expected to free {strategy.expected_freed_size_str}"
         )
         strategy.execute()
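`scan_model` (now also reachable as a classmethod from the conversion path) wraps picklescan's static analysis of pickle opcodes. A minimal standalone use of the same library call, with the exit-on-infection policy simplified to a boolean:

```python
from picklescan.scanner import scan_file_path

def is_checkpoint_safe(path: str) -> bool:
    # Statically inspects the pickle payload of a .ckpt/.pt archive
    # without executing it.
    result = scan_file_path(path)
    return result.infected_files == 0

if not is_checkpoint_safe("model.ckpt"):  # hypothetical file name
    raise SystemExit("refusing to load a potentially malicious checkpoint")
```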

@@ -6,7 +6,6 @@ The interface is through the Concepts() object.
 """
 import os
 import re
-import traceback
 from typing import Callable
 from urllib import error as ul_error
 from urllib import request
@@ -15,7 +14,6 @@ from huggingface_hub import (
     HfApi,
     HfFolder,
     ModelFilter,
-    ModelSearchArguments,
     hf_hub_url,
 )
@@ -84,7 +82,7 @@ class HuggingFaceConceptsLibrary(object):
         """
         if not concept_name in self.list_concepts():
             print(
-                f"This concept is not a local embedding trigger, nor is it a HuggingFace concept. Generation will continue without the concept."
+                f"{concept_name} is not a local embedding trigger, nor is it a HuggingFace concept. Generation will continue without the concept."
             )
             return None
         return self.get_concept_file(concept_name.lower(), "learned_embeds.bin")
@@ -236,7 +234,7 @@ class HuggingFaceConceptsLibrary(object):
         except ul_error.HTTPError as e:
             if e.code == 404:
                 print(
-                    f"This concept is not known to the Hugging Face library. Generation will continue without the concept."
+                    f"Concept {concept_name} is not known to the Hugging Face library. Generation will continue without the concept."
                 )
             else:
                 print(
@@ -246,7 +244,7 @@ class HuggingFaceConceptsLibrary(object):
             return False
         except ul_error.URLError as e:
             print(
-                f"ERROR: {str(e)}. This may reflect a network issue. Generation will continue without the concept."
+                f"ERROR while downloading {concept_name}: {str(e)}. This may reflect a network issue. Generation will continue without the concept."
             )
             os.rmdir(dest)
             return False

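All three message fixes follow one failure policy: name the concept, say why it was skipped, and let generation continue rather than abort. A condensed sketch of the download guard (the function name and the plain `urlretrieve` flow are simplifications of the library code):

```python
from urllib import error as ul_error
from urllib import request

def download_concept_file(concept_name: str, url: str, dest: str) -> bool:
    """Fetch one concept file; degrade gracefully instead of raising."""
    try:
        request.urlretrieve(url, dest)
        return True
    except ul_error.HTTPError as e:
        if e.code == 404:
            print(f"Concept {concept_name} is not known to the Hugging Face library. "
                  "Generation will continue without the concept.")
        else:
            print(f"ERROR while downloading {concept_name}: HTTP {e.code}.")
        return False
    except ul_error.URLError as e:
        print(f"ERROR while downloading {concept_name}: {e}. This may reflect a "
              "network issue. Generation will continue without the concept.")
        return False
```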
View File
@@ -1022,7 +1022,7 @@ class InvokeAIWebServer:
        "RGB"
    )

-def image_progress(sample, step):
+def image_progress(intermediate_state: PipelineIntermediateState):
    if self.canceled.is_set():
        raise CanceledException

@@ -1030,6 +1030,14 @@ class InvokeAIWebServer:
    nonlocal generation_parameters
    nonlocal progress

+    step = intermediate_state.step
+    if intermediate_state.predicted_original is not None:
+        # Some schedulers report not only the noisy latents at the current timestep,
+        # but also their estimate so far of what the de-noised latents will be.
+        sample = intermediate_state.predicted_original
+    else:
+        sample = intermediate_state.latents
+
    generation_messages = {
        "txt2img": "common.statusGeneratingTextToImage",
        "img2img": "common.statusGeneratingImageToImage",

@@ -1302,16 +1310,9 @@ class InvokeAIWebServer:
    progress.set_current_iteration(progress.current_iteration + 1)

-def diffusers_step_callback_adapter(*cb_args, **kwargs):
-    if isinstance(cb_args[0], PipelineIntermediateState):
-        progress_state: PipelineIntermediateState = cb_args[0]
-        return image_progress(progress_state.latents, progress_state.step)
-    else:
-        return image_progress(*cb_args, **kwargs)
-
self.generate.prompt2image(
    **generation_parameters,
-    step_callback=diffusers_step_callback_adapter,
+    step_callback=image_progress,
    image_callback=image_done,
)
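The adapter shim is gone because `prompt2image` now hands the callback a `PipelineIntermediateState` directly, and the callback prefers the scheduler's running estimate of the final image (`predicted_original`) over the raw noisy latents whenever one is available. A sketch of a callback written against that shape (the dataclass here is pared down to the three fields the diff uses; the real class carries more):

```python
from dataclasses import dataclass
from typing import Optional

import torch

@dataclass
class PipelineIntermediateState:
    # Reduced to the fields used above; illustrative, not the full class.
    step: int
    latents: torch.Tensor
    predicted_original: Optional[torch.Tensor] = None

def step_callback(state: PipelineIntermediateState) -> None:
    # Prefer the scheduler's estimate of the de-noised result: it previews
    # far better than the noisy latents at the current timestep.
    if state.predicted_original is not None:
        sample = state.predicted_original
    else:
        sample = state.latents
    print(f"step {state.step}: preview tensor of shape {tuple(sample.shape)}")
```

Because the callback takes the state object directly, it can be passed straight through as `step_callback=image_progress` with no adapter in between.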
View File
@@ -772,16 +772,10 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
        original_config_file = Path(model_info["config"])
        model_name = model_name_or_path
        model_description = model_info["description"]
-        vae = model_info["vae"]
+        vae_path = model_info.get("vae")
    else:
        print(f"** {model_name_or_path} is not a legacy .ckpt weights file")
        return
-    if vae_repo := invokeai.backend.model_management.model_manager.VAE_TO_REPO_ID.get(
-        Path(vae).stem
-    ):
-        vae_repo = dict(repo_id=vae_repo)
-    else:
-        vae_repo = None
    model_name = manager.convert_and_import(
        ckpt_path,
        diffusers_path=Path(

@@ -790,7 +784,7 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
        model_name=model_name,
        model_description=model_description,
        original_config_file=original_config_file,
-        vae=vae_repo,
+        vae_path=vae_path,
    )
else:
    try:
View File
@@ -1,4 +1,6 @@
import React, { PropsWithChildren } from 'react';
+import { IAIPopoverProps } from '../web/src/common/components/IAIPopover';
+import { IAIIconButtonProps } from '../web/src/common/components/IAIIconButton';

export {};

@@ -50,9 +52,27 @@ declare module '@invoke-ai/invoke-ai-ui' {
  declare class InvokeAiLogoComponent extends React.Component<InvokeAILogoComponentProps> {
    public constructor(props: InvokeAILogoComponentProps);
  }
+  declare class IAIPopover extends React.Component<IAIPopoverProps> {
+    public constructor(props: IAIPopoverProps);
+  }
+  declare class IAIIconButton extends React.Component<IAIIconButtonProps> {
+    public constructor(props: IAIIconButtonProps);
+  }
+  declare class SettingsModal extends React.Component<SettingsModalProps> {
+    public constructor(props: SettingsModalProps);
+  }
}

declare function Invoke(props: PropsWithChildren): JSX.Element;

-export { ThemeChanger, InvokeAiLogoComponent };
+export {
+  ThemeChanger,
+  InvokeAiLogoComponent,
+  IAIPopover,
+  IAIIconButton,
+  SettingsModal,
+};
export = Invoke;
View File
@@ -6,7 +6,6 @@
    "prepare": "cd ../../../ && husky install invokeai/frontend/web/.husky",
    "dev": "concurrently \"vite dev\" \"yarn run theme:watch\"",
    "build": "yarn run lint && vite build",
-    "build:package": "vite build --mode=package",
    "preview": "vite preview",
    "lint:madge": "madge --circular src/main.tsx",
    "lint:eslint": "eslint --max-warnings=0 .",
View File
@@ -8,7 +8,6 @@
    "darkTheme": "داكن",
    "lightTheme": "فاتح",
    "greenTheme": "أخضر",
-    "text2img": "نص إلى صورة",
    "img2img": "صورة إلى صورة",
    "unifiedCanvas": "لوحة موحدة",
    "nodes": "عقد",
View File
@@ -7,7 +7,6 @@
    "darkTheme": "Dunkel",
    "lightTheme": "Hell",
    "greenTheme": "Grün",
-    "text2img": "Text zu Bild",
    "img2img": "Bild zu Bild",
    "nodes": "Knoten",
    "langGerman": "Deutsch",
View File
@@ -8,7 +8,6 @@
    "darkTheme": "Oscuro",
    "lightTheme": "Claro",
    "greenTheme": "Verde",
-    "text2img": "Texto a Imagen",
    "img2img": "Imagen a Imagen",
    "unifiedCanvas": "Lienzo Unificado",
    "nodes": "Nodos",

@@ -70,7 +69,11 @@
    "langHebrew": "Hebreo",
    "pinOptionsPanel": "Pin del panel de opciones",
    "loading": "Cargando",
-    "loadingInvokeAI": "Cargando invocar a la IA"
+    "loadingInvokeAI": "Cargando invocar a la IA",
+    "postprocessing": "Tratamiento posterior",
+    "txt2img": "De texto a imagen",
+    "accept": "Aceptar",
+    "cancel": "Cancelar"
  },
  "gallery": {
    "generations": "Generaciones",

@@ -404,7 +407,8 @@
    "none": "ninguno",
    "pickModelType": "Elige el tipo de modelo",
    "v2_768": "v2 (768px)",
-    "addDifference": "Añadir una diferencia"
+    "addDifference": "Añadir una diferencia",
+    "scanForModels": "Buscar modelos"
  },
  "parameters": {
    "images": "Imágenes",

@@ -574,7 +578,7 @@
    "autoSaveToGallery": "Guardar automáticamente en galería",
    "saveBoxRegionOnly": "Guardar solo región dentro de la caja",
    "limitStrokesToBox": "Limitar trazos a la caja",
-    "showCanvasDebugInfo": "Mostrar información de depuración de lienzo",
+    "showCanvasDebugInfo": "Mostrar la información adicional del lienzo",
    "clearCanvasHistory": "Limpiar historial de lienzo",
    "clearHistory": "Limpiar historial",
    "clearCanvasHistoryMessage": "Limpiar el historial de lienzo también restablece completamente el lienzo unificado. Esto incluye todo el historial de deshacer/rehacer, las imágenes en el área de preparación y la capa base del lienzo.",
View File
@@ -8,7 +8,6 @@
    "darkTheme": "Sombre",
    "lightTheme": "Clair",
    "greenTheme": "Vert",
-    "text2img": "Texte en image",
    "img2img": "Image en image",
    "unifiedCanvas": "Canvas unifié",
    "nodes": "Nœuds",

@@ -47,7 +46,19 @@
    "statusLoadingModel": "Chargement du modèle",
    "statusModelChanged": "Modèle changé",
    "discordLabel": "Discord",
-    "githubLabel": "Github"
+    "githubLabel": "Github",
+    "accept": "Accepter",
+    "statusMergingModels": "Mélange des modèles",
+    "loadingInvokeAI": "Chargement de Invoke AI",
+    "cancel": "Annuler",
+    "langEnglish": "Anglais",
+    "statusConvertingModel": "Conversion du modèle",
+    "statusModelConverted": "Modèle converti",
+    "loading": "Chargement",
+    "pinOptionsPanel": "Épingler la page d'options",
+    "statusMergedModels": "Modèles mélangés",
+    "txt2img": "Texte vers image",
+    "postprocessing": "Post-Traitement"
  },
  "gallery": {
    "generations": "Générations",

@@ -518,5 +529,15 @@
    "betaDarkenOutside": "Assombrir à l'extérieur",
    "betaLimitToBox": "Limiter à la boîte",
    "betaPreserveMasked": "Conserver masqué"
+  },
+  "accessibility": {
+    "uploadImage": "Charger une image",
+    "reset": "Réinitialiser",
+    "nextImage": "Image suivante",
+    "previousImage": "Image précédente",
+    "useThisParameter": "Utiliser ce paramètre",
+    "zoomIn": "Zoom avant",
+    "zoomOut": "Zoom arrière",
+    "showOptionsPanel": "Montrer la page d'options"
  }
}
View File
@@ -125,7 +125,6 @@
    "langSimplifiedChinese": "סינית",
    "langUkranian": "אוקראינית",
    "langSpanish": "ספרדית",
-    "text2img": "טקסט לתמונה",
    "img2img": "תמונה לתמונה",
    "unifiedCanvas": "קנבס מאוחד",
    "nodes": "צמתים",
View File
@@ -8,7 +8,6 @@
    "darkTheme": "Scuro",
    "lightTheme": "Chiaro",
    "greenTheme": "Verde",
-    "text2img": "Testo a Immagine",
    "img2img": "Immagine a Immagine",
    "unifiedCanvas": "Tela unificata",
    "nodes": "Nodi",

@@ -70,7 +69,11 @@
    "loading": "Caricamento in corso",
    "oceanTheme": "Oceano",
    "langHebrew": "Ebraico",
-    "loadingInvokeAI": "Caricamento Invoke AI"
+    "loadingInvokeAI": "Caricamento Invoke AI",
+    "postprocessing": "Post Elaborazione",
+    "txt2img": "Testo a Immagine",
+    "accept": "Accetta",
+    "cancel": "Annulla"
  },
  "gallery": {
    "generations": "Generazioni",

@@ -404,7 +407,8 @@
    "v2_768": "v2 (768px)",
    "none": "niente",
    "addDifference": "Aggiungi differenza",
-    "pickModelType": "Scegli il tipo di modello"
+    "pickModelType": "Scegli il tipo di modello",
+    "scanForModels": "Cerca modelli"
  },
  "parameters": {
    "images": "Immagini",

@@ -574,7 +578,7 @@
    "autoSaveToGallery": "Salvataggio automatico nella Galleria",
    "saveBoxRegionOnly": "Salva solo l'area di selezione",
    "limitStrokesToBox": "Limita i tratti all'area di selezione",
-    "showCanvasDebugInfo": "Mostra informazioni di debug della Tela",
+    "showCanvasDebugInfo": "Mostra ulteriori informazioni sulla Tela",
    "clearCanvasHistory": "Cancella cronologia Tela",
    "clearHistory": "Cancella la cronologia",
    "clearCanvasHistoryMessage": "La cancellazione della cronologia della tela lascia intatta la tela corrente, ma cancella in modo irreversibile la cronologia degli annullamenti e dei ripristini.",

@@ -612,7 +616,7 @@
    "copyMetadataJson": "Copia i metadati JSON",
    "exitViewer": "Esci dal visualizzatore",
    "zoomIn": "Zoom avanti",
-    "zoomOut": "Zoom Indietro",
+    "zoomOut": "Zoom indietro",
    "rotateCounterClockwise": "Ruotare in senso antiorario",
    "rotateClockwise": "Ruotare in senso orario",
    "flipHorizontally": "Capovolgi orizzontalmente",
View File
@@ -11,7 +11,6 @@
    "langArabic": "العربية",
    "langEnglish": "English",
    "langDutch": "Nederlands",
-    "text2img": "텍스트->이미지",
    "unifiedCanvas": "통합 캔버스",
    "langFrench": "Français",
    "langGerman": "Deutsch",
View File
@@ -8,7 +8,6 @@
    "darkTheme": "Donker",
    "lightTheme": "Licht",
    "greenTheme": "Groen",
-    "text2img": "Tekst naar afbeelding",
    "img2img": "Afbeelding naar afbeelding",
    "unifiedCanvas": "Centraal canvas",
    "nodes": "Knooppunten",
View File
@@ -8,7 +8,6 @@
    "darkTheme": "Ciemny",
    "lightTheme": "Jasny",
    "greenTheme": "Zielony",
-    "text2img": "Tekst na obraz",
    "img2img": "Obraz na obraz",
    "unifiedCanvas": "Tryb uniwersalny",
    "nodes": "Węzły",
View File
@@ -20,7 +20,6 @@
    "langSpanish": "Espanhol",
    "langRussian": "Русский",
    "langUkranian": "Украї́нська",
-    "text2img": "Texto para Imagem",
    "img2img": "Imagem para Imagem",
    "unifiedCanvas": "Tela Unificada",
    "nodes": "Nós",
View File
@@ -8,7 +8,6 @@
    "darkTheme": "Noite",
    "lightTheme": "Dia",
    "greenTheme": "Verde",
-    "text2img": "Texto Para Imagem",
    "img2img": "Imagem Para Imagem",
    "unifiedCanvas": "Tela Unificada",
    "nodes": "Nódulos",
View File
@@ -8,7 +8,6 @@
    "darkTheme": "Темная",
    "lightTheme": "Светлая",
    "greenTheme": "Зеленая",
-    "text2img": "Изображение из текста (text2img)",
    "img2img": "Изображение в изображение (img2img)",
    "unifiedCanvas": "Универсальный холст",
    "nodes": "Ноды",
View File
@@ -8,7 +8,6 @@
    "darkTheme": "Темна",
    "lightTheme": "Світла",
    "greenTheme": "Зелена",
-    "text2img": "Зображення із тексту (text2img)",
    "img2img": "Зображення із зображення (img2img)",
    "unifiedCanvas": "Універсальне полотно",
    "nodes": "Вузли",
View File
@@ -8,7 +8,6 @@
    "darkTheme": "暗色",
    "lightTheme": "亮色",
    "greenTheme": "绿色",
-    "text2img": "文字到图像",
    "img2img": "图像到图像",
    "unifiedCanvas": "统一画布",
    "nodes": "节点",
View File
@@ -33,7 +33,6 @@
    "langBrPortuguese": "巴西葡萄牙語",
    "langRussian": "俄語",
    "langSpanish": "西班牙語",
-    "text2img": "文字到圖像",
    "unifiedCanvas": "統一畫布"
  }
}
View File
@@ -8,7 +8,7 @@ import {
} from '@chakra-ui/react';
import { memo, ReactNode } from 'react';

-type IAIPopoverProps = PopoverProps & {
+export type IAIPopoverProps = PopoverProps & {
  triggerComponent: ReactNode;
  triggerContainerProps?: BoxProps;
  children: ReactNode;
View File
@@ -2,6 +2,15 @@ import Component from './component';
import InvokeAiLogoComponent from './features/system/components/InvokeAILogoComponent';
import ThemeChanger from './features/system/components/ThemeChanger';
+import IAIPopover from './common/components/IAIPopover';
+import IAIIconButton from './common/components/IAIIconButton';
+import SettingsModal from './features/system/components/SettingsModal/SettingsModal';

export default Component;
-export { InvokeAiLogoComponent, ThemeChanger };
+export {
+  InvokeAiLogoComponent,
+  ThemeChanger,
+  IAIPopover,
+  IAIIconButton,
+  SettingsModal,
+};
View File
@@ -34,7 +34,7 @@ const ReactPanZoomButtons = ({
    <IAIIconButton
      icon={<BiZoomIn />}
      aria-label={t('accessibility.zoomIn')}
-      tooltip="Zoom In"
+      tooltip={t('accessibility.zoomIn')}
      onClick={() => zoomIn()}
      fontSize={20}
    />

@@ -42,7 +42,7 @@ const ReactPanZoomButtons = ({
    <IAIIconButton
      icon={<BiZoomOut />}
      aria-label={t('accessibility.zoomOut')}
-      tooltip="Zoom Out"
+      tooltip={t('accessibility.zoomOut')}
      onClick={() => zoomOut()}
      fontSize={20}
    />

@@ -50,7 +50,7 @@ const ReactPanZoomButtons = ({
    <IAIIconButton
      icon={<BiRotateLeft />}
      aria-label={t('accessibility.rotateCounterClockwise')}
-      tooltip="Rotate Counter-Clockwise"
+      tooltip={t('accessibility.rotateCounterClockwise')}
      onClick={rotateCounterClockwise}
      fontSize={20}
    />

@@ -58,7 +58,7 @@ const ReactPanZoomButtons = ({
    <IAIIconButton
      icon={<BiRotateRight />}
      aria-label={t('accessibility.rotateClockwise')}
-      tooltip="Rotate Clockwise"
+      tooltip={t('accessibility.rotateClockwise')}
      onClick={rotateClockwise}
      fontSize={20}
    />

@@ -66,7 +66,7 @@ const ReactPanZoomButtons = ({
    <IAIIconButton
      icon={<MdFlip />}
      aria-label={t('accessibility.flipHorizontally')}
-      tooltip="Flip Horizontally"
+      tooltip={t('accessibility.flipHorizontally')}
      onClick={flipHorizontally}
      fontSize={20}
    />

@@ -74,7 +74,7 @@ const ReactPanZoomButtons = ({
    <IAIIconButton
      icon={<MdFlip style={{ transform: 'rotate(90deg)' }} />}
      aria-label={t('accessibility.flipVertically')}
-      tooltip="Flip Vertically"
+      tooltip={t('accessibility.flipVertically')}
      onClick={flipVertically}
      fontSize={20}
    />

@@ -82,7 +82,7 @@ const ReactPanZoomButtons = ({
    <IAIIconButton
      icon={<BiReset />}
      aria-label={t('accessibility.reset')}
-      tooltip="Reset"
+      tooltip={t('accessibility.reset')}
      onClick={() => {
        resetTransform();
        reset();
View File
@@ -1,4 +1,3 @@
-import path from 'path';
import react from '@vitejs/plugin-react-swc';
import { visualizer } from 'rollup-plugin-visualizer';
import { defineConfig, PluginOption } from 'vite';

@@ -58,26 +57,6 @@ export default defineConfig(({ mode }) => {
      // sourcemap: true, // this can be enabled if needed, it adds ovwer 15MB to the commit
    },
  };
-} else if (mode === 'package') {
-  return {
-    ...common,
-    build: {
-      ...common.build,
-      lib: {
-        entry: path.resolve(__dirname, 'src/exports.tsx'),
-        name: 'InvokeAI UI',
-        fileName: (format) => `invoke-ai-ui.${format}.js`,
-      },
-      rollupOptions: {
-        external: ['react', 'react-dom'],
-        output: {
-          globals: {
-            react: 'React',
-          },
-        },
-      },
-    },
-  };
} else {
  return {
    ...common,
View File
@@ -38,16 +38,16 @@ dependencies = [
  "albumentations",
  "click",
  "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
-  "compel==1.0.1",
+  "compel==1.0.4",
  "datasets",
  "diffusers[torch]~=0.14",
  "dnspython==2.2.1",
  "einops",
  "eventlet",
  "facexlib",
-  "fastapi==0.85.0",
-  "fastapi-events==0.6.0",
-  "fastapi-socketio==0.0.9",
+  "fastapi==0.94.1",
+  "fastapi-events==0.8.0",
+  "fastapi-socketio==0.0.10",
  "flask==2.1.3",
  "flask_cors==3.0.10",
  "flask_socketio==5.3.0",

@@ -75,7 +75,7 @@ dependencies = [
  "torchvision>=0.14.1",
  "torchmetrics",
  "transformers~=4.26",
-  "uvicorn[standard]==0.20.0",
+  "uvicorn[standard]==0.21.1",
  "windows-curses; sys_platform=='win32'",
]

@@ -139,8 +139,24 @@ version = { attr = "invokeai.version.__version__" }
"invokeai.configs" = ["*.example", "**/*.yaml", "*.txt"]
"invokeai.frontend.web.dist" = ["**"]

+#=== Begin: PyTest and Coverage
[tool.pytest.ini_options]
-addopts = "-p pytest_cov --junitxml=junit/test-results.xml --cov-report=term:skip-covered --cov=ldm/invoke --cov=backend --cov-branch"
+addopts = "--cov-report term --cov-report html --cov-report xml"
+
+[tool.coverage.run]
+branch = true
+source = ["invokeai"]
+omit = ["*tests*", "*migrations*", ".venv/*", "*.env"]
+
+[tool.coverage.report]
+show_missing = true
+fail_under = 85 # let's set something sensible on Day 1 ...
+
+[tool.coverage.json]
+output = "coverage/coverage.json"
+pretty_print = true
+
+[tool.coverage.html]
+directory = "coverage/html"
+
+[tool.coverage.xml]
+output = "coverage/index.xml"
+#=== End: PyTest and Coverage

[flake8]
max-line-length = 120
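With every `[tool.coverage.*]` table consolidated here, the standalone `.coveragerc` and `pytest.ini` deleted earlier in this commit are no longer needed: coverage.py reads its run, report, html, and xml settings straight from pyproject.toml. A sketch of the intended invocation (assumes pytest-cov is installed; `--cov` switches collection on, while the report formats and output paths come from `addopts` and the tables above):

```python
# Programmatic equivalent of running `pytest --cov=invokeai` at the repo root.
import sys

import pytest

sys.exit(pytest.main(["--cov=invokeai"]))
```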