diff --git a/.github/actions/install-frontend-deps/action.yml b/.github/actions/install-frontend-deps/action.yml index 32b4987249..6152da80c6 100644 --- a/.github/actions/install-frontend-deps/action.yml +++ b/.github/actions/install-frontend-deps/action.yml @@ -9,9 +9,9 @@ runs: node-version: '18' - name: setup pnpm - uses: pnpm/action-setup@v2 + uses: pnpm/action-setup@v4 with: - version: 8 + version: 8.15.6 run_install: false - name: get pnpm store directory diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 6c8fee470e..e29d481b41 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -8,7 +8,7 @@ ## QA Instructions - + ## Merge Plan diff --git a/README.md b/README.md index 4c24ac6206..96c3210c66 100644 --- a/README.md +++ b/README.md @@ -49,6 +49,33 @@ Invoke is available in two editions: More detail, including hardware requirements and manual install instructions, are available in the [installation documentation][installation docs]. +## Docker Container + +We publish official container images in Github Container Registry: https://github.com/invoke-ai/InvokeAI/pkgs/container/invokeai. Both CUDA and ROCm images are available. Check the above link for relevant tags. + +> [!IMPORTANT] +> Ensure that Docker is set up to use the GPU. Refer to [NVIDIA][nvidia docker docs] or [AMD][amd docker docs] documentation. + +### Generate! + +Run the container, modifying the command as necessary: + +```bash +docker run --runtime=nvidia --gpus=all --publish 9090:9090 ghcr.io/invoke-ai/invokeai +``` + +Then open `http://localhost:9090` and install some models using the Model Manager tab to begin generating. + +For ROCm, add `--device /dev/kfd --device /dev/dri` to the `docker run` command. + +### Persist your data + +You will likely want to persist your workspace outside of the container. Use the `--volume /home/myuser/invokeai:/invokeai` flag to mount some local directory (using its **absolute** path) to the `/invokeai` path inside the container. Your generated images and models will reside there. You can use this directory with other InvokeAI installations, or switch between runtime directories as needed. + +### DIY + +Build your own image and customize the environment to match your needs using our `docker-compose` stack. See [README.md](./docker/README.md) in the [docker](./docker) directory. + ## Troubleshooting, FAQ and Support Please review our [FAQ][faq] for solutions to common installation problems and other issues. @@ -126,3 +153,5 @@ Original portions of the software are Copyright © 2024 by respective contributo [latest release link]: https://github.com/invoke-ai/InvokeAI/releases/latest [translation status badge]: https://hosted.weblate.org/widgets/invokeai/-/svg-badge.svg [translation status link]: https://hosted.weblate.org/engage/invokeai/ +[nvidia docker docs]: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html +[amd docker docs]: https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html diff --git a/docker/.env.sample b/docker/.env.sample index aeb69bfd27..eef690a808 100644 --- a/docker/.env.sample +++ b/docker/.env.sample @@ -19,8 +19,9 @@ ## INVOKEAI_PORT is the port on which the InvokeAI web interface will be available # INVOKEAI_PORT=9090 -## GPU_DRIVER can be set to either `nvidia` or `rocm` to enable GPU support in the container accordingly. 
-# GPU_DRIVER=nvidia #| rocm
+## GPU_DRIVER can be set to either `cuda` or `rocm` to enable GPU support in the container accordingly.
+# GPU_DRIVER=cuda #| rocm

 ## CONTAINER_UID can be set to the UID of the user on the host system that should own the files in the container.
+## It is usually not necessary to change this. Use `id -u` on the host system to find the UID.
 # CONTAINER_UID=1000
diff --git a/docker/README.md b/docker/README.md
index 9e7ac15145..fc6edeacd3 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -1,41 +1,75 @@
-# InvokeAI Containerized
+# Invoke in Docker

-All commands should be run within the `docker` directory: `cd docker`
+- Ensure that Docker can use the GPU on your system
+- This documentation assumes Linux, but should work similarly under Windows with WSL2
+- We don't recommend running Invoke in Docker on macOS at this time. It works, but very slowly.

-## Quickstart :rocket:
+## Quickstart :zap:

-On a known working Linux+Docker+CUDA (Nvidia) system, execute `./run.sh` in this directory. It will take a few minutes - depending on your internet speed - to install the core models. Once the application starts up, open `http://localhost:9090` in your browser to Invoke!
+No `docker compose`, no persistence, just a simple one-liner using the official images:

-For more configuration options (using an AMD GPU, custom root directory location, etc): read on.
+**CUDA:**

-## Detailed setup
+```bash
+docker run --runtime=nvidia --gpus=all --publish 9090:9090 ghcr.io/invoke-ai/invokeai
+```
+
+**ROCm:**
+
+```bash
+docker run --device /dev/kfd --device /dev/dri --publish 9090:9090 ghcr.io/invoke-ai/invokeai:main-rocm
+```
+
+Open `http://localhost:9090` in your browser once the container finishes booting, install some models, and generate away!
+
+> [!TIP]
+> To persist your data (including downloaded models) outside of the container, add a `--volume/-v` flag to the above command, e.g.: `docker run --volume /some/local/path:/invokeai <...the rest of the command>`
+
+## Customize the container
+
+We ship the `run.sh` script, which is a convenient wrapper around `docker compose` for cases where custom image build args are needed. Alternatively, the familiar `docker compose` commands work just as well.
+
+```bash
+cd docker
+cp .env.sample .env
+# edit .env to your liking if you need to; it is well commented.
+./run.sh
+```
+
+It will take a few minutes to build the image the first time. Once the application starts up, open `http://localhost:9090` in your browser to invoke!
+
+## Docker setup in detail

 #### Linux

 1. Ensure builkit is enabled in the Docker daemon settings (`/etc/docker/daemon.json`)
 2. Install the `docker compose` plugin using your package manager, or follow a [tutorial](https://docs.docker.com/compose/install/linux/#install-using-the-repository).
-    - The deprecated `docker-compose` (hyphenated) CLI continues to work for now.
+    - The deprecated `docker-compose` (hyphenated) CLI probably won't work. Update to a recent version.
 3. Ensure docker daemon is able to access the GPU.
-    - You may need to install [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
+    - [NVIDIA docs](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
+    - [AMD docs](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html)

 #### macOS

+> [!TIP]
+> You'll be better off installing Invoke directly on your system, because Docker cannot use the GPU on macOS.
+
+If you are still reading:
+
 1. Ensure Docker has at least 16GB RAM
 2. Enable VirtioFS for file sharing
 3. Enable `docker compose` V2 support

-This is done via Docker Desktop preferences
+This is done via Docker Desktop preferences.

-### Configure Invoke environment
+### Configure the Invoke Environment

-1. Make a copy of `.env.sample` and name it `.env` (`cp .env.sample .env` (Mac/Linux) or `copy example.env .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
-   a. the desired location of the InvokeAI runtime directory, or
-   b. an existing, v3.0.0 compatible runtime directory.
+1. Make a copy of `.env.sample` and name it `.env` (`cp .env.sample .env` (Mac/Linux) or `copy .env.sample .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to the desired location of the InvokeAI runtime directory. It may be an existing directory from a previous installation (post 4.0.0).
 1. Execute `run.sh`

 The image will be built automatically if needed.

-The runtime directory (holding models and outputs) will be created in the location specified by `INVOKEAI_ROOT`. The default location is `~/invokeai`. The runtime directory will be populated with the base configs and models necessary to start generating.
+The runtime directory (holding models and outputs) will be created in the location specified by `INVOKEAI_ROOT`. The default location is `~/invokeai`. Navigate to the Model Manager tab and install some models before generating.

 ### Use a GPU
@@ -43,9 +77,9 @@ The runtime directory (holding models and outputs) will be created in the locati
 - WSL2 is *required* for Windows.
 - only `x86_64` architecture is supported.

-The Docker daemon on the system must be already set up to use the GPU. In case of Linux, this involves installing `nvidia-docker-runtime` and configuring the `nvidia` runtime as default. Steps will be different for AMD. Please see Docker documentation for the most up-to-date instructions for using your GPU with Docker.
+The Docker daemon on the system must already be set up to use the GPU. On Linux, this involves installing `nvidia-docker-runtime` and configuring the `nvidia` runtime as default. Steps will be different for AMD. Please see the Docker/NVIDIA/AMD documentation for the most up-to-date instructions for using your GPU with Docker.

-To use an AMD GPU, set `GPU_DRIVER=rocm` in your `.env` file.
+To use an AMD GPU, set `GPU_DRIVER=rocm` in your `.env` file before running `./run.sh`.

 ## Customize
@@ -59,10 +93,10 @@ Values are optional, but setting `INVOKEAI_ROOT` is highly recommended. The defa
 INVOKEAI_ROOT=/Volumes/WorkDrive/invokeai
 HUGGINGFACE_TOKEN=the_actual_token
 CONTAINER_UID=1000
-GPU_DRIVER=nvidia
+GPU_DRIVER=cuda
 ```

-Any environment variables supported by InvokeAI can be set here - please see the [Configuration docs](https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/) for further detail.
+Any environment variables supported by InvokeAI can be set here. See the [Configuration docs](https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/) for further detail.

 ## Even More Customizing!
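Reviewer note: to illustrate the `.env`-driven workflow the docker/README.md changes above describe, here is a minimal sketch (not part of the patch). The `INVOKEAI_ROOT` path is an example only, and `GPU_DRIVER=rocm` assumes an AMD GPU; use `cuda` for NVIDIA.

```bash
# work from the docker directory of the repository
cd InvokeAI/docker

# start from the provided sample environment file
cp .env.sample .env

# example values only: point INVOKEAI_ROOT at an absolute host path
# and select the ROCm compose profile for an AMD GPU
echo "INVOKEAI_ROOT=/home/myuser/invokeai" >> .env
echo "GPU_DRIVER=rocm" >> .env

# build the image if needed and start the container via the compose wrapper
./run.sh
```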
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 2ad50e74a1..af96cc1c8f 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -1,7 +1,5 @@ # Copyright (c) 2023 Eugene Brodsky https://github.com/ebr -version: '3.8' - x-invokeai: &invokeai image: "local/invokeai:latest" build: @@ -32,7 +30,7 @@ x-invokeai: &invokeai services: - invokeai-nvidia: + invokeai-cuda: <<: *invokeai deploy: resources: diff --git a/docker/docker-entrypoint.sh b/docker/docker-entrypoint.sh index 7fb52f3af9..686bd9630f 100755 --- a/docker/docker-entrypoint.sh +++ b/docker/docker-entrypoint.sh @@ -23,18 +23,18 @@ usermod -u ${USER_ID} ${USER} 1>/dev/null # but it is useful to have the full SSH server e.g. on Runpod. # (use SCP to copy files to/from the image, etc) if [[ -v "PUBLIC_KEY" ]] && [[ ! -d "${HOME}/.ssh" ]]; then - apt-get update - apt-get install -y openssh-server - pushd "$HOME" - mkdir -p .ssh - echo "${PUBLIC_KEY}" > .ssh/authorized_keys - chmod -R 700 .ssh - popd - service ssh start + apt-get update + apt-get install -y openssh-server + pushd "$HOME" + mkdir -p .ssh + echo "${PUBLIC_KEY}" >.ssh/authorized_keys + chmod -R 700 .ssh + popd + service ssh start fi mkdir -p "${INVOKEAI_ROOT}" -chown --recursive ${USER} "${INVOKEAI_ROOT}" +chown --recursive ${USER} "${INVOKEAI_ROOT}" || true cd "${INVOKEAI_ROOT}" # Run the CMD as the Container User (not root). diff --git a/docker/run.sh b/docker/run.sh index d413e53453..1040e865bf 100755 --- a/docker/run.sh +++ b/docker/run.sh @@ -8,11 +8,15 @@ run() { local build_args="" local profile="" + # create .env file if it doesn't exist, otherwise docker compose will fail touch .env + + # parse .env file for build args build_args=$(awk '$1 ~ /=[^$]/ && $0 !~ /^#/ {print "--build-arg " $0 " "}' .env) && profile="$(awk -F '=' '/GPU_DRIVER/ {print $2}' .env)" - [[ -z "$profile" ]] && profile="nvidia" + # default to 'cuda' profile + [[ -z "$profile" ]] && profile="cuda" local service_name="invokeai-$profile" diff --git a/docs/installation/040_INSTALL_DOCKER.md b/docs/installation/040_INSTALL_DOCKER.md index 3814b72e80..119cff93d2 100644 --- a/docs/installation/040_INSTALL_DOCKER.md +++ b/docs/installation/040_INSTALL_DOCKER.md @@ -4,50 +4,37 @@ title: Installing with Docker # :fontawesome-brands-docker: Docker -!!! warning "macOS and AMD GPU Users" +!!! warning "macOS users" - We highly recommend to Install InvokeAI locally using [these instructions](INSTALLATION.md), - because Docker containers can not access the GPU on macOS. - -!!! warning "AMD GPU Users" - - Container support for AMD GPUs has been reported to work by the community, but has not received - extensive testing. Please make sure to set the `GPU_DRIVER=rocm` environment variable (see below), and - use the `build.sh` script to build the image for this to take effect at build time. + Docker can not access the GPU on macOS, so your generation speeds will be slow. [Install InvokeAI](INSTALLATION.md) instead. !!! tip "Linux and Windows Users" - For optimal performance, configure your Docker daemon to access your machine's GPU. + Configure Docker to access your machine's GPU. Docker Desktop on Windows [includes GPU support](https://www.docker.com/blog/wsl-2-gpu-support-for-docker-desktop-on-nvidia-gpus/). - Linux users should install and configure the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) - -## Why containers? - -They provide a flexible, reliable way to build and deploy InvokeAI. 
-See [Processes](https://12factor.net/processes) under the Twelve-Factor App -methodology for details on why running applications in such a stateless fashion is important. - -The container is configured for CUDA by default, but can be built to support AMD GPUs -by setting the `GPU_DRIVER=rocm` environment variable at Docker image build time. - -Developers on Apple silicon (M1/M2/M3): You -[can't access your GPU cores from Docker containers](https://github.com/pytorch/pytorch/issues/81224) -and performance is reduced compared with running it directly on macOS but for -development purposes it's fine. Once you're done with development tasks on your -laptop you can build for the target platform and architecture and deploy to -another environment with NVIDIA GPUs on-premises or in the cloud. + Linux users should follow the [NVIDIA](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) or [AMD](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html) documentation. ## TL;DR -This assumes properly configured Docker on Linux or Windows/WSL2. Read on for detailed customization options. +Ensure your Docker setup is able to use your GPU. Then: + + ```bash + docker run --runtime=nvidia --gpus=all --publish 9090:9090 ghcr.io/invoke-ai/invokeai + ``` + +Once the container starts up, open http://localhost:9090 in your browser, install some models, and start generating. + +## Build-It-Yourself + +All the docker materials are located inside the [docker](https://github.com/invoke-ai/InvokeAI/tree/main/docker) directory in the Git repo. ```bash - # docker compose commands should be run from the `docker` directory cd docker + cp .env.sample .env docker compose up ``` -## Installation in a Linux container (desktop) +We also ship the `run.sh` convenience script. See the `docker/README.md` file for detailed instructions on how to customize the docker setup to your needs. ### Prerequisites @@ -58,18 +45,9 @@ Preferences, Resources, Advanced. Increase the CPUs and Memory to avoid this [Issue](https://github.com/invoke-ai/InvokeAI/issues/342). You may need to increase Swap and Disk image size too. -#### Get a Huggingface-Token - -Besides the Docker Agent you will need an Account on -[huggingface.co](https://huggingface.co/join). - -After you succesfully registered your account, go to -[huggingface.co/settings/tokens](https://huggingface.co/settings/tokens), create -a token and copy it, since you will need in for the next step. - ### Setup -Set up your environmnent variables. In the `docker` directory, make a copy of `.env.sample` and name it `.env`. Make changes as necessary. +Set up your environment variables. In the `docker` directory, make a copy of `.env.sample` and name it `.env`. Make changes as necessary. Any environment variables supported by InvokeAI can be set here - please see the [CONFIGURATION](../features/CONFIGURATION.md) for further detail. @@ -103,10 +81,9 @@ Once the container starts up (and configures the InvokeAI root directory if this ## Troubleshooting / FAQ - Q: I am running on Windows under WSL2, and am seeing a "no such file or directory" error. -- A: Your `docker-entrypoint.sh` file likely has Windows (CRLF) as opposed to Unix (LF) line endings, - and you may have cloned this repository before the issue was fixed. To solve this, please change - the line endings in the `docker-entrypoint.sh` file to `LF`. 
You can do this in VSCode
+- A: Your `docker-entrypoint.sh` might have Windows (CRLF) line endings, depending on how you cloned the repository.
+  To solve this, change the line endings in the `docker-entrypoint.sh` file to `LF`. You can do this in VSCode
   (`Ctrl+P` and search for "line endings"), or by using the `dos2unix` utility in WSL.
   Finally, you may delete `docker-entrypoint.sh` followed by `git pull; git checkout docker/docker-entrypoint.sh`
   to reset the file to its most recent version.
-  For more information on this issue, please see the [Docker Desktop documentation](https://docs.docker.com/desktop/troubleshoot/topics/#avoid-unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers)
+  For more information on this issue, see the [Docker Desktop documentation](https://docs.docker.com/desktop/troubleshoot/topics/#avoid-unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers)
diff --git a/installer/templates/invoke.bat.in b/installer/templates/invoke.bat.in
index c8ef19710b..774b667c08 100644
--- a/installer/templates/invoke.bat.in
+++ b/installer/templates/invoke.bat.in
@@ -13,7 +13,7 @@ echo 2. Open the developer console
 echo 3. Command-line help
 echo Q - Quit
 echo.
-echo To update, download and run the installer from https://github.com/invoke-ai/InvokeAI/releases/latest.
+echo To update, download and run the installer from https://github.com/invoke-ai/InvokeAI/releases/latest
 echo.
 set /P choice="Please enter 1-4, Q: [1] "
 if not defined choice set choice=1
diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py
index 19a7bb083d..27ab030d4c 100644
--- a/invokeai/app/api/dependencies.py
+++ b/invokeai/app/api/dependencies.py
@@ -4,37 +4,39 @@ from logging import Logger
 import torch
+from invokeai.app.services.board_image_records.board_image_records_sqlite import SqliteBoardImageRecordStorage
+from invokeai.app.services.board_images.board_images_default import BoardImagesService
+from invokeai.app.services.board_records.board_records_sqlite import SqliteBoardRecordStorage
+from invokeai.app.services.boards.boards_default import BoardService
+from invokeai.app.services.bulk_download.bulk_download_default import BulkDownloadService
+from invokeai.app.services.config.config_default import InvokeAIAppConfig
+from invokeai.app.services.download.download_default import DownloadQueueService
+from invokeai.app.services.events.events_fastapievents import FastAPIEventService
+from invokeai.app.services.image_files.image_files_disk import DiskImageFileStorage
+from invokeai.app.services.image_records.image_records_sqlite import SqliteImageRecordStorage
+from invokeai.app.services.images.images_default import ImageService
+from invokeai.app.services.invocation_cache.invocation_cache_memory import MemoryInvocationCache
+from invokeai.app.services.invocation_services import InvocationServices
+from invokeai.app.services.invocation_stats.invocation_stats_default import InvocationStatsService
+from invokeai.app.services.invoker import Invoker
+from invokeai.app.services.model_images.model_images_default import ModelImageFileStorageDisk
+from invokeai.app.services.model_manager.model_manager_default import ModelManagerService
+from invokeai.app.services.model_records.model_records_sql import ModelRecordServiceSQL
+from invokeai.app.services.names.names_default import SimpleNameService
 from invokeai.app.services.object_serializer.object_serializer_disk import ObjectSerializerDisk
 from
invokeai.app.services.object_serializer.object_serializer_forward_cache import ObjectSerializerForwardCache +from invokeai.app.services.session_processor.session_processor_default import ( + DefaultSessionProcessor, + DefaultSessionRunner, +) +from invokeai.app.services.session_queue.session_queue_sqlite import SqliteSessionQueue from invokeai.app.services.shared.sqlite.sqlite_util import init_db +from invokeai.app.services.urls.urls_default import LocalUrlService +from invokeai.app.services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData from invokeai.backend.util.logging import InvokeAILogger from invokeai.version.invokeai_version import __version__ -from ..services.board_image_records.board_image_records_sqlite import SqliteBoardImageRecordStorage -from ..services.board_images.board_images_default import BoardImagesService -from ..services.board_records.board_records_sqlite import SqliteBoardRecordStorage -from ..services.boards.boards_default import BoardService -from ..services.bulk_download.bulk_download_default import BulkDownloadService -from ..services.config import InvokeAIAppConfig -from ..services.download import DownloadQueueService -from ..services.events.events_fastapievents import FastAPIEventService -from ..services.image_files.image_files_disk import DiskImageFileStorage -from ..services.image_records.image_records_sqlite import SqliteImageRecordStorage -from ..services.images.images_default import ImageService -from ..services.invocation_cache.invocation_cache_memory import MemoryInvocationCache -from ..services.invocation_services import InvocationServices -from ..services.invocation_stats.invocation_stats_default import InvocationStatsService -from ..services.invoker import Invoker -from ..services.model_images.model_images_default import ModelImageFileStorageDisk -from ..services.model_manager.model_manager_default import ModelManagerService -from ..services.model_records import ModelRecordServiceSQL -from ..services.names.names_default import SimpleNameService -from ..services.session_processor.session_processor_default import DefaultSessionProcessor, DefaultSessionRunner -from ..services.session_queue.session_queue_sqlite import SqliteSessionQueue -from ..services.urls.urls_default import LocalUrlService -from ..services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage - # TODO: is there a better way to achieve this? 
def check_internet() -> bool: diff --git a/invokeai/app/api/routers/app_info.py b/invokeai/app/api/routers/app_info.py index c3bc98a038..3206adb242 100644 --- a/invokeai/app/api/routers/app_info.py +++ b/invokeai/app/api/routers/app_info.py @@ -10,14 +10,13 @@ from fastapi import Body from fastapi.routing import APIRouter from pydantic import BaseModel, Field +from invokeai.app.api.dependencies import ApiDependencies from invokeai.app.invocations.upscale import ESRGAN_MODELS from invokeai.app.services.invocation_cache.invocation_cache_common import InvocationCacheStatus from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch from invokeai.backend.util.logging import logging from invokeai.version import __version__ -from ..dependencies import ApiDependencies - class LogLevel(int, Enum): NotSet = logging.NOTSET diff --git a/invokeai/app/api/routers/board_images.py b/invokeai/app/api/routers/board_images.py index 8e36a682d2..eb193f6585 100644 --- a/invokeai/app/api/routers/board_images.py +++ b/invokeai/app/api/routers/board_images.py @@ -2,7 +2,7 @@ from fastapi import Body, HTTPException from fastapi.routing import APIRouter from pydantic import BaseModel, Field -from ..dependencies import ApiDependencies +from invokeai.app.api.dependencies import ApiDependencies board_images_router = APIRouter(prefix="/v1/board_images", tags=["boards"]) diff --git a/invokeai/app/api/routers/boards.py b/invokeai/app/api/routers/boards.py index 19c2b330f0..926c0f7fd2 100644 --- a/invokeai/app/api/routers/boards.py +++ b/invokeai/app/api/routers/boards.py @@ -4,12 +4,11 @@ from fastapi import Body, HTTPException, Path, Query from fastapi.routing import APIRouter from pydantic import BaseModel, Field +from invokeai.app.api.dependencies import ApiDependencies from invokeai.app.services.board_records.board_records_common import BoardChanges from invokeai.app.services.boards.boards_common import BoardDTO from invokeai.app.services.shared.pagination import OffsetPaginatedResults -from ..dependencies import ApiDependencies - boards_router = APIRouter(prefix="/v1/boards", tags=["boards"]) @@ -32,6 +31,7 @@ class DeleteBoardResult(BaseModel): ) async def create_board( board_name: str = Query(description="The name of the board to create"), + is_private: bool = Query(default=False, description="Whether the board is private"), ) -> BoardDTO: """Creates a board""" try: diff --git a/invokeai/app/api/routers/download_queue.py b/invokeai/app/api/routers/download_queue.py index a6e53c7a5c..2633b28bca 100644 --- a/invokeai/app/api/routers/download_queue.py +++ b/invokeai/app/api/routers/download_queue.py @@ -8,13 +8,12 @@ from fastapi.routing import APIRouter from pydantic.networks import AnyHttpUrl from starlette.exceptions import HTTPException +from invokeai.app.api.dependencies import ApiDependencies from invokeai.app.services.download import ( DownloadJob, UnknownJobIDException, ) -from ..dependencies import ApiDependencies - download_queue_router = APIRouter(prefix="/v1/download_queue", tags=["download_queue"]) diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index d540fd3b55..8e3824ce93 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -8,6 +8,7 @@ from fastapi.routing import APIRouter from PIL import Image from pydantic import BaseModel, Field, JsonValue +from invokeai.app.api.dependencies import ApiDependencies from invokeai.app.invocations.fields import MetadataField from 
invokeai.app.services.image_records.image_records_common import ( ImageCategory, @@ -18,8 +19,6 @@ from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO from invokeai.app.services.shared.pagination import OffsetPaginatedResults from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection -from ..dependencies import ApiDependencies - images_router = APIRouter(prefix="/v1/images", tags=["images"]) diff --git a/invokeai/app/api/routers/model_manager.py b/invokeai/app/api/routers/model_manager.py index 298756d175..f73b7a86b1 100644 --- a/invokeai/app/api/routers/model_manager.py +++ b/invokeai/app/api/routers/model_manager.py @@ -16,6 +16,7 @@ from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field from starlette.exceptions import HTTPException from typing_extensions import Annotated +from invokeai.app.api.dependencies import ApiDependencies from invokeai.app.services.model_images.model_images_common import ModelImageFileNotFoundException from invokeai.app.services.model_install.model_install_common import ModelInstallJob from invokeai.app.services.model_records import ( @@ -35,8 +36,6 @@ from invokeai.backend.model_manager.metadata.metadata_base import ModelMetadataW from invokeai.backend.model_manager.search import ModelSearch from invokeai.backend.model_manager.starter_models import STARTER_MODELS, StarterModel, StarterModelWithoutDependencies -from ..dependencies import ApiDependencies - model_manager_router = APIRouter(prefix="/v2/models", tags=["model_manager"]) # images are immutable; set a high max-age diff --git a/invokeai/app/api/routers/session_queue.py b/invokeai/app/api/routers/session_queue.py index 7161e54a41..5dd4693795 100644 --- a/invokeai/app/api/routers/session_queue.py +++ b/invokeai/app/api/routers/session_queue.py @@ -4,6 +4,7 @@ from fastapi import Body, Path, Query from fastapi.routing import APIRouter from pydantic import BaseModel +from invokeai.app.api.dependencies import ApiDependencies from invokeai.app.services.session_processor.session_processor_common import SessionProcessorStatus from invokeai.app.services.session_queue.session_queue_common import ( QUEUE_ITEM_STATUS, @@ -19,8 +20,6 @@ from invokeai.app.services.session_queue.session_queue_common import ( ) from invokeai.app.services.shared.pagination import CursorPaginatedResults -from ..dependencies import ApiDependencies - session_queue_router = APIRouter(prefix="/v1/queue", tags=["queue"]) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index e69d95af71..dca0bc139d 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -20,14 +20,9 @@ from torch.backends.mps import is_available as is_mps_available # noinspection PyUnresolvedReferences import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import) import invokeai.frontend.web as web_dir +from invokeai.app.api.dependencies import ApiDependencies from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles -from invokeai.app.services.config.config_default import get_config -from invokeai.app.util.custom_openapi import get_openapi_func -from invokeai.backend.util.devices import TorchDevice - -from ..backend.util.logging import InvokeAILogger -from .api.dependencies import ApiDependencies -from .api.routers import ( +from invokeai.app.api.routers import ( app_info, board_images, boards, @@ -38,7 +33,11 @@ from .api.routers import ( utilities, workflows, ) -from .api.sockets import SocketIO +from invokeai.app.api.sockets import SocketIO +from 
invokeai.app.services.config.config_default import get_config +from invokeai.app.util.custom_openapi import get_openapi_func +from invokeai.backend.util.devices import TorchDevice +from invokeai.backend.util.logging import InvokeAILogger app_config = get_config() diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 1d169f0a82..b527de41bc 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -40,7 +40,7 @@ from invokeai.app.util.misc import uuid_string from invokeai.backend.util.logging import InvokeAILogger if TYPE_CHECKING: - from ..services.invocation_services import InvocationServices + from invokeai.app.services.invocation_services import InvocationServices logger = InvokeAILogger.get_logger() diff --git a/invokeai/app/invocations/collections.py b/invokeai/app/invocations/collections.py index e02291980f..bd3dedb3f8 100644 --- a/invokeai/app/invocations/collections.py +++ b/invokeai/app/invocations/collections.py @@ -4,13 +4,12 @@ import numpy as np from pydantic import ValidationInfo, field_validator +from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation +from invokeai.app.invocations.fields import InputField from invokeai.app.invocations.primitives import IntegerCollectionOutput from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.util.misc import SEED_MAX -from .baseinvocation import BaseInvocation, invocation -from .fields import InputField - @invocation( "range", title="Integer Range", tags=["collection", "integer", "range"], category="collections", version="1.0.0" diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index 1e78e10d38..fffb09e654 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -5,6 +5,7 @@ from compel import Compel, ReturnedEmbeddingsType from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer +from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output from invokeai.app.invocations.fields import ( ConditioningField, FieldDescriptions, @@ -14,6 +15,7 @@ from invokeai.app.invocations.fields import ( TensorField, UIComponent, ) +from invokeai.app.invocations.model import CLIPField from invokeai.app.invocations.primitives import ConditioningOutput from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.util.ti_utils import generate_ti_list @@ -26,9 +28,6 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ( ) from invokeai.backend.util.devices import TorchDevice -from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output -from .model import CLIPField - # unconditioned: Optional[torch.Tensor] diff --git a/invokeai/app/invocations/constants.py b/invokeai/app/invocations/constants.py index e01589be81..e97275e4fd 100644 --- a/invokeai/app/invocations/constants.py +++ b/invokeai/app/invocations/constants.py @@ -1,6 +1,5 @@ from typing import Literal -from invokeai.backend.stable_diffusion.schedulers import SCHEDULER_MAP from invokeai.backend.util.devices import TorchDevice LATENT_SCALE_FACTOR = 8 @@ -11,9 +10,6 @@ factor is hard-coded to a literal '8' rather than using this constant. 
The ratio of image:latent dimensions is LATENT_SCALE_FACTOR:1, or 8:1. """ -SCHEDULER_NAME_VALUES = Literal[tuple(SCHEDULER_MAP.keys())] -"""A literal type representing the valid scheduler names.""" - IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"] """A literal type for PIL image modes supported by Invoke""" diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index c0b332f27b..497d07e7cf 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -22,6 +22,13 @@ from controlnet_aux.util import HWC3, ade_palette from PIL import Image from pydantic import BaseModel, Field, field_validator, model_validator +from invokeai.app.invocations.baseinvocation import ( + BaseInvocation, + BaseInvocationOutput, + Classification, + invocation, + invocation_output, +) from invokeai.app.invocations.fields import ( FieldDescriptions, ImageField, @@ -45,8 +52,6 @@ from invokeai.backend.image_util.lineart_anime import LineartAnimeProcessor from invokeai.backend.image_util.util import np_to_pil, pil_to_np from invokeai.backend.util.devices import TorchDevice -from .baseinvocation import BaseInvocation, BaseInvocationOutput, Classification, invocation, invocation_output - class ControlField(BaseModel): image: ImageField = Field(description="The control image") diff --git a/invokeai/app/invocations/cv.py b/invokeai/app/invocations/cv.py index a7c394deb2..f7951ccfeb 100644 --- a/invokeai/app/invocations/cv.py +++ b/invokeai/app/invocations/cv.py @@ -5,13 +5,11 @@ import cv2 as cv import numpy from PIL import Image, ImageOps -from invokeai.app.invocations.fields import ImageField +from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation +from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata from invokeai.app.invocations.primitives import ImageOutput from invokeai.app.services.shared.invocation_context import InvocationContext -from .baseinvocation import BaseInvocation, invocation -from .fields import InputField, WithBoard, WithMetadata - @invocation("cv_inpaint", title="OpenCV Inpaint", tags=["opencv", "inpaint"], category="inpaint", version="1.3.1") class CvInpaintInvocation(BaseInvocation, WithMetadata, WithBoard): diff --git a/invokeai/app/invocations/denoise_latents.py b/invokeai/app/invocations/denoise_latents.py index fd901298f7..7ccf906893 100644 --- a/invokeai/app/invocations/denoise_latents.py +++ b/invokeai/app/invocations/denoise_latents.py @@ -17,7 +17,7 @@ from torchvision.transforms.functional import resize as tv_resize from transformers import CLIPVisionModelWithProjection from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation -from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR, SCHEDULER_NAME_VALUES +from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR from invokeai.app.invocations.controlnet_image_processors import ControlField from invokeai.app.invocations.fields import ( ConditioningField, @@ -54,6 +54,7 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ( TextConditioningRegions, ) from invokeai.backend.stable_diffusion.schedulers import SCHEDULER_MAP +from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES from invokeai.backend.util.devices import TorchDevice from invokeai.backend.util.hotfixes import ControlNetModel from 
invokeai.backend.util.mask import to_standard_float_mask diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index 65e7ce5e06..a551f8df8a 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -6,6 +6,7 @@ import cv2 import numpy from PIL import Image, ImageChops, ImageFilter, ImageOps +from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation from invokeai.app.invocations.constants import IMAGE_MODES from invokeai.app.invocations.fields import ( ColorField, @@ -21,8 +22,6 @@ from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark from invokeai.backend.image_util.safety_checker import SafetyChecker -from .baseinvocation import BaseInvocation, Classification, invocation - @invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.1") class ShowImageInvocation(BaseInvocation): diff --git a/invokeai/app/invocations/infill.py b/invokeai/app/invocations/infill.py index 7e1a2ee322..3314d72620 100644 --- a/invokeai/app/invocations/infill.py +++ b/invokeai/app/invocations/infill.py @@ -3,7 +3,9 @@ from typing import Literal, get_args from PIL import Image -from invokeai.app.invocations.fields import ColorField, ImageField +from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation +from invokeai.app.invocations.fields import ColorField, ImageField, InputField, WithBoard, WithMetadata +from invokeai.app.invocations.image import PIL_RESAMPLING_MAP, PIL_RESAMPLING_MODES from invokeai.app.invocations.primitives import ImageOutput from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.util.misc import SEED_MAX @@ -14,10 +16,6 @@ from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch, in from invokeai.backend.image_util.infill_methods.tile import infill_tile from invokeai.backend.util.logging import InvokeAILogger -from .baseinvocation import BaseInvocation, invocation -from .fields import InputField, WithBoard, WithMetadata -from .image import PIL_RESAMPLING_MAP, PIL_RESAMPLING_MODES - logger = InvokeAILogger.get_logger() diff --git a/invokeai/app/invocations/math.py b/invokeai/app/invocations/math.py index dad000d411..5d3988031b 100644 --- a/invokeai/app/invocations/math.py +++ b/invokeai/app/invocations/math.py @@ -5,12 +5,11 @@ from typing import Literal import numpy as np from pydantic import ValidationInfo, field_validator +from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation from invokeai.app.invocations.fields import FieldDescriptions, InputField from invokeai.app.invocations.primitives import FloatOutput, IntegerOutput from invokeai.app.services.shared.invocation_context import InvocationContext -from .baseinvocation import BaseInvocation, invocation - @invocation("add", title="Add Integers", tags=["math", "add"], category="math", version="1.0.1") class AddInvocation(BaseInvocation): diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py index 9c7264a9bb..17b68ffc0b 100644 --- a/invokeai/app/invocations/metadata.py +++ b/invokeai/app/invocations/metadata.py @@ -14,8 +14,7 @@ from invokeai.app.invocations.fields import ( from invokeai.app.invocations.model import ModelIdentifierField from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.util.controlnet_utils import 
CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES - -from ...version import __version__ +from invokeai.version.invokeai_version import __version__ class MetadataItemField(BaseModel): diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py index 94a6136fcb..c0d067c0a7 100644 --- a/invokeai/app/invocations/model.py +++ b/invokeai/app/invocations/model.py @@ -3,18 +3,17 @@ from typing import List, Optional from pydantic import BaseModel, Field -from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType -from invokeai.app.services.shared.invocation_context import InvocationContext -from invokeai.app.shared.models import FreeUConfig -from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType, SubModelType - -from .baseinvocation import ( +from invokeai.app.invocations.baseinvocation import ( BaseInvocation, BaseInvocationOutput, Classification, invocation, invocation_output, ) +from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType +from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.app.shared.models import FreeUConfig +from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType, SubModelType class ModelIdentifierField(BaseModel): diff --git a/invokeai/app/invocations/noise.py b/invokeai/app/invocations/noise.py index 931e639106..1d3ff3a29c 100644 --- a/invokeai/app/invocations/noise.py +++ b/invokeai/app/invocations/noise.py @@ -4,18 +4,12 @@ import torch from pydantic import field_validator +from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR from invokeai.app.invocations.fields import FieldDescriptions, InputField, LatentsField, OutputField from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.util.misc import SEED_MAX - -from ...backend.util.devices import TorchDevice -from .baseinvocation import ( - BaseInvocation, - BaseInvocationOutput, - invocation, - invocation_output, -) +from invokeai.backend.util.devices import TorchDevice """ Utilities diff --git a/invokeai/app/invocations/param_easing.py b/invokeai/app/invocations/param_easing.py index 0e590f4e2b..3e785ef545 100644 --- a/invokeai/app/invocations/param_easing.py +++ b/invokeai/app/invocations/param_easing.py @@ -39,12 +39,11 @@ from easing_functions import ( ) from matplotlib.ticker import MaxNLocator +from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation +from invokeai.app.invocations.fields import InputField from invokeai.app.invocations.primitives import FloatCollectionOutput from invokeai.app.services.shared.invocation_context import InvocationContext -from .baseinvocation import BaseInvocation, invocation -from .fields import InputField - @invocation( "float_range", diff --git a/invokeai/app/invocations/primitives.py b/invokeai/app/invocations/primitives.py index 28f72fb377..e5056e3775 100644 --- a/invokeai/app/invocations/primitives.py +++ b/invokeai/app/invocations/primitives.py @@ -4,6 +4,7 @@ from typing import Optional import torch +from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR from invokeai.app.invocations.fields import ( ColorField, @@ -21,13 +22,6 @@ from 
invokeai.app.invocations.fields import ( from invokeai.app.services.images.images_common import ImageDTO from invokeai.app.services.shared.invocation_context import InvocationContext -from .baseinvocation import ( - BaseInvocation, - BaseInvocationOutput, - invocation, - invocation_output, -) - """ Primitives: Boolean, Integer, Float, String, Image, Latents, Conditioning, Color - primitive nodes diff --git a/invokeai/app/invocations/prompt.py b/invokeai/app/invocations/prompt.py index 64a06d2f18..48eec0ac0e 100644 --- a/invokeai/app/invocations/prompt.py +++ b/invokeai/app/invocations/prompt.py @@ -5,12 +5,11 @@ import numpy as np from dynamicprompts.generators import CombinatorialPromptGenerator, RandomPromptGenerator from pydantic import field_validator +from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation +from invokeai.app.invocations.fields import InputField, UIComponent from invokeai.app.invocations.primitives import StringCollectionOutput from invokeai.app.services.shared.invocation_context import InvocationContext -from .baseinvocation import BaseInvocation, invocation -from .fields import InputField, UIComponent - @invocation( "dynamic_prompt", diff --git a/invokeai/app/invocations/scheduler.py b/invokeai/app/invocations/scheduler.py index 52af20378e..a870a442ef 100644 --- a/invokeai/app/invocations/scheduler.py +++ b/invokeai/app/invocations/scheduler.py @@ -1,5 +1,4 @@ from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output -from invokeai.app.invocations.constants import SCHEDULER_NAME_VALUES from invokeai.app.invocations.fields import ( FieldDescriptions, InputField, @@ -7,6 +6,7 @@ from invokeai.app.invocations.fields import ( UIType, ) from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES @invocation_output("scheduler_output") diff --git a/invokeai/app/invocations/sdxl.py b/invokeai/app/invocations/sdxl.py index 1c0817cb92..8eed158a61 100644 --- a/invokeai/app/invocations/sdxl.py +++ b/invokeai/app/invocations/sdxl.py @@ -1,15 +1,9 @@ +from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, UIType +from invokeai.app.invocations.model import CLIPField, ModelIdentifierField, UNetField, VAEField from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.model_manager import SubModelType -from .baseinvocation import ( - BaseInvocation, - BaseInvocationOutput, - invocation, - invocation_output, -) -from .model import CLIPField, ModelIdentifierField, UNetField, VAEField - @invocation_output("sdxl_model_loader_output") class SDXLModelLoaderOutput(BaseInvocationOutput): diff --git a/invokeai/app/invocations/strings.py b/invokeai/app/invocations/strings.py index 46ef35cbbf..2b6bf300b9 100644 --- a/invokeai/app/invocations/strings.py +++ b/invokeai/app/invocations/strings.py @@ -2,17 +2,11 @@ import re +from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output +from invokeai.app.invocations.fields import InputField, OutputField, UIComponent +from invokeai.app.invocations.primitives import StringOutput from invokeai.app.services.shared.invocation_context import InvocationContext -from .baseinvocation import ( - BaseInvocation, - 
BaseInvocationOutput, - invocation, - invocation_output, -) -from .fields import InputField, OutputField, UIComponent -from .primitives import StringOutput - @invocation_output("string_pos_neg_output") class StringPosNegOutput(BaseInvocationOutput): diff --git a/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py b/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py index 2566fd2551..5d408a4df7 100644 --- a/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py +++ b/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py @@ -8,7 +8,7 @@ from diffusers.schedulers.scheduling_utils import SchedulerMixin from pydantic import field_validator from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation -from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR, SCHEDULER_NAME_VALUES +from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR from invokeai.app.invocations.controlnet_image_processors import ControlField from invokeai.app.invocations.denoise_latents import DenoiseLatentsInvocation, get_scheduler from invokeai.app.invocations.fields import ( @@ -29,6 +29,7 @@ from invokeai.backend.stable_diffusion.multi_diffusion_pipeline import ( MultiDiffusionPipeline, MultiDiffusionRegionConditioning, ) +from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES from invokeai.backend.tiles.tiles import ( calc_tiles_min_overlap, ) diff --git a/invokeai/app/invocations/upscale.py b/invokeai/app/invocations/upscale.py index f93060f8d3..e7b3968aec 100644 --- a/invokeai/app/invocations/upscale.py +++ b/invokeai/app/invocations/upscale.py @@ -6,15 +6,13 @@ import numpy as np from PIL import Image from pydantic import ConfigDict -from invokeai.app.invocations.fields import ImageField +from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation +from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata from invokeai.app.invocations.primitives import ImageOutput from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet from invokeai.backend.image_util.realesrgan.realesrgan import RealESRGAN -from .baseinvocation import BaseInvocation, invocation -from .fields import InputField, WithBoard, WithMetadata - # TODO: Populate this from disk? # TODO: Use model manager to load? 
ESRGAN_MODELS = Literal[ diff --git a/invokeai/app/services/board_image_records/board_image_records_sqlite.py b/invokeai/app/services/board_image_records/board_image_records_sqlite.py index cde810a739..33ac76b06f 100644 --- a/invokeai/app/services/board_image_records/board_image_records_sqlite.py +++ b/invokeai/app/services/board_image_records/board_image_records_sqlite.py @@ -2,12 +2,11 @@ import sqlite3 import threading from typing import Optional, cast +from invokeai.app.services.board_image_records.board_image_records_base import BoardImageRecordStorageBase from invokeai.app.services.image_records.image_records_common import ImageRecord, deserialize_image_record from invokeai.app.services.shared.pagination import OffsetPaginatedResults from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase -from .board_image_records_base import BoardImageRecordStorageBase - class SqliteBoardImageRecordStorage(BoardImageRecordStorageBase): _conn: sqlite3.Connection diff --git a/invokeai/app/services/board_images/board_images_default.py b/invokeai/app/services/board_images/board_images_default.py index 85e478619c..6a564f5a91 100644 --- a/invokeai/app/services/board_images/board_images_default.py +++ b/invokeai/app/services/board_images/board_images_default.py @@ -1,9 +1,8 @@ from typing import Optional +from invokeai.app.services.board_images.board_images_base import BoardImagesServiceABC from invokeai.app.services.invoker import Invoker -from .board_images_base import BoardImagesServiceABC - class BoardImagesService(BoardImagesServiceABC): __invoker: Invoker diff --git a/invokeai/app/services/board_records/board_records_base.py b/invokeai/app/services/board_records/board_records_base.py index 9d065b3750..9d16dacf60 100644 --- a/invokeai/app/services/board_records/board_records_base.py +++ b/invokeai/app/services/board_records/board_records_base.py @@ -1,9 +1,8 @@ from abc import ABC, abstractmethod +from invokeai.app.services.board_records.board_records_common import BoardChanges, BoardRecord from invokeai.app.services.shared.pagination import OffsetPaginatedResults -from .board_records_common import BoardChanges, BoardRecord - class BoardRecordStorageBase(ABC): """Low-level service responsible for interfacing with the board record store.""" diff --git a/invokeai/app/services/board_records/board_records_common.py b/invokeai/app/services/board_records/board_records_common.py index d763480a9f..0dda8a8b6b 100644 --- a/invokeai/app/services/board_records/board_records_common.py +++ b/invokeai/app/services/board_records/board_records_common.py @@ -24,6 +24,8 @@ class BoardRecord(BaseModelExcludeNull): """The name of the cover image of the board.""" archived: bool = Field(description="Whether or not the board is archived.") """Whether or not the board is archived.""" + is_private: Optional[bool] = Field(default=None, description="Whether the board is private.") + """Whether the board is private.""" def deserialize_board_record(board_dict: dict) -> BoardRecord: @@ -38,6 +40,7 @@ def deserialize_board_record(board_dict: dict) -> BoardRecord: updated_at = board_dict.get("updated_at", get_iso_timestamp()) deleted_at = board_dict.get("deleted_at", get_iso_timestamp()) archived = board_dict.get("archived", False) + is_private = board_dict.get("is_private", False) return BoardRecord( board_id=board_id, @@ -47,6 +50,7 @@ def deserialize_board_record(board_dict: dict) -> BoardRecord: updated_at=updated_at, deleted_at=deleted_at, archived=archived, + is_private=is_private, ) diff --git 
a/invokeai/app/services/board_records/board_records_sqlite.py b/invokeai/app/services/board_records/board_records_sqlite.py index 9d81e2f1e7..c64e060b95 100644 --- a/invokeai/app/services/board_records/board_records_sqlite.py +++ b/invokeai/app/services/board_records/board_records_sqlite.py @@ -2,12 +2,8 @@ import sqlite3 import threading from typing import Union, cast -from invokeai.app.services.shared.pagination import OffsetPaginatedResults -from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase -from invokeai.app.util.misc import uuid_string - -from .board_records_base import BoardRecordStorageBase -from .board_records_common import ( +from invokeai.app.services.board_records.board_records_base import BoardRecordStorageBase +from invokeai.app.services.board_records.board_records_common import ( BoardChanges, BoardRecord, BoardRecordDeleteException, @@ -15,6 +11,9 @@ from .board_records_common import ( BoardRecordSaveException, deserialize_board_record, ) +from invokeai.app.services.shared.pagination import OffsetPaginatedResults +from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase +from invokeai.app.util.misc import uuid_string class SqliteBoardRecordStorage(BoardRecordStorageBase): diff --git a/invokeai/app/services/boards/boards_base.py b/invokeai/app/services/boards/boards_base.py index fddd5a7954..3c2d148cb9 100644 --- a/invokeai/app/services/boards/boards_base.py +++ b/invokeai/app/services/boards/boards_base.py @@ -1,10 +1,9 @@ from abc import ABC, abstractmethod from invokeai.app.services.board_records.board_records_common import BoardChanges +from invokeai.app.services.boards.boards_common import BoardDTO from invokeai.app.services.shared.pagination import OffsetPaginatedResults -from .boards_common import BoardDTO - class BoardServiceABC(ABC): """High-level service for board management.""" diff --git a/invokeai/app/services/boards/boards_common.py b/invokeai/app/services/boards/boards_common.py index 0cb54102bb..15d0b3c37f 100644 --- a/invokeai/app/services/boards/boards_common.py +++ b/invokeai/app/services/boards/boards_common.py @@ -2,7 +2,7 @@ from typing import Optional from pydantic import Field -from ..board_records.board_records_common import BoardRecord +from invokeai.app.services.board_records.board_records_common import BoardRecord class BoardDTO(BoardRecord): diff --git a/invokeai/app/services/boards/boards_default.py b/invokeai/app/services/boards/boards_default.py index 6457aee1d2..97fd3059a9 100644 --- a/invokeai/app/services/boards/boards_default.py +++ b/invokeai/app/services/boards/boards_default.py @@ -1,11 +1,9 @@ from invokeai.app.services.board_records.board_records_common import BoardChanges -from invokeai.app.services.boards.boards_common import BoardDTO +from invokeai.app.services.boards.boards_base import BoardServiceABC +from invokeai.app.services.boards.boards_common import BoardDTO, board_record_to_dto from invokeai.app.services.invoker import Invoker from invokeai.app.services.shared.pagination import OffsetPaginatedResults -from .boards_base import BoardServiceABC -from .boards_common import board_record_to_dto - class BoardService(BoardServiceABC): __invoker: Invoker diff --git a/invokeai/app/services/bulk_download/bulk_download_default.py b/invokeai/app/services/bulk_download/bulk_download_default.py index d4bf059b8f..4ebbd10d4f 100644 --- a/invokeai/app/services/bulk_download/bulk_download_default.py +++ b/invokeai/app/services/bulk_download/bulk_download_default.py @@ -4,6 +4,7 @@ from typing 
diff --git a/invokeai/app/services/bulk_download/bulk_download_default.py b/invokeai/app/services/bulk_download/bulk_download_default.py
index d4bf059b8f..4ebbd10d4f 100644
--- a/invokeai/app/services/bulk_download/bulk_download_default.py
+++ b/invokeai/app/services/bulk_download/bulk_download_default.py
@@ -4,6 +4,7 @@ from typing import Optional, Union
 from zipfile import ZipFile
 
 from invokeai.app.services.board_records.board_records_common import BoardRecordNotFoundException
+from invokeai.app.services.bulk_download.bulk_download_base import BulkDownloadBase
 from invokeai.app.services.bulk_download.bulk_download_common import (
     DEFAULT_BULK_DOWNLOAD_ID,
     BulkDownloadException,
@@ -15,8 +16,6 @@ from invokeai.app.services.images.images_common import ImageDTO
 from invokeai.app.services.invoker import Invoker
 from invokeai.app.util.misc import uuid_string
 
-from .bulk_download_base import BulkDownloadBase
-
 
 class BulkDownloadService(BulkDownloadBase):
     def start(self, invoker: Invoker) -> None:
diff --git a/invokeai/app/services/config/__init__.py b/invokeai/app/services/config/__init__.py
index 126692f08a..df1acbf104 100644
--- a/invokeai/app/services/config/__init__.py
+++ b/invokeai/app/services/config/__init__.py
@@ -1,7 +1,6 @@
 """Init file for InvokeAI configure package."""
 
 from invokeai.app.services.config.config_common import PagingArgumentParser
-
-from .config_default import InvokeAIAppConfig, get_config
+from invokeai.app.services.config.config_default import InvokeAIAppConfig, get_config
 
 __all__ = ["InvokeAIAppConfig", "get_config", "PagingArgumentParser"]
diff --git a/invokeai/app/services/download/__init__.py b/invokeai/app/services/download/__init__.py
index 33b0025809..48ded7d549 100644
--- a/invokeai/app/services/download/__init__.py
+++ b/invokeai/app/services/download/__init__.py
@@ -1,13 +1,13 @@
 """Init file for download queue."""
 
-from .download_base import (
+from invokeai.app.services.download.download_base import (
     DownloadJob,
     DownloadJobStatus,
     DownloadQueueServiceBase,
     MultiFileDownloadJob,
     UnknownJobIDException,
 )
-from .download_default import DownloadQueueService, TqdmProgress
+from invokeai.app.services.download.download_default import DownloadQueueService, TqdmProgress
 
 __all__ = [
     "DownloadJob",
diff --git a/invokeai/app/services/download/download_default.py b/invokeai/app/services/download/download_default.py
index f6c7c1a1a0..b97f61657c 100644
--- a/invokeai/app/services/download/download_default.py
+++ b/invokeai/app/services/download/download_default.py
@@ -16,12 +16,7 @@ from requests import HTTPError
 from tqdm import tqdm
 
 from invokeai.app.services.config import InvokeAIAppConfig, get_config
-from invokeai.app.services.events.events_base import EventServiceBase
-from invokeai.app.util.misc import get_iso_timestamp
-from invokeai.backend.model_manager.metadata import RemoteModelFile
-from invokeai.backend.util.logging import InvokeAILogger
-
-from .download_base import (
+from invokeai.app.services.download.download_base import (
     DownloadEventHandler,
     DownloadExceptionHandler,
     DownloadJob,
@@ -33,6 +28,10 @@ from .download_base import (
     ServiceInactiveException,
     UnknownJobIDException,
 )
+from invokeai.app.services.events.events_base import EventServiceBase
+from invokeai.app.util.misc import get_iso_timestamp
+from invokeai.backend.model_manager.metadata import RemoteModelFile
+from invokeai.backend.util.logging import InvokeAILogger
 
 # Maximum number of bytes to download during each call to requests.iter_content()
 DOWNLOAD_CHUNK_SIZE = 100000
@@ -185,7 +184,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
             job = DownloadJob(
                 source=url,
                 dest=path,
-                access_token=access_token,
+                access_token=access_token or self._lookup_access_token(url),
             )
             mfdj.download_parts.add(job)
             self._download_part2parent[job.source] = mfdj
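Alongside the import rewrite, `download_default.py` picks up a small behavioural change: when the caller does not pass a token, the queue now falls back to `self._lookup_access_token(url)` rather than building the `DownloadJob` with `access_token=None`. A rough, self-contained sketch of that fallback pattern; the token table and helper below are hypothetical stand-ins for the real lookup:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class DownloadJob:
    source: str
    dest: str
    access_token: Optional[str] = None


# Hypothetical stand-in for DownloadQueueService._lookup_access_token().
_TOKENS_BY_HOST = {"huggingface.co": "hf_example_token"}


def _lookup_access_token(url: str) -> Optional[str]:
    return next((tok for host, tok in _TOKENS_BY_HOST.items() if host in url), None)


def make_job(url: str, path: str, access_token: Optional[str] = None) -> DownloadJob:
    # Mirrors the changed line: an explicit token wins, otherwise try a per-URL lookup.
    return DownloadJob(source=url, dest=path, access_token=access_token or _lookup_access_token(url))


print(make_job("https://huggingface.co/foo/bar", "/tmp/bar").access_token)  # hf_example_token
```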
diff --git a/invokeai/app/services/events/events_fastapievents.py b/invokeai/app/services/events/events_fastapievents.py
index 8279d3bb34..d514a06b67 100644
--- a/invokeai/app/services/events/events_fastapievents.py
+++ b/invokeai/app/services/events/events_fastapievents.py
@@ -6,12 +6,11 @@ from queue import Empty, Queue
 
 from fastapi_events.dispatcher import dispatch
 
+from invokeai.app.services.events.events_base import EventServiceBase
 from invokeai.app.services.events.events_common import (
     EventBase,
 )
-from .events_base import EventServiceBase
-
 
 class FastAPIEventService(EventServiceBase):
     def __init__(self, event_handler_id: int) -> None:
diff --git a/invokeai/app/services/image_files/image_files_disk.py b/invokeai/app/services/image_files/image_files_disk.py
index 15d0be31f8..95ab052520 100644
--- a/invokeai/app/services/image_files/image_files_disk.py
+++ b/invokeai/app/services/image_files/image_files_disk.py
@@ -7,12 +7,15 @@ from PIL import Image, PngImagePlugin
 from PIL.Image import Image as PILImageType
 from send2trash import send2trash
 
+from invokeai.app.services.image_files.image_files_base import ImageFileStorageBase
+from invokeai.app.services.image_files.image_files_common import (
+    ImageFileDeleteException,
+    ImageFileNotFoundException,
+    ImageFileSaveException,
+)
 from invokeai.app.services.invoker import Invoker
 from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail
 
-from .image_files_base import ImageFileStorageBase
-from .image_files_common import ImageFileDeleteException, ImageFileNotFoundException, ImageFileSaveException
-
 
 class DiskImageFileStorage(ImageFileStorageBase):
     """Stores images on disk"""
diff --git a/invokeai/app/services/image_records/image_records_base.py b/invokeai/app/services/image_records/image_records_base.py
index 94e884e549..1211c9762c 100644
--- a/invokeai/app/services/image_records/image_records_base.py
+++ b/invokeai/app/services/image_records/image_records_base.py
@@ -3,11 +3,15 @@ from datetime import datetime
 from typing import Optional
 
 from invokeai.app.invocations.fields import MetadataField
+from invokeai.app.services.image_records.image_records_common import (
+    ImageCategory,
+    ImageRecord,
+    ImageRecordChanges,
+    ResourceOrigin,
+)
 from invokeai.app.services.shared.pagination import OffsetPaginatedResults
 from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
 
-from .image_records_common import ImageCategory, ImageRecord, ImageRecordChanges, ResourceOrigin
-
 
 class ImageRecordStorageBase(ABC):
     """Low-level service responsible for interfacing with the image record store."""
diff --git a/invokeai/app/services/image_records/image_records_sqlite.py b/invokeai/app/services/image_records/image_records_sqlite.py
index 82e7ffae9d..b0c2155a18 100644
--- a/invokeai/app/services/image_records/image_records_sqlite.py
+++ b/invokeai/app/services/image_records/image_records_sqlite.py
@@ -4,12 +4,8 @@ from datetime import datetime
 from typing import Optional, Union, cast
 
 from invokeai.app.invocations.fields import MetadataField, MetadataFieldValidator
-from invokeai.app.services.shared.pagination import OffsetPaginatedResults
-from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection
-from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
-
-from .image_records_base import ImageRecordStorageBase
-from .image_records_common import (
+from invokeai.app.services.image_records.image_records_base import ImageRecordStorageBase
+from 
invokeai.app.services.image_records.image_records_common import ( IMAGE_DTO_COLS, ImageCategory, ImageRecord, @@ -20,6 +16,9 @@ from .image_records_common import ( ResourceOrigin, deserialize_image_record, ) +from invokeai.app.services.shared.pagination import OffsetPaginatedResults +from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection +from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase class SqliteImageRecordStorage(ImageRecordStorageBase): diff --git a/invokeai/app/services/images/images_default.py b/invokeai/app/services/images/images_default.py index 4e78375034..15d950bab8 100644 --- a/invokeai/app/services/images/images_default.py +++ b/invokeai/app/services/images/images_default.py @@ -3,16 +3,12 @@ from typing import Optional from PIL.Image import Image as PILImageType from invokeai.app.invocations.fields import MetadataField -from invokeai.app.services.invoker import Invoker -from invokeai.app.services.shared.pagination import OffsetPaginatedResults -from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection - -from ..image_files.image_files_common import ( +from invokeai.app.services.image_files.image_files_common import ( ImageFileDeleteException, ImageFileNotFoundException, ImageFileSaveException, ) -from ..image_records.image_records_common import ( +from invokeai.app.services.image_records.image_records_common import ( ImageCategory, ImageRecord, ImageRecordChanges, @@ -23,8 +19,11 @@ from ..image_records.image_records_common import ( InvalidOriginException, ResourceOrigin, ) -from .images_base import ImageServiceABC -from .images_common import ImageDTO, image_record_to_dto +from invokeai.app.services.images.images_base import ImageServiceABC +from invokeai.app.services.images.images_common import ImageDTO, image_record_to_dto +from invokeai.app.services.invoker import Invoker +from invokeai.app.services.shared.pagination import OffsetPaginatedResults +from invokeai.app.services.shared.sqlite.sqlite_common import SQLiteDirection class ImageService(ImageServiceABC): diff --git a/invokeai/app/services/invocation_services.py b/invokeai/app/services/invocation_services.py index f4fce6098f..90ca613074 100644 --- a/invokeai/app/services/invocation_services.py +++ b/invokeai/app/services/invocation_services.py @@ -10,29 +10,28 @@ if TYPE_CHECKING: import torch + from invokeai.app.services.board_image_records.board_image_records_base import BoardImageRecordStorageBase + from invokeai.app.services.board_images.board_images_base import BoardImagesServiceABC + from invokeai.app.services.board_records.board_records_base import BoardRecordStorageBase + from invokeai.app.services.boards.boards_base import BoardServiceABC + from invokeai.app.services.bulk_download.bulk_download_base import BulkDownloadBase + from invokeai.app.services.config import InvokeAIAppConfig + from invokeai.app.services.download import DownloadQueueServiceBase + from invokeai.app.services.events.events_base import EventServiceBase + from invokeai.app.services.image_files.image_files_base import ImageFileStorageBase + from invokeai.app.services.image_records.image_records_base import ImageRecordStorageBase + from invokeai.app.services.images.images_base import ImageServiceABC + from invokeai.app.services.invocation_cache.invocation_cache_base import InvocationCacheBase + from invokeai.app.services.invocation_stats.invocation_stats_base import InvocationStatsServiceBase + from invokeai.app.services.model_images.model_images_base import 
ModelImageFileStorageBase
+    from invokeai.app.services.model_manager.model_manager_base import ModelManagerServiceBase
+    from invokeai.app.services.names.names_base import NameServiceBase
+    from invokeai.app.services.session_processor.session_processor_base import SessionProcessorBase
+    from invokeai.app.services.session_queue.session_queue_base import SessionQueueBase
+    from invokeai.app.services.urls.urls_base import UrlServiceBase
+    from invokeai.app.services.workflow_records.workflow_records_base import WorkflowRecordsStorageBase
     from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData
-    from .board_image_records.board_image_records_base import BoardImageRecordStorageBase
-    from .board_images.board_images_base import BoardImagesServiceABC
-    from .board_records.board_records_base import BoardRecordStorageBase
-    from .boards.boards_base import BoardServiceABC
-    from .bulk_download.bulk_download_base import BulkDownloadBase
-    from .config import InvokeAIAppConfig
-    from .download import DownloadQueueServiceBase
-    from .events.events_base import EventServiceBase
-    from .image_files.image_files_base import ImageFileStorageBase
-    from .image_records.image_records_base import ImageRecordStorageBase
-    from .images.images_base import ImageServiceABC
-    from .invocation_cache.invocation_cache_base import InvocationCacheBase
-    from .invocation_stats.invocation_stats_base import InvocationStatsServiceBase
-    from .model_images.model_images_base import ModelImageFileStorageBase
-    from .model_manager.model_manager_base import ModelManagerServiceBase
-    from .names.names_base import NameServiceBase
-    from .session_processor.session_processor_base import SessionProcessorBase
-    from .session_queue.session_queue_base import SessionQueueBase
-    from .urls.urls_base import UrlServiceBase
-    from .workflow_records.workflow_records_base import WorkflowRecordsStorageBase
-
 
 class InvocationServices:
     """Services that can be used by invocations"""
diff --git a/invokeai/app/services/invocation_stats/invocation_stats_default.py b/invokeai/app/services/invocation_stats/invocation_stats_default.py
index 5a41f1f5d6..5533657dc7 100644
--- a/invokeai/app/services/invocation_stats/invocation_stats_default.py
+++ b/invokeai/app/services/invocation_stats/invocation_stats_default.py
@@ -9,11 +9,8 @@ import torch
 
 import invokeai.backend.util.logging as logger
 from invokeai.app.invocations.baseinvocation import BaseInvocation
-from invokeai.app.services.invoker import Invoker
-from invokeai.backend.model_manager.load.model_cache import CacheStats
-
-from .invocation_stats_base import InvocationStatsServiceBase
-from .invocation_stats_common import (
+from invokeai.app.services.invocation_stats.invocation_stats_base import InvocationStatsServiceBase
+from invokeai.app.services.invocation_stats.invocation_stats_common import (
     GESStatsNotFoundError,
     GraphExecutionStats,
     GraphExecutionStatsSummary,
@@ -22,6 +19,8 @@ from .invocation_stats_common import (
     NodeExecutionStats,
     NodeExecutionStatsSummary,
 )
+from invokeai.app.services.invoker import Invoker
+from invokeai.backend.model_manager.load.model_cache import CacheStats
 
 # Size of 1GB in bytes.
 GB = 2**30
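The `InvocationServices` hunk above keeps all of its service imports inside the existing `if TYPE_CHECKING:` guard while converting them to absolute paths: the names remain visible to type checkers and annotations, but none of the service modules are imported at runtime, which sidesteps circular imports. A stripped-down sketch of the pattern; the two services and the constructor are illustrative, not the real signature:

```python
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated only by type checkers; never imported at runtime.
    from invokeai.app.services.boards.boards_base import BoardServiceABC
    from invokeai.app.services.images.images_base import ImageServiceABC


class InvocationServices:
    """Services that can be used by invocations"""

    def __init__(self, boards: BoardServiceABC, images: ImageServiceABC) -> None:
        self.boards = boards
        self.images = images
```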
diff --git a/invokeai/app/services/invoker.py b/invokeai/app/services/invoker.py
index 527afb37f4..64f83725a1 100644
--- a/invokeai/app/services/invoker.py
+++ b/invokeai/app/services/invoker.py
@@ -1,7 +1,7 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
 
-from .invocation_services import InvocationServices
+from invokeai.app.services.invocation_services import InvocationServices
 
 
 class Invoker:
diff --git a/invokeai/app/services/model_images/model_images_default.py b/invokeai/app/services/model_images/model_images_default.py
index 0ab79df3ed..36f04a93b5 100644
--- a/invokeai/app/services/model_images/model_images_default.py
+++ b/invokeai/app/services/model_images/model_images_default.py
@@ -5,15 +5,14 @@ from PIL.Image import Image as PILImageType
 from send2trash import send2trash
 
 from invokeai.app.services.invoker import Invoker
-from invokeai.app.util.misc import uuid_string
-from invokeai.app.util.thumbnails import make_thumbnail
-
-from .model_images_base import ModelImageFileStorageBase
-from .model_images_common import (
+from invokeai.app.services.model_images.model_images_base import ModelImageFileStorageBase
+from invokeai.app.services.model_images.model_images_common import (
     ModelImageFileDeleteException,
     ModelImageFileNotFoundException,
     ModelImageFileSaveException,
 )
+from invokeai.app.util.misc import uuid_string
+from invokeai.app.util.thumbnails import make_thumbnail
 
 
 class ModelImageFileStorageDisk(ModelImageFileStorageBase):
diff --git a/invokeai/app/services/model_install/__init__.py b/invokeai/app/services/model_install/__init__.py
index 941485a134..d96e86cbfe 100644
--- a/invokeai/app/services/model_install/__init__.py
+++ b/invokeai/app/services/model_install/__init__.py
@@ -1,9 +1,7 @@
 """Initialization file for model install service package."""
 
-from .model_install_base import (
-    ModelInstallServiceBase,
-)
-from .model_install_common import (
+from invokeai.app.services.model_install.model_install_base import ModelInstallServiceBase
+from invokeai.app.services.model_install.model_install_common import (
     HFModelSource,
     InstallStatus,
     LocalModelSource,
@@ -12,7 +10,7 @@ from .model_install_common import (
     UnknownInstallJobException,
     URLModelSource,
 )
-from .model_install_default import ModelInstallService
+from invokeai.app.services.model_install.model_install_default import ModelInstallService
 
 __all__ = [
     "ModelInstallServiceBase",
diff --git a/invokeai/app/services/model_install/model_install_default.py b/invokeai/app/services/model_install/model_install_default.py
index dd1b44d899..5e19a349ad 100644
--- a/invokeai/app/services/model_install/model_install_default.py
+++ b/invokeai/app/services/model_install/model_install_default.py
@@ -23,6 +23,16 @@ from invokeai.app.services.download import DownloadQueueServiceBase, MultiFileDo
 from invokeai.app.services.events.events_base import EventServiceBase
 from invokeai.app.services.invoker import Invoker
 from invokeai.app.services.model_install.model_install_base import ModelInstallServiceBase
+from invokeai.app.services.model_install.model_install_common import (
+    MODEL_SOURCE_TO_TYPE_MAP,
+    HFModelSource,
+    InstallStatus,
+    LocalModelSource,
+    ModelInstallJob,
+    ModelSource,
+    StringLikeSource,
+    URLModelSource,
+)
 from invokeai.app.services.model_records import DuplicateModelException, ModelRecordServiceBase
 from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
 from invokeai.backend.model_manager.config import (
@@ -47,17 +57,6 @@ from 
invokeai.backend.util.catch_sigint import catch_sigint from invokeai.backend.util.devices import TorchDevice from invokeai.backend.util.util import slugify -from .model_install_common import ( - MODEL_SOURCE_TO_TYPE_MAP, - HFModelSource, - InstallStatus, - LocalModelSource, - ModelInstallJob, - ModelSource, - StringLikeSource, - URLModelSource, -) - TMPDIR_PREFIX = "tmpinstall_" @@ -848,7 +847,7 @@ class ModelInstallService(ModelInstallServiceBase): with self._lock: if install_job := self._download_cache.pop(download_job.id, None): assert excp is not None - install_job.set_error(excp) + self._set_error(install_job, excp) self._download_queue.cancel_job(download_job) # Let other threads know that the number of downloads has changed diff --git a/invokeai/app/services/model_load/__init__.py b/invokeai/app/services/model_load/__init__.py index b4a86e9348..4c7e40c8c7 100644 --- a/invokeai/app/services/model_load/__init__.py +++ b/invokeai/app/services/model_load/__init__.py @@ -1,6 +1,6 @@ """Initialization file for model load service module.""" -from .model_load_base import ModelLoadServiceBase -from .model_load_default import ModelLoadService +from invokeai.app.services.model_load.model_load_base import ModelLoadServiceBase +from invokeai.app.services.model_load.model_load_default import ModelLoadService __all__ = ["ModelLoadServiceBase", "ModelLoadService"] diff --git a/invokeai/app/services/model_load/model_load_default.py b/invokeai/app/services/model_load/model_load_default.py index 8eb94616be..be2cc2478a 100644 --- a/invokeai/app/services/model_load/model_load_default.py +++ b/invokeai/app/services/model_load/model_load_default.py @@ -10,6 +10,7 @@ from torch import load as torch_load from invokeai.app.services.config import InvokeAIAppConfig from invokeai.app.services.invoker import Invoker +from invokeai.app.services.model_load.model_load_base import ModelLoadServiceBase from invokeai.backend.model_manager import AnyModel, AnyModelConfig, SubModelType from invokeai.backend.model_manager.load import ( LoadedModel, @@ -22,8 +23,6 @@ from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import from invokeai.backend.util.devices import TorchDevice from invokeai.backend.util.logging import InvokeAILogger -from .model_load_base import ModelLoadServiceBase - class ModelLoadService(ModelLoadServiceBase): """Wrapper around ModelLoaderRegistry.""" diff --git a/invokeai/app/services/model_manager/__init__.py b/invokeai/app/services/model_manager/__init__.py index 5455577266..07c27cee31 100644 --- a/invokeai/app/services/model_manager/__init__.py +++ b/invokeai/app/services/model_manager/__init__.py @@ -1,10 +1,9 @@ """Initialization file for model manager service.""" +from invokeai.app.services.model_manager.model_manager_default import ModelManagerService, ModelManagerServiceBase from invokeai.backend.model_manager import AnyModel, AnyModelConfig, BaseModelType, ModelType, SubModelType from invokeai.backend.model_manager.load import LoadedModel -from .model_manager_default import ModelManagerService, ModelManagerServiceBase - __all__ = [ "ModelManagerServiceBase", "ModelManagerService", diff --git a/invokeai/app/services/model_manager/model_manager_base.py b/invokeai/app/services/model_manager/model_manager_base.py index af1b68e1ec..a906076b16 100644 --- a/invokeai/app/services/model_manager/model_manager_base.py +++ b/invokeai/app/services/model_manager/model_manager_base.py @@ -5,14 +5,13 @@ from abc import ABC, abstractmethod import torch from typing_extensions import Self 
+from invokeai.app.services.config.config_default import InvokeAIAppConfig +from invokeai.app.services.download.download_base import DownloadQueueServiceBase +from invokeai.app.services.events.events_base import EventServiceBase from invokeai.app.services.invoker import Invoker - -from ..config import InvokeAIAppConfig -from ..download import DownloadQueueServiceBase -from ..events.events_base import EventServiceBase -from ..model_install import ModelInstallServiceBase -from ..model_load import ModelLoadServiceBase -from ..model_records import ModelRecordServiceBase +from invokeai.app.services.model_install.model_install_base import ModelInstallServiceBase +from invokeai.app.services.model_load.model_load_base import ModelLoadServiceBase +from invokeai.app.services.model_records.model_records_base import ModelRecordServiceBase class ModelManagerServiceBase(ABC): diff --git a/invokeai/app/services/model_manager/model_manager_default.py b/invokeai/app/services/model_manager/model_manager_default.py index f695c3c8c1..78f8e09e74 100644 --- a/invokeai/app/services/model_manager/model_manager_default.py +++ b/invokeai/app/services/model_manager/model_manager_default.py @@ -6,19 +6,20 @@ from typing import Optional import torch from typing_extensions import Self +from invokeai.app.services.config.config_default import InvokeAIAppConfig +from invokeai.app.services.download.download_base import DownloadQueueServiceBase +from invokeai.app.services.events.events_base import EventServiceBase from invokeai.app.services.invoker import Invoker +from invokeai.app.services.model_install.model_install_base import ModelInstallServiceBase +from invokeai.app.services.model_install.model_install_default import ModelInstallService +from invokeai.app.services.model_load.model_load_base import ModelLoadServiceBase +from invokeai.app.services.model_load.model_load_default import ModelLoadService +from invokeai.app.services.model_manager.model_manager_base import ModelManagerServiceBase +from invokeai.app.services.model_records.model_records_base import ModelRecordServiceBase from invokeai.backend.model_manager.load import ModelCache, ModelLoaderRegistry from invokeai.backend.util.devices import TorchDevice from invokeai.backend.util.logging import InvokeAILogger -from ..config import InvokeAIAppConfig -from ..download import DownloadQueueServiceBase -from ..events.events_base import EventServiceBase -from ..model_install import ModelInstallService, ModelInstallServiceBase -from ..model_load import ModelLoadService, ModelLoadServiceBase -from ..model_records import ModelRecordServiceBase -from .model_manager_base import ModelManagerServiceBase - class ModelManagerService(ModelManagerServiceBase): """ diff --git a/invokeai/app/services/model_records/model_records_sql.py b/invokeai/app/services/model_records/model_records_sql.py index 16abf4c523..2f9829dad4 100644 --- a/invokeai/app/services/model_records/model_records_sql.py +++ b/invokeai/app/services/model_records/model_records_sql.py @@ -45,17 +45,7 @@ from math import ceil from pathlib import Path from typing import List, Optional, Union -from invokeai.app.services.shared.pagination import PaginatedResults -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - BaseModelType, - ModelConfigFactory, - ModelFormat, - ModelType, -) - -from ..shared.sqlite.sqlite_database import SqliteDatabase -from .model_records_base import ( +from invokeai.app.services.model_records.model_records_base import ( DuplicateModelException, ModelRecordChanges, 
ModelRecordOrderBy, @@ -63,6 +53,15 @@ from .model_records_base import ( ModelSummary, UnknownModelException, ) +from invokeai.app.services.shared.pagination import PaginatedResults +from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase +from invokeai.backend.model_manager.config import ( + AnyModelConfig, + BaseModelType, + ModelConfigFactory, + ModelFormat, + ModelType, +) class ModelRecordServiceSQL(ModelRecordServiceBase): diff --git a/invokeai/app/services/names/names_default.py b/invokeai/app/services/names/names_default.py index 104268c8bd..5804a937d6 100644 --- a/invokeai/app/services/names/names_default.py +++ b/invokeai/app/services/names/names_default.py @@ -1,7 +1,6 @@ +from invokeai.app.services.names.names_base import NameServiceBase from invokeai.app.util.misc import uuid_string -from .names_base import NameServiceBase - class SimpleNameService(NameServiceBase): """Creates image names from UUIDs.""" diff --git a/invokeai/app/services/session_processor/session_processor_default.py b/invokeai/app/services/session_processor/session_processor_default.py index 3f348fb239..e4faaeb911 100644 --- a/invokeai/app/services/session_processor/session_processor_default.py +++ b/invokeai/app/services/session_processor/session_processor_default.py @@ -13,24 +13,24 @@ from invokeai.app.services.events.events_common import ( register_events, ) from invokeai.app.services.invocation_stats.invocation_stats_common import GESStatsNotFoundError +from invokeai.app.services.invoker import Invoker from invokeai.app.services.session_processor.session_processor_base import ( + InvocationServices, OnAfterRunNode, OnAfterRunSession, OnBeforeRunNode, OnBeforeRunSession, OnNodeError, OnNonFatalProcessorError, + SessionProcessorBase, + SessionRunnerBase, ) -from invokeai.app.services.session_processor.session_processor_common import CanceledException +from invokeai.app.services.session_processor.session_processor_common import CanceledException, SessionProcessorStatus from invokeai.app.services.session_queue.session_queue_common import SessionQueueItem, SessionQueueItemNotFoundError from invokeai.app.services.shared.graph import NodeInputError from invokeai.app.services.shared.invocation_context import InvocationContextData, build_invocation_context from invokeai.app.util.profiler import Profiler -from ..invoker import Invoker -from .session_processor_base import InvocationServices, SessionProcessorBase, SessionRunnerBase -from .session_processor_common import SessionProcessorStatus - class DefaultSessionRunner(SessionRunnerBase): """Processes a single session's invocations.""" diff --git a/invokeai/app/services/urls/urls_default.py b/invokeai/app/services/urls/urls_default.py index ff5071333f..d570521fb8 100644 --- a/invokeai/app/services/urls/urls_default.py +++ b/invokeai/app/services/urls/urls_default.py @@ -1,6 +1,6 @@ import os -from .urls_base import UrlServiceBase +from invokeai.app.services.urls.urls_base import UrlServiceBase class LocalUrlService(UrlServiceBase): diff --git a/invokeai/app/util/step_callback.py b/invokeai/app/util/step_callback.py index 8992e59ace..c0c101cd75 100644 --- a/invokeai/app/util/step_callback.py +++ b/invokeai/app/util/step_callback.py @@ -5,9 +5,8 @@ from PIL import Image from invokeai.app.services.session_processor.session_processor_common import CanceledException, ProgressImage from invokeai.backend.model_manager.config import BaseModelType - -from ...backend.stable_diffusion import PipelineIntermediateState -from ...backend.util.util import 
image_to_dataURL +from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState +from invokeai.backend.util.util import image_to_dataURL if TYPE_CHECKING: from invokeai.app.services.events.events_base import EventServiceBase diff --git a/invokeai/backend/image_util/__init__.py b/invokeai/backend/image_util/__init__.py index f45af9feb4..bc5eed7ddd 100644 --- a/invokeai/backend/image_util/__init__.py +++ b/invokeai/backend/image_util/__init__.py @@ -2,6 +2,11 @@ Initialization file for invokeai.backend.image_util methods. """ -from .infill_methods.patchmatch import PatchMatch # noqa: F401 -from .pngwriter import PngWriter, PromptFormatter, retrieve_metadata, write_metadata # noqa: F401 -from .util import InitImageResizer, make_grid # noqa: F401 +from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch # noqa: F401 +from invokeai.backend.image_util.pngwriter import ( # noqa: F401 + PngWriter, + PromptFormatter, + retrieve_metadata, + write_metadata, +) +from invokeai.backend.image_util.util import InitImageResizer, make_grid # noqa: F401 diff --git a/invokeai/backend/image_util/basicsr/rrdbnet_arch.py b/invokeai/backend/image_util/basicsr/rrdbnet_arch.py index cdb77f3c21..a99a697123 100644 --- a/invokeai/backend/image_util/basicsr/rrdbnet_arch.py +++ b/invokeai/backend/image_util/basicsr/rrdbnet_arch.py @@ -2,7 +2,7 @@ import torch from torch import nn as nn from torch.nn import functional as F -from .arch_util import default_init_weights, make_layer, pixel_unshuffle +from invokeai.backend.image_util.basicsr.arch_util import default_init_weights, make_layer, pixel_unshuffle class ResidualDenseBlock(nn.Module): diff --git a/invokeai/backend/image_util/depth_anything/model/dpt.py b/invokeai/backend/image_util/depth_anything/model/dpt.py index e1101b3c39..9b1e84c7bd 100644 --- a/invokeai/backend/image_util/depth_anything/model/dpt.py +++ b/invokeai/backend/image_util/depth_anything/model/dpt.py @@ -4,7 +4,7 @@ import torch import torch.nn as nn import torch.nn.functional as F -from .blocks import FeatureFusionBlock, _make_scratch +from invokeai.backend.image_util.depth_anything.model.blocks import FeatureFusionBlock, _make_scratch torchhub_path = Path(__file__).parent.parent / "torchhub" diff --git a/invokeai/backend/image_util/dw_openpose/wholebody.py b/invokeai/backend/image_util/dw_openpose/wholebody.py index 3f77f20b9c..ce028df1fe 100644 --- a/invokeai/backend/image_util/dw_openpose/wholebody.py +++ b/invokeai/backend/image_util/dw_openpose/wholebody.py @@ -8,11 +8,10 @@ import numpy as np import onnxruntime as ort from invokeai.app.services.config.config_default import get_config +from invokeai.backend.image_util.dw_openpose.onnxdet import inference_detector +from invokeai.backend.image_util.dw_openpose.onnxpose import inference_pose from invokeai.backend.util.devices import TorchDevice -from .onnxdet import inference_detector -from .onnxpose import inference_pose - config = get_config() diff --git a/invokeai/backend/ip_adapter/ip_adapter.py b/invokeai/backend/ip_adapter/ip_adapter.py index abd6ca655a..75286f4733 100644 --- a/invokeai/backend/ip_adapter/ip_adapter.py +++ b/invokeai/backend/ip_adapter/ip_adapter.py @@ -11,9 +11,8 @@ from PIL import Image from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from invokeai.backend.ip_adapter.ip_attention_weights import IPAttentionWeights - -from ..raw_model import RawModel -from .resampler import Resampler +from invokeai.backend.ip_adapter.resampler import Resampler +from 
invokeai.backend.raw_model import RawModel class IPAdapterStateDict(TypedDict): diff --git a/invokeai/backend/lora.py b/invokeai/backend/lora.py index 8d17de0837..9c669a4c78 100644 --- a/invokeai/backend/lora.py +++ b/invokeai/backend/lora.py @@ -10,10 +10,9 @@ from safetensors.torch import load_file from typing_extensions import Self from invokeai.backend.model_manager import BaseModelType +from invokeai.backend.raw_model import RawModel from invokeai.backend.util.devices import TorchDevice -from .raw_model import RawModel - class LoRALayerBase: # rank: Optional[int] diff --git a/invokeai/backend/model_manager/__init__.py b/invokeai/backend/model_manager/__init__.py index 98cc5054c7..199c0c01f7 100644 --- a/invokeai/backend/model_manager/__init__.py +++ b/invokeai/backend/model_manager/__init__.py @@ -1,6 +1,6 @@ """Re-export frequently-used symbols from the Model Manager backend.""" -from .config import ( +from invokeai.backend.model_manager.config import ( AnyModel, AnyModelConfig, BaseModelType, @@ -13,9 +13,9 @@ from .config import ( SchedulerPredictionType, SubModelType, ) -from .load import LoadedModel -from .probe import ModelProbe -from .search import ModelSearch +from invokeai.backend.model_manager.load import LoadedModel +from invokeai.backend.model_manager.probe import ModelProbe +from invokeai.backend.model_manager.search import ModelSearch __all__ = [ "AnyModel", diff --git a/invokeai/backend/model_manager/config.py b/invokeai/backend/model_manager/config.py index 3579a0c7b2..f6cc5929c8 100644 --- a/invokeai/backend/model_manager/config.py +++ b/invokeai/backend/model_manager/config.py @@ -30,11 +30,10 @@ from diffusers.models.modeling_utils import ModelMixin from pydantic import BaseModel, ConfigDict, Discriminator, Field, Tag, TypeAdapter from typing_extensions import Annotated, Any, Dict -from invokeai.app.invocations.constants import SCHEDULER_NAME_VALUES from invokeai.app.util.misc import uuid_string from invokeai.backend.model_hash.hash_validator import validate_hash - -from ..raw_model import RawModel +from invokeai.backend.raw_model import RawModel +from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_NAME_VALUES # ModelMixin is the base class for all diffusers and transformers models # RawModel is the InvokeAI wrapper class for ip_adapters, loras, textual_inversion and onnx runtime diff --git a/invokeai/backend/model_manager/libc_util.py b/invokeai/backend/model_manager/libc_util.py deleted file mode 100644 index 1fbcae0a93..0000000000 --- a/invokeai/backend/model_manager/libc_util.py +++ /dev/null @@ -1,75 +0,0 @@ -import ctypes - - -class Struct_mallinfo2(ctypes.Structure): - """A ctypes Structure that matches the libc mallinfo2 struct. 
- - Docs: - - https://man7.org/linux/man-pages/man3/mallinfo.3.html - - https://www.gnu.org/software/libc/manual/html_node/Statistics-of-Malloc.html - - struct mallinfo2 { - size_t arena; /* Non-mmapped space allocated (bytes) */ - size_t ordblks; /* Number of free chunks */ - size_t smblks; /* Number of free fastbin blocks */ - size_t hblks; /* Number of mmapped regions */ - size_t hblkhd; /* Space allocated in mmapped regions (bytes) */ - size_t usmblks; /* See below */ - size_t fsmblks; /* Space in freed fastbin blocks (bytes) */ - size_t uordblks; /* Total allocated space (bytes) */ - size_t fordblks; /* Total free space (bytes) */ - size_t keepcost; /* Top-most, releasable space (bytes) */ - }; - """ - - _fields_ = [ - ("arena", ctypes.c_size_t), - ("ordblks", ctypes.c_size_t), - ("smblks", ctypes.c_size_t), - ("hblks", ctypes.c_size_t), - ("hblkhd", ctypes.c_size_t), - ("usmblks", ctypes.c_size_t), - ("fsmblks", ctypes.c_size_t), - ("uordblks", ctypes.c_size_t), - ("fordblks", ctypes.c_size_t), - ("keepcost", ctypes.c_size_t), - ] - - def __str__(self): - s = "" - s += f"{'arena': <10}= {(self.arena/2**30):15.5f} # Non-mmapped space allocated (GB) (uordblks + fordblks)\n" - s += f"{'ordblks': <10}= {(self.ordblks): >15} # Number of free chunks\n" - s += f"{'smblks': <10}= {(self.smblks): >15} # Number of free fastbin blocks \n" - s += f"{'hblks': <10}= {(self.hblks): >15} # Number of mmapped regions \n" - s += f"{'hblkhd': <10}= {(self.hblkhd/2**30):15.5f} # Space allocated in mmapped regions (GB)\n" - s += f"{'usmblks': <10}= {(self.usmblks): >15} # Unused\n" - s += f"{'fsmblks': <10}= {(self.fsmblks/2**30):15.5f} # Space in freed fastbin blocks (GB)\n" - s += ( - f"{'uordblks': <10}= {(self.uordblks/2**30):15.5f} # Space used by in-use allocations (non-mmapped)" - " (GB)\n" - ) - s += f"{'fordblks': <10}= {(self.fordblks/2**30):15.5f} # Space in free blocks (non-mmapped) (GB)\n" - s += f"{'keepcost': <10}= {(self.keepcost/2**30):15.5f} # Top-most, releasable space (GB)\n" - return s - - -class LibcUtil: - """A utility class for interacting with the C Standard Library (`libc`) via ctypes. - - Note that this class will raise on __init__() if 'libc.so.6' can't be found. Take care to handle environments where - this shared library is not available. - - TODO: Improve cross-OS compatibility of this class. - """ - - def __init__(self): - self._libc = ctypes.cdll.LoadLibrary("libc.so.6") - - def mallinfo2(self) -> Struct_mallinfo2: - """Calls `libc` `mallinfo2`. - - Docs: https://man7.org/linux/man-pages/man3/mallinfo.3.html - """ - mallinfo2 = self._libc.mallinfo2 - mallinfo2.restype = Struct_mallinfo2 - return mallinfo2() diff --git a/invokeai/backend/model_manager/load/__init__.py b/invokeai/backend/model_manager/load/__init__.py index 3d34f02727..d9a07bc250 100644 --- a/invokeai/backend/model_manager/load/__init__.py +++ b/invokeai/backend/model_manager/load/__init__.py @@ -6,10 +6,10 @@ Init file for the model loader. 
from importlib import import_module from pathlib import Path -from .load_base import LoadedModel, LoadedModelWithoutConfig, ModelLoaderBase -from .load_default import ModelLoader -from .model_cache.model_cache_default import ModelCache -from .model_loader_registry import ModelLoaderRegistry, ModelLoaderRegistryBase +from invokeai.backend.model_manager.load.load_base import LoadedModel, LoadedModelWithoutConfig, ModelLoaderBase +from invokeai.backend.model_manager.load.load_default import ModelLoader +from invokeai.backend.model_manager.load.model_cache.model_cache_default import ModelCache +from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry, ModelLoaderRegistryBase # This registers the subclasses that implement loaders of specific model types loaders = [x.stem for x in Path(Path(__file__).parent, "model_loaders").glob("*.py") if x.stem != "__init__"] diff --git a/invokeai/backend/model_manager/load/memory_snapshot.py b/invokeai/backend/model_manager/load/memory_snapshot.py index 195e39361b..66dd070963 100644 --- a/invokeai/backend/model_manager/load/memory_snapshot.py +++ b/invokeai/backend/model_manager/load/memory_snapshot.py @@ -5,7 +5,7 @@ import psutil import torch from typing_extensions import Self -from ..util.libc_util import LibcUtil, Struct_mallinfo2 +from invokeai.backend.model_manager.util.libc_util import LibcUtil, Struct_mallinfo2 GB = 2**30 # 1 GB diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py index c9e68a926a..9027b7b5b7 100644 --- a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py +++ b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py @@ -29,13 +29,17 @@ import torch from invokeai.backend.model_manager import AnyModel, SubModelType from invokeai.backend.model_manager.load.memory_snapshot import MemorySnapshot, get_pretty_snapshot_diff +from invokeai.backend.model_manager.load.model_cache.model_cache_base import ( + CacheRecord, + CacheStats, + ModelCacheBase, + ModelLockerBase, +) +from invokeai.backend.model_manager.load.model_cache.model_locker import ModelLocker from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data from invokeai.backend.util.devices import TorchDevice from invokeai.backend.util.logging import InvokeAILogger -from .model_cache_base import CacheRecord, CacheStats, ModelCacheBase, ModelLockerBase -from .model_locker import ModelLocker - # Maximum size of the cache, in gigs # Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously DEFAULT_MAX_CACHE_SIZE = 6.0 diff --git a/invokeai/backend/model_manager/load/model_cache/model_locker.py b/invokeai/backend/model_manager/load/model_cache/model_locker.py index 9de17ca5f5..efbfc726f7 100644 --- a/invokeai/backend/model_manager/load/model_cache/model_locker.py +++ b/invokeai/backend/model_manager/load/model_cache/model_locker.py @@ -7,8 +7,11 @@ from typing import Dict, Optional import torch from invokeai.backend.model_manager import AnyModel - -from .model_cache_base import CacheRecord, ModelCacheBase, ModelLockerBase +from invokeai.backend.model_manager.load.model_cache.model_cache_base import ( + CacheRecord, + ModelCacheBase, + ModelLockerBase, +) class ModelLocker(ModelLockerBase): diff --git a/invokeai/backend/model_manager/load/model_loader_registry.py b/invokeai/backend/model_manager/load/model_loader_registry.py index bb6bd18d7f..0ce8f8a6b4 100644 --- 
a/invokeai/backend/model_manager/load/model_loader_registry.py +++ b/invokeai/backend/model_manager/load/model_loader_registry.py @@ -18,7 +18,7 @@ Use like this: from abc import ABC, abstractmethod from typing import Callable, Dict, Optional, Tuple, Type, TypeVar -from ..config import ( +from invokeai.backend.model_manager.config import ( AnyModelConfig, BaseModelType, ModelConfigBase, @@ -26,7 +26,7 @@ from ..config import ( ModelType, SubModelType, ) -from . import ModelLoaderBase +from invokeai.backend.model_manager.load import ModelLoaderBase class ModelLoaderRegistryBase(ABC): diff --git a/invokeai/backend/model_manager/load/model_loaders/controlnet.py b/invokeai/backend/model_manager/load/model_loaders/controlnet.py index b2fae37d29..82091874df 100644 --- a/invokeai/backend/model_manager/load/model_loaders/controlnet.py +++ b/invokeai/backend/model_manager/load/model_loaders/controlnet.py @@ -13,9 +13,8 @@ from invokeai.backend.model_manager import ( ModelType, ) from invokeai.backend.model_manager.config import ControlNetCheckpointConfig, SubModelType - -from .. import ModelLoaderRegistry -from .generic_diffusers import GenericDiffusersLoader +from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry +from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ControlNet, format=ModelFormat.Diffusers) diff --git a/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py b/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py index 6320797b8a..dfe38aa79c 100644 --- a/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py +++ b/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py @@ -18,8 +18,8 @@ from invokeai.backend.model_manager import ( SubModelType, ) from invokeai.backend.model_manager.config import DiffusersConfigBase - -from .. import ModelLoader, ModelLoaderRegistry +from invokeai.backend.model_manager.load.load_default import ModelLoader +from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.CLIPVision, format=ModelFormat.Diffusers) diff --git a/invokeai/backend/model_manager/load/model_loaders/lora.py b/invokeai/backend/model_manager/load/model_loaders/lora.py index aa0acab6bc..367107c662 100644 --- a/invokeai/backend/model_manager/load/model_loaders/lora.py +++ b/invokeai/backend/model_manager/load/model_loaders/lora.py @@ -15,9 +15,9 @@ from invokeai.backend.model_manager import ( ModelType, SubModelType, ) +from invokeai.backend.model_manager.load.load_default import ModelLoader from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase - -from .. import ModelLoader, ModelLoaderRegistry +from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.LoRA, format=ModelFormat.Diffusers) diff --git a/invokeai/backend/model_manager/load/model_loaders/onnx.py b/invokeai/backend/model_manager/load/model_loaders/onnx.py index b43e0a1bdf..0a5d8477c4 100644 --- a/invokeai/backend/model_manager/load/model_loaders/onnx.py +++ b/invokeai/backend/model_manager/load/model_loaders/onnx.py @@ -13,9 +13,8 @@ from invokeai.backend.model_manager import ( ModelType, SubModelType, ) - -from .. 
import ModelLoaderRegistry -from .generic_diffusers import GenericDiffusersLoader +from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry +from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ONNX, format=ModelFormat.ONNX) diff --git a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py index 95caf848e5..d90352f0e6 100644 --- a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py +++ b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py @@ -25,11 +25,10 @@ from invokeai.backend.model_manager.config import ( DiffusersConfigBase, MainCheckpointConfig, ) +from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry +from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader from invokeai.backend.util.silence_warnings import SilenceWarnings -from .. import ModelLoaderRegistry -from .generic_diffusers import GenericDiffusersLoader - VARIANT_TO_IN_CHANNEL_MAP = { ModelVariantType.Normal: 4, ModelVariantType.Depth: 5, diff --git a/invokeai/backend/model_manager/load/model_loaders/textual_inversion.py b/invokeai/backend/model_manager/load/model_loaders/textual_inversion.py index cfdc689cc8..8d0f08f91a 100644 --- a/invokeai/backend/model_manager/load/model_loaders/textual_inversion.py +++ b/invokeai/backend/model_manager/load/model_loaders/textual_inversion.py @@ -12,10 +12,10 @@ from invokeai.backend.model_manager import ( ModelType, SubModelType, ) +from invokeai.backend.model_manager.load.load_default import ModelLoader +from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry from invokeai.backend.textual_inversion import TextualInversionModelRaw -from .. import ModelLoader, ModelLoaderRegistry - @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.TextualInversion, format=ModelFormat.EmbeddingFile) @ModelLoaderRegistry.register( diff --git a/invokeai/backend/model_manager/load/model_loaders/vae.py b/invokeai/backend/model_manager/load/model_loaders/vae.py index 3c496f59ab..bae29ea773 100644 --- a/invokeai/backend/model_manager/load/model_loaders/vae.py +++ b/invokeai/backend/model_manager/load/model_loaders/vae.py @@ -12,9 +12,8 @@ from invokeai.backend.model_manager import ( ModelType, ) from invokeai.backend.model_manager.config import AnyModel, SubModelType, VAECheckpointConfig - -from .. import ModelLoaderRegistry -from .generic_diffusers import GenericDiffusersLoader +from invokeai.backend.model_manager.load.model_loader_registry import ModelLoaderRegistry +from invokeai.backend.model_manager.load.model_loaders.generic_diffusers import GenericDiffusersLoader @ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.VAE, format=ModelFormat.Diffusers) diff --git a/invokeai/backend/model_manager/load/model_util.py b/invokeai/backend/model_manager/load/model_util.py index 57ff81c2ef..f070a42965 100644 --- a/invokeai/backend/model_manager/load/model_util.py +++ b/invokeai/backend/model_manager/load/model_util.py @@ -39,7 +39,7 @@ def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int: else: # TODO(ryand): Promote this from a log to an exception once we are confident that we are handling all of the # supported model types. 
- logger.error( + logger.warning( f"Failed to calculate model size for unexpected model type: {type(model)}. The model will be treated as " "having size 0." ) diff --git a/invokeai/backend/model_manager/merge.py b/invokeai/backend/model_manager/merge.py index 125e99be93..b00bc99f3e 100644 --- a/invokeai/backend/model_manager/merge.py +++ b/invokeai/backend/model_manager/merge.py @@ -17,16 +17,10 @@ from diffusers.utils import logging as dlogging from invokeai.app.services.model_install import ModelInstallServiceBase from invokeai.app.services.model_records.model_records_base import ModelRecordChanges +from invokeai.backend.model_manager import AnyModelConfig, BaseModelType, ModelType, ModelVariantType +from invokeai.backend.model_manager.config import MainDiffusersConfig from invokeai.backend.util.devices import TorchDevice -from . import ( - AnyModelConfig, - BaseModelType, - ModelType, - ModelVariantType, -) -from .config import MainDiffusersConfig - class MergeInterpolationMethod(str, Enum): WeightedSum = "weighted_sum" diff --git a/invokeai/backend/model_manager/metadata/__init__.py b/invokeai/backend/model_manager/metadata/__init__.py index 1fd080b679..76da268153 100644 --- a/invokeai/backend/model_manager/metadata/__init__.py +++ b/invokeai/backend/model_manager/metadata/__init__.py @@ -16,8 +16,8 @@ data = HuggingFaceMetadataFetch().from_id("") assert isinstance(data, HuggingFaceMetadata) """ -from .fetch import HuggingFaceMetadataFetch, ModelMetadataFetchBase -from .metadata_base import ( +from invokeai.backend.model_manager.metadata.fetch import HuggingFaceMetadataFetch, ModelMetadataFetchBase +from invokeai.backend.model_manager.metadata.metadata_base import ( AnyModelRepoMetadata, AnyModelRepoMetadataValidator, BaseMetadata, diff --git a/invokeai/backend/model_manager/metadata/fetch/__init__.py b/invokeai/backend/model_manager/metadata/fetch/__init__.py index 652a3cf6b7..62b3dc4d54 100644 --- a/invokeai/backend/model_manager/metadata/fetch/__init__.py +++ b/invokeai/backend/model_manager/metadata/fetch/__init__.py @@ -10,7 +10,7 @@ data = HuggingFaceMetadataFetch().from_id("") assert isinstance(data, HuggingFaceMetadata) """ -from .fetch_base import ModelMetadataFetchBase -from .huggingface import HuggingFaceMetadataFetch +from invokeai.backend.model_manager.metadata.fetch.fetch_base import ModelMetadataFetchBase +from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch __all__ = ["ModelMetadataFetchBase", "HuggingFaceMetadataFetch"] diff --git a/invokeai/backend/model_manager/metadata/fetch/fetch_base.py b/invokeai/backend/model_manager/metadata/fetch/fetch_base.py index f84479404e..b86a029b3e 100644 --- a/invokeai/backend/model_manager/metadata/fetch/fetch_base.py +++ b/invokeai/backend/model_manager/metadata/fetch/fetch_base.py @@ -18,8 +18,11 @@ from pydantic.networks import AnyHttpUrl from requests.sessions import Session from invokeai.backend.model_manager import ModelRepoVariant - -from ..metadata_base import AnyModelRepoMetadata, AnyModelRepoMetadataValidator, BaseMetadata +from invokeai.backend.model_manager.metadata.metadata_base import ( + AnyModelRepoMetadata, + AnyModelRepoMetadataValidator, + BaseMetadata, +) class ModelMetadataFetchBase(ABC): diff --git a/invokeai/backend/model_manager/metadata/fetch/huggingface.py b/invokeai/backend/model_manager/metadata/fetch/huggingface.py index ab78b3e064..8787ceeb36 100644 --- a/invokeai/backend/model_manager/metadata/fetch/huggingface.py +++ 
b/invokeai/backend/model_manager/metadata/fetch/huggingface.py @@ -25,14 +25,13 @@ from pydantic.networks import AnyHttpUrl from requests.sessions import Session from invokeai.backend.model_manager.config import ModelRepoVariant - -from ..metadata_base import ( +from invokeai.backend.model_manager.metadata.fetch.fetch_base import ModelMetadataFetchBase +from invokeai.backend.model_manager.metadata.metadata_base import ( AnyModelRepoMetadata, HuggingFaceMetadata, RemoteModelFile, UnknownMetadataException, ) -from .fetch_base import ModelMetadataFetchBase HF_MODEL_RE = r"https?://huggingface.co/([\w\-.]+/[\w\-.]+)" diff --git a/invokeai/backend/model_manager/metadata/metadata_base.py b/invokeai/backend/model_manager/metadata/metadata_base.py index f9f5335d17..97fc598380 100644 --- a/invokeai/backend/model_manager/metadata/metadata_base.py +++ b/invokeai/backend/model_manager/metadata/metadata_base.py @@ -24,8 +24,7 @@ from requests.sessions import Session from typing_extensions import Annotated from invokeai.backend.model_manager import ModelRepoVariant - -from ..util import select_hf_files +from invokeai.backend.model_manager.util.select_hf_files import filter_files class UnknownMetadataException(Exception): @@ -112,9 +111,7 @@ class HuggingFaceMetadata(ModelMetadataWithFiles): session = session or Session() configure_http_backend(backend_factory=lambda: session) # used in testing - paths = select_hf_files.filter_files( - [x.path for x in self.files], variant, subfolder - ) # all files in the model + paths = filter_files([x.path for x in self.files], variant, subfolder) # all files in the model prefix = f"{subfolder}/" if subfolder else "" # the next step reads model_index.json to determine which subdirectories belong # to the model diff --git a/invokeai/backend/model_manager/probe.py b/invokeai/backend/model_manager/probe.py index 3b36e2f5af..42727a31d7 100644 --- a/invokeai/backend/model_manager/probe.py +++ b/invokeai/backend/model_manager/probe.py @@ -11,10 +11,7 @@ from picklescan.scanner import scan_file_path import invokeai.backend.util.logging as logger from invokeai.app.util.misc import uuid_string from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS, ModelHash -from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel -from invokeai.backend.util.silence_warnings import SilenceWarnings - -from .config import ( +from invokeai.backend.model_manager.config import ( AnyModelConfig, BaseModelType, ControlAdapterDefaultSettings, @@ -28,7 +25,9 @@ from .config import ( ModelVariantType, SchedulerPredictionType, ) -from .util.model_util import lora_token_vector_length, read_checkpoint_meta +from invokeai.backend.model_manager.util.model_util import lora_token_vector_length, read_checkpoint_meta +from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel +from invokeai.backend.util.silence_warnings import SilenceWarnings CkptType = Dict[str | int, Any] diff --git a/invokeai/backend/model_manager/util/select_hf_files.py b/invokeai/backend/model_manager/util/select_hf_files.py index 4a63ab27b7..b0a9551437 100644 --- a/invokeai/backend/model_manager/util/select_hf_files.py +++ b/invokeai/backend/model_manager/util/select_hf_files.py @@ -17,7 +17,7 @@ from dataclasses import dataclass from pathlib import Path from typing import Dict, List, Optional, Set -from ..config import ModelRepoVariant +from invokeai.backend.model_manager.config import ModelRepoVariant def filter_files( diff --git a/invokeai/backend/model_patcher.py 
b/invokeai/backend/model_patcher.py
index 051d114276..8c7a62c371 100644
--- a/invokeai/backend/model_patcher.py
+++ b/invokeai/backend/model_patcher.py
@@ -13,14 +13,13 @@ from diffusers import OnnxRuntimeModel, UNet2DConditionModel
 from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
 
 from invokeai.app.shared.models import FreeUConfig
+from invokeai.backend.lora import LoRAModelRaw
 from invokeai.backend.model_manager import AnyModel
 from invokeai.backend.model_manager.load.optimizations import skip_torch_weight_init
 from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
+from invokeai.backend.textual_inversion import TextualInversionManager, TextualInversionModelRaw
 from invokeai.backend.util.devices import TorchDevice
 
-from .lora import LoRAModelRaw
-from .textual_inversion import TextualInversionManager, TextualInversionModelRaw
-
 """
 loras = [
     (lora_model1, 0.7),
@@ -338,7 +337,7 @@ class ONNXModelPatcher:
         loras: List[Tuple[LoRAModelRaw, float]],
         prefix: str,
     ) -> None:
-        from .models.base import IAIOnnxRuntimeModel
+        from invokeai.backend.models.base import IAIOnnxRuntimeModel
 
         if not isinstance(model, IAIOnnxRuntimeModel):
             raise Exception("Only IAIOnnxRuntimeModel models supported")
@@ -425,7 +424,7 @@ class ONNXModelPatcher:
         text_encoder: IAIOnnxRuntimeModel,
         ti_list: List[Tuple[str, Any]],
     ) -> Iterator[Tuple[CLIPTokenizer, TextualInversionManager]]:
-        from .models.base import IAIOnnxRuntimeModel
+        from invokeai.backend.models.base import IAIOnnxRuntimeModel
 
         if not isinstance(text_encoder, IAIOnnxRuntimeModel):
             raise Exception("Only IAIOnnxRuntimeModel models supported")
diff --git a/invokeai/backend/onnx/onnx_runtime.py b/invokeai/backend/onnx/onnx_runtime.py
index 9fcd4d093f..d562a46dff 100644
--- a/invokeai/backend/onnx/onnx_runtime.py
+++ b/invokeai/backend/onnx/onnx_runtime.py
@@ -10,7 +10,7 @@ import torch
 from onnx import numpy_helper
 from onnxruntime import InferenceSession, SessionOptions, get_available_providers
 
-from ..raw_model import RawModel
+from invokeai.backend.raw_model import RawModel
 
 ONNX_WEIGHTS_NAME = "model.onnx"
diff --git a/invokeai/backend/stable_diffusion/__init__.py b/invokeai/backend/stable_diffusion/__init__.py
index ed6782eefa..440cb4410b 100644
--- a/invokeai/backend/stable_diffusion/__init__.py
+++ b/invokeai/backend/stable_diffusion/__init__.py
@@ -2,9 +2,12 @@
 Initialization file for the invokeai.backend.stable_diffusion package
 """
 
-from .diffusers_pipeline import PipelineIntermediateState, StableDiffusionGeneratorPipeline  # noqa: F401
-from .diffusion import InvokeAIDiffuserComponent  # noqa: F401
-from .seamless import set_seamless  # noqa: F401
+from invokeai.backend.stable_diffusion.diffusers_pipeline import (  # noqa: F401
+    PipelineIntermediateState,
+    StableDiffusionGeneratorPipeline,
+)
+from invokeai.backend.stable_diffusion.diffusion import InvokeAIDiffuserComponent  # noqa: F401
+from invokeai.backend.stable_diffusion.seamless import set_seamless  # noqa: F401
 
 __all__ = [
     "PipelineIntermediateState",
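The `stable_diffusion/__init__.py` hunk above (and the `diffusion/__init__.py` and `schedulers/__init__.py` hunks that follow) preserve each package's public surface: the same symbols are re-exported at package level, now via absolute imports, with `# noqa: F401` marking the intentional re-exports and `__all__` pinning the public names. A quick check of what that re-export buys, assuming the `invokeai` package is importable:

```python
# The package __init__ re-exports PipelineIntermediateState, so both paths
# resolve to the same class object.
from invokeai.backend.stable_diffusion import PipelineIntermediateState as from_package
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState as from_module

assert from_package is from_module
```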
invokeai.backend.stable_diffusion.diffusion.shared_invokeai_diffusion import ( + InvokeAIDiffuserComponent, # noqa: F401 +) diff --git a/invokeai/backend/stable_diffusion/schedulers/__init__.py b/invokeai/backend/stable_diffusion/schedulers/__init__.py index 0b780d3ee2..6c02acda51 100644 --- a/invokeai/backend/stable_diffusion/schedulers/__init__.py +++ b/invokeai/backend/stable_diffusion/schedulers/__init__.py @@ -1,3 +1,3 @@ -from .schedulers import SCHEDULER_MAP # noqa: F401 +from invokeai.backend.stable_diffusion.schedulers.schedulers import SCHEDULER_MAP # noqa: F401 __all__ = ["SCHEDULER_MAP"] diff --git a/invokeai/backend/stable_diffusion/schedulers/schedulers.py b/invokeai/backend/stable_diffusion/schedulers/schedulers.py index 3a55d52d4a..7d6851e278 100644 --- a/invokeai/backend/stable_diffusion/schedulers/schedulers.py +++ b/invokeai/backend/stable_diffusion/schedulers/schedulers.py @@ -1,3 +1,5 @@ +from typing import Any, Literal, Type + from diffusers import ( DDIMScheduler, DDPMScheduler, @@ -16,8 +18,36 @@ from diffusers import ( TCDScheduler, UniPCMultistepScheduler, ) +from diffusers.schedulers.scheduling_utils import SchedulerMixin -SCHEDULER_MAP = { +SCHEDULER_NAME_VALUES = Literal[ + "ddim", + "ddpm", + "deis", + "lms", + "lms_k", + "pndm", + "heun", + "heun_k", + "euler", + "euler_k", + "euler_a", + "kdpm_2", + "kdpm_2_a", + "dpmpp_2s", + "dpmpp_2s_k", + "dpmpp_2m", + "dpmpp_2m_k", + "dpmpp_2m_sde", + "dpmpp_2m_sde_k", + "dpmpp_sde", + "dpmpp_sde_k", + "unipc", + "lcm", + "tcd", +] + +SCHEDULER_MAP: dict[SCHEDULER_NAME_VALUES, tuple[Type[SchedulerMixin], dict[str, Any]]] = { "ddim": (DDIMScheduler, {}), "ddpm": (DDPMScheduler, {}), "deis": (DEISMultistepScheduler, {}), diff --git a/invokeai/backend/textual_inversion.py b/invokeai/backend/textual_inversion.py index 4c7625ea37..483f2da88c 100644 --- a/invokeai/backend/textual_inversion.py +++ b/invokeai/backend/textual_inversion.py @@ -9,7 +9,7 @@ from safetensors.torch import load_file from transformers import CLIPTokenizer from typing_extensions import Self -from .raw_model import RawModel +from invokeai.backend.raw_model import RawModel class TextualInversionModelRaw(RawModel): diff --git a/invokeai/backend/util/__init__.py b/invokeai/backend/util/__init__.py index 1e4d467cd0..101215640a 100644 --- a/invokeai/backend/util/__init__.py +++ b/invokeai/backend/util/__init__.py @@ -2,8 +2,8 @@ Initialization file for invokeai.backend.util """ -from .logging import InvokeAILogger -from .util import GIG, Chdir, directory_size +from invokeai.backend.util.logging import InvokeAILogger +from invokeai.backend.util.util import GIG, Chdir, directory_size __all__ = [ "GIG", diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index dc09ac313f..4b102d6cf3 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -17,9 +17,12 @@ }, "boards": { "addBoard": "Add Board", + "addPrivateBoard": "Add Private Board", + "addSharedBoard": "Add Shared Board", "archiveBoard": "Archive Board", "archived": "Archived", "autoAddBoard": "Auto-Add Board", + "boards": "Boards", "selectedForAutoAdd": "Selected for Auto-Add", "bottomMessage": "Deleting this board and its images will reset any features currently using them.", "cancel": "Cancel", @@ -36,8 +39,10 @@ "movingImagesToBoard_other": "Moving {{count}} images to board:", "myBoard": "My Board", "noMatching": "No matching Boards", + "private": "Private", "searchBoard": "Search Boards...", 
"selectBoard": "Select a Board", + "shared": "Shared", "topMessage": "This board contains images used in the following features:", "unarchiveBoard": "Unarchive Board", "uncategorized": "Uncategorized", diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts index 2d3db05bf7..9698f85219 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts @@ -1,7 +1,6 @@ import type { TypedStartListening } from '@reduxjs/toolkit'; import { createListenerMiddleware } from '@reduxjs/toolkit'; import { addCommitStagingAreaImageListener } from 'app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener'; -import { addFirstListImagesListener } from 'app/store/middleware/listenerMiddleware/listeners/addFirstListImagesListener.ts'; import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued'; import { addAppConfigReceivedListener } from 'app/store/middleware/listenerMiddleware/listeners/appConfigReceived'; import { addAppStartedListener } from 'app/store/middleware/listenerMiddleware/listeners/appStarted'; @@ -26,7 +25,7 @@ import { addGalleryImageClickedListener } from 'app/store/middleware/listenerMid import { addGalleryOffsetChangedListener } from 'app/store/middleware/listenerMiddleware/listeners/galleryOffsetChanged'; import { addGetOpenAPISchemaListener } from 'app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema'; import { addImageAddedToBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard'; -import { addRequestedSingleImageDeletionListener } from 'app/store/middleware/listenerMiddleware/listeners/imageDeleted'; +import { addImageDeletionListeners } from 'app/store/middleware/listenerMiddleware/listeners/imageDeletionListeners'; import { addImageDroppedListener } from 'app/store/middleware/listenerMiddleware/listeners/imageDropped'; import { addImageRemovedFromBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard'; import { addImagesStarredListener } from 'app/store/middleware/listenerMiddleware/listeners/imagesStarred'; @@ -70,7 +69,7 @@ const startAppListening = listenerMiddleware.startListening as AppStartListening addImageUploadedFulfilledListener(startAppListening); // Image deleted -addRequestedSingleImageDeletionListener(startAppListening); +addImageDeletionListeners(startAppListening); addDeleteBoardAndImagesFulfilledListener(startAppListening); addImageToDeleteSelectedListener(startAppListening); @@ -139,7 +138,6 @@ addModelSelectedListener(startAppListening); addAppStartedListener(startAppListening); addModelsLoadedListener(startAppListening); addAppConfigReceivedListener(startAppListening); -addFirstListImagesListener(startAppListening); // Ad-hoc upscale workflwo addUpscaleRequestedListener(startAppListening); diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener.ts index 0915929245..c569a6e36d 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener.ts +++ 
b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addArchivedOrDeletedBoardListener.ts @@ -15,8 +15,6 @@ export const addArchivedOrDeletedBoardListener = (startAppListening: AppStartLis matcher: isAnyOf( // Updating a board may change its archived status boardsApi.endpoints.updateBoard.matchFulfilled, - // If the selected/auto-add board was deleted from a different session, we'll only know during the list request, - boardsApi.endpoints.listAllBoards.matchFulfilled, // If a board is deleted, we'll need to reset the auto-add board imagesApi.endpoints.deleteBoard.matchFulfilled, imagesApi.endpoints.deleteBoardAndImages.matchFulfilled, diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addFirstListImagesListener.ts.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addFirstListImagesListener.ts.ts deleted file mode 100644 index 5db5f687a1..0000000000 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addFirstListImagesListener.ts.ts +++ /dev/null @@ -1,27 +0,0 @@ -import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; -import { imageSelected } from 'features/gallery/store/gallerySlice'; -import { IMAGE_CATEGORIES } from 'features/gallery/store/types'; -import { imagesApi } from 'services/api/endpoints/images'; -import { getListImagesUrl } from 'services/api/util'; - -export const addFirstListImagesListener = (startAppListening: AppStartListening) => { - startAppListening({ - matcher: imagesApi.endpoints.listImages.matchFulfilled, - effect: async (action, { dispatch, unsubscribe, cancelActiveListeners }) => { - // Only run this listener on the first listImages request for no-board images - if (action.meta.arg.queryCacheKey !== getListImagesUrl({ board_id: 'none', categories: IMAGE_CATEGORIES })) { - return; - } - - // this should only run once - cancelActiveListeners(); - unsubscribe(); - - const data = action.payload; - - if (data.items.length > 0) { - dispatch(imageSelected(data.items[0] ?? 
null)); - } - }, - }); -}; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeletionListeners.ts similarity index 70% rename from invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts rename to invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeletionListeners.ts index 916ec2c47f..056346cb68 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeletionListeners.ts @@ -22,11 +22,11 @@ import { imageSelected } from 'features/gallery/store/gallerySlice'; import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice'; import { isImageFieldInputInstance } from 'features/nodes/types/field'; import { isInvocationNode } from 'features/nodes/types/invocation'; -import { forEach } from 'lodash-es'; -import { api } from 'services/api'; +import { forEach, intersectionBy } from 'lodash-es'; import { imagesApi } from 'services/api/endpoints/images'; import type { ImageDTO } from 'services/api/types'; +// Some utils to delete images from different parts of the app const deleteNodesImages = (state: RootState, dispatch: AppDispatch, imageDTO: ImageDTO) => { state.nodes.present.nodes.forEach((node) => { if (!isInvocationNode(node)) { @@ -97,10 +97,11 @@ const deleteControlLayerImages = (state: RootState, dispatch: AppDispatch, image }); }; -export const addRequestedSingleImageDeletionListener = (startAppListening: AppStartListening) => { +export const addImageDeletionListeners = (startAppListening: AppStartListening) => { + // Handle single image deletion startAppListening({ actionCreator: imageDeletionConfirmed, - effect: async (action, { dispatch, getState, condition }) => { + effect: async (action, { dispatch, getState }) => { const { imageDTOs, imagesUsage } = action.payload; if (imageDTOs.length !== 1 || imagesUsage.length !== 1) { @@ -116,49 +117,46 @@ export const addRequestedSingleImageDeletionListener = (startAppListening: AppSt return; } - dispatch(isModalOpenChanged(false)); - const state = getState(); + try { + const state = getState(); + await dispatch(imagesApi.endpoints.deleteImage.initiate(imageDTO)).unwrap(); - // We need to reset the features where the image is in use - none of these work if their image(s) don't exist - if (imageUsage.isCanvasImage) { - dispatch(resetCanvas()); - } + if (state.gallery.selection.some((i) => i.image_name === imageDTO.image_name)) { + // The deleted image was a selected image, we need to select the next image + const newSelection = state.gallery.selection.filter((i) => i.image_name !== imageDTO.image_name); + + if (newSelection.length > 0) { + return; + } + + // Get the current list of images and select the same index + const baseQueryArgs = selectListImagesQueryArgs(state); + const data = imagesApi.endpoints.listImages.select(baseQueryArgs)(state).data; + + if (data) { + const deletedImageIndex = data.items.findIndex((i) => i.image_name === imageDTO.image_name); + const nextImage = data.items[deletedImageIndex + 1] ?? data.items[0] ?? 
null; + dispatch(imageSelected(nextImage)); + } + } + + // We need to reset the features where the image is in use - none of these work if their image(s) don't exist + if (imageUsage.isCanvasImage) { + dispatch(resetCanvas()); + } - imageDTOs.forEach((imageDTO) => { deleteControlAdapterImages(state, dispatch, imageDTO); deleteNodesImages(state, dispatch, imageDTO); deleteControlLayerImages(state, dispatch, imageDTO); - }); - - // Delete from server - const { requestId } = dispatch(imagesApi.endpoints.deleteImage.initiate(imageDTO)); - - // Wait for successful deletion, then trigger boards to re-fetch - const wasImageDeleted = await condition( - (action) => imagesApi.endpoints.deleteImage.matchFulfilled(action) && action.meta.requestId === requestId, - 30000 - ); - - if (wasImageDeleted) { - dispatch(api.util.invalidateTags([{ type: 'Board', id: imageDTO.board_id ?? 'none' }])); - } - - const lastSelectedImage = state.gallery.selection[state.gallery.selection.length - 1]?.image_name; - - if (imageDTO && imageDTO?.image_name === lastSelectedImage) { - const baseQueryArgs = selectListImagesQueryArgs(state); - const { data } = imagesApi.endpoints.listImages.select(baseQueryArgs)(state); - - if (data && data.items) { - const newlySelectedImage = data?.items.find((img) => img.image_name !== imageDTO?.image_name); - dispatch(imageSelected(newlySelectedImage || null)); - } else { - dispatch(imageSelected(null)); - } + } catch { + // no-op + } finally { + dispatch(isModalOpenChanged(false)); } }, }); + // Handle multiple image deletion startAppListening({ actionCreator: imageDeletionConfirmed, effect: async (action, { dispatch, getState }) => { @@ -170,20 +168,18 @@ export const addRequestedSingleImageDeletionListener = (startAppListening: AppSt } try { - // Delete from server - await dispatch(imagesApi.endpoints.deleteImages.initiate({ imageDTOs })).unwrap(); const state = getState(); - const queryArgs = selectListImagesQueryArgs(state); - const { data } = imagesApi.endpoints.listImages.select(queryArgs)(state); + await dispatch(imagesApi.endpoints.deleteImages.initiate({ imageDTOs })).unwrap(); - if (data && data.items[0]) { - dispatch(imageSelected(data.items[0])); - } else { - dispatch(imageSelected(null)); + if (intersectionBy(state.gallery.selection, imageDTOs, 'image_name').length > 0) { + // Some selected images were deleted, need to select the next image + const queryArgs = selectListImagesQueryArgs(state); + const { data } = imagesApi.endpoints.listImages.select(queryArgs)(state); + if (data) { + dispatch(imageSelected(null)); + } } - dispatch(isModalOpenChanged(false)); - // We need to reset the features where the image is in use - none of these work if their image(s) don't exist if (imagesUsage.some((i) => i.isCanvasImage)) { @@ -197,14 +193,20 @@ export const addRequestedSingleImageDeletionListener = (startAppListening: AppSt }); } catch { // no-op + } finally { + dispatch(isModalOpenChanged(false)); } }, }); + // When we list images, if no image is selected, select the first one. startAppListening({ - matcher: imagesApi.endpoints.deleteImage.matchPending, - effect: () => { - // + matcher: imagesApi.endpoints.listImages.matchFulfilled, + effect: (action, { dispatch, getState }) => { + const selection = getState().gallery.selection; + if (selection.length === 0) { + dispatch(imageSelected(action.payload.items[0] ?? 
null)); + } }, }); diff --git a/invokeai/frontend/web/src/app/types/invokeai.ts b/invokeai/frontend/web/src/app/types/invokeai.ts index 21636ada49..6d7416d95d 100644 --- a/invokeai/frontend/web/src/app/types/invokeai.ts +++ b/invokeai/frontend/web/src/app/types/invokeai.ts @@ -65,6 +65,7 @@ export type AppConfig = { */ shouldUpdateImagesOnConnect: boolean; shouldFetchMetadataFromApi: boolean; + allowPrivateBoards: boolean; disabledTabs: InvokeTabName[]; disabledFeatures: AppFeature[]; disabledSDFeatures: SDFeature[]; diff --git a/invokeai/frontend/web/src/common/components/IAIDropOverlay.tsx b/invokeai/frontend/web/src/common/components/IAIDropOverlay.tsx index cd3e0cbee1..51e5583bc6 100644 --- a/invokeai/frontend/web/src/common/components/IAIDropOverlay.tsx +++ b/invokeai/frontend/web/src/common/components/IAIDropOverlay.tsx @@ -52,8 +52,8 @@ const IAIDropOverlay = (props: Props) => { bottom={0.5} opacity={1} borderWidth={2} - borderColor={isOver ? 'base.50' : 'base.300'} - borderRadius="lg" + borderColor={isOver ? 'base.300' : 'base.500'} + borderRadius="base" borderStyle="dashed" transitionProperty="common" transitionDuration="0.1s" diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/AutoAddBadge.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/AutoAddBadge.tsx new file mode 100644 index 0000000000..a8b1f9f4fb --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/AutoAddBadge.tsx @@ -0,0 +1,14 @@ +import { Badge } from '@invoke-ai/ui-library'; +import { memo } from 'react'; +import { useTranslation } from 'react-i18next'; + +export const AutoAddBadge = memo(() => { + const { t } = useTranslation(); + return ( + + {t('common.auto')} + + ); +}); + +AutoAddBadge.displayName = 'AutoAddBadge'; diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/AutoAddIcon.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/AutoAddIcon.tsx deleted file mode 100644 index 9dd6a59c49..0000000000 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/AutoAddIcon.tsx +++ /dev/null @@ -1,16 +0,0 @@ -import { Badge, Flex } from '@invoke-ai/ui-library'; -import { memo } from 'react'; -import { useTranslation } from 'react-i18next'; - -const AutoAddIcon = () => { - const { t } = useTranslation(); - return ( - - - {t('common.auto')} - - - ); -}; - -export default memo(AutoAddIcon); diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx index 5cd4d001f4..c6ddb85daa 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/AddBoardButton.tsx @@ -1,26 +1,48 @@ import { IconButton } from '@invoke-ai/ui-library'; -import { memo, useCallback } from 'react'; +import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { boardIdSelected } from 'features/gallery/store/gallerySlice'; +import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { PiPlusBold } from 'react-icons/pi'; import { useCreateBoardMutation } from 'services/api/endpoints/boards'; -const AddBoardButton = () => { +type Props = { + isPrivateBoard: boolean; +}; + +const AddBoardButton = ({ isPrivateBoard }: Props) => { const { t } = useTranslation(); + const dispatch = useAppDispatch(); + const allowPrivateBoards = 
useAppSelector((s) => s.config.allowPrivateBoards); const [createBoard, { isLoading }] = useCreateBoardMutation(); - const DEFAULT_BOARD_NAME = t('boards.myBoard'); - const handleCreateBoard = useCallback(() => { - createBoard(DEFAULT_BOARD_NAME); - }, [createBoard, DEFAULT_BOARD_NAME]); + const label = useMemo(() => { + if (!allowPrivateBoards) { + return t('boards.addBoard'); + } + if (isPrivateBoard) { + return t('boards.addPrivateBoard'); + } + return t('boards.addSharedBoard'); + }, [allowPrivateBoards, isPrivateBoard, t]); + const handleCreateBoard = useCallback(async () => { + try { + const board = await createBoard({ board_name: t('boards.myBoard'), is_private: isPrivateBoard }).unwrap(); + dispatch(boardIdSelected({ boardId: board.board_id })); + } catch { + //no-op + } + }, [t, createBoard, isPrivateBoard, dispatch]); return ( } isLoading={isLoading} - tooltip={t('boards.addBoard')} - aria-label={t('boards.addBoard')} + tooltip={label} + aria-label={label} onClick={handleCreateBoard} - size="sm" + size="md" data-testid="add-board-button" + variant="ghost" /> ); }; diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx index e47edd21fc..6f37dbcdb5 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/BoardsList.tsx @@ -1,11 +1,15 @@ -import { Collapse, Flex, Grid, GridItem } from '@invoke-ai/ui-library'; +import { Collapse, Flex, Icon, Text, useDisclosure } from '@invoke-ai/ui-library'; +import { EMPTY_ARRAY } from 'app/store/constants'; import { useAppSelector } from 'app/store/storeHooks'; import { overlayScrollbarsParams } from 'common/components/OverlayScrollbars/constants'; import DeleteBoardModal from 'features/gallery/components/Boards/DeleteBoardModal'; +import GallerySettingsPopover from 'features/gallery/components/GallerySettingsPopover/GallerySettingsPopover'; import { selectListBoardsQueryArgs } from 'features/gallery/store/gallerySelectors'; import { OverlayScrollbarsComponent } from 'overlayscrollbars-react'; import type { CSSProperties } from 'react'; -import { memo, useState } from 'react'; +import { memo, useMemo, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiCaretUpBold } from 'react-icons/pi'; import { useListAllBoardsQuery } from 'services/api/endpoints/boards'; import type { BoardDTO } from 'services/api/types'; @@ -19,56 +23,112 @@ const overlayScrollbarsStyles: CSSProperties = { width: '100%', }; -type Props = { - isOpen: boolean; -}; - -const BoardsList = (props: Props) => { - const { isOpen } = props; +const BoardsList = () => { const selectedBoardId = useAppSelector((s) => s.gallery.selectedBoardId); const boardSearchText = useAppSelector((s) => s.gallery.boardSearchText); + const allowPrivateBoards = useAppSelector((s) => s.config.allowPrivateBoards); const queryArgs = useAppSelector(selectListBoardsQueryArgs); const { data: boards } = useListAllBoardsQuery(queryArgs); - const filteredBoards = boardSearchText - ? 
boards?.filter((board) => board.board_name.toLowerCase().includes(boardSearchText.toLowerCase())) - : boards; const [boardToDelete, setBoardToDelete] = useState(); + const privateBoardsDisclosure = useDisclosure({ defaultIsOpen: false }); + const sharedBoardsDisclosure = useDisclosure({ defaultIsOpen: false }); + const { t } = useTranslation(); + + const { filteredPrivateBoards, filteredSharedBoards } = useMemo(() => { + const filteredBoards = boardSearchText + ? boards?.filter((board) => board.board_name.toLowerCase().includes(boardSearchText.toLowerCase())) + : boards; + const filteredPrivateBoards = filteredBoards?.filter((board) => board.is_private) ?? EMPTY_ARRAY; + const filteredSharedBoards = filteredBoards?.filter((board) => !board.is_private) ?? EMPTY_ARRAY; + return { filteredPrivateBoards, filteredSharedBoards }; + }, [boardSearchText, boards]); return ( <> - - - - - - - - - - - - {filteredBoards && - filteredBoards.map((board, index) => ( - + + + + + + {allowPrivateBoards && ( + <> + + + + + {t('boards.private')} + + + + + + + + {allowPrivateBoards && } + {filteredPrivateBoards.map((board) => ( - - ))} - - + ))} + + + + + )} + + + + + {allowPrivateBoards ? t('boards.shared') : t('boards.boards')} + + + - + + + + {!allowPrivateBoards && } + {filteredSharedBoards.map((board) => ( + + ))} + + + + ); }; - export default memo(BoardsList); diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx index ad40fdbf6a..32ed84558c 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/GalleryBoard.tsx @@ -1,36 +1,41 @@ import type { SystemStyleObject } from '@invoke-ai/ui-library'; -import { Box, Editable, EditableInput, EditablePreview, Flex, Icon, Image, Text, Tooltip } from '@invoke-ai/ui-library'; -import { createSelector } from '@reduxjs/toolkit'; +import { + Editable, + EditableInput, + EditablePreview, + Flex, + Icon, + Image, + Text, + Tooltip, + useDisclosure, +} from '@invoke-ai/ui-library'; import { skipToken } from '@reduxjs/toolkit/query'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import IAIDroppable from 'common/components/IAIDroppable'; -import SelectionOverlay from 'common/components/SelectionOverlay'; import type { AddToBoardDropData } from 'features/dnd/types'; -import AutoAddIcon from 'features/gallery/components/Boards/AutoAddIcon'; +import { AutoAddBadge } from 'features/gallery/components/Boards/AutoAddBadge'; import BoardContextMenu from 'features/gallery/components/Boards/BoardContextMenu'; import { BoardTotalsTooltip } from 'features/gallery/components/Boards/BoardsList/BoardTotalsTooltip'; -import { autoAddBoardIdChanged, boardIdSelected, selectGallerySlice } from 'features/gallery/store/gallerySlice'; +import { autoAddBoardIdChanged, boardIdSelected } from 'features/gallery/store/gallerySlice'; import { memo, useCallback, useMemo, useState } from 'react'; import { useTranslation } from 'react-i18next'; -import { PiArchiveBold, PiImagesSquare } from 'react-icons/pi'; +import { PiArchiveBold, PiImageSquare } from 'react-icons/pi'; import { useUpdateBoardMutation } from 'services/api/endpoints/boards'; import { useGetImageDTOQuery } from 'services/api/endpoints/images'; import type { BoardDTO } from 'services/api/types'; const editableInputStyles: SystemStyleObject = { p: 0, + 
fontSize: 'md', + w: '100%', _focusVisible: { p: 0, - textAlign: 'center', }, }; -const ArchivedIcon = () => { - return ( - - - - ); +const _hover: SystemStyleObject = { + bg: 'base.800', }; interface GalleryBoardProps { @@ -42,71 +47,53 @@ const GalleryBoard = ({ board, isSelected, setBoardToDelete }: GalleryBoardProps const GalleryBoard = ({ board, isSelected, setBoardToDelete }: GalleryBoardProps) => { const dispatch = useAppDispatch(); const { t } = useTranslation(); + const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId); const autoAssignBoardOnClick = useAppSelector((s) => s.gallery.autoAssignBoardOnClick); - const selectIsSelectedForAutoAdd = useMemo( - () => createSelector(selectGallerySlice, (gallery) => board.board_id === gallery.autoAddBoardId), - [board.board_id] - ); - - const isSelectedForAutoAdd = useAppSelector(selectIsSelectedForAutoAdd); - const [isHovered, setIsHovered] = useState(false); - const handleMouseOver = useCallback(() => { - setIsHovered(true); - }, []); - const handleMouseOut = useCallback(() => { - setIsHovered(false); - }, []); - - const { currentData: coverImage } = useGetImageDTOQuery(board.cover_image_name ?? skipToken); - - const { board_name, board_id } = board; - const [localBoardName, setLocalBoardName] = useState(board_name); + const editingDisclosure = useDisclosure(); + const [localBoardName, setLocalBoardName] = useState(board.board_name); const handleSelectBoard = useCallback(() => { - dispatch(boardIdSelected({ boardId: board_id })); + dispatch(boardIdSelected({ boardId: board.board_id })); if (autoAssignBoardOnClick) { - dispatch(autoAddBoardIdChanged(board_id)); + dispatch(autoAddBoardIdChanged(board.board_id)); } - }, [board_id, autoAssignBoardOnClick, dispatch]); + }, [dispatch, board.board_id, autoAssignBoardOnClick]); const [updateBoard, { isLoading: isUpdateBoardLoading }] = useUpdateBoardMutation(); const droppableData: AddToBoardDropData = useMemo( () => ({ - id: board_id, + id: board.board_id, actionType: 'ADD_TO_BOARD', - context: { boardId: board_id }, + context: { boardId: board.board_id }, }), - [board_id] + [board.board_id] ); const handleSubmit = useCallback( async (newBoardName: string) => { - // empty strings are not allowed if (!newBoardName.trim()) { - setLocalBoardName(board_name); - return; - } + // empty strings are not allowed + setLocalBoardName(board.board_name); + } else if (newBoardName === board.board_name) { + // don't update the board name if it hasn't changed + } else { + try { + const { board_name } = await updateBoard({ + board_id: board.board_id, + changes: { board_name: newBoardName }, + }).unwrap(); - // don't updated the board name if it hasn't changed - if (newBoardName === board_name) { - return; - } - - try { - const { board_name } = await updateBoard({ - board_id, - changes: { board_name: newBoardName }, - }).unwrap(); - - // update local state - setLocalBoardName(board_name); - } catch { - // revert on error - setLocalBoardName(board_name); + // update local state + setLocalBoardName(board_name); + } catch { + // revert on error + setLocalBoardName(board.board_name); + } } + editingDisclosure.onClose(); }, - [board_id, board_name, updateBoard] + [board.board_id, board.board_name, editingDisclosure, updateBoard] ); const handleChange = useCallback((newBoardName: string) => { @@ -114,98 +101,91 @@ const GalleryBoard = ({ board, isSelected, setBoardToDelete }: GalleryBoardProps }, []); return ( - - - - {(ref) => ( - } - openDelay={1000} + + {(ref) => ( + } + openDelay={1000} + > + + + - - {board.archived && } - {coverImage?.thumbnail_url 
? ( - - ) : ( - - - - )} - {isSelectedForAutoAdd && } - - - - - - - + + + + {autoAddBoardId === board.board_id && !editingDisclosure.isOpen && } + {board.archived && !editingDisclosure.isOpen && ( + + )} + {!editingDisclosure.isOpen && {board.image_count}} - {t('unifiedCanvas.move')}} /> - - - )} - - - + {t('unifiedCanvas.move')}} /> + + + )} + ); }; export default memo(GalleryBoard); + +const CoverImage = ({ board }: { board: BoardDTO }) => { + const { currentData: coverImage } = useGetImageDTOQuery(board.cover_image_name ?? skipToken); + + if (coverImage) { + return ( + + ); + } + + return ( + + + + ); +}; diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx index 2e823ea25b..fb47bf5810 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardsList/NoBoardBoard.tsx @@ -1,23 +1,32 @@ -import { Box, Flex, Image, Text, Tooltip } from '@invoke-ai/ui-library'; +import type { SystemStyleObject } from '@invoke-ai/ui-library'; +import { Flex, Icon, Text, Tooltip } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import IAIDroppable from 'common/components/IAIDroppable'; -import SelectionOverlay from 'common/components/SelectionOverlay'; import type { RemoveFromBoardDropData } from 'features/dnd/types'; -import AutoAddIcon from 'features/gallery/components/Boards/AutoAddIcon'; +import { AutoAddBadge } from 'features/gallery/components/Boards/AutoAddBadge'; import { BoardTotalsTooltip } from 'features/gallery/components/Boards/BoardsList/BoardTotalsTooltip'; import NoBoardBoardContextMenu from 'features/gallery/components/Boards/NoBoardBoardContextMenu'; import { autoAddBoardIdChanged, boardIdSelected } from 'features/gallery/store/gallerySlice'; -import InvokeLogoSVG from 'public/assets/images/invoke-symbol-wht-lrg.svg'; -import { memo, useCallback, useMemo, useState } from 'react'; +import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; +import { useGetBoardImagesTotalQuery } from 'services/api/endpoints/boards'; import { useBoardName } from 'services/api/hooks/useBoardName'; interface Props { isSelected: boolean; } +const _hover: SystemStyleObject = { + bg: 'base.800', +}; + const NoBoardBoard = memo(({ isSelected }: Props) => { const dispatch = useAppDispatch(); + const { imagesTotal } = useGetBoardImagesTotalQuery('none', { + selectFromResult: ({ data }) => { + return { imagesTotal: data?.total ?? 
0 }; + }, + }); const autoAddBoardId = useAppSelector((s) => s.gallery.autoAddBoardId); const autoAssignBoardOnClick = useAppSelector((s) => s.gallery.autoAssignBoardOnClick); const boardName = useBoardName('none'); @@ -27,15 +36,6 @@ const NoBoardBoard = memo(({ isSelected }: Props) => { dispatch(autoAddBoardIdChanged('none')); } }, [dispatch, autoAssignBoardOnClick]); - const [isHovered, setIsHovered] = useState(false); - - const handleMouseOver = useCallback(() => { - setIsHovered(true); - }, []); - - const handleMouseOut = useCallback(() => { - setIsHovered(false); - }, []); const droppableData: RemoveFromBoardDropData = useMemo( () => ({ @@ -46,74 +46,49 @@ const NoBoardBoard = memo(({ isSelected }: Props) => { ); const { t } = useTranslation(); return ( - - - - {(ref) => ( - } openDelay={1000}> - - - invoke-ai-logo - - {autoAddBoardId === 'none' && } - - {boardName} - - - {t('unifiedCanvas.move')}} /> - - - )} - - - + + {(ref) => ( + } openDelay={1000}> + + + {/* iconified from public/assets/images/invoke-symbol-wht-lrg.svg */} + + + + + + + {boardName} + + {autoAddBoardId === 'none' && } + {imagesTotal} + {t('unifiedCanvas.move')}} /> + + + )} + ); }); diff --git a/invokeai/frontend/web/src/features/gallery/components/GalleryBoardName.tsx b/invokeai/frontend/web/src/features/gallery/components/GalleryBoardName.tsx index 55aec17ab2..233aa8a8c1 100644 --- a/invokeai/frontend/web/src/features/gallery/components/GalleryBoardName.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/GalleryBoardName.tsx @@ -1,48 +1,17 @@ -import { Button, Flex, Icon, Spacer } from '@invoke-ai/ui-library'; +import { Flex, Text } from '@invoke-ai/ui-library'; import { useAppSelector } from 'app/store/storeHooks'; -import { memo, useMemo } from 'react'; -import { PiCaretUpBold } from 'react-icons/pi'; +import { memo } from 'react'; import { useBoardName } from 'services/api/hooks/useBoardName'; -type Props = { - isOpen: boolean; - onToggle: () => void; -}; - -const GalleryBoardName = (props: Props) => { - const { isOpen, onToggle } = props; +const GalleryBoardName = () => { const selectedBoardId = useAppSelector((s) => s.gallery.selectedBoardId); const boardName = useBoardName(selectedBoardId); - const formattedBoardName = useMemo(() => { - if (boardName.length > 20) { - return `${boardName.substring(0, 20)}...`; - } - return boardName; - }, [boardName]); - return ( - - - {formattedBoardName} - - + + + {boardName} + ); }; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx index b0b147b510..665d96a006 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageGalleryContent.tsx @@ -1,4 +1,4 @@ -import { Box, Button, ButtonGroup, Flex, Tab, TabList, Tabs, useDisclosure } from '@invoke-ai/ui-library'; +import { Button, ButtonGroup, Flex, Tab, TabList, Tabs } from '@invoke-ai/ui-library'; import { useStore } from '@nanostores/react'; import { $galleryHeader } from 'app/store/nanostores/galleryHeader'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; @@ -10,7 +10,6 @@ import { RiServerLine } from 'react-icons/ri'; import BoardsList from './Boards/BoardsList/BoardsList'; import GalleryBoardName from './GalleryBoardName'; -import GallerySettingsPopover from './GallerySettingsPopover/GallerySettingsPopover'; import GalleryImageGrid from './ImageGrid/GalleryImageGrid'; 
import { GalleryPagination } from './ImageGrid/GalleryPagination'; import { GallerySearch } from './ImageGrid/GallerySearch'; @@ -20,7 +19,6 @@ const ImageGalleryContent = () => { const galleryView = useAppSelector((s) => s.gallery.galleryView); const dispatch = useAppDispatch(); const galleryHeader = useStore($galleryHeader); - const { isOpen: isBoardListOpen, onToggle: onToggleBoardList } = useDisclosure({ defaultIsOpen: true }); const handleClickImages = useCallback(() => { dispatch(galleryViewChanged('images')); @@ -42,15 +40,8 @@ const ImageGalleryContent = () => { gap={2} > {galleryHeader} - - - - - - - - - + + diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts index c94115ecfc..487c07dbc2 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts @@ -19,7 +19,7 @@ const initialGalleryState: GalleryState = { limit: 20, offset: 0, starredFirst: true, - orderDir: 'ASC', + orderDir: 'DESC', searchTerm: '', isImageViewerOpen: true, imageToCompare: null, @@ -122,6 +122,7 @@ export const gallerySlice = createSlice({ }, searchTermChanged: (state, action: PayloadAction) => { state.searchTerm = action.payload; + state.offset = 0; }, }, }); diff --git a/invokeai/frontend/web/src/features/system/store/configSlice.ts b/invokeai/frontend/web/src/features/system/store/configSlice.ts index 7d26dbd34c..8901365556 100644 --- a/invokeai/frontend/web/src/features/system/store/configSlice.ts +++ b/invokeai/frontend/web/src/features/system/store/configSlice.ts @@ -18,6 +18,7 @@ const initialConfigState: AppConfig = { isLocal: true, shouldUpdateImagesOnConnect: false, shouldFetchMetadataFromApi: false, + allowPrivateBoards: false, disabledTabs: [], disabledFeatures: ['lightbox', 'faceRestore', 'batches'], disabledSDFeatures: ['variation', 'symmetry', 'hires', 'perlinNoise', 'noiseThreshold'], diff --git a/invokeai/frontend/web/src/services/api/endpoints/boards.ts b/invokeai/frontend/web/src/services/api/endpoints/boards.ts index 177aa0e340..55ebeab318 100644 --- a/invokeai/frontend/web/src/services/api/endpoints/boards.ts +++ b/invokeai/frontend/web/src/services/api/endpoints/boards.ts @@ -1,5 +1,11 @@ import { ASSETS_CATEGORIES, IMAGE_CATEGORIES } from 'features/gallery/store/types'; -import type { BoardDTO, ListBoardsArgs, OffsetPaginatedResults_ImageDTO_, UpdateBoardArg } from 'services/api/types'; +import type { + BoardDTO, + CreateBoardArg, + ListBoardsArgs, + OffsetPaginatedResults_ImageDTO_, + UpdateBoardArg, +} from 'services/api/types'; import { getListImagesUrl } from 'services/api/util'; import type { ApiTagDescription } from '..'; @@ -87,11 +93,11 @@ export const boardsApi = api.injectEndpoints({ * Boards Mutations */ - createBoard: build.mutation({ - query: (board_name) => ({ + createBoard: build.mutation({ + query: ({ board_name, is_private }) => ({ url: buildBoardsUrl(), method: 'POST', - params: { board_name }, + params: { board_name, is_private }, }), invalidatesTags: [{ type: 'Board', id: LIST_TAG }], }), diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 3bd322278b..890de69e35 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -1058,6 +1058,11 @@ export type components = { * @description Whether or not the board is archived. 
*/ archived: boolean; + /** + * Is Private + * @description Whether the board is private. + */ + is_private?: boolean | null; /** * Image Count * @description The number of images in the board. @@ -7299,146 +7304,146 @@ export type components = { project_id: string | null; }; InvocationOutputMap: { - range_of_size: components["schemas"]["IntegerCollectionOutput"]; + noise: components["schemas"]["NoiseOutput"]; + pair_tile_image: components["schemas"]["PairTileImageOutput"]; color_correct: components["schemas"]["ImageOutput"]; - image: components["schemas"]["ImageOutput"]; - latents_collection: components["schemas"]["LatentsCollectionOutput"]; - img_blur: components["schemas"]["ImageOutput"]; - lineart_anime_image_processor: components["schemas"]["ImageOutput"]; - sdxl_lora_collection_loader: components["schemas"]["SDXLLoRALoaderOutput"]; - boolean: components["schemas"]["BooleanOutput"]; - infill_cv2: components["schemas"]["ImageOutput"]; - i2l: components["schemas"]["LatentsOutput"]; - tile_image_processor: components["schemas"]["ImageOutput"]; - dw_openpose_image_processor: components["schemas"]["ImageOutput"]; + tile_to_properties: components["schemas"]["TileToPropertiesOutput"]; + float_to_int: components["schemas"]["IntegerOutput"]; + rand_int: components["schemas"]["IntegerOutput"]; + latents: components["schemas"]["LatentsOutput"]; canvas_paste_back: components["schemas"]["ImageOutput"]; - heuristic_resize: components["schemas"]["ImageOutput"]; - mediapipe_face_processor: components["schemas"]["ImageOutput"]; - conditioning: components["schemas"]["ConditioningOutput"]; + controlnet: components["schemas"]["ControlOutput"]; + img_blur: components["schemas"]["ImageOutput"]; + freeu: components["schemas"]["UNetOutput"]; + string: components["schemas"]["StringOutput"]; + boolean_collection: components["schemas"]["BooleanCollectionOutput"]; + boolean: components["schemas"]["BooleanOutput"]; + lresize: components["schemas"]["LatentsOutput"]; mask_from_id: components["schemas"]["ImageOutput"]; + string_split: components["schemas"]["String2Output"]; + create_gradient_mask: components["schemas"]["GradientMaskOutput"]; + seamless: components["schemas"]["SeamlessModeOutput"]; + merge_tiles_to_image: components["schemas"]["ImageOutput"]; + canny_image_processor: components["schemas"]["ImageOutput"]; + crop_latents: components["schemas"]["LatentsOutput"]; + mask_edge: components["schemas"]["ImageOutput"]; + img_paste: components["schemas"]["ImageOutput"]; + zoe_depth_image_processor: components["schemas"]["ImageOutput"]; img_nsfw: components["schemas"]["ImageOutput"]; - conditioning_collection: components["schemas"]["ConditioningCollectionOutput"]; + img_mul: components["schemas"]["ImageOutput"]; + spandrel_image_to_image: components["schemas"]["ImageOutput"]; + tomask: components["schemas"]["ImageOutput"]; + color_map_image_processor: components["schemas"]["ImageOutput"]; + sdxl_refiner_model_loader: components["schemas"]["SDXLRefinerModelLoaderOutput"]; + infill_rgba: components["schemas"]["ImageOutput"]; + model_identifier: components["schemas"]["ModelIdentifierOutput"]; + metadata: components["schemas"]["MetadataOutput"]; + img_ilerp: components["schemas"]["ImageOutput"]; + add: components["schemas"]["IntegerOutput"]; + img_channel_multiply: components["schemas"]["ImageOutput"]; + integer: components["schemas"]["IntegerOutput"]; + integer_collection: components["schemas"]["IntegerCollectionOutput"]; + img_crop: components["schemas"]["ImageOutput"]; + show_image: 
components["schemas"]["ImageOutput"]; + string_replace: components["schemas"]["StringOutput"]; + prompt_from_file: components["schemas"]["StringCollectionOutput"]; + string_join: components["schemas"]["StringOutput"]; + metadata_item: components["schemas"]["MetadataItemOutput"]; + lblend: components["schemas"]["LatentsOutput"]; + t2i_adapter: components["schemas"]["T2IAdapterOutput"]; + infill_cv2: components["schemas"]["ImageOutput"]; + sdxl_refiner_compel_prompt: components["schemas"]["ConditioningOutput"]; + core_metadata: components["schemas"]["MetadataOutput"]; invert_tensor_mask: components["schemas"]["MaskOutput"]; + integer_math: components["schemas"]["IntegerOutput"]; + content_shuffle_image_processor: components["schemas"]["ImageOutput"]; + dynamic_prompt: components["schemas"]["StringCollectionOutput"]; + lineart_anime_image_processor: components["schemas"]["ImageOutput"]; + string_split_neg: components["schemas"]["StringPosNegOutput"]; + round_float: components["schemas"]["FloatOutput"]; + rand_float: components["schemas"]["FloatOutput"]; + lora_collection_loader: components["schemas"]["LoRALoaderOutput"]; + midas_depth_image_processor: components["schemas"]["ImageOutput"]; + random_range: components["schemas"]["IntegerCollectionOutput"]; sub: components["schemas"]["IntegerOutput"]; infill_lama: components["schemas"]["ImageOutput"]; - calculate_image_tiles_min_overlap: components["schemas"]["CalculateImageTilesOutput"]; - leres_image_processor: components["schemas"]["ImageOutput"]; - img_scale: components["schemas"]["ImageOutput"]; - mask_edge: components["schemas"]["ImageOutput"]; - esrgan: components["schemas"]["ImageOutput"]; - sdxl_lora_loader: components["schemas"]["SDXLLoRALoaderOutput"]; - pair_tile_image: components["schemas"]["PairTileImageOutput"]; - string: components["schemas"]["StringOutput"]; - calculate_image_tiles_even_split: components["schemas"]["CalculateImageTilesOutput"]; - t2i_adapter: components["schemas"]["T2IAdapterOutput"]; - img_paste: components["schemas"]["ImageOutput"]; - midas_depth_image_processor: components["schemas"]["ImageOutput"]; - pidi_image_processor: components["schemas"]["ImageOutput"]; - sdxl_refiner_compel_prompt: components["schemas"]["ConditioningOutput"]; - tile_to_properties: components["schemas"]["TileToPropertiesOutput"]; - clip_skip: components["schemas"]["CLIPSkipInvocationOutput"]; - blank_image: components["schemas"]["ImageOutput"]; - img_channel_multiply: components["schemas"]["ImageOutput"]; - canny_image_processor: components["schemas"]["ImageOutput"]; - l2i: components["schemas"]["ImageOutput"]; - img_mul: components["schemas"]["ImageOutput"]; - vae_loader: components["schemas"]["VAEOutput"]; - string_collection: components["schemas"]["StringCollectionOutput"]; - tomask: components["schemas"]["ImageOutput"]; - infill_patchmatch: components["schemas"]["ImageOutput"]; - compel: components["schemas"]["ConditioningOutput"]; - calculate_image_tiles: components["schemas"]["CalculateImageTilesOutput"]; - tiled_multi_diffusion_denoise_latents: components["schemas"]["LatentsOutput"]; - string_join_three: components["schemas"]["StringOutput"]; - rectangle_mask: components["schemas"]["MaskOutput"]; - crop_latents: components["schemas"]["LatentsOutput"]; - mul: components["schemas"]["IntegerOutput"]; - merge_tiles_to_image: components["schemas"]["ImageOutput"]; - integer_math: components["schemas"]["IntegerOutput"]; - iterate: components["schemas"]["IterateInvocationOutput"]; - range: 
components["schemas"]["IntegerCollectionOutput"]; - collect: components["schemas"]["CollectInvocationOutput"]; - img_ilerp: components["schemas"]["ImageOutput"]; - rand_float: components["schemas"]["FloatOutput"]; - latents: components["schemas"]["LatentsOutput"]; - face_identifier: components["schemas"]["ImageOutput"]; - depth_anything_image_processor: components["schemas"]["ImageOutput"]; - round_float: components["schemas"]["FloatOutput"]; - create_denoise_mask: components["schemas"]["DenoiseMaskOutput"]; - core_metadata: components["schemas"]["MetadataOutput"]; float_range: components["schemas"]["FloatCollectionOutput"]; - face_mask_detection: components["schemas"]["FaceMaskOutput"]; - ip_adapter: components["schemas"]["IPAdapterOutput"]; - lscale: components["schemas"]["LatentsOutput"]; - lineart_image_processor: components["schemas"]["ImageOutput"]; - integer_collection: components["schemas"]["IntegerCollectionOutput"]; - cv_inpaint: components["schemas"]["ImageOutput"]; - mlsd_image_processor: components["schemas"]["ImageOutput"]; - ideal_size: components["schemas"]["IdealSizeOutput"]; - segment_anything_processor: components["schemas"]["ImageOutput"]; - img_watermark: components["schemas"]["ImageOutput"]; - hed_image_processor: components["schemas"]["ImageOutput"]; - normalbae_image_processor: components["schemas"]["ImageOutput"]; - infill_tile: components["schemas"]["ImageOutput"]; - sdxl_model_loader: components["schemas"]["SDXLModelLoaderOutput"]; - denoise_latents: components["schemas"]["LatentsOutput"]; - unsharp_mask: components["schemas"]["ImageOutput"]; - float_collection: components["schemas"]["FloatCollectionOutput"]; - show_image: components["schemas"]["ImageOutput"]; - img_conv: components["schemas"]["ImageOutput"]; - model_identifier: components["schemas"]["ModelIdentifierOutput"]; - step_param_easing: components["schemas"]["FloatCollectionOutput"]; - float_math: components["schemas"]["FloatOutput"]; - color_map_image_processor: components["schemas"]["ImageOutput"]; - spandrel_image_to_image: components["schemas"]["ImageOutput"]; - img_crop: components["schemas"]["ImageOutput"]; - lblend: components["schemas"]["LatentsOutput"]; - random_range: components["schemas"]["IntegerCollectionOutput"]; - float: components["schemas"]["FloatOutput"]; - merge_metadata: components["schemas"]["MetadataOutput"]; - alpha_mask_to_tensor: components["schemas"]["MaskOutput"]; - image_collection: components["schemas"]["ImageCollectionOutput"]; - image_mask_to_tensor: components["schemas"]["MaskOutput"]; - color: components["schemas"]["ColorOutput"]; - img_hue_adjust: components["schemas"]["ImageOutput"]; - string_split: components["schemas"]["String2Output"]; - prompt_from_file: components["schemas"]["StringCollectionOutput"]; - metadata: components["schemas"]["MetadataOutput"]; - freeu: components["schemas"]["UNetOutput"]; - create_gradient_mask: components["schemas"]["GradientMaskOutput"]; - img_chan: components["schemas"]["ImageOutput"]; - div: components["schemas"]["IntegerOutput"]; save_image: components["schemas"]["ImageOutput"]; - img_pad_crop: components["schemas"]["ImageOutput"]; - lora_collection_loader: components["schemas"]["LoRALoaderOutput"]; + iterate: components["schemas"]["IterateInvocationOutput"]; + hed_image_processor: components["schemas"]["ImageOutput"]; + dw_openpose_image_processor: components["schemas"]["ImageOutput"]; scheduler: components["schemas"]["SchedulerOutput"]; - face_off: components["schemas"]["FaceOffOutput"]; - img_lerp: 
components["schemas"]["ImageOutput"]; - zoe_depth_image_processor: components["schemas"]["ImageOutput"]; - add: components["schemas"]["IntegerOutput"]; - string_split_neg: components["schemas"]["StringPosNegOutput"]; - img_channel_offset: components["schemas"]["ImageOutput"]; - main_model_loader: components["schemas"]["ModelLoaderOutput"]; - rand_int: components["schemas"]["IntegerOutput"]; - float_to_int: components["schemas"]["IntegerOutput"]; - controlnet: components["schemas"]["ControlOutput"]; - metadata_item: components["schemas"]["MetadataItemOutput"]; - integer: components["schemas"]["IntegerOutput"]; - dynamic_prompt: components["schemas"]["StringCollectionOutput"]; - sdxl_refiner_model_loader: components["schemas"]["SDXLRefinerModelLoaderOutput"]; + string_collection: components["schemas"]["StringCollectionOutput"]; + lineart_image_processor: components["schemas"]["ImageOutput"]; + image: components["schemas"]["ImageOutput"]; + merge_metadata: components["schemas"]["MetadataOutput"]; + image_collection: components["schemas"]["ImageCollectionOutput"]; + img_watermark: components["schemas"]["ImageOutput"]; + pidi_image_processor: components["schemas"]["ImageOutput"]; + sdxl_lora_collection_loader: components["schemas"]["SDXLLoRALoaderOutput"]; + collect: components["schemas"]["CollectInvocationOutput"]; lora_selector: components["schemas"]["LoRASelectorOutput"]; - lresize: components["schemas"]["LatentsOutput"]; - noise: components["schemas"]["NoiseOutput"]; + tile_image_processor: components["schemas"]["ImageOutput"]; + denoise_latents: components["schemas"]["LatentsOutput"]; + sdxl_lora_loader: components["schemas"]["SDXLLoRALoaderOutput"]; + img_conv: components["schemas"]["ImageOutput"]; + face_mask_detection: components["schemas"]["FaceMaskOutput"]; + infill_patchmatch: components["schemas"]["ImageOutput"]; + rectangle_mask: components["schemas"]["MaskOutput"]; + img_lerp: components["schemas"]["ImageOutput"]; + tiled_multi_diffusion_denoise_latents: components["schemas"]["LatentsOutput"]; + face_identifier: components["schemas"]["ImageOutput"]; + step_param_easing: components["schemas"]["FloatCollectionOutput"]; + unsharp_mask: components["schemas"]["ImageOutput"]; + mediapipe_face_processor: components["schemas"]["ImageOutput"]; + calculate_image_tiles: components["schemas"]["CalculateImageTilesOutput"]; + lscale: components["schemas"]["LatentsOutput"]; + color: components["schemas"]["ColorOutput"]; lora_loader: components["schemas"]["LoRALoaderOutput"]; - seamless: components["schemas"]["SeamlessModeOutput"]; - mask_combine: components["schemas"]["ImageOutput"]; - string_join: components["schemas"]["StringOutput"]; - string_replace: components["schemas"]["StringOutput"]; sdxl_compel_prompt: components["schemas"]["ConditioningOutput"]; + calculate_image_tiles_even_split: components["schemas"]["CalculateImageTilesOutput"]; + conditioning: components["schemas"]["ConditioningOutput"]; + float_collection: components["schemas"]["FloatCollectionOutput"]; + img_pad_crop: components["schemas"]["ImageOutput"]; + mul: components["schemas"]["IntegerOutput"]; + heuristic_resize: components["schemas"]["ImageOutput"]; + create_denoise_mask: components["schemas"]["DenoiseMaskOutput"]; + img_chan: components["schemas"]["ImageOutput"]; + leres_image_processor: components["schemas"]["ImageOutput"]; + infill_tile: components["schemas"]["ImageOutput"]; + i2l: components["schemas"]["LatentsOutput"]; + string_join_three: components["schemas"]["StringOutput"]; + ip_adapter: 
components["schemas"]["IPAdapterOutput"]; + main_model_loader: components["schemas"]["ModelLoaderOutput"]; + float: components["schemas"]["FloatOutput"]; + compel: components["schemas"]["ConditioningOutput"]; + range_of_size: components["schemas"]["IntegerCollectionOutput"]; + normalbae_image_processor: components["schemas"]["ImageOutput"]; + ideal_size: components["schemas"]["IdealSizeOutput"]; + conditioning_collection: components["schemas"]["ConditioningCollectionOutput"]; + depth_anything_image_processor: components["schemas"]["ImageOutput"]; + mask_combine: components["schemas"]["ImageOutput"]; + l2i: components["schemas"]["ImageOutput"]; + latents_collection: components["schemas"]["LatentsCollectionOutput"]; + float_math: components["schemas"]["FloatOutput"]; + img_hue_adjust: components["schemas"]["ImageOutput"]; + img_scale: components["schemas"]["ImageOutput"]; + esrgan: components["schemas"]["ImageOutput"]; + vae_loader: components["schemas"]["VAEOutput"]; + sdxl_model_loader: components["schemas"]["SDXLModelLoaderOutput"]; + clip_skip: components["schemas"]["CLIPSkipInvocationOutput"]; + segment_anything_processor: components["schemas"]["ImageOutput"]; img_resize: components["schemas"]["ImageOutput"]; - boolean_collection: components["schemas"]["BooleanCollectionOutput"]; - infill_rgba: components["schemas"]["ImageOutput"]; - content_shuffle_image_processor: components["schemas"]["ImageOutput"]; + range: components["schemas"]["IntegerCollectionOutput"]; + calculate_image_tiles_min_overlap: components["schemas"]["CalculateImageTilesOutput"]; + mlsd_image_processor: components["schemas"]["ImageOutput"]; + img_channel_offset: components["schemas"]["ImageOutput"]; + cv_inpaint: components["schemas"]["ImageOutput"]; + image_mask_to_tensor: components["schemas"]["MaskOutput"]; + blank_image: components["schemas"]["ImageOutput"]; + div: components["schemas"]["IntegerOutput"]; + alpha_mask_to_tensor: components["schemas"]["MaskOutput"]; + face_off: components["schemas"]["FaceOffOutput"]; }; /** * InvocationStartedEvent @@ -15142,6 +15147,8 @@ export type operations = { query: { /** @description The name of the board to create */ board_name: string; + /** @description Whether the board is private */ + is_private?: boolean; }; }; responses: { diff --git a/invokeai/frontend/web/src/services/api/types.ts b/invokeai/frontend/web/src/services/api/types.ts index fdfa62342d..b8ffa46c82 100644 --- a/invokeai/frontend/web/src/services/api/types.ts +++ b/invokeai/frontend/web/src/services/api/types.ts @@ -11,6 +11,8 @@ export type ListBoardsArgs = NonNullable