diff --git a/.github/actions/install-frontend-deps/action.yml b/.github/actions/install-frontend-deps/action.yml new file mode 100644 index 0000000000..b9d910ca99 --- /dev/null +++ b/.github/actions/install-frontend-deps/action.yml @@ -0,0 +1,33 @@ +name: Install frontend dependencies +description: Installs frontend dependencies with pnpm, with caching +runs: + using: 'composite' + steps: + - name: Setup Node 18 + uses: actions/setup-node@v4 + with: + node-version: '18' + + - name: Setup pnpm + uses: pnpm/action-setup@v2 + with: + version: 8 + run_install: false + + - name: Get pnpm store directory + shell: bash + run: | + echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV + + - uses: actions/cache@v3 + name: Setup pnpm cache + with: + path: ${{ env.STORE_PATH }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + + - name: Install frontend dependencies + run: pnpm install --prefer-frozen-lockfile + shell: bash + working-directory: invokeai/frontend/web diff --git a/.github/actions/install-python-deps/action.yml b/.github/actions/install-python-deps/action.yml new file mode 100644 index 0000000000..4c0d351899 --- /dev/null +++ b/.github/actions/install-python-deps/action.yml @@ -0,0 +1,11 @@ +name: Install python dependencies +description: Install python dependencies with pip, with caching +runs: + using: 'composite' + steps: + - name: Setup python + uses: actions/setup-python@v5 + with: + python-version: '3.10' + cache: pip + cache-dependency-path: pyproject.toml diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 74fcc02ab3..a8bfaa540c 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -11,7 +11,7 @@ on: - 'docker/docker-entrypoint.sh' - 'workflows/build-container.yml' tags: - - 'v*' + - 'v*.*.*' workflow_dispatch: permissions: diff --git a/.github/workflows/check-frontend.yml b/.github/workflows/check-frontend.yml new file mode 100644 index 0000000000..8134926556 --- /dev/null +++ b/.github/workflows/check-frontend.yml @@ -0,0 +1,43 @@ +# This workflow runs the frontend code quality checks. +# +# It may be triggered via dispatch, or by another workflow. + +name: 'Check: frontend' + +on: + workflow_dispatch: + workflow_call: + +defaults: + run: + working-directory: invokeai/frontend/web + +jobs: + check-frontend: + runs-on: ubuntu-latest + timeout-minutes: 10 # expected run time: <2 min + steps: + - uses: actions/checkout@v4 + + - name: Set up frontend + uses: ./.github/actions/install-frontend-deps + + - name: Run tsc check + run: 'pnpm run lint:tsc' + shell: bash + + - name: Run dpdm check + run: 'pnpm run lint:dpdm' + shell: bash + + - name: Run eslint check + run: 'pnpm run lint:eslint' + shell: bash + + - name: Run prettier check + run: 'pnpm run lint:prettier' + shell: bash + + - name: Run knip check + run: 'pnpm run lint:knip' + shell: bash diff --git a/.github/workflows/check-pytest.yml b/.github/workflows/check-pytest.yml new file mode 100644 index 0000000000..aedc0e59c3 --- /dev/null +++ b/.github/workflows/check-pytest.yml @@ -0,0 +1,72 @@ +# This workflow runs pytest on the codebase in a matrix of platforms. +# +# It may be triggered via dispatch, or by another workflow. 
+ +name: 'Check: pytest' + +on: + workflow_dispatch: + workflow_call: + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + matrix: + strategy: + matrix: + python-version: + - '3.10' + pytorch: + - linux-cuda-11_7 + - linux-rocm-5_2 + - linux-cpu + - macos-default + - windows-cpu + include: + - pytorch: linux-cuda-11_7 + os: ubuntu-22.04 + github-env: $GITHUB_ENV + - pytorch: linux-rocm-5_2 + os: ubuntu-22.04 + extra-index-url: 'https://download.pytorch.org/whl/rocm5.2' + github-env: $GITHUB_ENV + - pytorch: linux-cpu + os: ubuntu-22.04 + extra-index-url: 'https://download.pytorch.org/whl/cpu' + github-env: $GITHUB_ENV + - pytorch: macos-default + os: macOS-12 + github-env: $GITHUB_ENV + - pytorch: windows-cpu + os: windows-2022 + github-env: $env:GITHUB_ENV + name: ${{ matrix.pytorch }} on ${{ matrix.python-version }} + runs-on: ${{ matrix.os }} + timeout-minutes: 30 # expected run time: <10 min, depending on platform + env: + PIP_USE_PEP517: '1' + steps: + - uses: actions/checkout@v4 + + - name: set test prompt to main branch validation + run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }} + + - name: setup python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: pip + cache-dependency-path: pyproject.toml + + - name: install invokeai + env: + PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }} + run: > + pip3 install + --editable=".[test]" + + - name: run pytest + id: run-pytest + run: pytest diff --git a/.github/workflows/check-python.yml b/.github/workflows/check-python.yml new file mode 100644 index 0000000000..63a6c46b0a --- /dev/null +++ b/.github/workflows/check-python.yml @@ -0,0 +1,33 @@ +# This workflow runs the python code quality checks. +# +# It may be triggered via dispatch, or by another workflow. +# +# TODO: Add mypy or pyright to the checks. + +name: 'Check: python' + +on: + workflow_dispatch: + workflow_call: + +jobs: + check-backend: + runs-on: ubuntu-latest + timeout-minutes: 5 # expected run time: <1 min + steps: + - uses: actions/checkout@v4 + + - name: Install python dependencies + uses: ./.github/actions/install-python-deps + + - name: Install ruff + run: pip install ruff + shell: bash + + - name: Ruff check + run: ruff check --output-format=github . + shell: bash + + - name: Ruff format + run: ruff format --check . 
+ shell: bash diff --git a/.github/workflows/lint-frontend.yml b/.github/workflows/lint-frontend.yml deleted file mode 100644 index a4e1bba428..0000000000 --- a/.github/workflows/lint-frontend.yml +++ /dev/null @@ -1,43 +0,0 @@ -name: Lint frontend - -on: - pull_request: - types: - - 'ready_for_review' - - 'opened' - - 'synchronize' - push: - branches: - - 'main' - merge_group: - workflow_dispatch: - -defaults: - run: - working-directory: invokeai/frontend/web - -jobs: - lint-frontend: - if: github.event.pull_request.draft == false - runs-on: ubuntu-22.04 - steps: - - name: Setup Node 18 - uses: actions/setup-node@v4 - with: - node-version: '18' - name: Checkout - uses: actions/checkout@v4 - name: Setup pnpm - uses: pnpm/action-setup@v2 - with: - version: '8.12.1' - name: Install dependencies - run: 'pnpm install --prefer-frozen-lockfile' - name: Typescript - run: 'pnpm run lint:tsc' - name: Madge - run: 'pnpm run lint:madge' - name: ESLint - run: 'pnpm run lint:eslint' - name: Prettier - run: 'pnpm run lint:prettier' diff --git a/.github/workflows/mkdocs-material.yml b/.github/workflows/mkdocs-material.yml index af634c4906..cbcfbf0835 100644 --- a/.github/workflows/mkdocs-material.yml +++ b/.github/workflows/mkdocs-material.yml @@ -1,51 +1,38 @@ -name: mkdocs-material +# This is mostly a copy-paste from https://github.com/squidfunk/mkdocs-material/blob/master/docs/publishing-your-site.md + +name: mkdocs + on: push: branches: - - 'refs/heads/main' + - main + workflow_dispatch: permissions: - contents: write + contents: write jobs: - mkdocs-material: + deploy: if: github.event.pull_request.draft == false runs-on: ubuntu-latest env: REPO_URL: '${{ github.server_url }}/${{ github.repository }}' REPO_NAME: '${{ github.repository }}' SITE_URL: 'https://${{ github.repository_owner }}.github.io/InvokeAI' - steps: - - name: checkout sources - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - name: setup python - uses: actions/setup-python@v4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 with: python-version: '3.10' cache: pip cache-dependency-path: pyproject.toml - - - name: install requirements - env: - PIP_USE_PEP517: 1 - run: | - python -m \ pip install ".[docs]" - - - name: confirm buildability - run: | - python -m \ mkdocs build \ --clean \ --verbose - - - name: deploy to gh-pages - if: ${{ github.ref == 'refs/heads/main' }} - run: | - python -m \ mkdocs gh-deploy \ --clean \ --force + - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV + - uses: actions/cache@v4 + with: + key: mkdocs-material-${{ env.cache_id }} + path: .cache + restore-keys: | + mkdocs-material- + - run: python -m pip install ".[docs]" + - run: mkdocs gh-deploy --force diff --git a/.github/workflows/on-change-check-frontend.yml b/.github/workflows/on-change-check-frontend.yml new file mode 100644 index 0000000000..5e8704ad71 --- /dev/null +++ b/.github/workflows/on-change-check-frontend.yml @@ -0,0 +1,39 @@ +# This workflow runs `check-frontend.yml` on push or pull request. +# +# The actual checks are in a separate workflow to support simpler workflow +# composition without awkward or complicated conditionals.
+ +name: 'On change: run check-frontend' + +on: + push: + branches: + - 'main' + pull_request: + types: + - 'ready_for_review' + - 'opened' + - 'synchronize' + merge_group: + +jobs: + check-changed-frontend-files: + if: github.event.pull_request.draft == false + runs-on: ubuntu-latest + outputs: + frontend_any_changed: ${{ steps.changed-files.outputs.frontend_any_changed }} + steps: + - uses: actions/checkout@v4 + + - name: Check for changed frontend files + id: changed-files + uses: tj-actions/changed-files@v41 + with: + files_yaml: | + frontend: + - 'invokeai/frontend/web/**' + + run-check-frontend: + needs: check-changed-frontend-files + if: ${{ needs.check-changed-frontend-files.outputs.frontend_any_changed == 'true' }} + uses: ./.github/workflows/check-frontend.yml diff --git a/.github/workflows/on-change-check-python.yml b/.github/workflows/on-change-check-python.yml new file mode 100644 index 0000000000..e73198b3fa --- /dev/null +++ b/.github/workflows/on-change-check-python.yml @@ -0,0 +1,42 @@ +# This workflow runs `check-python.yml` on push or pull request. +# +# The actual checks are in a separate workflow to support simpler workflow +# composition without awkward or complicated conditionals. + +name: 'On change: run check-python' + +on: + push: + branches: + - 'main' + pull_request: + types: + - 'ready_for_review' + - 'opened' + - 'synchronize' + merge_group: + +jobs: + check-changed-python-files: + if: github.event.pull_request.draft == false + runs-on: ubuntu-latest + outputs: + python_any_changed: ${{ steps.changed-files.outputs.python_any_changed }} + steps: + - uses: actions/checkout@v4 + + - name: Check for changed python files + id: changed-files + uses: tj-actions/changed-files@v41 + with: + files_yaml: | + python: + - 'pyproject.toml' + - 'invokeai/**' + - '!invokeai/frontend/web/**' + - 'tests/**' + + run-check-python: + needs: check-changed-python-files + if: ${{ needs.check-changed-python-files.outputs.python_any_changed == 'true' }} + uses: ./.github/workflows/check-python.yml diff --git a/.github/workflows/on-change-pytest.yml b/.github/workflows/on-change-pytest.yml new file mode 100644 index 0000000000..0c174098bb --- /dev/null +++ b/.github/workflows/on-change-pytest.yml @@ -0,0 +1,42 @@ +# This workflow runs `check-pytest.yml` on push or pull request. +# +# The actual checks are in a separate workflow to support simpler workflow +# composition without awkward or complicated conditionals.
+ +name: 'On change: run pytest' + +on: + push: + branches: + - 'main' + pull_request: + types: + - 'ready_for_review' + - 'opened' + - 'synchronize' + merge_group: + +jobs: + check-changed-python-files: + if: github.event.pull_request.draft == false + runs-on: ubuntu-latest + outputs: + python_any_changed: ${{ steps.changed-files.outputs.python_any_changed }} + steps: + - uses: actions/checkout@v4 + + - name: Check for changed python files + id: changed-files + uses: tj-actions/changed-files@v41 + with: + files_yaml: | + python: + - 'pyproject.toml' + - 'invokeai/**' + - '!invokeai/frontend/web/**' + - 'tests/**' + + run-pytest: + needs: check-changed-python-files + if: ${{ needs.check-changed-python-files.outputs.python_any_changed == 'true' }} + uses: ./.github/workflows/check-pytest.yml diff --git a/.github/workflows/pypi-release.yml b/.github/workflows/pypi-release.yml deleted file mode 100644 index 162cbe3427..0000000000 --- a/.github/workflows/pypi-release.yml +++ /dev/null @@ -1,67 +0,0 @@ -name: PyPI Release - -on: - workflow_dispatch: - inputs: - publish_package: - description: 'Publish build on PyPi? [true/false]' - required: true - default: 'false' - -jobs: - build-and-release: - if: github.repository == 'invoke-ai/InvokeAI' - runs-on: ubuntu-22.04 - env: - TWINE_USERNAME: __token__ - TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} - TWINE_NON_INTERACTIVE: 1 - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup Node 18 - uses: actions/setup-node@v4 - with: - node-version: '18' - - - name: Setup pnpm - uses: pnpm/action-setup@v2 - with: - version: '8.12.1' - - - name: Install frontend dependencies - run: pnpm install --prefer-frozen-lockfile - working-directory: invokeai/frontend/web - - - name: Build frontend - run: pnpm run build - working-directory: invokeai/frontend/web - - - name: Install python dependencies - run: pip install --upgrade build twine - - - name: Build python package - run: python3 -m build - - - name: Upload build as workflow artifact - uses: actions/upload-artifact@v4 - with: - name: dist - path: dist - - - name: Check distribution - run: twine check dist/* - - - name: Check PyPI versions - if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/') - run: | - pip install --upgrade requests - python -c "\ - import scripts.pypi_helper; \ - EXISTS=scripts.pypi_helper.local_on_pypi(); \ - print(f'PACKAGE_EXISTS={EXISTS}')" >> $GITHUB_ENV - - - name: Publish build on PyPi - if: env.PACKAGE_EXISTS == 'False' && env.TWINE_PASSWORD != '' && github.event.inputs.publish_package == 'true' - run: twine upload dist/* diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000..0f9ca098d5 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,103 @@ +name: Release + +on: + push: + tags: + - 'v*' + workflow_dispatch: + inputs: + skip_code_checks: + description: 'Skip code checks' + required: true + default: true + type: boolean + +jobs: + check-version: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: samuelcolvin/check-python-version@v4 + id: check-python-version + with: + version_file_path: invokeai/version/invokeai_version.py + + check-frontend: + if: github.event.inputs.skip_code_checks != 'true' + uses: ./.github/workflows/check-frontend.yml + + check-python: + if: github.event.inputs.skip_code_checks != 'true' + uses: ./.github/workflows/check-python.yml + + check-pytest: + if: github.event.inputs.skip_code_checks != 'true' + uses: 
./.github/workflows/check-pytest.yml + + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Install python dependencies + uses: ./.github/actions/install-python-deps + + - name: Install pypa/build + run: pip install --upgrade build + + - name: Setup frontend + uses: ./.github/actions/install-frontend-deps + + - name: Run create_installer.sh + id: create_installer + run: ./create_installer.sh --skip_frontend_checks + working-directory: installer + + - name: Upload python distribution artifact + uses: actions/upload-artifact@v4 + with: + name: dist + path: ${{ steps.create_installer.outputs.DIST_PATH }} + + - name: Upload installer artifact + uses: actions/upload-artifact@v4 + with: + name: ${{ steps.create_installer.outputs.INSTALLER_FILENAME }} + path: ${{ steps.create_installer.outputs.INSTALLER_PATH }} + + publish-testpypi: + runs-on: ubuntu-latest + needs: [check-version, check-frontend, check-python, check-pytest, build] + if: github.event_name != 'workflow_dispatch' + environment: + name: testpypi + url: https://test.pypi.org/p/invokeai + steps: + - name: Download distribution from build job + uses: actions/download-artifact@v4 + with: + name: dist + path: dist/ + + - name: Publish distribution to TestPyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + repository-url: https://test.pypi.org/legacy/ + + publish-pypi: + runs-on: ubuntu-latest + needs: [check-version, check-frontend, check-python, check-pytest, build] + if: github.event_name != 'workflow_dispatch' + environment: + name: pypi + url: https://pypi.org/p/invokeai + steps: + - name: Download distribution from build job + uses: actions/download-artifact@v4 + with: + name: dist + path: dist/ + + - name: Publish distribution to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.github/workflows/style-checks.yml b/.github/workflows/style-checks.yml deleted file mode 100644 index 7e99702213..0000000000 --- a/.github/workflows/style-checks.yml +++ /dev/null @@ -1,24 +0,0 @@ -name: style checks - -on: - pull_request: - push: - branches: main - -jobs: - ruff: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Setup Python - uses: actions/setup-python@v4 - with: - python-version: '3.10' - - - name: Install dependencies with pip - run: | - pip install ruff - - - run: ruff check --output-format=github . - - run: ruff format --check . 
diff --git a/.github/workflows/test-invoke-pip.yml b/.github/workflows/test-invoke-pip.yml deleted file mode 100644 index bbbc66828c..0000000000 --- a/.github/workflows/test-invoke-pip.yml +++ /dev/null @@ -1,129 +0,0 @@ -name: Test invoke.py pip -on: - push: - branches: - - 'main' - pull_request: - types: - - 'ready_for_review' - - 'opened' - - 'synchronize' - merge_group: - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - matrix: - if: github.event.pull_request.draft == false - strategy: - matrix: - python-version: - # - '3.9' - - '3.10' - pytorch: - - linux-cuda-11_7 - - linux-rocm-5_2 - - linux-cpu - - macos-default - - windows-cpu - include: - - pytorch: linux-cuda-11_7 - os: ubuntu-22.04 - github-env: $GITHUB_ENV - - pytorch: linux-rocm-5_2 - os: ubuntu-22.04 - extra-index-url: 'https://download.pytorch.org/whl/rocm5.2' - github-env: $GITHUB_ENV - - pytorch: linux-cpu - os: ubuntu-22.04 - extra-index-url: 'https://download.pytorch.org/whl/cpu' - github-env: $GITHUB_ENV - - pytorch: macos-default - os: macOS-12 - github-env: $GITHUB_ENV - - pytorch: windows-cpu - os: windows-2022 - github-env: $env:GITHUB_ENV - name: ${{ matrix.pytorch }} on ${{ matrix.python-version }} - runs-on: ${{ matrix.os }} - env: - PIP_USE_PEP517: '1' - steps: - - name: Checkout sources - id: checkout-sources - uses: actions/checkout@v3 - - - name: Check for changed python files - id: changed-files - uses: tj-actions/changed-files@v41 - with: - files_yaml: | - python: - - 'pyproject.toml' - - 'invokeai/**' - - '!invokeai/frontend/web/**' - - 'tests/**' - - - name: set test prompt to main branch validation - if: steps.changed-files.outputs.python_any_changed == 'true' - run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }} - - - name: setup python - if: steps.changed-files.outputs.python_any_changed == 'true' - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - cache: pip - cache-dependency-path: pyproject.toml - - - name: install invokeai - if: steps.changed-files.outputs.python_any_changed == 'true' - env: - PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }} - run: > - pip3 install - --editable=".[test]" - - - name: run pytest - if: steps.changed-files.outputs.python_any_changed == 'true' - id: run-pytest - run: pytest - - # - name: run invokeai-configure - # env: - # HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGINGFACE_TOKEN }} - # run: > - # invokeai-configure - # --yes - # --default_only - # --full-precision - # # can't use fp16 weights without a GPU - - # - name: run invokeai - # id: run-invokeai - # env: - # # Set offline mode to make sure configure preloaded successfully. 
- # HF_HUB_OFFLINE: 1 - # HF_DATASETS_OFFLINE: 1 - # TRANSFORMERS_OFFLINE: 1 - # INVOKEAI_OUTDIR: ${{ github.workspace }}/results - # run: > - # invokeai - # --no-patchmatch - # --no-nsfw_checker - # --precision=float32 - # --always_use_cpu - # --use_memory_db - # --outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }} - # --from_file ${{ env.TEST_PROMPTS }} - - # - name: Archive results - # env: - # INVOKEAI_OUTDIR: ${{ github.workspace }}/results - # uses: actions/upload-artifact@v3 - # with: - # name: results - # path: ${{ env.INVOKEAI_OUTDIR }} diff --git a/.prettierrc.yaml b/.prettierrc.yaml index ce4b99a07b..3d2ce3b880 100644 --- a/.prettierrc.yaml +++ b/.prettierrc.yaml @@ -7,7 +7,7 @@ embeddedLanguageFormatting: auto overrides: - files: '*.md' options: - proseWrap: always + proseWrap: preserve printWidth: 80 parser: markdown cursorOffset: -1 diff --git a/Makefile b/Makefile index 10d7a257c5..c3eec094f7 100644 --- a/Makefile +++ b/Makefile @@ -6,33 +6,44 @@ default: help help: @echo Developer commands: @echo - @echo "ruff Run ruff, fixing any safely-fixable errors and formatting" - @echo "ruff-unsafe Run ruff, fixing all fixable errors and formatting" - @echo "mypy Run mypy using the config in pyproject.toml to identify type mismatches and other coding errors" - @echo "mypy-all Run mypy ignoring the config in pyproject.tom but still ignoring missing imports" - @echo "frontend-build Build the frontend in order to run on localhost:9090" - @echo "frontend-dev Run the frontend in developer mode on localhost:5173" - @echo "installer-zip Build the installer .zip file for the current version" - @echo "tag-release Tag the GitHub repository with the current version (use at release time only!)" + @echo "ruff Run ruff, fixing any safely-fixable errors and formatting" + @echo "ruff-unsafe Run ruff, fixing all fixable errors and formatting" + @echo "mypy Run mypy using the config in pyproject.toml to identify type mismatches and other coding errors" + @echo "mypy-all Run mypy ignoring the config in pyproject.toml but still ignoring missing imports" + @echo "test Run the unit tests" + @echo "frontend-install Install the pnpm modules needed for the front end" + @echo "frontend-build Build the frontend in order to run on localhost:9090" + @echo "frontend-dev Run the frontend in developer mode on localhost:5173" + @echo "installer-zip Build the installer .zip file for the current version" + @echo "tag-release Tag the GitHub repository with the current version (use at release time only!)" # Runs ruff, fixing any safely-fixable errors and formatting ruff: - ruff check . --fix - ruff format . + ruff check . --fix + ruff format . # Runs ruff, fixing all errors it can fix and formatting ruff-unsafe: - ruff check . --fix --unsafe-fixes - ruff format . + ruff check . --fix --unsafe-fixes + ruff format .
# Runs mypy, using the config in pyproject.toml mypy: - mypy scripts/invokeai-web.py + mypy scripts/invokeai-web.py # Runs mypy, ignoring the config in pyproject.toml but still ignoring missing (untyped) imports # (many files are ignored by the config, so this is useful for checking all files) mypy-all: - mypy scripts/invokeai-web.py --config-file= --ignore-missing-imports + mypy scripts/invokeai-web.py --config-file= --ignore-missing-imports + +# Run the unit tests +test: + pytest ./tests + +# Install the pnpm modules needed for the front end +frontend-install: + rm -rf invokeai/frontend/web/node_modules + cd invokeai/frontend/web && pnpm install # Build the frontend frontend-build: diff --git a/docker/Dockerfile b/docker/Dockerfile index c89a5773f7..2de4d0ffce 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -18,8 +18,8 @@ ENV INVOKEAI_SRC=/opt/invokeai ENV VIRTUAL_ENV=/opt/venv/invokeai ENV PATH="$VIRTUAL_ENV/bin:$PATH" -ARG TORCH_VERSION=2.1.0 -ARG TORCHVISION_VERSION=0.16 +ARG TORCH_VERSION=2.1.2 +ARG TORCHVISION_VERSION=0.16.2 ARG GPU_DRIVER=cuda ARG TARGETPLATFORM="linux/amd64" # unused but available @@ -35,7 +35,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \ if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \ elif [ "$GPU_DRIVER" = "rocm" ]; then \ - extra_index_url_arg="--index-url https://download.pytorch.org/whl/rocm5.6"; \ + extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.6"; \ else \ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \ fi &&\ @@ -54,7 +54,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \ if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \ pip install -e ".[xformers]"; \ else \ - pip install -e "."; \ + pip install $extra_index_url_arg -e "."; \ fi # #### Build the Web UI ------------------------------------ diff --git a/docker/run.sh b/docker/run.sh index 409df508dd..d413e53453 100755 --- a/docker/run.sh +++ b/docker/run.sh @@ -21,7 +21,7 @@ run() { printf "%s\n" "$build_args" fi - docker compose build $build_args + docker compose build $build_args $service_name unset build_args printf "%s\n" "starting service $service_name" diff --git a/docs/RELEASE.md b/docs/RELEASE.md new file mode 100644 index 0000000000..3a0375b027 --- /dev/null +++ b/docs/RELEASE.md @@ -0,0 +1,139 @@ +# Release Process + +The app is published twice, in two different build formats. + +- A [PyPI] distribution. This includes both a source distribution and a built distribution (a wheel). Users install with `pip install invokeai`. The updater uses this build. +- An installer on the [InvokeAI Releases Page]. This is a zip file with install scripts and a wheel. This is only used for new installs. + +## General Prep + +Make a developer call-out for PRs to merge. Merge and test things out. + +While the release workflow does not include end-to-end tests, it does pause before publishing so you can download and test the final build. + +## Release Workflow + +The `release.yml` workflow runs a number of jobs to handle code checks, tests, build and publish on PyPI. + +It is triggered on **tag push**, when the tag matches `v*`. It doesn't matter if you've prepped a release branch like `release/v3.5.0` or are releasing from `main` - it works the same. + +> Because commits are reference-counted, it is safe to create a release branch, tag it, let the workflow run, then delete the branch.
So long as the tag exists, that commit will exist. + +### Triggering the Workflow + +Run `make tag-release` to tag the current commit and kick off the workflow. + +The release may also be run [manually]. + +### Workflow Jobs and Process + +The workflow consists of a number of concurrently-run jobs, and two final publish jobs. + +The publish jobs run only if all five concurrent jobs succeed, and only once they are approved. + +#### `check-version` Job + +This job checks that the git ref matches the app version. It matches the ref against the `__version__` variable in `invokeai/version/invokeai_version.py`. + +When the workflow is triggered by tag push, the ref is the tag. If the workflow is run manually, the ref is the target selected from the **Use workflow from** dropdown. + +This job uses [samuelcolvin/check-python-version]. + +> Any valid [version specifier] works, so long as the tag matches the version. The release workflow works exactly the same for `RC`, `post`, `dev`, etc. + +#### Check and Test Jobs + +This is our test suite. + +- **`check-pytest`**: runs `pytest` on a matrix of platforms +- **`check-python`**: runs `ruff` (format and lint) +- **`check-frontend`**: runs `prettier` (format), `eslint` (lint), `dpdm` (circular refs), `knip` (unused code) and `tsc` (static type check) + +> **TODO** We should add `mypy` or `pyright` to the **`check-python`** job. + +> **TODO** We should add an end-to-end test job that generates an image. + +#### `build` Job + +This sets up both python and frontend dependencies and builds the python package. Internally, this runs `installer/create_installer.sh` and uploads two artifacts: + +- **`dist`**: the python distribution, to be published on PyPI +- **`InvokeAI-installer-${VERSION}.zip`**: the installer to be included in the GitHub release + +#### Sanity Check & Smoke Test + +At this point, the release workflow pauses (the remaining jobs all require approval). + +A maintainer should go to the **Summary** tab of the workflow, download the installer and test it. Ensure the app loads and generates. + +> The same wheel file is bundled in the installer and in the `dist` artifact, which is uploaded to PyPI. You should end up with exactly the same installation of the `invokeai` package from any of these methods. + +#### PyPI Publish Jobs + +The publish jobs will skip if any of the previous jobs skip or fail. + +They use [GitHub environments], which are configured as [trusted publishers] on PyPI. + +Both jobs require a maintainer to approve them from the workflow's **Summary** tab. + +- Click the **Review deployments** button +- Select the environment (either `testpypi` or `pypi`) +- Click **Approve and deploy** + +> **If the version already exists on PyPI, the publish jobs will fail.** PyPI only allows a given version to be published once - you cannot change it. If a version published on PyPI has a problem, you'll need to "fail forward" by bumping the app version and publishing a follow-up release. + +#### `publish-testpypi` Job + +Publishes the distribution on the [Test PyPI] index, using the `testpypi` GitHub environment. + +This job is not required for the production PyPI publish, but is included in case you want to test the PyPI release.
+ +If approved and successful, you could try out the test release like this: + +```sh +# Create a new virtual environment +python -m venv ~/.test-invokeai-dist --prompt test-invokeai-dist +# Install the distribution from Test PyPI +pip install --index-url https://test.pypi.org/simple/ invokeai +# Run and test the app +invokeai-web +# Cleanup +deactivate +rm -rf ~/.test-invokeai-dist +``` + +#### `publish-pypi` Job + +Publishes the distribution on the production PyPI index, using the `pypi` GitHub environment. + +## Publish the GitHub Release with installer + +Once the release is published to PyPI, it's time to publish the GitHub release. + +1. [Draft a new release] on GitHub, choosing the tag that triggered the release. +2. Write the release notes, describing important changes. The **Generate release notes** button automatically inserts the changelog and new contributors, and you can copy/paste the intro from previous releases. +3. Upload the zip file created in the **`build`** job into the Assets section of the release notes. You can also upload the zip into the body of the release notes, since it can be hard for users to find the Assets section. +4. Check the **Set as a pre-release** and **Create a discussion for this release** checkboxes at the bottom of the release page. +5. Publish the pre-release. +6. Announce the pre-release in Discord. + +> **TODO** Workflows can create a GitHub release from a template and upload release assets. One popular action to handle this is [ncipollo/release-action]. A future enhancement to the release process could set this up. + +## Manually Running the Release Workflow + +The release workflow can be run manually. This is useful to get an installer build and test it out without needing to push a tag. + +When run this way, you'll see a **Skip code checks** checkbox. This allows the workflow to run without the three time-consuming code quality check jobs. + +The publish jobs will skip if the workflow was run manually. + +[InvokeAI Releases Page]: https://github.com/invoke-ai/InvokeAI/releases +[PyPI]: https://pypi.org/ +[Draft a new release]: https://github.com/invoke-ai/InvokeAI/releases/new +[Test PyPI]: https://test.pypi.org/ +[version specifier]: https://packaging.python.org/en/latest/specifications/version-specifiers/ +[ncipollo/release-action]: https://github.com/ncipollo/release-action +[GitHub environments]: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment +[trusted publishers]: https://docs.pypi.org/trusted-publishers/ +[samuelcolvin/check-python-version]: https://github.com/samuelcolvin/check-python-version +[manually]: #manually-running-the-release-workflow diff --git a/docs/contributing/INVOCATIONS.md b/docs/contributing/INVOCATIONS.md index 124589f44c..ce1ee9e808 100644 --- a/docs/contributing/INVOCATIONS.md +++ b/docs/contributing/INVOCATIONS.md @@ -9,11 +9,15 @@ complex functionality. ## Invocations Directory -InvokeAI Nodes can be found in the `invokeai/app/invocations` directory. These can be used as examples to create your own nodes. +InvokeAI Nodes can be found in the `invokeai/app/invocations` directory. These +can be used as examples to create your own nodes. -New nodes should be added to a subfolder in `nodes` direction found at the root level of the InvokeAI installation location. Nodes added to this folder will be able to be used upon application startup. +New nodes should be added to a subfolder in the `nodes` directory found at the root +level of the InvokeAI installation location.
Nodes added to this folder will be +able to be used upon application startup. + +Example `nodes` subfolder structure: -Example `nodes` subfolder structure: ```py ├── __init__.py # Invoke-managed custom node loader │ @@ -30,14 +34,14 @@ Example `nodes` subfolder structure: └── fancy_node.py ``` -Each node folder must have an `__init__.py` file that imports its nodes. Only nodes imported in the `__init__.py` file are loaded. - See the README in the nodes folder for more examples: +Each node folder must have an `__init__.py` file that imports its nodes. Only +nodes imported in the `__init__.py` file are loaded. See the README in the nodes +folder for more examples: ```py from .cool_node import CoolInvocation ``` - ## Creating A New Invocation In order to understand the process of creating a new Invocation, let us actually @@ -131,7 +135,6 @@ from invokeai.app.invocations.primitives import ImageField class ResizeInvocation(BaseInvocation): '''Resizes an image''' - # Inputs image: ImageField = InputField(description="The input image") width: int = InputField(default=512, ge=64, le=2048, description="Width of the new image") height: int = InputField(default=512, ge=64, le=2048, description="Height of the new image") @@ -167,7 +170,6 @@ from invokeai.app.invocations.primitives import ImageField class ResizeInvocation(BaseInvocation): '''Resizes an image''' - # Inputs image: ImageField = InputField(description="The input image") width: int = InputField(default=512, ge=64, le=2048, description="Width of the new image") height: int = InputField(default=512, ge=64, le=2048, description="Height of the new image") @@ -197,7 +199,6 @@ from invokeai.app.invocations.image import ImageOutput class ResizeInvocation(BaseInvocation): '''Resizes an image''' - # Inputs image: ImageField = InputField(description="The input image") width: int = InputField(default=512, ge=64, le=2048, description="Width of the new image") height: int = InputField(default=512, ge=64, le=2048, description="Height of the new image") @@ -229,30 +230,17 @@ class ResizeInvocation(BaseInvocation): height: int = InputField(default=512, ge=64, le=2048, description="Height of the new image") def invoke(self, context: InvocationContext) -> ImageOutput: - # Load the image using InvokeAI's predefined Image Service. Returns the PIL image. - image = context.services.images.get_pil_image(self.image.image_name) + # Load the input image as a PIL image + image = context.images.get_pil(self.image.image_name) - # Resizing the image + # Resize the image resized_image = image.resize((self.width, self.height)) - # Save the image using InvokeAI's predefined Image Service. Returns the prepared PIL image. - output_image = context.services.images.create( - image=resized_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - ) + # Save the image + image_dto = context.images.save(image=resized_image) - # Returning the Image - return ImageOutput( - image=ImageField( - image_name=output_image.image_name, - ), - width=output_image.width, - height=output_image.height, - ) + # Return an ImageOutput + return ImageOutput.build(image_dto) ``` **Note:** Do not be overwhelmed by the `ImageOutput` process. InvokeAI has a @@ -343,27 +331,25 @@ class ImageColorStringOutput(BaseInvocationOutput): That's all there is to it. - +Custom fields only support connection inputs in the Workflow Editor. 
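To round out the custom-output discussion above, here is a minimal sketch of a node that returns an `ImageColorStringOutput`-style custom output. It is illustrative only - the registration decorators, import paths and field names are assumptions patterned on the examples in this guide, not a definitive implementation:

```py
# A sketch, not part of the documented examples. The decorator and import
# locations are assumptions; adjust them to match your installed version.
from invokeai.app.invocations.baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    InputField,
    InvocationContext,
    OutputField,
    invocation,
    invocation_output,
)
from invokeai.app.invocations.primitives import ImageField


@invocation_output("image_color_string_output")
class ImageColorStringOutput(BaseInvocationOutput):
    """A color from an image, as a string"""

    color: str = OutputField(description="A color, e.g. 'rgb(255, 0, 0)'")


@invocation("center_color", title="Center Color", category="image", version="1.0.0")
class CenterColorInvocation(BaseInvocation):
    """Reports the color of an image's center pixel as a string"""

    image: ImageField = InputField(description="The image to inspect")

    def invoke(self, context: InvocationContext) -> ImageColorStringOutput:
        # Load the input image as a PIL image
        image = context.images.get_pil(self.image.image_name)
        # Sample the center pixel - a real node would do something smarter
        r, g, b = image.convert("RGB").getpixel((image.width // 2, image.height // 2))
        return ImageColorStringOutput(color=f"rgb({r}, {g}, {b})")
```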
diff --git a/docs/contributing/MODEL_MANAGER.md b/docs/contributing/MODEL_MANAGER.md index 880c8b2480..8351904b61 100644 --- a/docs/contributing/MODEL_MANAGER.md +++ b/docs/contributing/MODEL_MANAGER.md @@ -28,7 +28,7 @@ model. These are the: Hugging Face, as well as discriminating among model versions in Civitai, but can be used for arbitrary content. - * _ModelLoadServiceBase_ (**CURRENTLY UNDER DEVELOPMENT - NOT IMPLEMENTED**) + * _ModelLoadServiceBase_ Responsible for loading a model from disk into RAM and VRAM and getting it ready for inference. @@ -41,10 +41,10 @@ The four main services can be found in * `invokeai/app/services/model_records/` * `invokeai/app/services/model_install/` * `invokeai/app/services/downloads/` -* `invokeai/app/services/model_loader/` (**under development**) +* `invokeai/app/services/model_load/` Code related to the FastAPI web API can be found in -`invokeai/app/api/routers/model_records.py`. +`invokeai/app/api/routers/model_manager_v2.py`. *** @@ -84,10 +84,10 @@ diffusers model. When this happens, `original_hash` is unchanged, but `ModelType`, `ModelFormat` and `BaseModelType` are string enums that are defined in `invokeai.backend.model_manager.config`. They are also imported by, and can be reexported from, -`invokeai.app.services.model_record_service`: +`invokeai.app.services.model_manager.model_records`: ``` -from invokeai.app.services.model_record_service import ModelType, ModelFormat, BaseModelType +from invokeai.app.services.model_records import ModelType, ModelFormat, BaseModelType ``` The `path` field can be absolute or relative. If relative, it is taken @@ -123,7 +123,7 @@ taken to be the `models_dir` directory. `variant` is an enumerated string class with values `normal`, `inpaint` and `depth`. If needed, it can be imported if needed from -either `invokeai.app.services.model_record_service` or +either `invokeai.app.services.model_records` or `invokeai.backend.model_manager.config`. ### ONNXSD2Config @@ -134,7 +134,7 @@ either `invokeai.app.services.model_record_service` or | `upcast_attention` | bool | Model requires its attention module to be upcast | The `SchedulerPredictionType` enum can be imported from either -`invokeai.app.services.model_record_service` or +`invokeai.app.services.model_records` or `invokeai.backend.model_manager.config`. ### Other config classes @@ -157,15 +157,6 @@ indicates that the model is compatible with any of the base models. This works OK for some models, such as the IP Adapter image encoders, but is an all-or-nothing proposition. -Another issue is that the config class hierarchy is paralleled to some -extent by a `ModelBase` class hierarchy defined in -`invokeai.backend.model_manager.models.base` and its subclasses. These -are classes representing the models after they are loaded into RAM and -include runtime information such as load status and bytes used. Some -of the fields, including `name`, `model_type` and `base_model`, are -shared between `ModelConfigBase` and `ModelBase`, and this is a -potential source of confusion. - ## Reading and Writing Model Configuration Records The `ModelRecordService` provides the ability to retrieve model @@ -177,11 +168,11 @@ initialization and can be retrieved within an invocation from the `InvocationContext` object: ``` -store = context.services.model_record_store +store = context.services.model_manager.store ``` or from elsewhere in the code by accessing -`ApiDependencies.invoker.services.model_record_store`. +`ApiDependencies.invoker.services.model_manager.store`. 
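For orientation, here is a quick sketch - using a placeholder key - of fetching a record through the store from within an invocation; `get_model()` and the `path` field are described later in this document:

```
store = context.services.model_manager.store
config = store.get_model('032c722b18f0ab48c55575402e4f1e3e')  # placeholder key
print(config.path)  # absolute or relative path to the model on disk
```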
### Creating a `ModelRecordService` @@ -190,7 +181,7 @@ you can directly create either a `ModelRecordServiceSQL` or a `ModelRecordServiceFile` object: ``` -from invokeai.app.services.model_record_service import ModelRecordServiceSQL, ModelRecordServiceFile +from invokeai.app.services.model_records import ModelRecordServiceSQL, ModelRecordServiceFile store = ModelRecordServiceSQL.from_connection(connection, lock) store = ModelRecordServiceSQL.from_db_file('/path/to/sqlite_database.db') @@ -252,7 +243,7 @@ So a typical startup pattern would be: ``` import sqlite3 from invokeai.app.services.thread import lock -from invokeai.app.services.model_record_service import ModelRecordServiceBase +from invokeai.app.services.model_records import ModelRecordServiceBase from invokeai.app.services.config import InvokeAIAppConfig config = InvokeAIAppConfig.get_config() @@ -260,19 +251,6 @@ db_conn = sqlite3.connect(config.db_path.as_posix(), check_same_thread=False) store = ModelRecordServiceBase.open(config, db_conn, lock) ``` -_A note on simultaneous access to `invokeai.db`_: The current InvokeAI -service architecture for the image and graph databases is careful to -use a shared sqlite3 connection and a thread lock to ensure that two -threads don't attempt to access the database simultaneously. However, -the default `sqlite3` library used by Python reports using -**Serialized** mode, which allows multiple threads to access the -database simultaneously using multiple database connections (see -https://www.sqlite.org/threadsafe.html and -https://ricardoanderegg.com/posts/python-sqlite-thread-safety/). Therefore -it should be safe to allow the record service to open its own SQLite -database connection. Opening a model record service should then be as -simple as `ModelRecordServiceBase.open(config)`. - ### Fetching a Model's Configuration from `ModelRecordServiceBase` Configurations can be retrieved in several ways. @@ -468,6 +446,44 @@ required parameters: Once initialized, the installer will provide the following methods: +#### install_job = installer.heuristic_import(source, [config], [access_token]) + +This is a simplified interface to the installer which takes a source +string, an optional model configuration dictionary and an optional +access token. + +The `source` is a string that can be any of these forms + +1. A path on the local filesystem (`C:\\users\\fred\\model.safetensors`) +2. A Url pointing to a single downloadable model file (`https://civitai.com/models/58390/detail-tweaker-lora-lora`) +3. A HuggingFace repo_id with any of the following formats: + - `model/name` -- entire model + - `model/name:fp32` -- entire model, using the fp32 variant + - `model/name:fp16:vae` -- vae submodel, using the fp16 variant + - `model/name::vae` -- vae submodel, using default precision + - `model/name:fp16:path/to/model.safetensors` -- an individual model file, fp16 variant + - `model/name::path/to/model.safetensors` -- an individual model file, default variant + +Note that by specifying a relative path to the top of the HuggingFace +repo, you can download and install arbitrary models files. + +The variant, if not provided, will be automatically filled in with +`fp32` if the user has requested full precision, and `fp16` +otherwise. If a variant that does not exist is requested, then the +method will install whatever HuggingFace returns as its default +revision. + +`config` is an optional dict of values that will override the +autoprobed values for model type, base, scheduler prediction type, and +so forth. 
See [Model configuration and +probing](#Model-configuration-and-probing) for details. + +`access_token` is an optional access token for accessing resources +that need authentication. + +The method will return a `ModelInstallJob`. This object is discussed +at length in the following section. + #### install_job = installer.import_model() The `import_model()` method is the core of the installer. The @@ -486,9 +502,10 @@ source2 = LocalModelSource(path='/opt/models/sushi_diffusers') # a local dif source3 = HFModelSource(repo_id='runwayml/stable-diffusion-v1-5') # a repo_id source4 = HFModelSource(repo_id='runwayml/stable-diffusion-v1-5', subfolder='vae') # a subfolder within a repo_id source5 = HFModelSource(repo_id='runwayml/stable-diffusion-v1-5', variant='fp16') # a named variant of a HF model +source6 = HFModelSource(repo_id='runwayml/stable-diffusion-v1-5', subfolder='OrangeMix/OrangeMix1.ckpt') # path to an individual model file -source6 = URLModelSource(url='https://civitai.com/api/download/models/63006') # model located at a URL -source7 = URLModelSource(url='https://civitai.com/api/download/models/63006', access_token='letmein') # with an access token +source7 = URLModelSource(url='https://civitai.com/api/download/models/63006') # model located at a URL +source8 = URLModelSource(url='https://civitai.com/api/download/models/63006', access_token='letmein') # with an access token for source in [source1, source2, source3, source4, source5, source6, source7]: install_job = installer.install_model(source) @@ -544,7 +561,6 @@ can be passed to `import_model()`. attributes returned by the model prober. See the section below for details. - #### LocalModelSource This is used for a model that is located on a locally-accessible Posix @@ -737,7 +753,7 @@ and `cancelled`, as well as `in_terminal_state`. The last will return True if the job is in the complete, errored or cancelled states. -#### Model confguration and probing +#### Model configuration and probing The install service uses the `invokeai.backend.model_manager.probe` module during import to determine the model's type, base type, and @@ -776,6 +792,14 @@ returns a list of completed jobs. The optional `timeout` argument will return from the call if jobs aren't completed in the specified time. An argument of 0 (the default) will block indefinitely. +#### jobs = installer.wait_for_job(job, [timeout]) + +Like `wait_for_installs()`, but block until a specific job has +completed or errored, and then return the job. The optional `timeout` +argument will return from the call if the job doesn't complete in the +specified time. An argument of 0 (the default) will block +indefinitely. + #### jobs = installer.list_jobs() Return a list of all active and complete `ModelInstallJobs`. @@ -838,6 +862,31 @@ This method is similar to `unregister()`, but also unconditionally deletes the corresponding model weights file(s), regardless of whether they are inside or outside the InvokeAI models hierarchy. + +#### path = installer.download_and_cache(remote_source, [access_token], [timeout]) + +This utility routine will download the model file located at source, +cache it, and return the path to the cached file. It does not attempt +to determine the model type, probe its configuration values, or +register it with the models database. + +You may provide an access token if the remote source requires +authorization. The call will block indefinitely until the file is +completely downloaded, cancelled or raises an error of some sort. 
If +you provide a timeout (in seconds), the call will raise a +`TimeoutError` exception if the download hasn't completed in the +specified period. + +You may use this mechanism to request any type of file, not just a +model. The file will be stored in a subdirectory of +`INVOKEAI_ROOT/models/.cache`. If the requested file is found in the +cache, its path will be returned without redownloading it. + +Be aware that the models cache is cleared of infrequently-used files +and directories at regular intervals when the size of the cache +exceeds the value specified in Invoke's `convert_cache` configuration +variable. + #### List[str]=installer.scan_directory(scan_dir: Path, install: bool) This method will recursively scan the directory indicated in @@ -1128,7 +1177,7 @@ job = queue.create_download_job( event_handlers=[my_handler1, my_handler2], # if desired start=True, ) - ``` +``` The `filename` argument forces the downloader to use the specified name for the file rather than the name provided by the remote source, @@ -1171,6 +1220,13 @@ queue or was not created by this queue. This method will block until all the active jobs in the queue have reached a terminal state (completed, errored or cancelled). +#### queue.wait_for_job(job, [timeout]) + +This method will block until the indicated job has reached a terminal +state (completed, errored or cancelled). If the optional timeout is +provided, the call will block for at most timeout seconds, and raise a +TimeoutError otherwise. + #### jobs = queue.list_jobs() This will return a list of all jobs, including ones that have not yet @@ -1449,9 +1505,9 @@ set of keys to the corresponding model config objects. Find all model metadata records that have the given author and return a set of keys to the corresponding model config objects. -# The remainder of this documentation is provisional, pending implementation of the Load service +*** -## Let's get loaded, the lowdown on ModelLoadService +## The Lowdown on the ModelLoadService The `ModelLoadService` is responsible for loading a named model into memory so that it can be used for inference. Despite the fact that it @@ -1465,7 +1521,7 @@ create alternative instances if you wish. ### Creating a ModelLoadService object The class is defined in -`invokeai.app.services.model_loader_service`. It is initialized with +`invokeai.app.services.model_load`. 
It is initialized with an InvokeAIAppConfig object, from which it gets configuration information such as the user's desired GPU and precision, and with a previously-created `ModelRecordServiceBase` object, from which it @@ -1475,26 +1531,29 @@ Here is a typical initialization pattern: ``` from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.app.services.model_record_service import ModelRecordServiceBase -from invokeai.app.services.model_loader_service import ModelLoadService +from invokeai.app.services.model_load import ModelLoadService, ModelLoaderRegistry config = InvokeAIAppConfig.get_config() -store = ModelRecordServiceBase.open(config) -loader = ModelLoadService(config, store) +ram_cache = ModelCache( + max_cache_size=config.ram_cache_size, max_vram_cache_size=config.vram_cache_size, logger=logger +) +convert_cache = ModelConvertCache( + cache_path=config.models_convert_cache_path, max_size=config.convert_cache_size +) +loader = ModelLoadService( + app_config=config, + ram_cache=ram_cache, + convert_cache=convert_cache, + registry=ModelLoaderRegistry +) ``` -Note that we are relying on the contents of the application -configuration to choose the implementation of -`ModelRecordServiceBase`. +### load_model(model_config, [submodel_type], [context]) -> LoadedModel -### get_model(key, [submodel_type], [context]) -> ModelInfo: - -*** TO DO: change to get_model(key, context=None, **kwargs) - -The `get_model()` method, like its similarly-named cousin in -`ModelRecordService`, receives the unique key that identifies the +The `load_model()` method takes an `AnyModelConfig` returned by +`ModelRecordService.get_model()` and returns the corresponding loaded model. It loads the model into memory, gets the model ready for use, -and returns a `ModelInfo` object. +and returns a `LoadedModel` object. The optional second argument, `subtype` is a `SubModelType` string enum, such as "vae". It is mandatory when used with a main model, and @@ -1504,46 +1563,45 @@ The optional third argument, `context` can be provided by an invocation to trigger model load event reporting. See below for details. -The returned `ModelInfo` object shares some fields in common with -`ModelConfigBase`, but is otherwise a completely different beast: +The returned `LoadedModel` object contains a copy of the configuration +record returned by the model record `get_model()` method, as well as +the in-memory loaded model: -| **Field Name** | **Type** | **Description** | + +| **Attribute Name** | **Type** | **Description** | |----------------|-----------------|------------------| -| `key` | str | The model key derived from the ModelRecordService database | -| `name` | str | Name of this model | -| `base_model` | BaseModelType | Base model for this model | -| `type` | ModelType or SubModelType | Either the model type (non-main) or the submodel type (main models)| -| `location` | Path or str | Location of the model on the filesystem | -| `precision` | torch.dtype | The torch.precision to use for inference | -| `context` | ModelCache.ModelLocker | A context class used to lock the model in VRAM while in use | +| `config` | AnyModelConfig | A copy of the model's configuration record for retrieving base type, etc. | +| `model` | AnyModel | The instantiated model (details below) | +| `locker` | ModelLockerBase | A context manager that mediates the movement of the model into VRAM | -The types for `ModelInfo` and `SubModelType` can be imported from -`invokeai.app.services.model_loader_service`. 
+Because the loader can return multiple model types, it is typed to +return `AnyModel`, a Union `ModelMixin`, `torch.nn.Module`, +`IAIOnnxRuntimeModel`, `IPAdapter`, `IPAdapterPlus`, and +`EmbeddingModelRaw`. `ModelMixin` is the base class of all diffusers +models, `EmbeddingModelRaw` is used for LoRA and TextualInversion +models. The others are obvious. -To use the model, you use the `ModelInfo` as a context manager using -the following pattern: + +`LoadedModel` acts as a context manager. The context loads the model +into the execution device (e.g. VRAM on CUDA systems), locks the model +in the execution device for the duration of the context, and returns +the model. Use it like this: ``` -model_info = loader.get_model('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae')) +model_info = loader.get_model_by_key('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae')) with model_info as vae: image = vae.decode(latents)[0] ``` -The `vae` model will stay locked in the GPU during the period of time -it is in the context manager's scope. +`get_model_by_key()` may raise any of the following exceptions: -`get_model()` may raise any of the following exceptions: - -- `UnknownModelException` -- key not in database -- `ModelNotFoundException` -- key in database but model not found at path -- `InvalidModelException` -- the model is guilty of a variety of sins +- `UnknownModelException` -- key not in database +- `ModelNotFoundException` -- key in database but model not found at path +- `NotImplementedException` -- the loader doesn't know how to load this type of model -** TO DO: ** Resolve discrepancy between ModelInfo.location and -ModelConfig.path. - ### Emitting model loading events -When the `context` argument is passed to `get_model()`, it will +When the `context` argument is passed to `load_model_*()`, it will retrieve the invocation event bus from the passed `InvocationContext` object to emit events on the invocation bus. The two events are "model_load_started" and "model_load_completed". Both carry the @@ -1556,10 +1614,174 @@ payload=dict( queue_batch_id=queue_batch_id, graph_execution_state_id=graph_execution_state_id, model_key=model_key, - submodel=submodel, + submodel_type=submodel, hash=model_info.hash, location=str(model_info.location), precision=str(model_info.precision), ) ``` +### Adding Model Loaders + +Model loaders are small classes that inherit from the `ModelLoader` +base class. They typically implement one method `_load_model()` whose +signature is: + +``` +def _load_model( + self, + model_path: Path, + model_variant: Optional[ModelRepoVariant] = None, + submodel_type: Optional[SubModelType] = None, +) -> AnyModel: +``` + +`_load_model()` will be passed the path to the model on disk, an +optional repository variant (used by the diffusers loaders to select, +e.g. the `fp16` variant, and an optional submodel_type for main and +onnx models. + +To install a new loader, place it in +`invokeai/backend/model_manager/load/model_loaders`. Inherit from +`ModelLoader` and use the `@ModelLoaderRegistry.register()` decorator to +indicate what type of models the loader can handle. + +Here is a complete example from `generic_diffusers.py`, which is able +to load several different diffusers types: + +``` +from pathlib import Path +from typing import Optional + +from invokeai.backend.model_manager import ( + AnyModel, + BaseModelType, + ModelFormat, + ModelRepoVariant, + ModelType, + SubModelType, +) +from .. 
import ModelLoader, ModelLoaderRegistry + + +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.CLIPVision, format=ModelFormat.Diffusers) +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.T2IAdapter, format=ModelFormat.Diffusers) +class GenericDiffusersLoader(ModelLoader): + """Class to load simple diffusers models.""" + + def _load_model( + self, + model_path: Path, + model_variant: Optional[ModelRepoVariant] = None, + submodel_type: Optional[SubModelType] = None, + ) -> AnyModel: + model_class = self._get_hf_load_class(model_path) + if submodel_type is not None: + raise Exception(f"There are no submodels in models of type {model_class}") + variant = model_variant.value if model_variant else None + result: AnyModel = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype, variant=variant) # type: ignore + return result +``` + +Note that a loader can register itself to handle several different +model types. An exception will be raised if more than one loader tries +to register the same model type. + +#### Conversion + +Some models require conversion to diffusers format before they can be +loaded. These loaders should override two additional methods: + +``` +_needs_conversion(self, config: AnyModelConfig, model_path: Path, dest_path: Path) -> bool +_convert_model(self, config: AnyModelConfig, model_path: Path, output_path: Path) -> Path: +``` + +The first method accepts the model configuration, the path to where +the unmodified model is currently installed, and a proposed +destination for the converted model. This method returns True if the +model needs to be converted. It typically does this by comparing the +last modification time of the original model file to the modification +time of the converted model. In some cases you will also want to check +the modification date of the configuration record, in the event that +the user has changed something like the scheduler prediction type that +will require the model to be re-converted. See `controlnet.py` for an +example of this logic. + +The second method accepts the model configuration, the path to the +original model on disk, and the desired output path for the converted +model. It does whatever it needs to do to get the model into diffusers +format, and returns the Path of the resulting model. (The path should +ordinarily be the same as `output_path`.) + +## The ModelManagerService object + +For convenience, the API provides a `ModelManagerService` object which +gives a single point of access to the major model manager +services. This object is created at initialization time and can be +found in the global `ApiDependencies.invoker.services.model_manager` +object, or in `context.services.model_manager` from within an +invocation. + +In the examples below, we have retrieved the manager using: +``` +mm = ApiDependencies.invoker.services.model_manager +``` + +The following properties and methods will be available: + +### mm.store + +This retrieves the `ModelRecordService` associated with the +manager. Example: + +``` +configs = mm.store.get_model_by_attr(name='stable-diffusion-v1-5') +``` + +### mm.install + +This retrieves the `ModelInstallService` associated with the manager. +Example: + +``` +job = mm.install.heuristic_import(`https://civitai.com/models/58390/detail-tweaker-lora-lora`) +``` + +### mm.load + +This retrieves the `ModelLoaderService` associated with the manager. 
 diff --git a/docs/nodes/INVOCATION_API.md b/docs/nodes/INVOCATION_API.md new file mode 100644 index 0000000000..c85c152c2b --- /dev/null +++ b/docs/nodes/INVOCATION_API.md @@ -0,0 +1,45 @@ +# Invocation API + +Each invocation's `invoke` method is provided a single arg - the Invocation +Context. + +This object provides access to various methods used to interact with the +application: loading and saving images, logging messages, and so on. + +!!! warning "" + +    This API may shift slightly until the release of v4.0.0 as we work through a few final updates to the Model Manager. + +```py +class MyInvocation(BaseInvocation): +    ... +    def invoke(self, context: InvocationContext) -> ImageOutput: +        image_pil = context.images.get_pil(image_name) +        # Do something to the image +        image_dto = context.images.save(image_pil) +        # Log a message +        context.logger.info(f"Did something cool, image saved!") +        ... +``` + + +::: invokeai.app.services.shared.invocation_context.InvocationContext +    options: +        members: false + +::: invokeai.app.services.shared.invocation_context.ImagesInterface + +::: invokeai.app.services.shared.invocation_context.TensorsInterface + +::: invokeai.app.services.shared.invocation_context.ConditioningInterface + +::: invokeai.app.services.shared.invocation_context.ModelsInterface + +::: invokeai.app.services.shared.invocation_context.LoggerInterface + +::: invokeai.app.services.shared.invocation_context.ConfigInterface + +::: invokeai.app.services.shared.invocation_context.UtilInterface + +::: invokeai.app.services.shared.invocation_context.BoardsInterface + diff --git a/docs/nodes/NODES_MIGRATION_V3_V4.md b/docs/nodes/NODES_MIGRATION_V3_V4.md new file mode 100644 index 0000000000..3ba0854581 --- /dev/null +++ b/docs/nodes/NODES_MIGRATION_V3_V4.md @@ -0,0 +1,148 @@ +# Invoke v4.0.0 Nodes API Migration guide + +Invoke v4.0.0 is versioned as such due to breaking changes to the API utilized +by nodes, both core and custom. + +## Motivation + +Prior to v4.0.0, the `invokeai` python package had not been set up to be utilized +as a library. That is to say, it didn't have an explicit public API, and node +authors had to work with the unstable internal application API. + +v4.0.0 introduces a stable public API for nodes. + +## Changes + +There are two node-author-facing changes: + +1. Import Paths +1. Invocation Context API + +### Import Paths + +All public objects are now exported from `invokeai.invocation_api`: + +```py +# Old +from invokeai.app.invocations.baseinvocation import ( +    BaseInvocation, +    InputField, +    InvocationContext, +    invocation, +) +from invokeai.app.invocations.primitives import ImageField + +# New +from invokeai.invocation_api import ( +    BaseInvocation, +    ImageField, +    InputField, +    InvocationContext, +    invocation, +) +``` + +It's possible that we've missed some classes you need in your node. Please let +us know if that's the case.
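+
+If you need a single node file to run on both v3 and v4 during the
+transition, one possible (unofficial) pattern is to try the new import path
+and fall back to the old one. This is a sketch, not an officially supported
+mechanism:
+
+```py
+try:
+    # Invoke v4.0.0 and later
+    from invokeai.invocation_api import BaseInvocation, InputField, InvocationContext, invocation
+except ImportError:
+    # Invoke v3.x
+    from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, InvocationContext, invocation
+```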
+ +### Invocation Context API + +Most nodes utilize the Invocation Context, an object that is passed to the +`invoke` method and provides access to data and services a node may need. + +Until now, that object and the services it exposed were internal. Exposing them +to nodes means that changes to our internal implementation could break nodes. +The methods on the services are also often fairly complicated and made it easy +for nodes to footgun. + +In v4.0.0, this object has been refactored to be much simpler. + +See [INVOCATION_API](./INVOCATION_API.md) for full details of the API. + +!!! warning "" + +    This API may shift slightly until the release of v4.0.0 as we work through a few final updates to the Model Manager. + +#### Improved Service Methods + +The biggest offender was the image save method: + +```py +# Old +image_dto = context.services.images.create( +    image=image, +    image_origin=ResourceOrigin.INTERNAL, +    image_category=ImageCategory.GENERAL, +    node_id=self.id, +    session_id=context.graph_execution_state_id, +    is_intermediate=self.is_intermediate, +    metadata=self.metadata, +    workflow=context.workflow, +) + +# New +image_dto = context.images.save(image=image) +``` + +Other methods are simplified, or enhanced with additional functionality: + +```py +# Old +image = context.services.images.get_pil_image(image_name) + +# New +image = context.images.get_pil(image_name) +image_cmyk = context.images.get_pil(image_name, "CMYK") +``` + +We also had some typing issues around tensors: + +```py +# Old +# `latents` typed as `torch.Tensor`, but could be `ConditioningFieldData` +latents = context.services.latents.get(self.latents.latents_name) +# `data` typed as `torch.Tensor`, but could be `ConditioningFieldData` +context.services.latents.save(latents_name, data) + +# New - separate methods for tensors and conditioning data w/ correct typing +# Also, the service generates the names +tensor_name = context.tensors.save(tensor) +tensor = context.tensors.load(tensor_name) +# For conditioning +cond_name = context.conditioning.save(cond_data) +cond_data = context.conditioning.load(cond_name) +``` + +#### Output Construction + +Core Outputs have builder functions right on them - no need to manually +construct these objects, or use an extra utility: + +```py +# Old +image_output = ImageOutput( +    image=ImageField(image_name=image_dto.image_name), +    width=image_dto.width, +    height=image_dto.height, +) +latents_output = build_latents_output(latents_name=name, latents=latents, seed=None) +noise_output = NoiseOutput( +    noise=LatentsField(latents_name=latents_name, seed=seed), +    width=latents.size()[3] * 8, +    height=latents.size()[2] * 8, +) +cond_output = ConditioningOutput( +    conditioning=ConditioningField( +        conditioning_name=conditioning_name, +    ), +) + +# New +image_output = ImageOutput.build(image_dto) +latents_output = LatentsOutput.build(latents_name=name, latents=latents, seed=seed) +noise_output = NoiseOutput.build(latents_name=name, latents=noise, seed=self.seed) +cond_output = ConditioningOutput.build(conditioning_name) +``` + +You can still create the objects using constructors if you want, but we suggest +using the builder methods.
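+
+Putting it all together, a complete v4-style node might look like the
+following sketch. The invert operation and the node's type string, title and
+version are illustrative only, and it assumes `ImageOutput` is exported from
+`invokeai.invocation_api` alongside the classes shown above:
+
+```py
+from PIL import ImageOps
+
+from invokeai.invocation_api import (
+    BaseInvocation,
+    ImageField,
+    ImageOutput,
+    InputField,
+    InvocationContext,
+    invocation,
+)
+
+
+@invocation("example_invert", title="Invert Image (Example)", version="1.0.0")
+class ExampleInvertInvocation(BaseInvocation):
+    """Inverts an image - a minimal example of the v4 invocation API."""
+
+    image: ImageField = InputField(description="The image to invert")
+
+    def invoke(self, context: InvocationContext) -> ImageOutput:
+        image_pil = context.images.get_pil(self.image.image_name)
+        # ImageOps.invert requires a mode without an alpha channel
+        inverted = ImageOps.invert(image_pil.convert("RGB"))
+        image_dto = context.images.save(image=inverted)
+        return ImageOutput.build(image_dto)
+```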
 diff --git a/docs/nodes/communityNodes.md b/docs/nodes/communityNodes.md index 906111f45b..296fbb7ee6 100644 --- a/docs/nodes/communityNodes.md +++ b/docs/nodes/communityNodes.md @@ -32,6 +32,7 @@ To use a community workflow, download the the `.json` node graph file and load i + [Image to Character Art Image Nodes](#image-to-character-art-image-nodes) + [Image Picker](#image-picker) + [Image Resize Plus](#image-resize-plus) + + [Latent Upscale](#latent-upscale) + [Load Video Frame](#load-video-frame) + [Make 3D](#make-3d) + [Mask Operations](#mask-operations) @@ -290,6 +291,13 @@ View:
+-------------------------------- +### Latent Upscale + +**Description:** This node uses a small (~2.4 MB) model to upscale the latents used in a Stable Diffusion 1.5 or Stable Diffusion XL image generation, rather than the typical interpolation method, avoiding the traditional downsides of the latent upscale technique. + +**Node Link:** [https://github.com/gogurtenjoyer/latent-upscale](https://github.com/gogurtenjoyer/latent-upscale) + -------------------------------- ### Load Video Frame @@ -346,12 +354,21 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai **Description:** A set of nodes for Metadata. Collect Metadata from within an `iterate` node & extract metadata from an image. -- `Metadata Item Linked` - Allows collecting of metadata while within an iterate node with no need for a collect node or conversion to metadata node. -- `Metadata From Image` - Provides Metadata from an image. -- `Metadata To String` - Extracts a String value of a label from metadata. -- `Metadata To Integer` - Extracts an Integer value of a label from metadata. -- `Metadata To Float` - Extracts a Float value of a label from metadata. -- `Metadata To Scheduler` - Extracts a Scheduler value of a label from metadata. +- `Metadata Item Linked` - Allows collecting of metadata while within an iterate node with no need for a collect node or conversion to metadata node +- `Metadata From Image` - Provides Metadata from an image +- `Metadata To String` - Extracts a String value of a label from metadata +- `Metadata To Integer` - Extracts an Integer value of a label from metadata +- `Metadata To Float` - Extracts a Float value of a label from metadata +- `Metadata To Scheduler` - Extracts a Scheduler value of a label from metadata +- `Metadata To Bool` - Extracts Bool types from metadata +- `Metadata To Model` - Extracts model types from metadata +- `Metadata To SDXL Model` - Extracts SDXL model types from metadata +- `Metadata To LoRAs` - Extracts LoRAs from metadata +- `Metadata To SDXL LoRAs` - Extracts SDXL LoRAs from metadata +- `Metadata To ControlNets` - Extracts ControlNets from metadata +- `Metadata To IP-Adapters` - Extracts IP-Adapters from metadata +- `Metadata To T2I-Adapters` - Extracts T2I-Adapters from metadata +- `Denoise Latents + Metadata` - This is an inherited version of the existing `Denoise Latents` node but with a metadata input and output.
**Node Link:** https://github.com/skunkworxdark/metadata-linked-nodes diff --git a/docs/requirements-mkdocs.txt b/docs/requirements-mkdocs.txt deleted file mode 100644 index a637622954..0000000000 --- a/docs/requirements-mkdocs.txt +++ /dev/null @@ -1,5 +0,0 @@ -mkdocs -mkdocs-material>=8, <9 -mkdocs-git-revision-date-localized-plugin -mkdocs-redirects==1.2.0 - diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css deleted file mode 100644 index 42a2cfe74a..0000000000 --- a/docs/stylesheets/extra.css +++ /dev/null @@ -1,5 +0,0 @@ -:root { - --md-primary-fg-color: #35A4DB; - --md-primary-fg-color--light: #35A4DB; - --md-primary-fg-color--dark: #35A4DB; - } \ No newline at end of file diff --git a/installer/create_installer.sh b/installer/create_installer.sh index 57b681c42e..a71b0d9c41 100755 --- a/installer/create_installer.sh +++ b/installer/create_installer.sh @@ -2,22 +2,18 @@ set -e -BCYAN="\e[1;36m" -BYELLOW="\e[1;33m" -BGREEN="\e[1;32m" -BRED="\e[1;31m" -RED="\e[31m" -RESET="\e[0m" - -function is_bin_in_path { - builtin type -P "$1" &>/dev/null -} +BCYAN="\033[1;36m" +BYELLOW="\033[1;33m" +BGREEN="\033[1;32m" +BRED="\033[1;31m" +RED="\033[31m" +RESET="\033[0m" function git_show { git show -s --format=oneline --abbrev-commit "$1" | cat } -if [[ -v "VIRTUAL_ENV" ]]; then +if [[ ! -z "${VIRTUAL_ENV}" ]]; then # we can't just call 'deactivate' because this function is not exported # to the environment of this script from the bash process that runs the script echo -e "${BRED}A virtual environment is activated. Please deactivate it before proceeding.${RESET}" @@ -26,31 +22,63 @@ fi cd "$(dirname "$0")" -echo -echo -e "${BYELLOW}This script must be run from the installer directory!${RESET}" -echo "The current working directory is $(pwd)" -read -p "If that looks right, press any key to proceed, or CTRL-C to exit..." -echo - -# Some machines only have `python3` in PATH, others have `python` - make an alias. -# We can use a function to approximate an alias within a non-interactive shell. -if ! is_bin_in_path python && is_bin_in_path python3; then - function python { - python3 "$@" - } -fi - VERSION=$( cd .. - python -c "from invokeai.version import __version__ as version; print(version)" + python3 -c "from invokeai.version import __version__ as version; print(version)" ) -PATCH="" -VERSION="v${VERSION}${PATCH}" +VERSION="v${VERSION}" + +if [[ ! -z ${CI} ]]; then + echo + echo -e "${BCYAN}CI environment detected${RESET}" + echo +else + echo + echo -e "${BYELLOW}This script must be run from the installer directory!${RESET}" + echo "The current working directory is $(pwd)" + read -p "If that looks right, press any key to proceed, or CTRL-C to exit..." + echo +fi echo -e "${BGREEN}HEAD${RESET}:" git_show HEAD echo +# ---------------------- FRONTEND ---------------------- + +pushd ../invokeai/frontend/web >/dev/null +echo "Installing frontend dependencies..." +echo +pnpm i --frozen-lockfile +echo +if [[ ! -z ${CI} ]]; then + echo "Building frontend without checks..." + # In CI, we have already done the frontend checks and can just build + pnpm vite build +else + echo "Running checks and building frontend..." + # This runs all the frontend checks and builds + pnpm build +fi +echo +popd + +# ---------------------- BACKEND ---------------------- + +echo +echo "Building wheel..." 
+echo + +# install the 'build' package in the user site packages, if needed +# could be improved by using a temporary venv, but it's tiny and harmless +if [[ $(python3 -c 'from importlib.util import find_spec; print(find_spec("build") is None)') == "True" ]]; then +    pip install --user build +fi + +rm -rf ../build + +python3 -m build --outdir dist/ ../. + # ---------------------- echo @@ -78,10 +106,28 @@ chmod a+x InvokeAI-Installer/install.sh cp install.bat.in InvokeAI-Installer/install.bat cp WinLongPathsEnabled.reg InvokeAI-Installer/ -# Zip everything up -zip -r InvokeAI-installer-$VERSION.zip InvokeAI-Installer +FILENAME=InvokeAI-installer-$VERSION.zip -# clean up -rm -rf InvokeAI-Installer tmp dist ../invokeai/frontend/web/dist/ +# Zip everything up +zip -r ${FILENAME} InvokeAI-Installer + +echo +echo -e "${BGREEN}Built installer: ./${FILENAME}${RESET}" +echo -e "${BGREEN}Built PyPi distribution: ./dist${RESET}" + +# clean up, but only if we are not in a github action +if [[ -z ${CI} ]]; then +    echo +    echo "Cleaning up intermediate build files..." +    rm -rf InvokeAI-Installer tmp ../invokeai/frontend/web/dist/ +fi + +if [[ ! -z ${CI} ]]; then +    echo +    echo "Setting GitHub action outputs..." +    echo "INSTALLER_FILENAME=${FILENAME}" >>$GITHUB_OUTPUT +    echo "INSTALLER_PATH=installer/${FILENAME}" >>$GITHUB_OUTPUT +    echo "DIST_PATH=installer/dist/" >>$GITHUB_OUTPUT +fi exit 0 diff --git a/installer/tag_release.sh b/installer/tag_release.sh index a914c1a505..b6d1830902 100755 --- a/installer/tag_release.sh +++ b/installer/tag_release.sh @@ -2,12 +2,12 @@ set -e -BCYAN="\e[1;36m" -BYELLOW="\e[1;33m" -BGREEN="\e[1;32m" -BRED="\e[1;31m" -RED="\e[31m" -RESET="\e[0m" +BCYAN="\033[1;36m" +BYELLOW="\033[1;33m" +BGREEN="\033[1;32m" +BRED="\033[1;31m" +RED="\033[31m" +RESET="\033[0m" function does_tag_exist { git rev-parse --quiet --verify "refs/tags/$1" >/dev/null @@ -23,49 +23,40 @@ function git_show { VERSION=$( cd .. -    python -c "from invokeai.version import __version__ as version; print(version)" +    python3 -c "from invokeai.version import __version__ as version; print(version)" ) PATCH="" -MAJOR_VERSION=$(echo $VERSION | sed 's/\..*$//') VERSION="v${VERSION}${PATCH}" -LATEST_TAG="v${MAJOR_VERSION}-latest" if does_tag_exist $VERSION; then echo -e "${BCYAN}${VERSION}${RESET} already exists:" git_show_ref tags/$VERSION echo fi -if does_tag_exist $LATEST_TAG; then -    echo -e "${BCYAN}${LATEST_TAG}${RESET} already exists:" -    git_show_ref tags/$LATEST_TAG -    echo -fi echo -e "${BGREEN}HEAD${RESET}:" git_show echo +echo -e "${BGREEN}git remote -v${RESET}:" +git remote -v +echo + -echo -e -n "Create tags ${BCYAN}${VERSION}${RESET} and ${BCYAN}${LATEST_TAG}${RESET} @ ${BGREEN}HEAD${RESET}, ${RED}deleting existing tags on remote${RESET}? " +echo -e -n "Create tag ${BCYAN}${VERSION}${RESET} @ ${BGREEN}HEAD${RESET}, ${RED}deleting the existing tag on origin remote${RESET}? " read -e -p 'y/n [n]: ' input RESPONSE=${input:='n'} if [ "$RESPONSE" == 'y' ]; then echo -    echo -e "Deleting ${BCYAN}${VERSION}${RESET} tag on remote..." -    git push --delete origin $VERSION +    echo -e "Deleting ${BCYAN}${VERSION}${RESET} tag on origin remote..." +    git push origin :refs/tags/$VERSION -    echo -e "Tagging ${BGREEN}HEAD${RESET} with ${BCYAN}${VERSION}${RESET} locally..." +    echo -e "Tagging ${BGREEN}HEAD${RESET} with ${BCYAN}${VERSION}${RESET} locally..." if ! git tag -fa $VERSION; then echo "Existing/invalid tag" exit -1 fi -    echo -e "Deleting ${BCYAN}${LATEST_TAG}${RESET} tag on remote..." 
- git push --delete origin $LATEST_TAG - - echo -e "Tagging ${BGREEN}HEAD${RESET} with ${BCYAN}${LATEST_TAG}${RESET} locally..." - git tag -fa $LATEST_TAG - - echo -e "Pushing updated tags to remote..." + echo -e "Pushing updated tags to origin remote..." git push origin --tags fi exit 0 diff --git a/invokeai/app/services/invocation_processor/__init__.py b/invokeai/app/__init__.py similarity index 100% rename from invokeai/app/services/invocation_processor/__init__.py rename to invokeai/app/__init__.py diff --git a/invokeai/app/api/dependencies.py b/invokeai/app/api/dependencies.py index c8309e1729..95407291ec 100644 --- a/invokeai/app/api/dependencies.py +++ b/invokeai/app/api/dependencies.py @@ -2,9 +2,12 @@ from logging import Logger -from invokeai.app.services.item_storage.item_storage_memory import ItemStorageMemory +import torch + +from invokeai.app.services.object_serializer.object_serializer_disk import ObjectSerializerDisk +from invokeai.app.services.object_serializer.object_serializer_forward_cache import ObjectSerializerForwardCache from invokeai.app.services.shared.sqlite.sqlite_util import init_db -from invokeai.backend.model_manager.metadata import ModelMetadataStore +from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData from invokeai.backend.util.logging import InvokeAILogger from invokeai.version.invokeai_version import __version__ @@ -12,26 +15,22 @@ from ..services.board_image_records.board_image_records_sqlite import SqliteBoar from ..services.board_images.board_images_default import BoardImagesService from ..services.board_records.board_records_sqlite import SqliteBoardRecordStorage from ..services.boards.boards_default import BoardService +from ..services.bulk_download.bulk_download_default import BulkDownloadService from ..services.config import InvokeAIAppConfig from ..services.download import DownloadQueueService from ..services.image_files.image_files_disk import DiskImageFileStorage from ..services.image_records.image_records_sqlite import SqliteImageRecordStorage from ..services.images.images_default import ImageService from ..services.invocation_cache.invocation_cache_memory import MemoryInvocationCache -from ..services.invocation_processor.invocation_processor_default import DefaultInvocationProcessor -from ..services.invocation_queue.invocation_queue_memory import MemoryInvocationQueue from ..services.invocation_services import InvocationServices from ..services.invocation_stats.invocation_stats_default import InvocationStatsService from ..services.invoker import Invoker -from ..services.latents_storage.latents_storage_disk import DiskLatentsStorage -from ..services.latents_storage.latents_storage_forward_cache import ForwardCacheLatentsStorage -from ..services.model_install import ModelInstallService from ..services.model_manager.model_manager_default import ModelManagerService +from ..services.model_metadata import ModelMetadataStoreSQL from ..services.model_records import ModelRecordServiceSQL from ..services.names.names_default import SimpleNameService from ..services.session_processor.session_processor_default import DefaultSessionProcessor from ..services.session_queue.session_queue_sqlite import SqliteSessionQueue -from ..services.shared.graph import GraphExecutionState from ..services.urls.urls_default import LocalUrlService from ..services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage from .events import FastAPIEventService @@ -68,6 +67,9 @@ class ApiDependencies: 
logger.debug(f"Internet connectivity is {config.internet_available}") output_folder = config.output_path + if output_folder is None: + raise ValueError("Output folder is not set") + image_files = DiskImageFileStorage(f"{output_folder}/images") db = init_db(config=config, logger=logger, image_files=image_files) @@ -80,26 +82,26 @@ class ApiDependencies: board_records = SqliteBoardRecordStorage(db=db) boards = BoardService() events = FastAPIEventService(event_handler_id) - graph_execution_manager = ItemStorageMemory[GraphExecutionState]() + bulk_download = BulkDownloadService() image_records = SqliteImageRecordStorage(db=db) images = ImageService() invocation_cache = MemoryInvocationCache(max_cache_size=config.node_cache_size) - latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f"{output_folder}/latents")) - model_manager = ModelManagerService(config, logger) - model_record_service = ModelRecordServiceSQL(db=db) + tensors = ObjectSerializerForwardCache( + ObjectSerializerDisk[torch.Tensor](output_folder / "tensors", ephemeral=True) + ) + conditioning = ObjectSerializerForwardCache( + ObjectSerializerDisk[ConditioningFieldData](output_folder / "conditioning", ephemeral=True) + ) download_queue_service = DownloadQueueService(event_bus=events) - metadata_store = ModelMetadataStore(db=db) - model_install_service = ModelInstallService( - app_config=config, - record_store=model_record_service, + model_metadata_service = ModelMetadataStoreSQL(db=db) + model_manager = ModelManagerService.build_model_manager( + app_config=configuration, + model_record_service=ModelRecordServiceSQL(db=db, metadata_store=model_metadata_service), download_queue=download_queue_service, - metadata_store=metadata_store, - event_bus=events, + events=events, ) names = SimpleNameService() performance_statistics = InvocationStatsService() - processor = DefaultInvocationProcessor() - queue = MemoryInvocationQueue() session_processor = DefaultSessionProcessor() session_queue = SqliteSessionQueue(db=db) urls = LocalUrlService() @@ -110,27 +112,24 @@ class ApiDependencies: board_images=board_images, board_records=board_records, boards=boards, + bulk_download=bulk_download, configuration=configuration, events=events, - graph_execution_manager=graph_execution_manager, image_files=image_files, image_records=image_records, images=images, invocation_cache=invocation_cache, - latents=latents, logger=logger, model_manager=model_manager, - model_records=model_record_service, download_queue=download_queue_service, - model_install=model_install_service, names=names, performance_statistics=performance_statistics, - processor=processor, - queue=queue, session_processor=session_processor, session_queue=session_queue, urls=urls, workflow_records=workflow_records, + tensors=tensors, + conditioning=conditioning, ) ApiDependencies.invoker = Invoker(services) diff --git a/invokeai/app/api/routers/download_queue.py b/invokeai/app/api/routers/download_queue.py index 92b658c370..a6e53c7a5c 100644 --- a/invokeai/app/api/routers/download_queue.py +++ b/invokeai/app/api/routers/download_queue.py @@ -36,7 +36,7 @@ async def list_downloads() -> List[DownloadJob]: 400: {"description": "Bad request"}, }, ) -async def prune_downloads(): +async def prune_downloads() -> Response: """Prune completed and errored jobs.""" queue = ApiDependencies.invoker.services.download_queue queue.prune_jobs() @@ -55,7 +55,7 @@ async def download( ) -> DownloadJob: """Download the source URL to the file or directory indicted in dest.""" queue = 
ApiDependencies.invoker.services.download_queue - return queue.download(source, dest, priority, access_token) + return queue.download(source, Path(dest), priority, access_token) @download_queue_router.get( @@ -87,7 +87,7 @@ async def get_download_job( ) async def cancel_download_job( id: int = Path(description="ID of the download job to cancel."), -): +) -> Response: """Cancel a download job using its ID.""" try: queue = ApiDependencies.invoker.services.download_queue @@ -105,7 +105,7 @@ async def cancel_download_job( 204: {"description": "Download jobs have been cancelled"}, }, ) -async def cancel_all_download_jobs(): +async def cancel_all_download_jobs() -> Response: """Cancel all download jobs.""" ApiDependencies.invoker.services.download_queue.cancel_all_jobs() return Response(status_code=204) diff --git a/invokeai/app/api/routers/images.py b/invokeai/app/api/routers/images.py index 125896b8d3..dc8a04b711 100644 --- a/invokeai/app/api/routers/images.py +++ b/invokeai/app/api/routers/images.py @@ -2,13 +2,13 @@ import io import traceback from typing import Optional -from fastapi import Body, HTTPException, Path, Query, Request, Response, UploadFile +from fastapi import BackgroundTasks, Body, HTTPException, Path, Query, Request, Response, UploadFile from fastapi.responses import FileResponse from fastapi.routing import APIRouter from PIL import Image from pydantic import BaseModel, Field, ValidationError -from invokeai.app.invocations.baseinvocation import MetadataField, MetadataFieldValidator +from invokeai.app.invocations.fields import MetadataField, MetadataFieldValidator from invokeai.app.services.image_records.image_records_common import ImageCategory, ImageRecordChanges, ResourceOrigin from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO from invokeai.app.services.shared.pagination import OffsetPaginatedResults @@ -375,16 +375,67 @@ async def unstar_images_in_list( class ImagesDownloaded(BaseModel): response: Optional[str] = Field( - description="If defined, the message to display to the user when images begin downloading" + default=None, description="The message to display to the user when images begin downloading" + ) + bulk_download_item_name: Optional[str] = Field( + default=None, description="The name of the bulk download item for which events will be emitted" ) -@images_router.post("/download", operation_id="download_images_from_list", response_model=ImagesDownloaded) +@images_router.post( + "/download", operation_id="download_images_from_list", response_model=ImagesDownloaded, status_code=202 +) async def download_images_from_list( - image_names: list[str] = Body(description="The list of names of images to download", embed=True), + background_tasks: BackgroundTasks, + image_names: Optional[list[str]] = Body( + default=None, description="The list of names of images to download", embed=True + ), board_id: Optional[str] = Body( - default=None, description="The board from which image should be downloaded from", embed=True + default=None, description="The board from which image should be downloaded", embed=True ), ) -> ImagesDownloaded: - # return ImagesDownloaded(response="Your images are downloading") - raise HTTPException(status_code=501, detail="Endpoint is not yet implemented") + if (image_names is None or len(image_names) == 0) and board_id is None: + raise HTTPException(status_code=400, detail="No images or board id specified.") + bulk_download_item_id: str = ApiDependencies.invoker.services.bulk_download.generate_item_id(board_id) + + 
background_tasks.add_task( + ApiDependencies.invoker.services.bulk_download.handler, + image_names, + board_id, + bulk_download_item_id, + ) + return ImagesDownloaded(bulk_download_item_name=bulk_download_item_id + ".zip") + + +@images_router.api_route( + "/download/{bulk_download_item_name}", + methods=["GET"], + operation_id="get_bulk_download_item", + response_class=Response, + responses={ + 200: { + "description": "Return the complete bulk download item", + "content": {"application/zip": {}}, + }, + 404: {"description": "Image not found"}, + }, +) +async def get_bulk_download_item( + background_tasks: BackgroundTasks, + bulk_download_item_name: str = Path(description="The bulk_download_item_name of the bulk download item to get"), +) -> FileResponse: + """Gets a bulk download zip file""" + try: + path = ApiDependencies.invoker.services.bulk_download.get_path(bulk_download_item_name) + + response = FileResponse( + path, + media_type="application/zip", + filename=bulk_download_item_name, + content_disposition_type="inline", + ) + response.headers["Cache-Control"] = f"max-age={IMAGE_MAX_AGE}" + background_tasks.add_task(ApiDependencies.invoker.services.bulk_download.delete, bulk_download_item_name) + return response + except Exception: + raise HTTPException(status_code=404) diff --git a/invokeai/app/api/routers/model_manager.py b/invokeai/app/api/routers/model_manager.py new file mode 100644 index 0000000000..50ebe5ce64 --- /dev/null +++ b/invokeai/app/api/routers/model_manager.py @@ -0,0 +1,751 @@ +# Copyright (c) 2023 Lincoln D. Stein +"""FastAPI route for model configuration records.""" + +import pathlib +import shutil +from hashlib import sha1 +from random import randbytes +from typing import Any, Dict, List, Optional, Set + +from fastapi import Body, Path, Query, Response +from fastapi.routing import APIRouter +from pydantic import BaseModel, ConfigDict, Field +from starlette.exceptions import HTTPException +from typing_extensions import Annotated + +from invokeai.app.services.model_install import ModelInstallJob +from invokeai.app.services.model_records import ( + DuplicateModelException, + InvalidModelException, + ModelRecordOrderBy, + ModelSummary, + UnknownModelException, +) +from invokeai.app.services.shared.pagination import PaginatedResults +from invokeai.backend.model_manager.config import ( + AnyModelConfig, + BaseModelType, + MainCheckpointConfig, + ModelFormat, + ModelType, + SubModelType, +) +from invokeai.backend.model_manager.merge import MergeInterpolationMethod, ModelMerger +from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata +from invokeai.backend.model_manager.search import ModelSearch + +from ..dependencies import ApiDependencies + +model_manager_router = APIRouter(prefix="/v2/models", tags=["model_manager"]) + + +class ModelsList(BaseModel): + """Return list of configs.""" + + models: List[AnyModelConfig] + + model_config = ConfigDict(use_enum_values=True) + + +class ModelTagSet(BaseModel): + """Return tags for a set of models.""" + + key: str + name: str + author: str + tags: Set[str] + + +############################################################################## +# These are example inputs and outputs that are used in places where Swagger +# is unable to generate a correct example. 
+############################################################################## +example_model_config = { + "path": "string", + "name": "string", + "base": "sd-1", + "type": "main", + "format": "checkpoint", + "config": "string", + "key": "string", + "original_hash": "string", + "current_hash": "string", + "description": "string", + "source": "string", + "last_modified": 0, + "vae": "string", + "variant": "normal", + "prediction_type": "epsilon", + "repo_variant": "fp16", + "upcast_attention": False, + "ztsnr_training": False, +} + +example_model_input = { + "path": "/path/to/model", + "name": "model_name", + "base": "sd-1", + "type": "main", + "format": "checkpoint", + "config": "configs/stable-diffusion/v1-inference.yaml", + "description": "Model description", + "vae": None, + "variant": "normal", +} + +example_model_metadata = { + "name": "ip_adapter_sd_image_encoder", + "author": "InvokeAI", + "tags": [ + "transformers", + "safetensors", + "clip_vision_model", + "endpoints_compatible", + "region:us", + "has_space", + "license:apache-2.0", + ], + "files": [ + { + "url": "https://huggingface.co/InvokeAI/ip_adapter_sd_image_encoder/resolve/main/README.md", + "path": "ip_adapter_sd_image_encoder/README.md", + "size": 628, + "sha256": None, + }, + { + "url": "https://huggingface.co/InvokeAI/ip_adapter_sd_image_encoder/resolve/main/config.json", + "path": "ip_adapter_sd_image_encoder/config.json", + "size": 560, + "sha256": None, + }, + { + "url": "https://huggingface.co/InvokeAI/ip_adapter_sd_image_encoder/resolve/main/model.safetensors", + "path": "ip_adapter_sd_image_encoder/model.safetensors", + "size": 2528373448, + "sha256": "6ca9667da1ca9e0b0f75e46bb030f7e011f44f86cbfb8d5a36590fcd7507b030", + }, + ], + "type": "huggingface", + "id": "InvokeAI/ip_adapter_sd_image_encoder", + "tag_dict": {"license": "apache-2.0"}, + "last_modified": "2023-09-23T17:33:25Z", +} + +############################################################################## +# ROUTES +############################################################################## + + +@model_manager_router.get( + "/", + operation_id="list_model_records", +) +async def list_model_records( + base_models: Optional[List[BaseModelType]] = Query(default=None, description="Base models to include"), + model_type: Optional[ModelType] = Query(default=None, description="The type of model to get"), + model_name: Optional[str] = Query(default=None, description="Exact match on the name of the model"), + model_format: Optional[ModelFormat] = Query( + default=None, description="Exact match on the format of the model (e.g. 
'diffusers')" + ), +) -> ModelsList: + """Get a list of models.""" + record_store = ApiDependencies.invoker.services.model_manager.store + found_models: list[AnyModelConfig] = [] + if base_models: + for base_model in base_models: + found_models.extend( + record_store.search_by_attr( + base_model=base_model, model_type=model_type, model_name=model_name, model_format=model_format + ) + ) + else: + found_models.extend( + record_store.search_by_attr(model_type=model_type, model_name=model_name, model_format=model_format) + ) + return ModelsList(models=found_models) + + +@model_manager_router.get( + "/get_by_attrs", + operation_id="get_model_records_by_attrs", + response_model=AnyModelConfig, +) +async def get_model_records_by_attrs( + name: str = Query(description="The name of the model"), + type: ModelType = Query(description="The type of the model"), + base: BaseModelType = Query(description="The base model of the model"), +) -> AnyModelConfig: + """Gets a model by its attributes. The main use of this route is to provide backwards compatibility with the old + model manager, which identified models by a combination of name, base and type.""" + configs = ApiDependencies.invoker.services.model_manager.store.search_by_attr( + base_model=base, model_type=type, model_name=name + ) + if not configs: + raise HTTPException(status_code=404, detail="No model found with these attributes") + + return configs[0] + + +@model_manager_router.get( + "/i/{key}", + operation_id="get_model_record", + responses={ + 200: { + "description": "The model configuration was retrieved successfully", + "content": {"application/json": {"example": example_model_config}}, + }, + 400: {"description": "Bad request"}, + 404: {"description": "The model could not be found"}, + }, +) +async def get_model_record( + key: str = Path(description="Key of the model record to fetch."), +) -> AnyModelConfig: + """Get a model record""" + record_store = ApiDependencies.invoker.services.model_manager.store + try: + config: AnyModelConfig = record_store.get_model(key) + return config + except UnknownModelException as e: + raise HTTPException(status_code=404, detail=str(e)) + + +@model_manager_router.get("/summary", operation_id="list_model_summary") +async def list_model_summary( + page: int = Query(default=0, description="The page to get"), + per_page: int = Query(default=10, description="The number of models per page"), + order_by: ModelRecordOrderBy = Query(default=ModelRecordOrderBy.Default, description="The attribute to order by"), +) -> PaginatedResults[ModelSummary]: + """Gets a page of model summary data.""" + record_store = ApiDependencies.invoker.services.model_manager.store + results: PaginatedResults[ModelSummary] = record_store.list_models(page=page, per_page=per_page, order_by=order_by) + return results + + +@model_manager_router.get( + "/i/{key}/metadata", + operation_id="get_model_metadata", + responses={ + 200: { + "description": "The model metadata was retrieved successfully", + "content": {"application/json": {"example": example_model_metadata}}, + }, + 400: {"description": "Bad request"}, + }, +) +async def get_model_metadata( + key: str = Path(description="Key of the model repo metadata to fetch."), +) -> Optional[AnyModelRepoMetadata]: + """Get a model metadata object.""" + record_store = ApiDependencies.invoker.services.model_manager.store + result: Optional[AnyModelRepoMetadata] = record_store.get_metadata(key) + + return result + + +@model_manager_router.get( + "/tags", + operation_id="list_tags", +) +async def 
list_tags() -> Set[str]: +    """Get a unique set of all the model tags.""" +    record_store = ApiDependencies.invoker.services.model_manager.store +    result: Set[str] = record_store.list_tags() +    return result + + +class FoundModel(BaseModel): +    path: str = Field(description="Path to the model") +    is_installed: bool = Field(description="Whether or not the model is already installed") + + +@model_manager_router.get( +    "/scan_folder", +    operation_id="scan_for_models", +    responses={ +        200: {"description": "Directory scanned successfully"}, +        400: {"description": "Invalid directory path"}, +    }, +    status_code=200, +    response_model=List[FoundModel], +) +async def scan_for_models( +    scan_path: str = Query(description="Directory path to search for models", default=None), +) -> List[FoundModel]: +    path = pathlib.Path(scan_path) +    if not scan_path or not path.is_dir(): +        raise HTTPException( +            status_code=400, +            detail=f"The search path '{scan_path}' does not exist or is not a directory", +        ) + +    search = ModelSearch() +    try: +        found_model_paths = search.search(path) +        models_path = ApiDependencies.invoker.services.configuration.models_path + +        # If the search path includes the main models directory, we need to exclude core models from the list. +        # TODO(MM2): Core models should be handled by the model manager so we can determine if they are installed +        # without needing to crawl the filesystem. +        core_models_path = pathlib.Path(models_path, "core").resolve() +        non_core_model_paths = [p for p in found_model_paths if not p.is_relative_to(core_models_path)] + +        installed_models = ApiDependencies.invoker.services.model_manager.store.search_by_attr() +        resolved_installed_model_paths: list[str] = [] +        installed_model_sources: list[str] = [] + +        # This call lists all installed models. +        for model in installed_models: +            path = pathlib.Path(model.path) +            # If the model has a source, we need to add it to the list of installed sources. +            if model.source: +                installed_model_sources.append(model.source) +            # If the path is not absolute, that means it is in the app models directory, and we need to join it with +            # the models path before resolving. +            if not path.is_absolute(): +                resolved_installed_model_paths.append(str(pathlib.Path(models_path, path).resolve())) +                continue +            resolved_installed_model_paths.append(str(path.resolve())) + +        scan_results: list[FoundModel] = [] + +        # Check if the model is installed by comparing the resolved paths, appending to the scan result. 
+ for p in non_core_model_paths: + path = str(p) + is_installed = path in resolved_installed_model_paths or path in installed_model_sources + found_model = FoundModel(path=path, is_installed=is_installed) + scan_results.append(found_model) + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"An error occurred while searching the directory: {e}", + ) + return scan_results + + +@model_manager_router.get( + "/tags/search", + operation_id="search_by_metadata_tags", +) +async def search_by_metadata_tags( + tags: Set[str] = Query(default=None, description="Tags to search for"), +) -> ModelsList: + """Get a list of models.""" + record_store = ApiDependencies.invoker.services.model_manager.store + results = record_store.search_by_metadata_tag(tags) + return ModelsList(models=results) + + +@model_manager_router.patch( + "/i/{key}", + operation_id="update_model_record", + responses={ + 200: { + "description": "The model was updated successfully", + "content": {"application/json": {"example": example_model_config}}, + }, + 400: {"description": "Bad request"}, + 404: {"description": "The model could not be found"}, + 409: {"description": "There is already a model corresponding to the new name"}, + }, + status_code=200, +) +async def update_model_record( + key: Annotated[str, Path(description="Unique key of model")], + info: Annotated[ + AnyModelConfig, Body(description="Model config", discriminator="type", example=example_model_input) + ], +) -> AnyModelConfig: + """Update model contents with a new config. If the model name or base fields are changed, then the model is renamed.""" + logger = ApiDependencies.invoker.services.logger + record_store = ApiDependencies.invoker.services.model_manager.store + try: + model_response: AnyModelConfig = record_store.update_model(key, config=info) + logger.info(f"Updated model: {key}") + except UnknownModelException as e: + raise HTTPException(status_code=404, detail=str(e)) + except ValueError as e: + logger.error(str(e)) + raise HTTPException(status_code=409, detail=str(e)) + return model_response + + +@model_manager_router.delete( + "/i/{key}", + operation_id="del_model_record", + responses={ + 204: {"description": "Model deleted successfully"}, + 404: {"description": "Model not found"}, + }, + status_code=204, +) +async def del_model_record( + key: str = Path(description="Unique key of model to remove from model registry."), +) -> Response: + """ + Delete model record from database. + + The configuration record will be removed. The corresponding weights files will be + deleted as well if they reside within the InvokeAI "models" directory. 
+    """ +    logger = ApiDependencies.invoker.services.logger + +    try: +        installer = ApiDependencies.invoker.services.model_manager.install +        installer.delete(key) +        logger.info(f"Deleted model: {key}") +        return Response(status_code=204) +    except UnknownModelException as e: +        logger.error(str(e)) +        raise HTTPException(status_code=404, detail=str(e)) + + +@model_manager_router.post( +    "/i/", +    operation_id="add_model_record", +    responses={ +        201: { +            "description": "The model added successfully", +            "content": {"application/json": {"example": example_model_config}}, +        }, +        409: {"description": "There is already a model corresponding to this path or repo_id"}, +        415: {"description": "Unrecognized file/folder format"}, +    }, +    status_code=201, +) +async def add_model_record( +    config: Annotated[ +        AnyModelConfig, Body(description="Model config", discriminator="type", example=example_model_input) +    ], +) -> AnyModelConfig: +    """Add a model using the configuration information appropriate for its type.""" +    logger = ApiDependencies.invoker.services.logger +    record_store = ApiDependencies.invoker.services.model_manager.store +    if config.key == "": +        config.key = sha1(randbytes(100)).hexdigest() +        logger.info(f"Created model {config.key} for {config.name}") +    try: +        record_store.add_model(config.key, config) +    except DuplicateModelException as e: +        logger.error(str(e)) +        raise HTTPException(status_code=409, detail=str(e)) +    except InvalidModelException as e: +        logger.error(str(e)) +        raise HTTPException(status_code=415) + +    # now fetch it out +    result: AnyModelConfig = record_store.get_model(config.key) +    return result + + +@model_manager_router.post( +    "/install", +    operation_id="install_model", +    responses={ +        201: {"description": "The model imported successfully"}, +        415: {"description": "Unrecognized file/folder format"}, +        424: {"description": "The model appeared to import successfully, but could not be found in the model manager"}, +        409: {"description": "There is already a model corresponding to this path or repo_id"}, +    }, +    status_code=201, +) +async def install_model( +    source: str = Query(description="Model source to install, can be a local path, repo_id, or remote URL"), +    # TODO(MM2): Can we type this? +    config: Optional[Dict[str, Any]] = Body( +        description="Dict of fields that override auto-probed values in the model config record, such as name, description and prediction_type ", +        default=None, +        example={"name": "string", "description": "string"}, +    ), +    access_token: Optional[str] = None, +) -> ModelInstallJob: +    """Install a model using a string identifier. + +    `source` can be any of the following. + +    1. A path on the local filesystem ('C:\\users\\fred\\model.safetensors') +    2. A URL pointing to a single downloadable model file +    3. A HuggingFace repo_id with any of the following formats: +       - model/name +       - model/name:fp16:vae +       - model/name::vae -- use default precision +       - model/name:fp16:path/to/model.safetensors +       - model/name::path/to/model.safetensors + +    `config` is an optional dict containing model configuration values that will override +    the ones that are probed automatically. + +    `access_token` is an optional access token for use with URLs that require +    authentication. + +    Models will be downloaded, probed, configured and installed in a +    series of background threads. The return object has a `status` attribute +    that can be used to monitor progress. 
+ + See the documentation for `import_model_record` for more information on + interpreting the job information returned by this route. + """ + logger = ApiDependencies.invoker.services.logger + + try: + installer = ApiDependencies.invoker.services.model_manager.install + result: ModelInstallJob = installer.heuristic_import( + source=source, + config=config, + access_token=access_token, + ) + logger.info(f"Started installation of {source}") + except UnknownModelException as e: + logger.error(str(e)) + raise HTTPException(status_code=424, detail=str(e)) + except InvalidModelException as e: + logger.error(str(e)) + raise HTTPException(status_code=415) + except ValueError as e: + logger.error(str(e)) + raise HTTPException(status_code=409, detail=str(e)) + return result + + +@model_manager_router.get( + "/import", + operation_id="list_model_install_jobs", +) +async def list_model_install_jobs() -> List[ModelInstallJob]: + """Return the list of model install jobs. + + Install jobs have a numeric `id`, a `status`, and other fields that provide information on + the nature of the job and its progress. The `status` is one of: + + * "waiting" -- Job is waiting in the queue to run + * "downloading" -- Model file(s) are downloading + * "running" -- Model has downloaded and the model probing and registration process is running + * "completed" -- Installation completed successfully + * "error" -- An error occurred. Details will be in the "error_type" and "error" fields. + * "cancelled" -- Job was cancelled before completion. + + Once completed, information about the model such as its size, base + model, type, and metadata can be retrieved from the `config_out` + field. For multi-file models such as diffusers, information on individual files + can be retrieved from `download_parts`. + + See the example and schema below for more information. + """ + jobs: List[ModelInstallJob] = ApiDependencies.invoker.services.model_manager.install.list_jobs() + return jobs + + +@model_manager_router.get( + "/import/{id}", + operation_id="get_model_install_job", + responses={ + 200: {"description": "Success"}, + 404: {"description": "No such job"}, + }, +) +async def get_model_install_job(id: int = Path(description="Model install id")) -> ModelInstallJob: + """ + Return model install job corresponding to the given source. See the documentation for 'List Model Install Jobs' + for information on the format of the return value. 
+ """ + try: + result: ModelInstallJob = ApiDependencies.invoker.services.model_manager.install.get_job_by_id(id) + return result + except ValueError as e: + raise HTTPException(status_code=404, detail=str(e)) + + +@model_manager_router.delete( + "/import/{id}", + operation_id="cancel_model_install_job", + responses={ + 201: {"description": "The job was cancelled successfully"}, + 415: {"description": "No such job"}, + }, + status_code=201, +) +async def cancel_model_install_job(id: int = Path(description="Model install job ID")) -> None: + """Cancel the model install job(s) corresponding to the given job ID.""" + installer = ApiDependencies.invoker.services.model_manager.install + try: + job = installer.get_job_by_id(id) + except ValueError as e: + raise HTTPException(status_code=415, detail=str(e)) + installer.cancel_job(job) + + +@model_manager_router.patch( + "/import", + operation_id="prune_model_install_jobs", + responses={ + 204: {"description": "All completed and errored jobs have been pruned"}, + 400: {"description": "Bad request"}, + }, +) +async def prune_model_install_jobs() -> Response: + """Prune all completed and errored jobs from the install job list.""" + ApiDependencies.invoker.services.model_manager.install.prune_jobs() + return Response(status_code=204) + + +@model_manager_router.patch( + "/sync", + operation_id="sync_models_to_config", + responses={ + 204: {"description": "Model config record database resynced with files on disk"}, + 400: {"description": "Bad request"}, + }, +) +async def sync_models_to_config() -> Response: + """ + Traverse the models and autoimport directories. + + Model files without a corresponding + record in the database are added. Orphan records without a models file are deleted. + """ + ApiDependencies.invoker.services.model_manager.install.sync_to_config() + return Response(status_code=204) + + +@model_manager_router.put( + "/convert/{key}", + operation_id="convert_model", + responses={ + 200: { + "description": "Model converted successfully", + "content": {"application/json": {"example": example_model_config}}, + }, + 400: {"description": "Bad request"}, + 404: {"description": "Model not found"}, + 409: {"description": "There is already a model registered at this location"}, + }, +) +async def convert_model( + key: str = Path(description="Unique key of the safetensors main model to convert to diffusers format."), +) -> AnyModelConfig: + """ + Permanently convert a model into diffusers format, replacing the safetensors version. + Note that during the conversion process the key and model hash will change. + The return value is the model configuration for the converted model. 
+    """ +    model_manager = ApiDependencies.invoker.services.model_manager +    logger = ApiDependencies.invoker.services.logger +    loader = ApiDependencies.invoker.services.model_manager.load +    store = ApiDependencies.invoker.services.model_manager.store +    installer = ApiDependencies.invoker.services.model_manager.install + +    try: +        model_config = store.get_model(key) +    except UnknownModelException as e: +        logger.error(str(e)) +        raise HTTPException(status_code=424, detail=str(e)) + +    if not isinstance(model_config, MainCheckpointConfig): +        logger.error(f"The model with key {key} is not a main checkpoint model.") +        raise HTTPException(400, f"The model with key {key} is not a main checkpoint model.") + +    # loading the model will convert it into a cached diffusers file +    model_manager.load_model_by_config(model_config, submodel_type=SubModelType.Scheduler) + +    # Get the path of the converted model from the loader +    cache_path = loader.convert_cache.cache_path(key) +    assert cache_path.exists() + +    # temporarily rename the original safetensors file so that there is no naming conflict +    original_name = model_config.name +    model_config.name = f"{original_name}.DELETE" +    store.update_model(key, config=model_config) + +    # install the diffusers +    try: +        new_key = installer.install_path( +            cache_path, +            config={ +                "name": original_name, +                "description": model_config.description, +                "original_hash": model_config.original_hash, +                "source": model_config.source, +            }, +        ) +    except DuplicateModelException as e: +        logger.error(str(e)) +        raise HTTPException(status_code=409, detail=str(e)) + +    # get the original metadata +    if orig_metadata := store.get_metadata(key): +        store.metadata_store.add_metadata(new_key, orig_metadata) + +    # delete the original safetensors file +    installer.delete(key) + +    # delete the cached version +    shutil.rmtree(cache_path) + +    # return the config record for the new diffusers directory +    new_config: AnyModelConfig = store.get_model(new_key) +    return new_config + + +@model_manager_router.put( +    "/merge", +    operation_id="merge", +    responses={ +        200: { +            "description": "Model converted successfully", +            "content": {"application/json": {"example": example_model_config}}, +        }, +        400: {"description": "Bad request"}, +        404: {"description": "Model not found"}, +        409: {"description": "There is already a model registered at this location"}, +    }, +) +async def merge( +    keys: List[str] = Body(description="Keys for two to three models to merge", min_length=2, max_length=3), +    merged_model_name: Optional[str] = Body(description="Name of destination model", default=None), +    alpha: float = Body(description="Alpha weighting strength to apply to 2d and 3d models", default=0.5), +    force: bool = Body( +        description="Force merging of models created with different versions of diffusers", +        default=False, +    ), +    interp: Optional[MergeInterpolationMethod] = Body(description="Interpolation method", default=None), +    merge_dest_directory: Optional[str] = Body( +        description="Save the merged model to the designated directory (with 'merged_model_name' appended)", +        default=None, +    ), +) -> AnyModelConfig: +    """ +    Merge diffusers models. The process is controlled by a set of parameters provided in the body of the request. +    ``` +    Argument                Description [default] +    --------                ---------------------- +    keys                    List of 2-3 model keys to merge together. All models must use the same base type. +    merged_model_name       Name for the merged model [Concat model names] +    alpha                   Alpha value (0.0-1.0). 
Higher values give more weight to the second model [0.5] + force If true, force the merge even if the models were generated by different versions of the diffusers library [False] + interp Interpolation method. One of "weighted_sum", "sigmoid", "inv_sigmoid" or "add_difference" [weighted_sum] + merge_dest_directory Specify a directory to store the merged model in [models directory] + ``` + """ + logger = ApiDependencies.invoker.services.logger + try: + logger.info(f"Merging models: {keys} into {merge_dest_directory or ''}/{merged_model_name}") + dest = pathlib.Path(merge_dest_directory) if merge_dest_directory else None + installer = ApiDependencies.invoker.services.model_manager.install + merger = ModelMerger(installer) + model_names = [installer.record_store.get_model(x).name for x in keys] + response = merger.merge_diffusion_models_and_save( + model_keys=keys, + merged_model_name=merged_model_name or "+".join(model_names), + alpha=alpha, + interp=interp, + force=force, + merge_dest_directory=dest, + ) + except UnknownModelException: + raise HTTPException( + status_code=404, + detail=f"One or more of the models '{keys}' not found", + ) + except ValueError as e: + raise HTTPException(status_code=400, detail=str(e)) + return response diff --git a/invokeai/app/api/routers/model_records.py b/invokeai/app/api/routers/model_records.py deleted file mode 100644 index f9a3e40898..0000000000 --- a/invokeai/app/api/routers/model_records.py +++ /dev/null @@ -1,472 +0,0 @@ -# Copyright (c) 2023 Lincoln D. Stein -"""FastAPI route for model configuration records.""" - -import pathlib -from hashlib import sha1 -from random import randbytes -from typing import Any, Dict, List, Optional, Set - -from fastapi import Body, Path, Query, Response -from fastapi.routing import APIRouter -from pydantic import BaseModel, ConfigDict -from starlette.exceptions import HTTPException -from typing_extensions import Annotated - -from invokeai.app.services.model_install import ModelInstallJob, ModelSource -from invokeai.app.services.model_records import ( - DuplicateModelException, - InvalidModelException, - ModelRecordOrderBy, - ModelSummary, - UnknownModelException, -) -from invokeai.app.services.shared.pagination import PaginatedResults -from invokeai.backend.model_manager.config import ( - AnyModelConfig, - BaseModelType, - ModelFormat, - ModelType, -) -from invokeai.backend.model_manager.merge import MergeInterpolationMethod, ModelMerger -from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata - -from ..dependencies import ApiDependencies - -model_records_router = APIRouter(prefix="/v1/model/record", tags=["model_manager_v2_unstable"]) - - -class ModelsList(BaseModel): - """Return list of configs.""" - - models: List[AnyModelConfig] - - model_config = ConfigDict(use_enum_values=True) - - -class ModelTagSet(BaseModel): - """Return tags for a set of models.""" - - key: str - name: str - author: str - tags: Set[str] - - -@model_records_router.get( - "/", - operation_id="list_model_records", -) -async def list_model_records( - base_models: Optional[List[BaseModelType]] = Query(default=None, description="Base models to include"), - model_type: Optional[ModelType] = Query(default=None, description="The type of model to get"), - model_name: Optional[str] = Query(default=None, description="Exact match on the name of the model"), - model_format: Optional[ModelFormat] = Query( - default=None, description="Exact match on the format of the model (e.g. 
'diffusers')" - ), -) -> ModelsList: - """Get a list of models.""" - record_store = ApiDependencies.invoker.services.model_records - found_models: list[AnyModelConfig] = [] - if base_models: - for base_model in base_models: - found_models.extend( - record_store.search_by_attr( - base_model=base_model, model_type=model_type, model_name=model_name, model_format=model_format - ) - ) - else: - found_models.extend( - record_store.search_by_attr(model_type=model_type, model_name=model_name, model_format=model_format) - ) - return ModelsList(models=found_models) - - -@model_records_router.get( - "/i/{key}", - operation_id="get_model_record", - responses={ - 200: {"description": "Success"}, - 400: {"description": "Bad request"}, - 404: {"description": "The model could not be found"}, - }, -) -async def get_model_record( - key: str = Path(description="Key of the model record to fetch."), -) -> AnyModelConfig: - """Get a model record""" - record_store = ApiDependencies.invoker.services.model_records - try: - return record_store.get_model(key) - except UnknownModelException as e: - raise HTTPException(status_code=404, detail=str(e)) - - -@model_records_router.get("/meta", operation_id="list_model_summary") -async def list_model_summary( - page: int = Query(default=0, description="The page to get"), - per_page: int = Query(default=10, description="The number of models per page"), - order_by: ModelRecordOrderBy = Query(default=ModelRecordOrderBy.Default, description="The attribute to order by"), -) -> PaginatedResults[ModelSummary]: - """Gets a page of model summary data.""" - return ApiDependencies.invoker.services.model_records.list_models(page=page, per_page=per_page, order_by=order_by) - - -@model_records_router.get( - "/meta/i/{key}", - operation_id="get_model_metadata", - responses={ - 200: {"description": "Success"}, - 400: {"description": "Bad request"}, - 404: {"description": "No metadata available"}, - }, -) -async def get_model_metadata( - key: str = Path(description="Key of the model repo metadata to fetch."), -) -> Optional[AnyModelRepoMetadata]: - """Get a model metadata object.""" - record_store = ApiDependencies.invoker.services.model_records - result = record_store.get_metadata(key) - if not result: - raise HTTPException(status_code=404, detail="No metadata for a model with this key") - return result - - -@model_records_router.get( - "/tags", - operation_id="list_tags", -) -async def list_tags() -> Set[str]: - """Get a unique set of all the model tags.""" - record_store = ApiDependencies.invoker.services.model_records - return record_store.list_tags() - - -@model_records_router.get( - "/tags/search", - operation_id="search_by_metadata_tags", -) -async def search_by_metadata_tags( - tags: Set[str] = Query(default=None, description="Tags to search for"), -) -> ModelsList: - """Get a list of models.""" - record_store = ApiDependencies.invoker.services.model_records - results = record_store.search_by_metadata_tag(tags) - return ModelsList(models=results) - - -@model_records_router.patch( - "/i/{key}", - operation_id="update_model_record", - responses={ - 200: {"description": "The model was updated successfully"}, - 400: {"description": "Bad request"}, - 404: {"description": "The model could not be found"}, - 409: {"description": "There is already a model corresponding to the new name"}, - }, - status_code=200, - response_model=AnyModelConfig, -) -async def update_model_record( - key: Annotated[str, Path(description="Unique key of model")], - info: Annotated[AnyModelConfig, 
Body(description="Model config", discriminator="type")], -) -> AnyModelConfig: - """Update model contents with a new config. If the model name or base fields are changed, then the model is renamed.""" - logger = ApiDependencies.invoker.services.logger - record_store = ApiDependencies.invoker.services.model_records - try: - model_response = record_store.update_model(key, config=info) - logger.info(f"Updated model: {key}") - except UnknownModelException as e: - raise HTTPException(status_code=404, detail=str(e)) - except ValueError as e: - logger.error(str(e)) - raise HTTPException(status_code=409, detail=str(e)) - return model_response - - -@model_records_router.delete( - "/i/{key}", - operation_id="del_model_record", - responses={ - 204: {"description": "Model deleted successfully"}, - 404: {"description": "Model not found"}, - }, - status_code=204, -) -async def del_model_record( - key: str = Path(description="Unique key of model to remove from model registry."), -) -> Response: - """ - Delete model record from database. - - The configuration record will be removed. The corresponding weights files will be - deleted as well if they reside within the InvokeAI "models" directory. - """ - logger = ApiDependencies.invoker.services.logger - - try: - installer = ApiDependencies.invoker.services.model_install - installer.delete(key) - logger.info(f"Deleted model: {key}") - return Response(status_code=204) - except UnknownModelException as e: - logger.error(str(e)) - raise HTTPException(status_code=404, detail=str(e)) - - -@model_records_router.post( - "/i/", - operation_id="add_model_record", - responses={ - 201: {"description": "The model added successfully"}, - 409: {"description": "There is already a model corresponding to this path or repo_id"}, - 415: {"description": "Unrecognized file/folder format"}, - }, - status_code=201, -) -async def add_model_record( - config: Annotated[AnyModelConfig, Body(description="Model config", discriminator="type")], -) -> AnyModelConfig: - """Add a model using the configuration information appropriate for its type.""" - logger = ApiDependencies.invoker.services.logger - record_store = ApiDependencies.invoker.services.model_records - if config.key == "": - config.key = sha1(randbytes(100)).hexdigest() - logger.info(f"Created model {config.key} for {config.name}") - try: - record_store.add_model(config.key, config) - except DuplicateModelException as e: - logger.error(str(e)) - raise HTTPException(status_code=409, detail=str(e)) - except InvalidModelException as e: - logger.error(str(e)) - raise HTTPException(status_code=415) - - # now fetch it out - return record_store.get_model(config.key) - - -@model_records_router.post( - "/import", - operation_id="import_model_record", - responses={ - 201: {"description": "The model imported successfully"}, - 415: {"description": "Unrecognized file/folder format"}, - 424: {"description": "The model appeared to import successfully, but could not be found in the model manager"}, - 409: {"description": "There is already a model corresponding to this path or repo_id"}, - }, - status_code=201, -) -async def import_model( - source: ModelSource, - config: Optional[Dict[str, Any]] = Body( - description="Dict of fields that override auto-probed values in the model config record, such as name, description and prediction_type ", - default=None, - ), -) -> ModelInstallJob: - """Add a model using its local path, repo_id, or remote URL. - - Models will be downloaded, probed, configured and installed in a - series of background threads. 
The return object has `status` attribute - that can be used to monitor progress. - - The source object is a discriminated Union of LocalModelSource, - HFModelSource and URLModelSource. Set the "type" field to the - appropriate value: - - * To install a local path using LocalModelSource, pass a source of form: - `{ - "type": "local", - "path": "/path/to/model", - "inplace": false - }` - The "inplace" flag, if true, will register the model in place in its - current filesystem location. Otherwise, the model will be copied - into the InvokeAI models directory. - - * To install a HuggingFace repo_id using HFModelSource, pass a source of form: - `{ - "type": "hf", - "repo_id": "stabilityai/stable-diffusion-2.0", - "variant": "fp16", - "subfolder": "vae", - "access_token": "f5820a918aaf01" - }` - The `variant`, `subfolder` and `access_token` fields are optional. - - * To install a remote model using an arbitrary URL, pass: - `{ - "type": "url", - "url": "http://www.civitai.com/models/123456", - "access_token": "f5820a918aaf01" - }` - The `access_token` field is optonal - - The model's configuration record will be probed and filled in - automatically. To override the default guesses, pass "metadata" - with a Dict containing the attributes you wish to override. - - Installation occurs in the background. Either use list_model_install_jobs() - to poll for completion, or listen on the event bus for the following events: - - "model_install_running" - "model_install_completed" - "model_install_error" - - On successful completion, the event's payload will contain the field "key" - containing the installed ID of the model. On an error, the event's payload - will contain the fields "error_type" and "error" describing the nature of the - error and its traceback, respectively. 
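As a concrete illustration of the install-and-poll flow this docstring describes, here is a minimal client sketch. It assumes a local server at http://localhost:9090 with this router mounted at /api/v1/model/record (its prefix at this point in the history); the repo_id and the terminal status strings are illustrative, not part of the diff.

```python
import time

import requests

BASE = "http://localhost:9090/api/v1/model/record"

# An HFModelSource body; "variant", "subfolder" and "access_token" are optional.
source = {"type": "hf", "repo_id": "stabilityai/stable-diffusion-2.0", "variant": "fp16"}

# POST /import kicks off a background install and returns the ModelInstallJob.
job = requests.post(f"{BASE}/import", json={"source": source}).json()
job_id = job["id"]

# Installation runs in background threads, so poll the job list (listening
# for the model_install_* events on the socket would work equally well).
status = "running"
while status in ("waiting", "running"):  # assumed status values
    time.sleep(2)
    jobs = requests.get(f"{BASE}/import").json()
    status = next(j["status"] for j in jobs if j["id"] == job_id)
print(f"install finished with status {status!r}")
```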
- - """ - logger = ApiDependencies.invoker.services.logger - - try: - installer = ApiDependencies.invoker.services.model_install - result: ModelInstallJob = installer.import_model( - source=source, - config=config, - ) - logger.info(f"Started installation of {source}") - except UnknownModelException as e: - logger.error(str(e)) - raise HTTPException(status_code=424, detail=str(e)) - except InvalidModelException as e: - logger.error(str(e)) - raise HTTPException(status_code=415) - except ValueError as e: - logger.error(str(e)) - raise HTTPException(status_code=409, detail=str(e)) - return result - - -@model_records_router.get( - "/import", - operation_id="list_model_install_jobs", -) -async def list_model_install_jobs() -> List[ModelInstallJob]: - """Return list of model install jobs.""" - jobs: List[ModelInstallJob] = ApiDependencies.invoker.services.model_install.list_jobs() - return jobs - - -@model_records_router.get( - "/import/{id}", - operation_id="get_model_install_job", - responses={ - 200: {"description": "Success"}, - 404: {"description": "No such job"}, - }, -) -async def get_model_install_job(id: int = Path(description="Model install id")) -> ModelInstallJob: - """Return model install job corresponding to the given source.""" - try: - return ApiDependencies.invoker.services.model_install.get_job_by_id(id) - except ValueError as e: - raise HTTPException(status_code=404, detail=str(e)) - - -@model_records_router.delete( - "/import/{id}", - operation_id="cancel_model_install_job", - responses={ - 201: {"description": "The job was cancelled successfully"}, - 415: {"description": "No such job"}, - }, - status_code=201, -) -async def cancel_model_install_job(id: int = Path(description="Model install job ID")) -> None: - """Cancel the model install job(s) corresponding to the given job ID.""" - installer = ApiDependencies.invoker.services.model_install - try: - job = installer.get_job_by_id(id) - except ValueError as e: - raise HTTPException(status_code=415, detail=str(e)) - installer.cancel_job(job) - - -@model_records_router.patch( - "/import", - operation_id="prune_model_install_jobs", - responses={ - 204: {"description": "All completed and errored jobs have been pruned"}, - 400: {"description": "Bad request"}, - }, -) -async def prune_model_install_jobs() -> Response: - """Prune all completed and errored jobs from the install job list.""" - ApiDependencies.invoker.services.model_install.prune_jobs() - return Response(status_code=204) - - -@model_records_router.patch( - "/sync", - operation_id="sync_models_to_config", - responses={ - 204: {"description": "Model config record database resynced with files on disk"}, - 400: {"description": "Bad request"}, - }, -) -async def sync_models_to_config() -> Response: - """ - Traverse the models and autoimport directories. - - Model files without a corresponding - record in the database are added. Orphan records without a models file are deleted. 
- """ - ApiDependencies.invoker.services.model_install.sync_to_config() - return Response(status_code=204) - - -@model_records_router.put( - "/merge", - operation_id="merge", -) -async def merge( - keys: List[str] = Body(description="Keys for two to three models to merge", min_length=2, max_length=3), - merged_model_name: Optional[str] = Body(description="Name of destination model", default=None), - alpha: float = Body(description="Alpha weighting strength to apply to 2d and 3d models", default=0.5), - force: bool = Body( - description="Force merging of models created with different versions of diffusers", - default=False, - ), - interp: Optional[MergeInterpolationMethod] = Body(description="Interpolation method", default=None), - merge_dest_directory: Optional[str] = Body( - description="Save the merged model to the designated directory (with 'merged_model_name' appended)", - default=None, - ), -) -> AnyModelConfig: - """ - Merge diffusers models. - - keys: List of 2-3 model keys to merge together. All models must use the same base type. - merged_model_name: Name for the merged model [Concat model names] - alpha: Alpha value (0.0-1.0). Higher values give more weight to the second model [0.5] - force: If true, force the merge even if the models were generated by different versions of the diffusers library [False] - interp: Interpolation method. One of "weighted_sum", "sigmoid", "inv_sigmoid" or "add_difference" [weighted_sum] - merge_dest_directory: Specify a directory to store the merged model in [models directory] - """ - print(f"here i am, keys={keys}") - logger = ApiDependencies.invoker.services.logger - try: - logger.info(f"Merging models: {keys} into {merge_dest_directory or ''}/{merged_model_name}") - dest = pathlib.Path(merge_dest_directory) if merge_dest_directory else None - installer = ApiDependencies.invoker.services.model_install - merger = ModelMerger(installer) - model_names = [installer.record_store.get_model(x).name for x in keys] - response = merger.merge_diffusion_models_and_save( - model_keys=keys, - merged_model_name=merged_model_name or "+".join(model_names), - alpha=alpha, - interp=interp, - force=force, - merge_dest_directory=dest, - ) - except UnknownModelException: - raise HTTPException( - status_code=404, - detail=f"One or more of the models '{keys}' not found", - ) - except ValueError as e: - raise HTTPException(status_code=400, detail=str(e)) - return response diff --git a/invokeai/app/api/routers/models.py b/invokeai/app/api/routers/models.py deleted file mode 100644 index 8f83820cf8..0000000000 --- a/invokeai/app/api/routers/models.py +++ /dev/null @@ -1,427 +0,0 @@ -# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654), 2023 Kent Keirsey (https://github.com/hipsterusername), 2023 Lincoln D. 
Stein - -import pathlib -from typing import Annotated, List, Literal, Optional, Union - -from fastapi import Body, Path, Query, Response -from fastapi.routing import APIRouter -from pydantic import BaseModel, ConfigDict, Field, TypeAdapter -from starlette.exceptions import HTTPException - -from invokeai.backend import BaseModelType, ModelType -from invokeai.backend.model_management import MergeInterpolationMethod -from invokeai.backend.model_management.models import ( - OPENAPI_MODEL_CONFIGS, - InvalidModelException, - ModelNotFoundException, - SchedulerPredictionType, -) - -from ..dependencies import ApiDependencies - -models_router = APIRouter(prefix="/v1/models", tags=["models"]) - -UpdateModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] -UpdateModelResponseValidator = TypeAdapter(UpdateModelResponse) - -ImportModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] -ImportModelResponseValidator = TypeAdapter(ImportModelResponse) - -ConvertModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] -ConvertModelResponseValidator = TypeAdapter(ConvertModelResponse) - -MergeModelResponse = Union[tuple(OPENAPI_MODEL_CONFIGS)] -ImportModelAttributes = Union[tuple(OPENAPI_MODEL_CONFIGS)] - - -class ModelsList(BaseModel): - models: list[Union[tuple(OPENAPI_MODEL_CONFIGS)]] - - model_config = ConfigDict(use_enum_values=True) - - -ModelsListValidator = TypeAdapter(ModelsList) - - -@models_router.get( - "/", - operation_id="list_models", - responses={200: {"model": ModelsList}}, -) -async def list_models( - base_models: Optional[List[BaseModelType]] = Query(default=None, description="Base models to include"), - model_type: Optional[ModelType] = Query(default=None, description="The type of model to get"), -) -> ModelsList: - """Gets a list of models""" - if base_models and len(base_models) > 0: - models_raw = [] - for base_model in base_models: - models_raw.extend(ApiDependencies.invoker.services.model_manager.list_models(base_model, model_type)) - else: - models_raw = ApiDependencies.invoker.services.model_manager.list_models(None, model_type) - models = ModelsListValidator.validate_python({"models": models_raw}) - return models - - -@models_router.patch( - "/{base_model}/{model_type}/{model_name}", - operation_id="update_model", - responses={ - 200: {"description": "The model was updated successfully"}, - 400: {"description": "Bad request"}, - 404: {"description": "The model could not be found"}, - 409: {"description": "There is already a model corresponding to the new name"}, - }, - status_code=200, - response_model=UpdateModelResponse, -) -async def update_model( - base_model: BaseModelType = Path(description="Base model"), - model_type: ModelType = Path(description="The type of model"), - model_name: str = Path(description="model name"), - info: Union[tuple(OPENAPI_MODEL_CONFIGS)] = Body(description="Model configuration"), -) -> UpdateModelResponse: - """Update model contents with a new config. 
If the model name or base fields are changed, then the model is renamed.""" - logger = ApiDependencies.invoker.services.logger - - try: - previous_info = ApiDependencies.invoker.services.model_manager.list_model( - model_name=model_name, - base_model=base_model, - model_type=model_type, - ) - - # rename operation requested - if info.model_name != model_name or info.base_model != base_model: - ApiDependencies.invoker.services.model_manager.rename_model( - base_model=base_model, - model_type=model_type, - model_name=model_name, - new_name=info.model_name, - new_base=info.base_model, - ) - logger.info(f"Successfully renamed {base_model.value}/{model_name}=>{info.base_model}/{info.model_name}") - # update information to support an update of attributes - model_name = info.model_name - base_model = info.base_model - new_info = ApiDependencies.invoker.services.model_manager.list_model( - model_name=model_name, - base_model=base_model, - model_type=model_type, - ) - if new_info.get("path") != previous_info.get( - "path" - ): # model manager moved model path during rename - don't overwrite it - info.path = new_info.get("path") - - # replace empty string values with None/null to avoid phenomenon of vae: '' - info_dict = info.model_dump() - info_dict = {x: info_dict[x] if info_dict[x] else None for x in info_dict.keys()} - - ApiDependencies.invoker.services.model_manager.update_model( - model_name=model_name, - base_model=base_model, - model_type=model_type, - model_attributes=info_dict, - ) - - model_raw = ApiDependencies.invoker.services.model_manager.list_model( - model_name=model_name, - base_model=base_model, - model_type=model_type, - ) - model_response = UpdateModelResponseValidator.validate_python(model_raw) - except ModelNotFoundException as e: - raise HTTPException(status_code=404, detail=str(e)) - except ValueError as e: - logger.error(str(e)) - raise HTTPException(status_code=409, detail=str(e)) - except Exception as e: - logger.error(str(e)) - raise HTTPException(status_code=400, detail=str(e)) - - return model_response - - -@models_router.post( - "/import", - operation_id="import_model", - responses={ - 201: {"description": "The model imported successfully"}, - 404: {"description": "The model could not be found"}, - 415: {"description": "Unrecognized file/folder format"}, - 424: {"description": "The model appeared to import successfully, but could not be found in the model manager"}, - 409: {"description": "There is already a model corresponding to this path or repo_id"}, - }, - status_code=201, - response_model=ImportModelResponse, -) -async def import_model( - location: str = Body(description="A model path, repo_id or URL to import"), - prediction_type: Optional[Literal["v_prediction", "epsilon", "sample"]] = Body( - description="Prediction type for SDv2 checkpoints and rare SDv1 checkpoints", - default=None, - ), -) -> ImportModelResponse: - """Add a model using its local path, repo_id, or remote URL. 
Model characteristics will be probed and configured automatically""" - - location = location.strip("\"' ") - items_to_import = {location} - prediction_types = {x.value: x for x in SchedulerPredictionType} - logger = ApiDependencies.invoker.services.logger - - try: - installed_models = ApiDependencies.invoker.services.model_manager.heuristic_import( - items_to_import=items_to_import, - prediction_type_helper=lambda x: prediction_types.get(prediction_type), - ) - info = installed_models.get(location) - - if not info: - logger.error("Import failed") - raise HTTPException(status_code=415) - - logger.info(f"Successfully imported {location}, got {info}") - model_raw = ApiDependencies.invoker.services.model_manager.list_model( - model_name=info.name, base_model=info.base_model, model_type=info.model_type - ) - return ImportModelResponseValidator.validate_python(model_raw) - - except ModelNotFoundException as e: - logger.error(str(e)) - raise HTTPException(status_code=404, detail=str(e)) - except InvalidModelException as e: - logger.error(str(e)) - raise HTTPException(status_code=415) - except ValueError as e: - logger.error(str(e)) - raise HTTPException(status_code=409, detail=str(e)) - - -@models_router.post( - "/add", - operation_id="add_model", - responses={ - 201: {"description": "The model added successfully"}, - 404: {"description": "The model could not be found"}, - 424: {"description": "The model appeared to add successfully, but could not be found in the model manager"}, - 409: {"description": "There is already a model corresponding to this path or repo_id"}, - }, - status_code=201, - response_model=ImportModelResponse, -) -async def add_model( - info: Union[tuple(OPENAPI_MODEL_CONFIGS)] = Body(description="Model configuration"), -) -> ImportModelResponse: - """Add a model using the configuration information appropriate for its type. 
Only local models can be added by path""" - - logger = ApiDependencies.invoker.services.logger - - try: - ApiDependencies.invoker.services.model_manager.add_model( - info.model_name, - info.base_model, - info.model_type, - model_attributes=info.model_dump(), - ) - logger.info(f"Successfully added {info.model_name}") - model_raw = ApiDependencies.invoker.services.model_manager.list_model( - model_name=info.model_name, - base_model=info.base_model, - model_type=info.model_type, - ) - return ImportModelResponseValidator.validate_python(model_raw) - except ModelNotFoundException as e: - logger.error(str(e)) - raise HTTPException(status_code=404, detail=str(e)) - except ValueError as e: - logger.error(str(e)) - raise HTTPException(status_code=409, detail=str(e)) - - -@models_router.delete( - "/{base_model}/{model_type}/{model_name}", - operation_id="del_model", - responses={ - 204: {"description": "Model deleted successfully"}, - 404: {"description": "Model not found"}, - }, - status_code=204, - response_model=None, -) -async def delete_model( - base_model: BaseModelType = Path(description="Base model"), - model_type: ModelType = Path(description="The type of model"), - model_name: str = Path(description="model name"), -) -> Response: - """Delete Model""" - logger = ApiDependencies.invoker.services.logger - - try: - ApiDependencies.invoker.services.model_manager.del_model( - model_name, base_model=base_model, model_type=model_type - ) - logger.info(f"Deleted model: {model_name}") - return Response(status_code=204) - except ModelNotFoundException as e: - logger.error(str(e)) - raise HTTPException(status_code=404, detail=str(e)) - - -@models_router.put( - "/convert/{base_model}/{model_type}/{model_name}", - operation_id="convert_model", - responses={ - 200: {"description": "Model converted successfully"}, - 400: {"description": "Bad request"}, - 404: {"description": "Model not found"}, - }, - status_code=200, - response_model=ConvertModelResponse, -) -async def convert_model( - base_model: BaseModelType = Path(description="Base model"), - model_type: ModelType = Path(description="The type of model"), - model_name: str = Path(description="model name"), - convert_dest_directory: Optional[str] = Query( - default=None, description="Save the converted model to the designated directory" - ), -) -> ConvertModelResponse: - """Convert a checkpoint model into a diffusers model, optionally saving to the indicated destination directory, or `models` if none.""" - logger = ApiDependencies.invoker.services.logger - try: - logger.info(f"Converting model: {model_name}") - dest = pathlib.Path(convert_dest_directory) if convert_dest_directory else None - ApiDependencies.invoker.services.model_manager.convert_model( - model_name, - base_model=base_model, - model_type=model_type, - convert_dest_directory=dest, - ) - model_raw = ApiDependencies.invoker.services.model_manager.list_model( - model_name, base_model=base_model, model_type=model_type - ) - response = ConvertModelResponseValidator.validate_python(model_raw) - except ModelNotFoundException as e: - raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found: {str(e)}") - except ValueError as e: - raise HTTPException(status_code=400, detail=str(e)) - return response - - -@models_router.get( - "/search", - operation_id="search_for_models", - responses={ - 200: {"description": "Directory searched successfully"}, - 404: {"description": "Invalid directory path"}, - }, - status_code=200, - response_model=List[pathlib.Path], -) -async def 
search_for_models( - search_path: pathlib.Path = Query(description="Directory path to search for models"), -) -> List[pathlib.Path]: - if not search_path.is_dir(): - raise HTTPException( - status_code=404, - detail=f"The search path '{search_path}' does not exist or is not directory", - ) - return ApiDependencies.invoker.services.model_manager.search_for_models(search_path) - - -@models_router.get( - "/ckpt_confs", - operation_id="list_ckpt_configs", - responses={ - 200: {"description": "paths retrieved successfully"}, - }, - status_code=200, - response_model=List[pathlib.Path], -) -async def list_ckpt_configs() -> List[pathlib.Path]: - """Return a list of the legacy checkpoint configuration files stored in `ROOT/configs/stable-diffusion`, relative to ROOT.""" - return ApiDependencies.invoker.services.model_manager.list_checkpoint_configs() - - -@models_router.post( - "/sync", - operation_id="sync_to_config", - responses={ - 201: {"description": "synchronization successful"}, - }, - status_code=201, - response_model=bool, -) -async def sync_to_config() -> bool: - """Call after making changes to models.yaml, autoimport directories or models directory to synchronize - in-memory data structures with disk data structures.""" - ApiDependencies.invoker.services.model_manager.sync_to_config() - return True - - -# There's some weird pydantic-fastapi behaviour that requires this to be a separate class -# TODO: After a few updates, see if it works inside the route operation handler? -class MergeModelsBody(BaseModel): - model_names: List[str] = Field(description="model name", min_length=2, max_length=3) - merged_model_name: Optional[str] = Field(description="Name of destination model") - alpha: Optional[float] = Field(description="Alpha weighting strength to apply to 2d and 3d models", default=0.5) - interp: Optional[MergeInterpolationMethod] = Field(description="Interpolation method") - force: Optional[bool] = Field( - description="Force merging of models created with different versions of diffusers", - default=False, - ) - - merge_dest_directory: Optional[str] = Field( - description="Save the merged model to the designated directory (with 'merged_model_name' appended)", - default=None, - ) - - model_config = ConfigDict(protected_namespaces=()) - - -@models_router.put( - "/merge/{base_model}", - operation_id="merge_models", - responses={ - 200: {"description": "Model converted successfully"}, - 400: {"description": "Incompatible models"}, - 404: {"description": "One or more models not found"}, - }, - status_code=200, - response_model=MergeModelResponse, -) -async def merge_models( - body: Annotated[MergeModelsBody, Body(description="Model configuration", embed=True)], - base_model: BaseModelType = Path(description="Base model"), -) -> MergeModelResponse: - """Convert a checkpoint model into a diffusers model""" - logger = ApiDependencies.invoker.services.logger - try: - logger.info( - f"Merging models: {body.model_names} into {body.merge_dest_directory or ''}/{body.merged_model_name}" - ) - dest = pathlib.Path(body.merge_dest_directory) if body.merge_dest_directory else None - result = ApiDependencies.invoker.services.model_manager.merge_models( - model_names=body.model_names, - base_model=base_model, - merged_model_name=body.merged_model_name or "+".join(body.model_names), - alpha=body.alpha, - interp=body.interp, - force=body.force, - merge_dest_directory=dest, - ) - model_raw = ApiDependencies.invoker.services.model_manager.list_model( - result.name, - base_model=base_model, - 
model_type=ModelType.Main, - ) - response = ConvertModelResponseValidator.validate_python(model_raw) - except ModelNotFoundException: - raise HTTPException( - status_code=404, - detail=f"One or more of the models '{body.model_names}' not found", - ) - except ValueError as e: - raise HTTPException(status_code=400, detail=str(e)) - return response diff --git a/invokeai/app/api/routers/sessions.py b/invokeai/app/api/routers/sessions.py deleted file mode 100644 index fb850d0b2b..0000000000 --- a/invokeai/app/api/routers/sessions.py +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) - - -from fastapi import HTTPException, Path -from fastapi.routing import APIRouter - -from ...services.shared.graph import GraphExecutionState -from ..dependencies import ApiDependencies - -session_router = APIRouter(prefix="/v1/sessions", tags=["sessions"]) - - -# @session_router.post( -# "/", -# operation_id="create_session", -# responses={ -# 200: {"model": GraphExecutionState}, -# 400: {"description": "Invalid json"}, -# }, -# deprecated=True, -# ) -# async def create_session( -# queue_id: str = Query(default="", description="The id of the queue to associate the session with"), -# graph: Optional[Graph] = Body(default=None, description="The graph to initialize the session with"), -# ) -> GraphExecutionState: -# """Creates a new session, optionally initializing it with an invocation graph""" -# session = ApiDependencies.invoker.create_execution_state(queue_id=queue_id, graph=graph) -# return session - - -# @session_router.get( -# "/", -# operation_id="list_sessions", -# responses={200: {"model": PaginatedResults[GraphExecutionState]}}, -# deprecated=True, -# ) -# async def list_sessions( -# page: int = Query(default=0, description="The page of results to get"), -# per_page: int = Query(default=10, description="The number of results per page"), -# query: str = Query(default="", description="The query string to search for"), -# ) -> PaginatedResults[GraphExecutionState]: -# """Gets a list of sessions, optionally searching""" -# if query == "": -# result = ApiDependencies.invoker.services.graph_execution_manager.list(page, per_page) -# else: -# result = ApiDependencies.invoker.services.graph_execution_manager.search(query, page, per_page) -# return result - - -@session_router.get( - "/{session_id}", - operation_id="get_session", - responses={ - 200: {"model": GraphExecutionState}, - 404: {"description": "Session not found"}, - }, -) -async def get_session( - session_id: str = Path(description="The id of the session to get"), -) -> GraphExecutionState: - """Gets a session""" - session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) - if session is None: - raise HTTPException(status_code=404) - else: - return session - - -# @session_router.post( -# "/{session_id}/nodes", -# operation_id="add_node", -# responses={ -# 200: {"model": str}, -# 400: {"description": "Invalid node or link"}, -# 404: {"description": "Session not found"}, -# }, -# deprecated=True, -# ) -# async def add_node( -# session_id: str = Path(description="The id of the session"), -# node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body( # type: ignore -# description="The node to add" -# ), -# ) -> str: -# """Adds a node to the graph""" -# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) -# if session is None: -# raise HTTPException(status_code=404) - -# try: -# session.add_node(node) -# 
ApiDependencies.invoker.services.graph_execution_manager.set( -# session -# ) # TODO: can this be done automatically, or add node through an API? -# return session.id -# except NodeAlreadyExecutedError: -# raise HTTPException(status_code=400) -# except IndexError: -# raise HTTPException(status_code=400) - - -# @session_router.put( -# "/{session_id}/nodes/{node_path}", -# operation_id="update_node", -# responses={ -# 200: {"model": GraphExecutionState}, -# 400: {"description": "Invalid node or link"}, -# 404: {"description": "Session not found"}, -# }, -# deprecated=True, -# ) -# async def update_node( -# session_id: str = Path(description="The id of the session"), -# node_path: str = Path(description="The path to the node in the graph"), -# node: Annotated[Union[BaseInvocation.get_invocations()], Field(discriminator="type")] = Body( # type: ignore -# description="The new node" -# ), -# ) -> GraphExecutionState: -# """Updates a node in the graph and removes all linked edges""" -# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) -# if session is None: -# raise HTTPException(status_code=404) - -# try: -# session.update_node(node_path, node) -# ApiDependencies.invoker.services.graph_execution_manager.set( -# session -# ) # TODO: can this be done automatically, or add node through an API? -# return session -# except NodeAlreadyExecutedError: -# raise HTTPException(status_code=400) -# except IndexError: -# raise HTTPException(status_code=400) - - -# @session_router.delete( -# "/{session_id}/nodes/{node_path}", -# operation_id="delete_node", -# responses={ -# 200: {"model": GraphExecutionState}, -# 400: {"description": "Invalid node or link"}, -# 404: {"description": "Session not found"}, -# }, -# deprecated=True, -# ) -# async def delete_node( -# session_id: str = Path(description="The id of the session"), -# node_path: str = Path(description="The path to the node to delete"), -# ) -> GraphExecutionState: -# """Deletes a node in the graph and removes all linked edges""" -# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) -# if session is None: -# raise HTTPException(status_code=404) - -# try: -# session.delete_node(node_path) -# ApiDependencies.invoker.services.graph_execution_manager.set( -# session -# ) # TODO: can this be done automatically, or add node through an API? -# return session -# except NodeAlreadyExecutedError: -# raise HTTPException(status_code=400) -# except IndexError: -# raise HTTPException(status_code=400) - - -# @session_router.post( -# "/{session_id}/edges", -# operation_id="add_edge", -# responses={ -# 200: {"model": GraphExecutionState}, -# 400: {"description": "Invalid node or link"}, -# 404: {"description": "Session not found"}, -# }, -# deprecated=True, -# ) -# async def add_edge( -# session_id: str = Path(description="The id of the session"), -# edge: Edge = Body(description="The edge to add"), -# ) -> GraphExecutionState: -# """Adds an edge to the graph""" -# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) -# if session is None: -# raise HTTPException(status_code=404) - -# try: -# session.add_edge(edge) -# ApiDependencies.invoker.services.graph_execution_manager.set( -# session -# ) # TODO: can this be done automatically, or add node through an API? 
-# return session -# except NodeAlreadyExecutedError: -# raise HTTPException(status_code=400) -# except IndexError: -# raise HTTPException(status_code=400) - - -# # TODO: the edge being in the path here is really ugly, find a better solution -# @session_router.delete( -# "/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}", -# operation_id="delete_edge", -# responses={ -# 200: {"model": GraphExecutionState}, -# 400: {"description": "Invalid node or link"}, -# 404: {"description": "Session not found"}, -# }, -# deprecated=True, -# ) -# async def delete_edge( -# session_id: str = Path(description="The id of the session"), -# from_node_id: str = Path(description="The id of the node the edge is coming from"), -# from_field: str = Path(description="The field of the node the edge is coming from"), -# to_node_id: str = Path(description="The id of the node the edge is going to"), -# to_field: str = Path(description="The field of the node the edge is going to"), -# ) -> GraphExecutionState: -# """Deletes an edge from the graph""" -# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) -# if session is None: -# raise HTTPException(status_code=404) - -# try: -# edge = Edge( -# source=EdgeConnection(node_id=from_node_id, field=from_field), -# destination=EdgeConnection(node_id=to_node_id, field=to_field), -# ) -# session.delete_edge(edge) -# ApiDependencies.invoker.services.graph_execution_manager.set( -# session -# ) # TODO: can this be done automatically, or add node through an API? -# return session -# except NodeAlreadyExecutedError: -# raise HTTPException(status_code=400) -# except IndexError: -# raise HTTPException(status_code=400) - - -# @session_router.put( -# "/{session_id}/invoke", -# operation_id="invoke_session", -# responses={ -# 200: {"model": None}, -# 202: {"description": "The invocation is queued"}, -# 400: {"description": "The session has no invocations ready to invoke"}, -# 404: {"description": "Session not found"}, -# }, -# deprecated=True, -# ) -# async def invoke_session( -# queue_id: str = Query(description="The id of the queue to associate the session with"), -# session_id: str = Path(description="The id of the session to invoke"), -# all: bool = Query(default=False, description="Whether or not to invoke all remaining invocations"), -# ) -> Response: -# """Invokes a session""" -# session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id) -# if session is None: -# raise HTTPException(status_code=404) - -# if session.is_complete(): -# raise HTTPException(status_code=400) - -# ApiDependencies.invoker.invoke(queue_id, session, invoke_all=all) -# return Response(status_code=202) - - -# @session_router.delete( -# "/{session_id}/invoke", -# operation_id="cancel_session_invoke", -# responses={202: {"description": "The invocation is canceled"}}, -# deprecated=True, -# ) -# async def cancel_session_invoke( -# session_id: str = Path(description="The id of the session to cancel"), -# ) -> Response: -# """Invokes a session""" -# ApiDependencies.invoker.cancel(session_id) -# return Response(status_code=202) diff --git a/invokeai/app/api/sockets.py b/invokeai/app/api/sockets.py index e651e43559..463545d9bc 100644 --- a/invokeai/app/api/sockets.py +++ b/invokeai/app/api/sockets.py @@ -12,16 +12,26 @@ class SocketIO: __sio: AsyncServer __app: ASGIApp + __sub_queue: str = "subscribe_queue" + __unsub_queue: str = "unsubscribe_queue" + + __sub_bulk_download: str = "subscribe_bulk_download" + __unsub_bulk_download: str = 
"unsubscribe_bulk_download" + def __init__(self, app: FastAPI): self.__sio = AsyncServer(async_mode="asgi", cors_allowed_origins="*") self.__app = ASGIApp(socketio_server=self.__sio, socketio_path="/ws/socket.io") app.mount("/ws", self.__app) - self.__sio.on("subscribe_queue", handler=self._handle_sub_queue) - self.__sio.on("unsubscribe_queue", handler=self._handle_unsub_queue) + self.__sio.on(self.__sub_queue, handler=self._handle_sub_queue) + self.__sio.on(self.__unsub_queue, handler=self._handle_unsub_queue) local_handler.register(event_name=EventServiceBase.queue_event, _func=self._handle_queue_event) local_handler.register(event_name=EventServiceBase.model_event, _func=self._handle_model_event) + self.__sio.on(self.__sub_bulk_download, handler=self._handle_sub_bulk_download) + self.__sio.on(self.__unsub_bulk_download, handler=self._handle_unsub_bulk_download) + local_handler.register(event_name=EventServiceBase.bulk_download_event, _func=self._handle_bulk_download_event) + async def _handle_queue_event(self, event: Event): await self.__sio.emit( event=event[1]["event"], @@ -39,3 +49,18 @@ class SocketIO: async def _handle_model_event(self, event: Event) -> None: await self.__sio.emit(event=event[1]["event"], data=event[1]["data"]) + + async def _handle_bulk_download_event(self, event: Event): + await self.__sio.emit( + event=event[1]["event"], + data=event[1]["data"], + room=event[1]["data"]["bulk_download_id"], + ) + + async def _handle_sub_bulk_download(self, sid, data, *args, **kwargs): + if "bulk_download_id" in data: + await self.__sio.enter_room(sid, data["bulk_download_id"]) + + async def _handle_unsub_bulk_download(self, sid, data, *args, **kwargs): + if "bulk_download_id" in data: + await self.__sio.leave_room(sid, data["bulk_download_id"]) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index 6294083d0e..5fdf55d1ef 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -2,10 +2,12 @@ # which are imported/used before parse_args() is called will get the default config values instead of the # values from the command line or config file. import sys +from contextlib import asynccontextmanager from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles from invokeai.version.invokeai_version import __version__ +from .invocations.fields import InputFieldJSONSchemaExtra, OutputFieldJSONSchemaExtra from .services.config import InvokeAIAppConfig app_config = InvokeAIAppConfig.get_config() @@ -47,18 +49,14 @@ if True: # hack to make flake8 happy with imports coming after setting up the c boards, download_queue, images, - model_records, - models, + model_manager, session_queue, - sessions, utilities, workflows, ) from .api.sockets import SocketIO from .invocations.baseinvocation import ( BaseInvocation, - InputFieldJSONSchemaExtra, - OutputFieldJSONSchemaExtra, UIConfigBase, ) @@ -74,9 +72,25 @@ logger = InvokeAILogger.get_logger(config=app_config) mimetypes.add_type("application/javascript", ".js") mimetypes.add_type("text/css", ".css") + +@asynccontextmanager +async def lifespan(app: FastAPI): + # Add startup event to load dependencies + ApiDependencies.initialize(config=app_config, event_handler_id=event_handler_id, logger=logger) + yield + # Shut down threads + ApiDependencies.shutdown() + + # Create the app # TODO: create this all in a method so configuration/etc. can be passed in? 
-app = FastAPI(title="Invoke - Community Edition", docs_url=None, redoc_url=None, separate_input_output_schemas=False) +app = FastAPI( + title="Invoke - Community Edition", + docs_url=None, + redoc_url=None, + separate_input_output_schemas=False, + lifespan=lifespan, +) # Add event handler event_handler_id: int = id(app) @@ -99,24 +113,9 @@ app.add_middleware( app.add_middleware(GZipMiddleware, minimum_size=1000) -# Add startup event to load dependencies -@app.on_event("startup") -async def startup_event() -> None: - ApiDependencies.initialize(config=app_config, event_handler_id=event_handler_id, logger=logger) - - -# Shut down threads -@app.on_event("shutdown") -async def shutdown_event() -> None: - ApiDependencies.shutdown() - - # Include all routers -app.include_router(sessions.session_router, prefix="/api") - app.include_router(utilities.utilities_router, prefix="/api") -app.include_router(models.models_router, prefix="/api") -app.include_router(model_records.model_records_router, prefix="/api") +app.include_router(model_manager.model_manager_router, prefix="/api") app.include_router(download_queue.download_queue_router, prefix="/api") app.include_router(images.images_router, prefix="/api") app.include_router(boards.boards_router, prefix="/api") @@ -154,6 +153,8 @@ def custom_openapi() -> dict[str, Any]: # TODO: note that we assume the schema_key here is the TYPE.__name__ # This could break in some cases, figure out a better way to do it output_type_titles[schema_key] = output_schema["title"] + openapi_schema["components"]["schemas"][schema_key] = output_schema + openapi_schema["components"]["schemas"][schema_key]["class"] = "output" # Add Node Editor UI helper schemas ui_config_schemas = models_json_schema( @@ -176,23 +177,24 @@ def custom_openapi() -> dict[str, Any]: outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"} invoker_schema["output"] = outputs_ref invoker_schema["class"] = "invocation" - openapi_schema["components"]["schemas"][f"{output_type_title}"]["class"] = "output" - from invokeai.backend.model_management.models import get_model_config_enums + # This code no longer seems to be necessary? 
+ # Leave it here just in case + # + # from invokeai.backend.model_manager import get_model_config_formats + # formats = get_model_config_formats() + # for model_config_name, enum_set in formats.items(): - for model_config_format_enum in set(get_model_config_enums()): - name = model_config_format_enum.__qualname__ + # if model_config_name in openapi_schema["components"]["schemas"]: + # # print(f"Config with name {name} already defined") + # continue - if name in openapi_schema["components"]["schemas"]: - # print(f"Config with name {name} already defined") - continue - - openapi_schema["components"]["schemas"][name] = { - "title": name, - "description": "An enumeration.", - "type": "string", - "enum": [v.value for v in model_config_format_enum], - } + # openapi_schema["components"]["schemas"][model_config_name] = { + # "title": model_config_name, + # "description": "An enumeration.", + # "type": "string", + # "enum": [v.value for v in enum_set], + # } app.openapi_schema = openapi_schema return app.openapi_schema diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index d9e0c7ba0d..5edae5342d 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -8,17 +8,33 @@ import warnings from abc import ABC, abstractmethod from enum import Enum from inspect import signature -from types import UnionType -from typing import TYPE_CHECKING, Any, Callable, ClassVar, Iterable, Literal, Optional, Type, TypeVar, Union, cast +from typing import ( + TYPE_CHECKING, + Annotated, + Any, + Callable, + ClassVar, + Iterable, + Literal, + Optional, + Type, + TypeVar, + Union, + cast, +) import semver -from pydantic import BaseModel, ConfigDict, Field, RootModel, TypeAdapter, create_model -from pydantic.fields import FieldInfo, _Unset +from pydantic import BaseModel, ConfigDict, Field, TypeAdapter, create_model +from pydantic.fields import FieldInfo from pydantic_core import PydanticUndefined +from typing_extensions import TypeAliasType +from invokeai.app.invocations.fields import ( + FieldKind, + Input, +) from invokeai.app.services.config.config_default import InvokeAIAppConfig -from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID -from invokeai.app.shared.fields import FieldDescriptions +from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.util.metaenum import MetaEnum from invokeai.app.util.misc import uuid_string from invokeai.backend.util.logging import InvokeAILogger @@ -52,393 +68,6 @@ class Classification(str, Enum, metaclass=MetaEnum): Prototype = "prototype" -class Input(str, Enum, metaclass=MetaEnum): - """ - The type of input a field accepts. - - `Input.Direct`: The field must have its value provided directly, when the invocation and field \ - are instantiated. - - `Input.Connection`: The field must have its value provided by a connection. - - `Input.Any`: The field may have its value provided either directly or by a connection. - """ - - Connection = "connection" - Direct = "direct" - Any = "any" - - -class FieldKind(str, Enum, metaclass=MetaEnum): - """ - The kind of field. - - `Input`: An input field on a node. - - `Output`: An output field on a node. - - `Internal`: A field which is treated as an input, but cannot be used in node definitions. Metadata is - one example. It is provided to nodes via the WithMetadata class, and we want to reserve the field name - "metadata" for this on all nodes. 
`FieldKind` is used to short-circuit the field name validation logic, - allowing "metadata" for that field. - - `NodeAttribute`: The field is a node attribute. These are fields which are not inputs or outputs, - but which are used to store information about the node. For example, the `id` and `type` fields are node - attributes. - - The presence of this in `json_schema_extra["field_kind"]` is used when initializing node schemas on app - startup, and when generating the OpenAPI schema for the workflow editor. - """ - - Input = "input" - Output = "output" - Internal = "internal" - NodeAttribute = "node_attribute" - - -class UIType(str, Enum, metaclass=MetaEnum): - """ - Type hints for the UI for situations in which the field type is not enough to infer the correct UI type. - - - Model Fields - The most common node-author-facing use will be for model fields. Internally, there is no difference - between SD-1, SD-2 and SDXL model fields - they all use the class `MainModelField`. To ensure the - base-model-specific UI is rendered, use e.g. `ui_type=UIType.SDXLMainModelField` to indicate that - the field is an SDXL main model field. - - - Any Field - We cannot infer the usage of `typing.Any` via schema parsing, so you *must* use `ui_type=UIType.Any` to - indicate that the field accepts any type. Use with caution. This cannot be used on outputs. - - - Scheduler Field - Special handling in the UI is needed for this field, which otherwise would be parsed as a plain enum field. - - - Internal Fields - Similar to the Any Field, the `collect` and `iterate` nodes make use of `typing.Any`. To facilitate - handling these types in the client, we use `UIType._Collection` and `UIType._CollectionItem`. These - should not be used by node authors. - - - DEPRECATED Fields - These types are deprecated and should not be used by node authors. A warning will be logged if one is - used, and the type will be ignored. They are included here for backwards compatibility. 
- """ - - # region Model Field Types - SDXLMainModel = "SDXLMainModelField" - SDXLRefinerModel = "SDXLRefinerModelField" - ONNXModel = "ONNXModelField" - VaeModel = "VAEModelField" - LoRAModel = "LoRAModelField" - ControlNetModel = "ControlNetModelField" - IPAdapterModel = "IPAdapterModelField" - # endregion - - # region Misc Field Types - Scheduler = "SchedulerField" - Any = "AnyField" - # endregion - - # region Internal Field Types - _Collection = "CollectionField" - _CollectionItem = "CollectionItemField" - # endregion - - # region DEPRECATED - Boolean = "DEPRECATED_Boolean" - Color = "DEPRECATED_Color" - Conditioning = "DEPRECATED_Conditioning" - Control = "DEPRECATED_Control" - Float = "DEPRECATED_Float" - Image = "DEPRECATED_Image" - Integer = "DEPRECATED_Integer" - Latents = "DEPRECATED_Latents" - String = "DEPRECATED_String" - BooleanCollection = "DEPRECATED_BooleanCollection" - ColorCollection = "DEPRECATED_ColorCollection" - ConditioningCollection = "DEPRECATED_ConditioningCollection" - ControlCollection = "DEPRECATED_ControlCollection" - FloatCollection = "DEPRECATED_FloatCollection" - ImageCollection = "DEPRECATED_ImageCollection" - IntegerCollection = "DEPRECATED_IntegerCollection" - LatentsCollection = "DEPRECATED_LatentsCollection" - StringCollection = "DEPRECATED_StringCollection" - BooleanPolymorphic = "DEPRECATED_BooleanPolymorphic" - ColorPolymorphic = "DEPRECATED_ColorPolymorphic" - ConditioningPolymorphic = "DEPRECATED_ConditioningPolymorphic" - ControlPolymorphic = "DEPRECATED_ControlPolymorphic" - FloatPolymorphic = "DEPRECATED_FloatPolymorphic" - ImagePolymorphic = "DEPRECATED_ImagePolymorphic" - IntegerPolymorphic = "DEPRECATED_IntegerPolymorphic" - LatentsPolymorphic = "DEPRECATED_LatentsPolymorphic" - StringPolymorphic = "DEPRECATED_StringPolymorphic" - MainModel = "DEPRECATED_MainModel" - UNet = "DEPRECATED_UNet" - Vae = "DEPRECATED_Vae" - CLIP = "DEPRECATED_CLIP" - Collection = "DEPRECATED_Collection" - CollectionItem = "DEPRECATED_CollectionItem" - Enum = "DEPRECATED_Enum" - WorkflowField = "DEPRECATED_WorkflowField" - IsIntermediate = "DEPRECATED_IsIntermediate" - BoardField = "DEPRECATED_BoardField" - MetadataItem = "DEPRECATED_MetadataItem" - MetadataItemCollection = "DEPRECATED_MetadataItemCollection" - MetadataItemPolymorphic = "DEPRECATED_MetadataItemPolymorphic" - MetadataDict = "DEPRECATED_MetadataDict" - # endregion - - -class UIComponent(str, Enum, metaclass=MetaEnum): - """ - The type of UI component to use for a field, used to override the default components, which are - inferred from the field type. - """ - - None_ = "none" - Textarea = "textarea" - Slider = "slider" - - -class InputFieldJSONSchemaExtra(BaseModel): - """ - Extra attributes to be added to input fields and their OpenAPI schema. Used during graph execution, - and by the workflow editor during schema parsing and UI rendering. - """ - - input: Input - orig_required: bool - field_kind: FieldKind - default: Optional[Any] = None - orig_default: Optional[Any] = None - ui_hidden: bool = False - ui_type: Optional[UIType] = None - ui_component: Optional[UIComponent] = None - ui_order: Optional[int] = None - ui_choice_labels: Optional[dict[str, str]] = None - - model_config = ConfigDict( - validate_assignment=True, - json_schema_serialization_defaults_required=True, - ) - - -class OutputFieldJSONSchemaExtra(BaseModel): - """ - Extra attributes to be added to input fields and their OpenAPI schema. Used by the workflow editor - during schema parsing and UI rendering. 
- """ - - field_kind: FieldKind - ui_hidden: bool - ui_type: Optional[UIType] - ui_order: Optional[int] - - model_config = ConfigDict( - validate_assignment=True, - json_schema_serialization_defaults_required=True, - ) - - -def InputField( - # copied from pydantic's Field - # TODO: Can we support default_factory? - default: Any = _Unset, - default_factory: Callable[[], Any] | None = _Unset, - title: str | None = _Unset, - description: str | None = _Unset, - pattern: str | None = _Unset, - strict: bool | None = _Unset, - gt: float | None = _Unset, - ge: float | None = _Unset, - lt: float | None = _Unset, - le: float | None = _Unset, - multiple_of: float | None = _Unset, - allow_inf_nan: bool | None = _Unset, - max_digits: int | None = _Unset, - decimal_places: int | None = _Unset, - min_length: int | None = _Unset, - max_length: int | None = _Unset, - # custom - input: Input = Input.Any, - ui_type: Optional[UIType] = None, - ui_component: Optional[UIComponent] = None, - ui_hidden: bool = False, - ui_order: Optional[int] = None, - ui_choice_labels: Optional[dict[str, str]] = None, -) -> Any: - """ - Creates an input field for an invocation. - - This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/latest/api/fields/#pydantic.fields.Field) \ - that adds a few extra parameters to support graph execution and the node editor UI. - - :param Input input: [Input.Any] The kind of input this field requires. \ - `Input.Direct` means a value must be provided on instantiation. \ - `Input.Connection` means the value must be provided by a connection. \ - `Input.Any` means either will do. - - :param UIType ui_type: [None] Optionally provides an extra type hint for the UI. \ - In some situations, the field's type is not enough to infer the correct UI type. \ - For example, model selection fields should render a dropdown UI component to select a model. \ - Internally, there is no difference between SD-1, SD-2 and SDXL model fields, they all use \ - `MainModelField`. So to ensure the base-model-specific UI is rendered, you can use \ - `UIType.SDXLMainModelField` to indicate that the field is an SDXL main model field. - - :param UIComponent ui_component: [None] Optionally specifies a specific component to use in the UI. \ - The UI will always render a suitable component, but sometimes you want something different than the default. \ - For example, a `string` field will default to a single-line input, but you may want a multi-line textarea instead. \ - For this case, you could provide `UIComponent.Textarea`. - - :param bool ui_hidden: [False] Specifies whether or not this field should be hidden in the UI. - - :param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. - - :param dict[str, str] ui_choice_labels: [None] Specifies the labels to use for the choices in an enum field. - """ - - json_schema_extra_ = InputFieldJSONSchemaExtra( - input=input, - ui_type=ui_type, - ui_component=ui_component, - ui_hidden=ui_hidden, - ui_order=ui_order, - ui_choice_labels=ui_choice_labels, - field_kind=FieldKind.Input, - orig_required=True, - ) - - """ - There is a conflict between the typing of invocation definitions and the typing of an invocation's - `invoke()` function. - - On instantiation of a node, the invocation definition is used to create the python class. At this time, - any number of fields may be optional, because they may be provided by connections. - - On calling of `invoke()`, however, those fields may be required. 
- - For example, consider an ResizeImageInvocation with an `image: ImageField` field. - - `image` is required during the call to `invoke()`, but when the python class is instantiated, - the field may not be present. This is fine, because that image field will be provided by a - connection from an ancestor node, which outputs an image. - - This means we want to type the `image` field as optional for the node class definition, but required - for the `invoke()` function. - - If we use `typing.Optional` in the node class definition, the field will be typed as optional in the - `invoke()` method, and we'll have to do a lot of runtime checks to ensure the field is present - or - any static type analysis tools will complain. - - To get around this, in node class definitions, we type all fields correctly for the `invoke()` function, - but secretly make them optional in `InputField()`. We also store the original required bool and/or default - value. When we call `invoke()`, we use this stored information to do an additional check on the class. - """ - - if default_factory is not _Unset and default_factory is not None: - default = default_factory() - logger.warn('"default_factory" is not supported, calling it now to set "default"') - - # These are the args we may wish pass to the pydantic `Field()` function - field_args = { - "default": default, - "title": title, - "description": description, - "pattern": pattern, - "strict": strict, - "gt": gt, - "ge": ge, - "lt": lt, - "le": le, - "multiple_of": multiple_of, - "allow_inf_nan": allow_inf_nan, - "max_digits": max_digits, - "decimal_places": decimal_places, - "min_length": min_length, - "max_length": max_length, - } - - # We only want to pass the args that were provided, otherwise the `Field()`` function won't work as expected - provided_args = {k: v for (k, v) in field_args.items() if v is not PydanticUndefined} - - # Because we are manually making fields optional, we need to store the original required bool for reference later - json_schema_extra_.orig_required = default is PydanticUndefined - - # Make Input.Any and Input.Connection fields optional, providing None as a default if the field doesn't already have one - if input is Input.Any or input is Input.Connection: - default_ = None if default is PydanticUndefined else default - provided_args.update({"default": default_}) - if default is not PydanticUndefined: - # Before invoking, we'll check for the original default value and set it on the field if the field has no value - json_schema_extra_.default = default - json_schema_extra_.orig_default = default - elif default is not PydanticUndefined: - default_ = default - provided_args.update({"default": default_}) - json_schema_extra_.orig_default = default_ - - return Field( - **provided_args, - json_schema_extra=json_schema_extra_.model_dump(exclude_none=True), - ) - - -def OutputField( - # copied from pydantic's Field - default: Any = _Unset, - title: str | None = _Unset, - description: str | None = _Unset, - pattern: str | None = _Unset, - strict: bool | None = _Unset, - gt: float | None = _Unset, - ge: float | None = _Unset, - lt: float | None = _Unset, - le: float | None = _Unset, - multiple_of: float | None = _Unset, - allow_inf_nan: bool | None = _Unset, - max_digits: int | None = _Unset, - decimal_places: int | None = _Unset, - min_length: int | None = _Unset, - max_length: int | None = _Unset, - # custom - ui_type: Optional[UIType] = None, - ui_hidden: bool = False, - ui_order: Optional[int] = None, -) -> Any: - """ - Creates an output field 
for an invocation output. - - This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/1.10/usage/schema/#field-customization) \ - that adds a few extra parameters to support graph execution and the node editor UI. - - :param UIType ui_type: [None] Optionally provides an extra type hint for the UI. \ - In some situations, the field's type is not enough to infer the correct UI type. \ - For example, model selection fields should render a dropdown UI component to select a model. \ - Internally, there is no difference between SD-1, SD-2 and SDXL model fields, they all use \ - `MainModelField`. So to ensure the base-model-specific UI is rendered, you can use \ - `UIType.SDXLMainModelField` to indicate that the field is an SDXL main model field. - - :param bool ui_hidden: [False] Specifies whether or not this field should be hidden in the UI. \ - - :param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. \ - """ - return Field( - default=default, - title=title, - description=description, - pattern=pattern, - strict=strict, - gt=gt, - ge=ge, - lt=lt, - le=le, - multiple_of=multiple_of, - allow_inf_nan=allow_inf_nan, - max_digits=max_digits, - decimal_places=decimal_places, - min_length=min_length, - max_length=max_length, - json_schema_extra=OutputFieldJSONSchemaExtra( - ui_type=ui_type, - ui_hidden=ui_hidden, - ui_order=ui_order, - field_kind=FieldKind.Output, - ).model_dump(exclude_none=True), - ) - - class UIConfigBase(BaseModel): """ Provides additional node configuration to the UI. @@ -460,33 +89,6 @@ class UIConfigBase(BaseModel): ) -class InvocationContext: - """Initialized and provided to on execution of invocations.""" - - services: InvocationServices - graph_execution_state_id: str - queue_id: str - queue_item_id: int - queue_batch_id: str - workflow: Optional[WorkflowWithoutID] - - def __init__( - self, - services: InvocationServices, - queue_id: str, - queue_item_id: int, - queue_batch_id: str, - graph_execution_state_id: str, - workflow: Optional[WorkflowWithoutID], - ): - self.services = services - self.graph_execution_state_id = graph_execution_state_id - self.queue_id = queue_id - self.queue_item_id = queue_item_id - self.queue_batch_id = queue_batch_id - self.workflow = workflow - - class BaseInvocationOutput(BaseModel): """ Base class for all invocation outputs. 
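The services-bearing `InvocationContext` deleted above is replaced, throughout the rest of this diff, by the new `invokeai.app.services.shared.invocation_context.InvocationContext`, whose per-domain attributes (`context.images`, `context.tensors`, `context.conditioning`, `context.models`, `context.logger`, `context.config`) wrap the same services behind a smaller node-facing surface. A rough, illustrative sketch of that facade idea (names invented here, not the real implementation):

```python
from dataclasses import dataclass
from typing import Any


class ImagesInterface:
    """Illustrative facade over an image service for a single node execution."""

    def __init__(self, image_service: Any, node_id: str, session_id: str) -> None:
        self._svc = image_service
        self._node_id = node_id
        self._session_id = session_id

    def get_pil(self, image_name: str, mode: str = "RGB") -> Any:
        # One call replaces get_pil_image(...) plus a manual .convert(mode).
        return self._svc.get_pil_image(image_name).convert(mode)

    def save(self, image: Any) -> Any:
        # The node/session bookkeeping every invocation used to pass by hand
        # is supplied here, once.
        return self._svc.create(image=image, node_id=self._node_id, session_id=self._session_id)


@dataclass
class SketchInvocationContext:
    images: ImagesInterface
    logger: Any


class _StubImageService:
    """Stand-in for the real image service, just for this sketch."""

    def create(self, **kwargs: Any) -> dict[str, Any]:
        return kwargs  # echo back the bookkeeping the facade filled in


ctx = SketchInvocationContext(
    images=ImagesInterface(_StubImageService(), node_id="n1", session_id="s1"),
    logger=None,
)
print(ctx.images.save(image="<pil image>"))  # node code passes only the image
```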
@@ -495,6 +97,7 @@ class BaseInvocationOutput(BaseModel):
     """
 
     _output_classes: ClassVar[set[BaseInvocationOutput]] = set()
+    _typeadapter: ClassVar[Optional[TypeAdapter[Any]]] = None
 
     @classmethod
     def register_output(cls, output: BaseInvocationOutput) -> None:
@@ -507,10 +110,14 @@ class BaseInvocationOutput(BaseModel):
         return cls._output_classes
 
     @classmethod
-    def get_outputs_union(cls) -> UnionType:
-        """Gets a union of all invocation outputs."""
-        outputs_union = Union[tuple(cls._output_classes)]  # type: ignore [valid-type]
-        return outputs_union  # type: ignore [return-value]
+    def get_typeadapter(cls) -> TypeAdapter[Any]:
+        """Gets a pydantic TypeAdapter for the union of all invocation output types."""
+        if not cls._typeadapter:
+            InvocationOutputsUnion = TypeAliasType(
+                "InvocationOutputsUnion", Annotated[Union[tuple(cls._output_classes)], Field(discriminator="type")]
+            )
+            cls._typeadapter = TypeAdapter(InvocationOutputsUnion)
+        return cls._typeadapter
 
     @classmethod
     def get_output_types(cls) -> Iterable[str]:
@@ -559,6 +166,7 @@ class BaseInvocation(ABC, BaseModel):
     """
 
     _invocation_classes: ClassVar[set[BaseInvocation]] = set()
+    _typeadapter: ClassVar[Optional[TypeAdapter[Any]]] = None
 
     @classmethod
     def get_type(cls) -> str:
@@ -571,10 +179,14 @@ class BaseInvocation(ABC, BaseModel):
         cls._invocation_classes.add(invocation)
 
     @classmethod
-    def get_invocations_union(cls) -> UnionType:
-        """Gets a union of all invocation types."""
-        invocations_union = Union[tuple(cls._invocation_classes)]  # type: ignore [valid-type]
-        return invocations_union  # type: ignore [return-value]
+    def get_typeadapter(cls) -> TypeAdapter[Any]:
+        """Gets a pydantic TypeAdapter for the union of all invocation types."""
+        if not cls._typeadapter:
+            InvocationsUnion = TypeAliasType(
+                "InvocationsUnion", Annotated[Union[tuple(cls._invocation_classes)], Field(discriminator="type")]
+            )
+            cls._typeadapter = TypeAdapter(InvocationsUnion)
+        return cls._typeadapter
 
     @classmethod
     def get_invocations(cls) -> Iterable[BaseInvocation]:
@@ -632,7 +244,7 @@ class BaseInvocation(ABC, BaseModel):
         """Invoke with provided context and return outputs."""
         pass
 
-    def invoke_internal(self, context: InvocationContext) -> BaseInvocationOutput:
+    def invoke_internal(self, context: InvocationContext, services: "InvocationServices") -> BaseInvocationOutput:
         """
         Internal invoke method, calls `invoke()` after some prep.
         Handles optional fields that are required to call `invoke()` and invocation cache.
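For readers unfamiliar with the `TypeAdapter` pattern introduced above: the discriminated union lets pydantic dispatch directly on the `type` field instead of try-validating every registered class, and caching the adapter on the class avoids rebuilding the union on every call. A self-contained sketch with invented output classes (not the real InvokeAI types):

```python
from typing import Annotated, Literal, Union

from pydantic import BaseModel, Field, TypeAdapter
from typing_extensions import TypeAliasType


class ImageOutput(BaseModel):
    type: Literal["image_output"] = "image_output"
    image_name: str


class LatentsOutput(BaseModel):
    type: Literal["latents_output"] = "latents_output"
    latents_name: str


# Mirrors the adapter construction in get_typeadapter(), outside of a class.
OutputsUnion = TypeAliasType(
    "OutputsUnion",
    Annotated[Union[ImageOutput, LatentsOutput], Field(discriminator="type")],
)
adapter = TypeAdapter(OutputsUnion)

# The "type" discriminator routes the payload straight to the right class.
out = adapter.validate_python({"type": "image_output", "image_name": "foo.png"})
assert isinstance(out, ImageOutput)
```

Building the adapter lazily also matters here: the union can only be formed after all invocation classes have registered themselves.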
@@ -657,23 +269,23 @@ class BaseInvocation(ABC, BaseModel): raise MissingInputException(self.model_fields["type"].default, field_name) # skip node cache codepath if it's disabled - if context.services.configuration.node_cache_size == 0: + if services.configuration.node_cache_size == 0: return self.invoke(context) output: BaseInvocationOutput if self.use_cache: - key = context.services.invocation_cache.create_key(self) - cached_value = context.services.invocation_cache.get(key) + key = services.invocation_cache.create_key(self) + cached_value = services.invocation_cache.get(key) if cached_value is None: - context.services.logger.debug(f'Invocation cache miss for type "{self.get_type()}": {self.id}') + services.logger.debug(f'Invocation cache miss for type "{self.get_type()}": {self.id}') output = self.invoke(context) - context.services.invocation_cache.save(key, output) + services.invocation_cache.save(key, output) return output else: - context.services.logger.debug(f'Invocation cache hit for type "{self.get_type()}": {self.id}') + services.logger.debug(f'Invocation cache hit for type "{self.get_type()}": {self.id}') return cached_value else: - context.services.logger.debug(f'Skipping invocation cache for "{self.get_type()}": {self.id}') + services.logger.debug(f'Skipping invocation cache for "{self.get_type()}": {self.id}') return self.invoke(context) id: str = Field( @@ -714,9 +326,7 @@ RESERVED_NODE_ATTRIBUTE_FIELD_NAMES = { "workflow", } -RESERVED_INPUT_FIELD_NAMES = { - "metadata", -} +RESERVED_INPUT_FIELD_NAMES = {"metadata", "board"} RESERVED_OUTPUT_FIELD_NAMES = {"type"} @@ -926,37 +536,3 @@ def invocation_output( return cls return wrapper - - -class MetadataField(RootModel): - """ - Pydantic model for metadata with custom root of type dict[str, Any]. - Metadata is stored without a strict schema. - """ - - root: dict[str, Any] = Field(description="The metadata") - - -MetadataFieldValidator = TypeAdapter(MetadataField) - - -class WithMetadata(BaseModel): - metadata: Optional[MetadataField] = Field( - default=None, - description=FieldDescriptions.metadata, - json_schema_extra=InputFieldJSONSchemaExtra( - field_kind=FieldKind.Internal, - input=Input.Connection, - orig_required=False, - ).model_dump(exclude_none=True), - ) - - -class WithWorkflow: - workflow = None - - def __init_subclass__(cls) -> None: - logger.warn( - f"{cls.__module__.split('.')[0]}.{cls.__name__}: WithWorkflow is deprecated. Use `context.workflow` to access the workflow." 
- ) - super().__init_subclass__() diff --git a/invokeai/app/invocations/collections.py b/invokeai/app/invocations/collections.py index 4c7b6f94cd..e02291980f 100644 --- a/invokeai/app/invocations/collections.py +++ b/invokeai/app/invocations/collections.py @@ -5,9 +5,11 @@ import numpy as np from pydantic import ValidationInfo, field_validator from invokeai.app.invocations.primitives import IntegerCollectionOutput +from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.util.misc import SEED_MAX -from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation +from .baseinvocation import BaseInvocation, invocation +from .fields import InputField @invocation( diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py index 49c62cff56..ff13658052 100644 --- a/invokeai/app/invocations/compel.py +++ b/invokeai/app/invocations/compel.py @@ -1,40 +1,28 @@ -from dataclasses import dataclass -from typing import List, Optional, Union +from typing import Iterator, List, Optional, Tuple, Union, cast import torch from compel import Compel, ReturnedEmbeddingsType from compel.prompt_parser import Blend, Conjunction, CrossAttentionControlSubstitute, FlattenedPrompt, Fragment +from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer -from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput -from invokeai.app.shared.fields import FieldDescriptions +from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIComponent +from invokeai.app.invocations.primitives import ConditioningOutput +from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.app.util.ti_utils import generate_ti_list +from invokeai.backend.lora import LoRAModelRaw +from invokeai.backend.model_patcher import ModelPatcher from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ( BasicConditioningInfo, + ConditioningFieldData, ExtraConditioningInfo, SDXLConditioningInfo, ) +from invokeai.backend.util.devices import torch_dtype -from ...backend.model_management.lora import ModelPatcher -from ...backend.model_management.models import ModelNotFoundException, ModelType -from ...backend.util.devices import torch_dtype -from ..util.ti_utils import extract_ti_triggers_from_prompt -from .baseinvocation import ( - BaseInvocation, - BaseInvocationOutput, - Input, - InputField, - InvocationContext, - OutputField, - UIComponent, - invocation, - invocation_output, -) +from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output from .model import ClipField - -@dataclass -class ConditioningFieldData: - conditionings: List[BasicConditioningInfo] - # unconditioned: Optional[torch.Tensor] +# unconditioned: Optional[torch.Tensor] # class ConditioningAlgo(str, Enum): @@ -48,7 +36,7 @@ class ConditioningFieldData: title="Prompt", tags=["prompt", "compel"], category="conditioning", - version="1.0.0", + version="1.0.1", ) class CompelInvocation(BaseInvocation): """Parse prompt using compel package to conditioning.""" @@ -66,49 +54,27 @@ class CompelInvocation(BaseInvocation): @torch.no_grad() def invoke(self, context: InvocationContext) -> ConditioningOutput: - tokenizer_info = context.services.model_manager.get_model( - **self.clip.tokenizer.model_dump(), - context=context, - ) - text_encoder_info = context.services.model_manager.get_model( - **self.clip.text_encoder.model_dump(), - context=context, - 
) + tokenizer_info = context.models.load(**self.clip.tokenizer.model_dump()) + tokenizer_model = tokenizer_info.model + assert isinstance(tokenizer_model, CLIPTokenizer) + text_encoder_info = context.models.load(**self.clip.text_encoder.model_dump()) + text_encoder_model = text_encoder_info.model + assert isinstance(text_encoder_model, CLIPTextModel) - def _lora_loader(): + def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]: for lora in self.clip.loras: - lora_info = context.services.model_manager.get_model( - **lora.model_dump(exclude={"weight"}), context=context - ) - yield (lora_info.context.model, lora.weight) + lora_info = context.models.load(**lora.model_dump(exclude={"weight"})) + assert isinstance(lora_info.model, LoRAModelRaw) + yield (lora_info.model, lora.weight) del lora_info return - # loras = [(context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras] + # loras = [(context.models.get(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras] - ti_list = [] - for trigger in extract_ti_triggers_from_prompt(self.prompt): - name = trigger[1:-1] - try: - ti_list.append( - ( - name, - context.services.model_manager.get_model( - model_name=name, - base_model=self.clip.text_encoder.base_model, - model_type=ModelType.TextualInversion, - context=context, - ).context.model, - ) - ) - except ModelNotFoundException: - # print(e) - # import traceback - # print(traceback.format_exc()) - print(f'Warn: trigger: "{trigger}" not found') + ti_list = generate_ti_list(self.prompt, text_encoder_info.config.base, context) with ( - ModelPatcher.apply_ti(tokenizer_info.context.model, text_encoder_info.context.model, ti_list) as ( + ModelPatcher.apply_ti(tokenizer_model, text_encoder_model, ti_list) as ( tokenizer, ti_manager, ), @@ -116,8 +82,9 @@ class CompelInvocation(BaseInvocation): # Apply the LoRA after text_encoder has been moved to its target device for faster patching. ModelPatcher.apply_lora_text_encoder(text_encoder, _lora_loader()), # Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers. 
- ModelPatcher.apply_clip_skip(text_encoder_info.context.model, self.clip.skipped_layers), + ModelPatcher.apply_clip_skip(text_encoder_model, self.clip.skipped_layers), ): + assert isinstance(text_encoder, CLIPTextModel) compel = Compel( tokenizer=tokenizer, text_encoder=text_encoder, @@ -128,7 +95,7 @@ class CompelInvocation(BaseInvocation): conjunction = Compel.parse_prompt_string(self.prompt) - if context.services.configuration.log_tokenization: + if context.config.get().log_tokenization: log_tokenization_for_conjunction(conjunction, tokenizer) c, options = compel.build_conditioning_tensor_for_conjunction(conjunction) @@ -149,17 +116,14 @@ class CompelInvocation(BaseInvocation): ] ) - conditioning_name = f"{context.graph_execution_state_id}_{self.id}_conditioning" - context.services.latents.save(conditioning_name, conditioning_data) + conditioning_name = context.conditioning.save(conditioning_data) - return ConditioningOutput( - conditioning=ConditioningField( - conditioning_name=conditioning_name, - ), - ) + return ConditioningOutput.build(conditioning_name) class SDXLPromptInvocationBase: + """Prompt processor for SDXL models.""" + def run_clip_compel( self, context: InvocationContext, @@ -168,26 +132,25 @@ class SDXLPromptInvocationBase: get_pooled: bool, lora_prefix: str, zero_on_empty: bool, - ): - tokenizer_info = context.services.model_manager.get_model( - **clip_field.tokenizer.model_dump(), - context=context, - ) - text_encoder_info = context.services.model_manager.get_model( - **clip_field.text_encoder.model_dump(), - context=context, - ) + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[ExtraConditioningInfo]]: + tokenizer_info = context.models.load(**clip_field.tokenizer.model_dump()) + tokenizer_model = tokenizer_info.model + assert isinstance(tokenizer_model, CLIPTokenizer) + text_encoder_info = context.models.load(**clip_field.text_encoder.model_dump()) + text_encoder_model = text_encoder_info.model + assert isinstance(text_encoder_model, (CLIPTextModel, CLIPTextModelWithProjection)) # return zero on empty if prompt == "" and zero_on_empty: - cpu_text_encoder = text_encoder_info.context.model + cpu_text_encoder = text_encoder_info.model + assert isinstance(cpu_text_encoder, torch.nn.Module) c = torch.zeros( ( 1, cpu_text_encoder.config.max_position_embeddings, cpu_text_encoder.config.hidden_size, ), - dtype=text_encoder_info.context.cache.precision, + dtype=cpu_text_encoder.dtype, ) if get_pooled: c_pooled = torch.zeros( @@ -198,40 +161,21 @@ class SDXLPromptInvocationBase: c_pooled = None return c, c_pooled, None - def _lora_loader(): + def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]: for lora in clip_field.loras: - lora_info = context.services.model_manager.get_model( - **lora.model_dump(exclude={"weight"}), context=context - ) - yield (lora_info.context.model, lora.weight) + lora_info = context.models.load(**lora.model_dump(exclude={"weight"})) + lora_model = lora_info.model + assert isinstance(lora_model, LoRAModelRaw) + yield (lora_model, lora.weight) del lora_info return - # loras = [(context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras] + # loras = [(context.models.get(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras] - ti_list = [] - for trigger in extract_ti_triggers_from_prompt(prompt): - name = trigger[1:-1] - try: - ti_list.append( - ( - name, - context.services.model_manager.get_model( - model_name=name, - 
base_model=clip_field.text_encoder.base_model, - model_type=ModelType.TextualInversion, - context=context, - ).context.model, - ) - ) - except ModelNotFoundException: - # print(e) - # import traceback - # print(traceback.format_exc()) - print(f'Warn: trigger: "{trigger}" not found') + ti_list = generate_ti_list(prompt, text_encoder_info.config.base, context) with ( - ModelPatcher.apply_ti(tokenizer_info.context.model, text_encoder_info.context.model, ti_list) as ( + ModelPatcher.apply_ti(tokenizer_model, text_encoder_model, ti_list) as ( tokenizer, ti_manager, ), @@ -239,8 +183,10 @@ class SDXLPromptInvocationBase: # Apply the LoRA after text_encoder has been moved to its target device for faster patching. ModelPatcher.apply_lora(text_encoder, _lora_loader(), lora_prefix), # Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers. - ModelPatcher.apply_clip_skip(text_encoder_info.context.model, clip_field.skipped_layers), + ModelPatcher.apply_clip_skip(text_encoder_model, clip_field.skipped_layers), ): + assert isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)) + text_encoder = cast(CLIPTextModel, text_encoder) compel = Compel( tokenizer=tokenizer, text_encoder=text_encoder, @@ -253,7 +199,7 @@ class SDXLPromptInvocationBase: conjunction = Compel.parse_prompt_string(prompt) - if context.services.configuration.log_tokenization: + if context.config.get().log_tokenization: # TODO: better logging for and syntax log_tokenization_for_conjunction(conjunction, tokenizer) @@ -286,7 +232,7 @@ class SDXLPromptInvocationBase: title="SDXL Prompt", tags=["sdxl", "compel", "prompt"], category="conditioning", - version="1.0.0", + version="1.0.1", ) class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): """Parse prompt using compel package to conditioning.""" @@ -357,6 +303,7 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): dim=1, ) + assert c2_pooled is not None conditioning_data = ConditioningFieldData( conditionings=[ SDXLConditioningInfo( @@ -368,14 +315,9 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): ] ) - conditioning_name = f"{context.graph_execution_state_id}_{self.id}_conditioning" - context.services.latents.save(conditioning_name, conditioning_data) + conditioning_name = context.conditioning.save(conditioning_data) - return ConditioningOutput( - conditioning=ConditioningField( - conditioning_name=conditioning_name, - ), - ) + return ConditioningOutput.build(conditioning_name) @invocation( @@ -383,7 +325,7 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): title="SDXL Refiner Prompt", tags=["sdxl", "compel", "prompt"], category="conditioning", - version="1.0.0", + version="1.0.1", ) class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase): """Parse prompt using compel package to conditioning.""" @@ -410,6 +352,7 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase add_time_ids = torch.tensor([original_size + crop_coords + (self.aesthetic_score,)]) + assert c2_pooled is not None conditioning_data = ConditioningFieldData( conditionings=[ SDXLConditioningInfo( @@ -421,14 +364,9 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase ] ) - conditioning_name = f"{context.graph_execution_state_id}_{self.id}_conditioning" - context.services.latents.save(conditioning_name, conditioning_data) + conditioning_name = context.conditioning.save(conditioning_data) - 
return ConditioningOutput( - conditioning=ConditioningField( - conditioning_name=conditioning_name, - ), - ) + return ConditioningOutput.build(conditioning_name) @invocation_output("clip_skip_output") @@ -449,7 +387,7 @@ class ClipSkipInvocation(BaseInvocation): """Skip layers in clip text_encoder model.""" clip: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP") - skipped_layers: int = InputField(default=0, description=FieldDescriptions.skipped_layers) + skipped_layers: int = InputField(default=0, ge=0, description=FieldDescriptions.skipped_layers) def invoke(self, context: InvocationContext) -> ClipSkipInvocationOutput: self.clip.skipped_layers += self.skipped_layers @@ -459,9 +397,9 @@ class ClipSkipInvocation(BaseInvocation): def get_max_token_count( - tokenizer, + tokenizer: CLIPTokenizer, prompt: Union[FlattenedPrompt, Blend, Conjunction], - truncate_if_too_long=False, + truncate_if_too_long: bool = False, ) -> int: if type(prompt) is Blend: blend: Blend = prompt @@ -473,7 +411,9 @@ def get_max_token_count( return len(get_tokens_for_prompt_object(tokenizer, prompt, truncate_if_too_long)) -def get_tokens_for_prompt_object(tokenizer, parsed_prompt: FlattenedPrompt, truncate_if_too_long=True) -> List[str]: +def get_tokens_for_prompt_object( + tokenizer: CLIPTokenizer, parsed_prompt: FlattenedPrompt, truncate_if_too_long: bool = True +) -> List[str]: if type(parsed_prompt) is Blend: raise ValueError("Blend is not supported here - you need to get tokens for each of its .children") @@ -486,24 +426,29 @@ def get_tokens_for_prompt_object(tokenizer, parsed_prompt: FlattenedPrompt, trun for x in parsed_prompt.children ] text = " ".join(text_fragments) - tokens = tokenizer.tokenize(text) + tokens: List[str] = tokenizer.tokenize(text) if truncate_if_too_long: max_tokens_length = tokenizer.model_max_length - 2 # typically 75 tokens = tokens[0:max_tokens_length] return tokens -def log_tokenization_for_conjunction(c: Conjunction, tokenizer, display_label_prefix=None): +def log_tokenization_for_conjunction( + c: Conjunction, tokenizer: CLIPTokenizer, display_label_prefix: Optional[str] = None +) -> None: display_label_prefix = display_label_prefix or "" for i, p in enumerate(c.prompts): if len(c.prompts) > 1: this_display_label_prefix = f"{display_label_prefix}(conjunction part {i + 1}, weight={c.weights[i]})" else: + assert display_label_prefix is not None this_display_label_prefix = display_label_prefix log_tokenization_for_prompt_object(p, tokenizer, display_label_prefix=this_display_label_prefix) -def log_tokenization_for_prompt_object(p: Union[Blend, FlattenedPrompt], tokenizer, display_label_prefix=None): +def log_tokenization_for_prompt_object( + p: Union[Blend, FlattenedPrompt], tokenizer: CLIPTokenizer, display_label_prefix: Optional[str] = None +) -> None: display_label_prefix = display_label_prefix or "" if type(p) is Blend: blend: Blend = p @@ -543,7 +488,12 @@ def log_tokenization_for_prompt_object(p: Union[Blend, FlattenedPrompt], tokeniz log_tokenization_for_text(text, tokenizer, display_label=display_label_prefix) -def log_tokenization_for_text(text, tokenizer, display_label=None, truncate_if_too_long=False): +def log_tokenization_for_text( + text: str, + tokenizer: CLIPTokenizer, + display_label: Optional[str] = None, + truncate_if_too_long: Optional[bool] = False, +) -> None: """shows how the prompt is tokenized # usually tokens have '' to indicate end-of-word, # but for readability it has been replaced with ' ' diff --git 
a/invokeai/app/invocations/conditioning.py b/invokeai/app/invocations/conditioning.py index 3102951408..b15fdcd198 100644 --- a/invokeai/app/invocations/conditioning.py +++ b/invokeai/app/invocations/conditioning.py @@ -2,11 +2,10 @@ import torch from invokeai.app.invocations.baseinvocation import ( BaseInvocation, - InputField, InvocationContext, - WithMetadata, invocation, ) +from invokeai.app.invocations.fields import InputField, WithMetadata from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput, MaskField, MaskOutput @@ -51,9 +50,7 @@ class RectangleMaskInvocation(BaseInvocation, WithMetadata): :, self.y_top : self.y_top + self.rectangle_height, self.x_left : self.x_left + self.rectangle_width ] = True - mask_name = f"{context.graph_execution_state_id}__{self.id}_mask" - context.services.latents.save(mask_name, mask) - + mask_name = context.tensors.save(mask) return MaskOutput( mask=MaskField(mask_name=mask_name), width=self.width, diff --git a/invokeai/app/invocations/constants.py b/invokeai/app/invocations/constants.py new file mode 100644 index 0000000000..cebe0eb30f --- /dev/null +++ b/invokeai/app/invocations/constants.py @@ -0,0 +1,17 @@ +from typing import Literal + +from invokeai.backend.stable_diffusion.schedulers import SCHEDULER_MAP + +LATENT_SCALE_FACTOR = 8 +""" +HACK: Many nodes are currently hard-coded to use a fixed latent scale factor of 8. This is fragile, and will need to +be addressed if future models use a different latent scale factor. Also, note that there may be places where the scale +factor is hard-coded to a literal '8' rather than using this constant. +The ratio of image:latent dimensions is LATENT_SCALE_FACTOR:1, or 8:1. +""" + +SCHEDULER_NAME_VALUES = Literal[tuple(SCHEDULER_MAP.keys())] +"""A literal type representing the valid scheduler names.""" + +IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"] +"""A literal type for PIL image modes supported by Invoke""" diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index 1f9342985a..9eba3acdca 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -23,27 +23,24 @@ from controlnet_aux import ( ) from controlnet_aux.util import HWC3, ade_palette from PIL import Image -from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator +from pydantic import BaseModel, Field, field_validator, model_validator -from invokeai.app.invocations.primitives import ImageField, ImageOutput +from invokeai.app.invocations.fields import ( + FieldDescriptions, + ImageField, + Input, + InputField, + OutputField, + WithBoard, + WithMetadata, +) +from invokeai.app.invocations.primitives import ImageOutput from invokeai.app.invocations.util import validate_begin_end_step, validate_weights -from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin -from invokeai.app.shared.fields import FieldDescriptions +from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.image_util.depth_anything import DepthAnythingDetector from invokeai.backend.image_util.dw_openpose import DWOpenposeDetector -from ...backend.model_management import BaseModelType -from .baseinvocation import ( - BaseInvocation, - BaseInvocationOutput, - Input, - InputField, - InvocationContext, - OutputField, - WithMetadata, - invocation, - invocation_output, 
-) +from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output CONTROLNET_MODE_VALUES = Literal["balanced", "more_prompt", "more_control", "unbalanced"] CONTROLNET_RESIZE_VALUES = Literal[ @@ -57,10 +54,7 @@ CONTROLNET_RESIZE_VALUES = Literal[ class ControlNetModelField(BaseModel): """ControlNet model field""" - model_name: str = Field(description="Name of the ControlNet model") - base_model: BaseModelType = Field(description="Base model") - - model_config = ConfigDict(protected_namespaces=()) + key: str = Field(description="Model config record key for the ControlNet model") class ControlField(BaseModel): @@ -140,7 +134,7 @@ class ControlNetInvocation(BaseInvocation): # This invocation exists for other invocations to subclass it - do not register with @invocation! -class ImageProcessorInvocation(BaseInvocation, WithMetadata): +class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithBoard): """Base class for invocations that preprocess images for ControlNet""" image: ImageField = InputField(description="The image to process") @@ -149,23 +143,18 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata): # superclass just passes through image without processing return image + def load_image(self, context: InvocationContext) -> Image.Image: + # allows override for any special formatting specific to the preprocessor + return context.images.get_pil(self.image.image_name, "RGB") + def invoke(self, context: InvocationContext) -> ImageOutput: - raw_image = context.services.images.get_pil_image(self.image.image_name) + raw_image = self.load_image(context) # image type should be PIL.PngImagePlugin.PngImageFile ? processed_image = self.run_processor(raw_image) # currently can't see processed image in node UI without a showImage node, # so for now setting image_type to RESULT instead of INTERMEDIATE so will get saved in gallery - image_dto = context.services.images.create( - image=processed_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.CONTROL, - session_id=context.graph_execution_state_id, - node_id=self.id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=processed_image) """Builds an ImageOutput and its ImageField""" processed_image_field = ImageField(image_name=image_dto.image_name) @@ -184,7 +173,7 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata): title="Canny Processor", tags=["controlnet", "canny"], category="controlnet", - version="1.2.0", + version="1.2.1", ) class CannyImageProcessorInvocation(ImageProcessorInvocation): """Canny edge detection for ControlNet""" @@ -196,6 +185,10 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation): default=200, ge=0, le=255, description="The high threshold of the Canny pixel gradient (0-255)" ) + def load_image(self, context: InvocationContext) -> Image.Image: + # Keep alpha channel for Canny processing to detect edges of transparent areas + return context.images.get_pil(self.image.image_name, "RGBA") + def run_processor(self, image): canny_processor = CannyDetector() processed_image = canny_processor(image, self.low_threshold, self.high_threshold) @@ -207,7 +200,7 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation): title="HED (softedge) Processor", tags=["controlnet", "hed", "softedge"], category="controlnet", - version="1.2.0", + version="1.2.1", ) class HedImageProcessorInvocation(ImageProcessorInvocation): """Applies HED edge detection to 
image""" @@ -236,7 +229,7 @@ class HedImageProcessorInvocation(ImageProcessorInvocation): title="Lineart Processor", tags=["controlnet", "lineart"], category="controlnet", - version="1.2.0", + version="1.2.1", ) class LineartImageProcessorInvocation(ImageProcessorInvocation): """Applies line art processing to image""" @@ -258,7 +251,7 @@ class LineartImageProcessorInvocation(ImageProcessorInvocation): title="Lineart Anime Processor", tags=["controlnet", "lineart", "anime"], category="controlnet", - version="1.2.0", + version="1.2.1", ) class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation): """Applies line art anime processing to image""" @@ -281,7 +274,7 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation): title="Midas Depth Processor", tags=["controlnet", "midas"], category="controlnet", - version="1.2.0", + version="1.2.1", ) class MidasDepthImageProcessorInvocation(ImageProcessorInvocation): """Applies Midas depth processing to image""" @@ -308,7 +301,7 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation): title="Normal BAE Processor", tags=["controlnet"], category="controlnet", - version="1.2.0", + version="1.2.1", ) class NormalbaeImageProcessorInvocation(ImageProcessorInvocation): """Applies NormalBae processing to image""" @@ -325,7 +318,7 @@ class NormalbaeImageProcessorInvocation(ImageProcessorInvocation): @invocation( - "mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.0" + "mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.1" ) class MlsdImageProcessorInvocation(ImageProcessorInvocation): """Applies MLSD processing to image""" @@ -348,7 +341,7 @@ class MlsdImageProcessorInvocation(ImageProcessorInvocation): @invocation( - "pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.0" + "pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.1" ) class PidiImageProcessorInvocation(ImageProcessorInvocation): """Applies PIDI processing to image""" @@ -375,7 +368,7 @@ class PidiImageProcessorInvocation(ImageProcessorInvocation): title="Content Shuffle Processor", tags=["controlnet", "contentshuffle"], category="controlnet", - version="1.2.0", + version="1.2.1", ) class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation): """Applies content shuffle processing to image""" @@ -405,7 +398,7 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation): title="Zoe (Depth) Processor", tags=["controlnet", "zoe", "depth"], category="controlnet", - version="1.2.0", + version="1.2.1", ) class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation): """Applies Zoe depth processing to image""" @@ -421,7 +414,7 @@ class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation): title="Mediapipe Face Processor", tags=["controlnet", "mediapipe", "face"], category="controlnet", - version="1.2.0", + version="1.2.1", ) class MediapipeFaceProcessorInvocation(ImageProcessorInvocation): """Applies mediapipe face processing to image""" @@ -430,10 +423,6 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation): min_confidence: float = InputField(default=0.5, ge=0, le=1, description="Minimum confidence for face detection") def run_processor(self, image): - # MediaPipeFaceDetector throws an error if image has alpha channel - # so convert to RGB if needed - if image.mode == "RGBA": - 
image = image.convert("RGB") mediapipe_face_processor = MediapipeFaceDetector() processed_image = mediapipe_face_processor(image, max_faces=self.max_faces, min_confidence=self.min_confidence) return processed_image @@ -444,7 +433,7 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation): title="Leres (Depth) Processor", tags=["controlnet", "leres", "depth"], category="controlnet", - version="1.2.0", + version="1.2.1", ) class LeresImageProcessorInvocation(ImageProcessorInvocation): """Applies leres processing to image""" @@ -473,7 +462,7 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation): title="Tile Resample Processor", tags=["controlnet", "tile"], category="controlnet", - version="1.2.0", + version="1.2.1", ) class TileResamplerProcessorInvocation(ImageProcessorInvocation): """Tile resampler processor""" @@ -513,7 +502,7 @@ class TileResamplerProcessorInvocation(ImageProcessorInvocation): title="Segment Anything Processor", tags=["controlnet", "segmentanything"], category="controlnet", - version="1.2.0", + version="1.2.1", ) class SegmentAnythingProcessorInvocation(ImageProcessorInvocation): """Applies segment anything processing to image""" @@ -555,7 +544,7 @@ class SamDetectorReproducibleColors(SamDetector): title="Color Map Processor", tags=["controlnet"], category="controlnet", - version="1.2.0", + version="1.2.1", ) class ColorMapImageProcessorInvocation(ImageProcessorInvocation): """Generates a color map from the provided image""" @@ -563,7 +552,6 @@ class ColorMapImageProcessorInvocation(ImageProcessorInvocation): color_map_tile_size: int = InputField(default=64, ge=0, description=FieldDescriptions.tile_size) def run_processor(self, image: Image.Image): - image = image.convert("RGB") np_image = np.array(image, dtype=np.uint8) height, width = np_image.shape[:2] @@ -603,9 +591,6 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation): depth_anything_detector = DepthAnythingDetector() depth_anything_detector.load_model(model_size=self.model_size) - if image.mode == "RGBA": - image = image.convert("RGB") - processed_image = depth_anything_detector(image=image, resolution=self.resolution, offload=self.offload) return processed_image @@ -625,7 +610,7 @@ class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation): draw_hands: bool = InputField(default=False) image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res) - def run_processor(self, image): + def run_processor(self, image: Image.Image): dw_openpose = DWOpenposeDetector() processed_image = dw_openpose( image, diff --git a/invokeai/app/invocations/cv.py b/invokeai/app/invocations/cv.py index cb6828d21a..8174f19b64 100644 --- a/invokeai/app/invocations/cv.py +++ b/invokeai/app/invocations/cv.py @@ -5,22 +5,24 @@ import cv2 as cv import numpy from PIL import Image, ImageOps -from invokeai.app.invocations.primitives import ImageField, ImageOutput -from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin +from invokeai.app.invocations.fields import ImageField +from invokeai.app.invocations.primitives import ImageOutput +from invokeai.app.services.shared.invocation_context import InvocationContext -from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, invocation +from .baseinvocation import BaseInvocation, invocation +from .fields import InputField, WithBoard, WithMetadata -@invocation("cv_inpaint", title="OpenCV Inpaint", tags=["opencv", "inpaint"], category="inpaint", 
version="1.2.0") -class CvInpaintInvocation(BaseInvocation, WithMetadata): +@invocation("cv_inpaint", title="OpenCV Inpaint", tags=["opencv", "inpaint"], category="inpaint", version="1.2.1") +class CvInpaintInvocation(BaseInvocation, WithMetadata, WithBoard): """Simple inpaint using opencv.""" image: ImageField = InputField(description="The image to inpaint") mask: ImageField = InputField(description="The mask to use when inpainting") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) - mask = context.services.images.get_pil_image(self.mask.image_name) + image = context.images.get_pil(self.image.image_name) + mask = context.images.get_pil(self.mask.image_name) # Convert to cv image/mask # TODO: consider making these utility functions @@ -34,18 +36,6 @@ class CvInpaintInvocation(BaseInvocation, WithMetadata): # TODO: consider making a utility function image_inpainted = Image.fromarray(cv.cvtColor(cv_inpainted, cv.COLOR_BGR2RGB)) - image_dto = context.services.images.create( - image=image_inpainted, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - workflow=context.workflow, - ) + image_dto = context.images.save(image=image_inpainted) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) diff --git a/invokeai/app/invocations/facetools.py b/invokeai/app/invocations/facetools.py index e0c89b4de5..fed2ed5e4f 100644 --- a/invokeai/app/invocations/facetools.py +++ b/invokeai/app/invocations/facetools.py @@ -13,15 +13,13 @@ from pydantic import field_validator import invokeai.assets.fonts as font_assets from invokeai.app.invocations.baseinvocation import ( BaseInvocation, - InputField, - InvocationContext, - OutputField, - WithMetadata, invocation, invocation_output, ) -from invokeai.app.invocations.primitives import ImageField, ImageOutput -from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin +from invokeai.app.invocations.fields import ImageField, InputField, OutputField, WithBoard, WithMetadata +from invokeai.app.invocations.primitives import ImageOutput +from invokeai.app.services.image_records.image_records_common import ImageCategory +from invokeai.app.services.shared.invocation_context import InvocationContext @invocation_output("face_mask_output") @@ -306,37 +304,37 @@ def extract_face( # Adjust the crop boundaries to stay within the original image's dimensions if x_min < 0: - context.services.logger.warning("FaceTools --> -X-axis padding reached image edge.") + context.logger.warning("FaceTools --> -X-axis padding reached image edge.") x_max -= x_min x_min = 0 elif x_max > mask.width: - context.services.logger.warning("FaceTools --> +X-axis padding reached image edge.") + context.logger.warning("FaceTools --> +X-axis padding reached image edge.") x_min -= x_max - mask.width x_max = mask.width if y_min < 0: - context.services.logger.warning("FaceTools --> +Y-axis padding reached image edge.") + context.logger.warning("FaceTools --> +Y-axis padding reached image edge.") y_max -= y_min y_min = 0 elif y_max > mask.height: - context.services.logger.warning("FaceTools --> -Y-axis padding reached image edge.") + context.logger.warning("FaceTools --> -Y-axis padding reached image edge.") y_min -= y_max - mask.height y_max = 
mask.height # Ensure the crop is square and adjust the boundaries if needed if x_max - x_min != crop_size: - context.services.logger.warning("FaceTools --> Limiting x-axis padding to constrain bounding box to a square.") + context.logger.warning("FaceTools --> Limiting x-axis padding to constrain bounding box to a square.") diff = crop_size - (x_max - x_min) x_min -= diff // 2 x_max += diff - diff // 2 if y_max - y_min != crop_size: - context.services.logger.warning("FaceTools --> Limiting y-axis padding to constrain bounding box to a square.") + context.logger.warning("FaceTools --> Limiting y-axis padding to constrain bounding box to a square.") diff = crop_size - (y_max - y_min) y_min -= diff // 2 y_max += diff - diff // 2 - context.services.logger.info(f"FaceTools --> Calculated bounding box (8 multiple): {crop_size}") + context.logger.info(f"FaceTools --> Calculated bounding box (8 multiple): {crop_size}") # Crop the output image to the specified size with the center of the face mesh as the center. mask = mask.crop((x_min, y_min, x_max, y_max)) @@ -368,7 +366,7 @@ def get_faces_list( # Generate the face box mask and get the center of the face. if not should_chunk: - context.services.logger.info("FaceTools --> Attempting full image face detection.") + context.logger.info("FaceTools --> Attempting full image face detection.") result = generate_face_box_mask( context=context, minimum_confidence=minimum_confidence, @@ -380,7 +378,7 @@ def get_faces_list( draw_mesh=draw_mesh, ) if should_chunk or len(result) == 0: - context.services.logger.info("FaceTools --> Chunking image (chunk toggled on, or no face found in full image).") + context.logger.info("FaceTools --> Chunking image (chunk toggled on, or no face found in full image).") width, height = image.size image_chunks = [] x_offsets = [] @@ -399,7 +397,7 @@ def get_faces_list( x_offsets.append(x) y_offsets.append(0) fx += increment - context.services.logger.info(f"FaceTools --> Chunk starting at x = {x}") + context.logger.info(f"FaceTools --> Chunk starting at x = {x}") elif height > width: # Portrait - slice the image vertically fy = 0.0 @@ -411,10 +409,10 @@ def get_faces_list( x_offsets.append(0) y_offsets.append(y) fy += increment - context.services.logger.info(f"FaceTools --> Chunk starting at y = {y}") + context.logger.info(f"FaceTools --> Chunk starting at y = {y}") for idx in range(len(image_chunks)): - context.services.logger.info(f"FaceTools --> Evaluating faces in chunk {idx}") + context.logger.info(f"FaceTools --> Evaluating faces in chunk {idx}") result = result + generate_face_box_mask( context=context, minimum_confidence=minimum_confidence, @@ -428,7 +426,7 @@ def get_faces_list( if len(result) == 0: # Give up - context.services.logger.warning( + context.logger.warning( "FaceTools --> No face detected in chunked input image. Passing through original image." ) @@ -437,7 +435,7 @@ def get_faces_list( return all_faces -@invocation("face_off", title="FaceOff", tags=["image", "faceoff", "face", "mask"], category="image", version="1.2.0") +@invocation("face_off", title="FaceOff", tags=["image", "faceoff", "face", "mask"], category="image", version="1.2.1") class FaceOffInvocation(BaseInvocation, WithMetadata): """Bound, extract, and mask a face from an image using MediaPipe detection""" @@ -470,11 +468,11 @@ class FaceOffInvocation(BaseInvocation, WithMetadata): ) if len(all_faces) == 0: - context.services.logger.warning("FaceOff --> No faces detected. 
Passing through original image.") + context.logger.warning("FaceOff --> No faces detected. Passing through original image.") return None if self.face_id > len(all_faces) - 1: - context.services.logger.warning( + context.logger.warning( f"FaceOff --> Face ID {self.face_id} is outside of the number of faces detected ({len(all_faces)}). Passing through original image." ) return None @@ -486,7 +484,7 @@ class FaceOffInvocation(BaseInvocation, WithMetadata): return face_data def invoke(self, context: InvocationContext) -> FaceOffOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) result = self.faceoff(context=context, image=image) if result is None: @@ -500,24 +498,9 @@ class FaceOffInvocation(BaseInvocation, WithMetadata): x = result["x_min"] y = result["y_min"] - image_dto = context.services.images.create( - image=result_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - workflow=context.workflow, - ) + image_dto = context.images.save(image=result_image) - mask_dto = context.services.images.create( - image=result_mask, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.MASK, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - ) + mask_dto = context.images.save(image=result_mask, image_category=ImageCategory.MASK) output = FaceOffOutput( image=ImageField(image_name=image_dto.image_name), @@ -531,7 +514,7 @@ class FaceOffInvocation(BaseInvocation, WithMetadata): return output -@invocation("face_mask_detection", title="FaceMask", tags=["image", "face", "mask"], category="image", version="1.2.0") +@invocation("face_mask_detection", title="FaceMask", tags=["image", "face", "mask"], category="image", version="1.2.1") class FaceMaskInvocation(BaseInvocation, WithMetadata): """Face mask creation using mediapipe face detection""" @@ -580,7 +563,7 @@ class FaceMaskInvocation(BaseInvocation, WithMetadata): if len(intersected_face_ids) == 0: id_range_str = ",".join([str(id) for id in id_range]) - context.services.logger.warning( + context.logger.warning( f"Face IDs must be in range of detected faces - requested {self.face_ids}, detected {id_range_str}. Passing through original image." 
) return FaceMaskResult( @@ -616,27 +599,12 @@ class FaceMaskInvocation(BaseInvocation, WithMetadata): ) def invoke(self, context: InvocationContext) -> FaceMaskOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) result = self.facemask(context=context, image=image) - image_dto = context.services.images.create( - image=result["image"], - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - workflow=context.workflow, - ) + image_dto = context.images.save(image=result["image"]) - mask_dto = context.services.images.create( - image=result["mask"], - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.MASK, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - ) + mask_dto = context.images.save(image=result["mask"], image_category=ImageCategory.MASK) output = FaceMaskOutput( image=ImageField(image_name=image_dto.image_name), @@ -649,9 +617,9 @@ class FaceMaskInvocation(BaseInvocation, WithMetadata): @invocation( - "face_identifier", title="FaceIdentifier", tags=["image", "face", "identifier"], category="image", version="1.2.0" + "face_identifier", title="FaceIdentifier", tags=["image", "face", "identifier"], category="image", version="1.2.1" ) -class FaceIdentifierInvocation(BaseInvocation, WithMetadata): +class FaceIdentifierInvocation(BaseInvocation, WithMetadata, WithBoard): """Outputs an image with detected face IDs printed on each face. For use with other FaceTools.""" image: ImageField = InputField(description="Image to face detect") @@ -705,21 +673,9 @@ class FaceIdentifierInvocation(BaseInvocation, WithMetadata): return image def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) result_image = self.faceidentifier(context=context, image=image) - image_dto = context.services.images.create( - image=result_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - workflow=context.workflow, - ) + image_dto = context.images.save(image=result_image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) diff --git a/invokeai/app/invocations/fields.py b/invokeai/app/invocations/fields.py new file mode 100644 index 0000000000..63572b5151 --- /dev/null +++ b/invokeai/app/invocations/fields.py @@ -0,0 +1,576 @@ +from enum import Enum +from typing import Any, Callable, Optional, Tuple + +from pydantic import BaseModel, ConfigDict, Field, RootModel, TypeAdapter +from pydantic.fields import _Unset +from pydantic_core import PydanticUndefined + +from invokeai.app.util.metaenum import MetaEnum +from invokeai.backend.util.logging import InvokeAILogger + +logger = InvokeAILogger.get_logger() + + +class UIType(str, Enum, metaclass=MetaEnum): + """ + Type hints for the UI for situations in which the field type is not enough to infer the correct UI type. + + - Model Fields + The most common node-author-facing use will be for model fields. 
Internally, there is no difference + between SD-1, SD-2 and SDXL model fields - they all use the class `MainModelField`. To ensure the + base-model-specific UI is rendered, use e.g. `ui_type=UIType.SDXLMainModelField` to indicate that + the field is an SDXL main model field. + + - Any Field + We cannot infer the usage of `typing.Any` via schema parsing, so you *must* use `ui_type=UIType.Any` to + indicate that the field accepts any type. Use with caution. This cannot be used on outputs. + + - Scheduler Field + Special handling in the UI is needed for this field, which otherwise would be parsed as a plain enum field. + + - Internal Fields + Similar to the Any Field, the `collect` and `iterate` nodes make use of `typing.Any`. To facilitate + handling these types in the client, we use `UIType._Collection` and `UIType._CollectionItem`. These + should not be used by node authors. + + - DEPRECATED Fields + These types are deprecated and should not be used by node authors. A warning will be logged if one is + used, and the type will be ignored. They are included here for backwards compatibility. + """ + + # region Model Field Types + SDXLMainModel = "SDXLMainModelField" + SDXLRefinerModel = "SDXLRefinerModelField" + ONNXModel = "ONNXModelField" + VaeModel = "VAEModelField" + LoRAModel = "LoRAModelField" + ControlNetModel = "ControlNetModelField" + IPAdapterModel = "IPAdapterModelField" + # endregion + + # region Misc Field Types + Scheduler = "SchedulerField" + Any = "AnyField" + # endregion + + # region Internal Field Types + _Collection = "CollectionField" + _CollectionItem = "CollectionItemField" + # endregion + + # region DEPRECATED + Boolean = "DEPRECATED_Boolean" + Color = "DEPRECATED_Color" + Conditioning = "DEPRECATED_Conditioning" + Control = "DEPRECATED_Control" + Float = "DEPRECATED_Float" + Image = "DEPRECATED_Image" + Integer = "DEPRECATED_Integer" + Latents = "DEPRECATED_Latents" + String = "DEPRECATED_String" + BooleanCollection = "DEPRECATED_BooleanCollection" + ColorCollection = "DEPRECATED_ColorCollection" + ConditioningCollection = "DEPRECATED_ConditioningCollection" + ControlCollection = "DEPRECATED_ControlCollection" + FloatCollection = "DEPRECATED_FloatCollection" + ImageCollection = "DEPRECATED_ImageCollection" + IntegerCollection = "DEPRECATED_IntegerCollection" + LatentsCollection = "DEPRECATED_LatentsCollection" + StringCollection = "DEPRECATED_StringCollection" + BooleanPolymorphic = "DEPRECATED_BooleanPolymorphic" + ColorPolymorphic = "DEPRECATED_ColorPolymorphic" + ConditioningPolymorphic = "DEPRECATED_ConditioningPolymorphic" + ControlPolymorphic = "DEPRECATED_ControlPolymorphic" + FloatPolymorphic = "DEPRECATED_FloatPolymorphic" + ImagePolymorphic = "DEPRECATED_ImagePolymorphic" + IntegerPolymorphic = "DEPRECATED_IntegerPolymorphic" + LatentsPolymorphic = "DEPRECATED_LatentsPolymorphic" + StringPolymorphic = "DEPRECATED_StringPolymorphic" + MainModel = "DEPRECATED_MainModel" + UNet = "DEPRECATED_UNet" + Vae = "DEPRECATED_Vae" + CLIP = "DEPRECATED_CLIP" + Collection = "DEPRECATED_Collection" + CollectionItem = "DEPRECATED_CollectionItem" + Enum = "DEPRECATED_Enum" + WorkflowField = "DEPRECATED_WorkflowField" + IsIntermediate = "DEPRECATED_IsIntermediate" + BoardField = "DEPRECATED_BoardField" + MetadataItem = "DEPRECATED_MetadataItem" + MetadataItemCollection = "DEPRECATED_MetadataItemCollection" + MetadataItemPolymorphic = "DEPRECATED_MetadataItemPolymorphic" + MetadataDict = "DEPRECATED_MetadataDict" + + +class UIComponent(str, Enum, metaclass=MetaEnum): + """ + 
The type of UI component to use for a field, used to override the default components, which are + inferred from the field type. + """ + + None_ = "none" + Textarea = "textarea" + Slider = "slider" + + +class FieldDescriptions: + denoising_start = "When to start denoising, expressed as a percentage of total steps" + denoising_end = "When to stop denoising, expressed as a percentage of total steps" + cfg_scale = "Classifier-Free Guidance scale" + cfg_rescale_multiplier = "Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR" + scheduler = "Scheduler to use during inference" + positive_cond = "Positive conditioning tensor" + negative_cond = "Negative conditioning tensor" + noise = "Noise tensor" + clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count" + unet = "UNet (scheduler, LoRAs)" + vae = "VAE" + cond = "Conditioning tensor" + controlnet_model = "ControlNet model to load" + vae_model = "VAE model to load" + lora_model = "LoRA model to load" + main_model = "Main model (UNet, VAE, CLIP) to load" + sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load" + sdxl_refiner_model = "SDXL Refiner Main Model (UNet, VAE, CLIP2) to load" + onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load" + lora_weight = "The weight at which the LoRA is applied to each model" + compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor" + raw_prompt = "Raw prompt text (no parsing)" + sdxl_aesthetic = "The aesthetic score to apply to the conditioning tensor" + skipped_layers = "Number of layers to skip in text encoder" + seed = "Seed for random number generation" + steps = "Number of steps to run" + width = "Width of output (px)" + height = "Height of output (px)" + control = "ControlNet(s) to apply" + ip_adapter = "IP-Adapter to apply" + t2i_adapter = "T2I-Adapter(s) to apply" + denoised_latents = "Denoised latents tensor" + latents = "Latents tensor" + strength = "Strength of denoising (proportional to steps)" + metadata = "Optional metadata to be saved with the image" + metadata_collection = "Collection of Metadata" + metadata_item_polymorphic = "A single metadata item or collection of metadata items" + metadata_item_label = "Label for this metadata item" + metadata_item_value = "The value for this metadata item (may be any type)" + workflow = "Optional workflow to be saved with the image" + interp_mode = "Interpolation mode" + torch_antialias = "Whether or not to apply antialiasing (bilinear or bicubic only)" + fp32 = "Whether or not to use full float32 precision" + precision = "Precision to use" + tiled = "Processing using overlapping tiles (reduces memory consumption)" + detect_res = "Pixel resolution for detection" + image_res = "Pixel resolution for output image" + safe_mode = "Whether or not to use safe mode" + scribble_mode = "Whether or not to use scribble mode" + scale_factor = "The factor by which to scale" + blend_alpha = ( + "Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B." + ) + num_1 = "The first number" + num_2 = "The second number" + mask = "The mask to use for the operation" + board = "The board to save the image to" + image = "The image to process" + tile_size = "Tile size" + inclusive_low = "The inclusive low value" + exclusive_high = "The exclusive high value" + decimal_places = "The number of decimal places to round to" + freeu_s1 = 'Scaling factor for stage 1 to attenuate the contributions of the skip features.
This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.' + freeu_s2 = 'Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.' + freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features." + freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features." + + +class ImageField(BaseModel): + """An image primitive field""" + + image_name: str = Field(description="The name of the image") + + +class BoardField(BaseModel): + """A board primitive field""" + + board_id: str = Field(description="The id of the board") + + +class MaskField(BaseModel): + """A mask primitive field.""" + + mask_name: str = Field(description="The name of the mask.") + + +class DenoiseMaskField(BaseModel): + """An inpaint mask field""" + + mask_name: str = Field(description="The name of the mask image") + masked_latents_name: Optional[str] = Field(default=None, description="The name of the masked image latents") + gradient: bool = Field(default=False, description="Used for gradient inpainting") + + +class LatentsField(BaseModel): + """A latents tensor primitive field""" + + latents_name: str = Field(description="The name of the latents") + seed: Optional[int] = Field(default=None, description="Seed used to generate this latents") + + +class ColorField(BaseModel): + """A color primitive field""" + + r: int = Field(ge=0, le=255, description="The red component") + g: int = Field(ge=0, le=255, description="The green component") + b: int = Field(ge=0, le=255, description="The blue component") + a: int = Field(ge=0, le=255, description="The alpha component") + + def tuple(self) -> Tuple[int, int, int, int]: + return (self.r, self.g, self.b, self.a) + + +class ConditioningField(BaseModel): + """A conditioning tensor primitive value""" + + conditioning_name: str = Field(description="The name of conditioning tensor") + mask: Optional[MaskField] = Field( + default=None, + description="The bool mask associated with this conditioning tensor. Excluded regions should be set to False, " + "included regions should be set to True.", + ) + + +class MetadataField(RootModel): + """ + Pydantic model for metadata with custom root of type dict[str, Any]. + Metadata is stored without a strict schema. + """ + + root: dict[str, Any] = Field(description="The metadata") + + +MetadataFieldValidator = TypeAdapter(MetadataField) + + +class Input(str, Enum, metaclass=MetaEnum): + """ + The type of input a field accepts. + - `Input.Direct`: The field must have its value provided directly, when the invocation and field \ + are instantiated. + - `Input.Connection`: The field must have its value provided by a connection. + - `Input.Any`: The field may have its value provided either directly or by a connection. + """ + + Connection = "connection" + Direct = "direct" + Any = "any" + + +class FieldKind(str, Enum, metaclass=MetaEnum): + """ + The kind of field. + - `Input`: An input field on a node. + - `Output`: An output field on a node. + - `Internal`: A field which is treated as an input, but cannot be used in node definitions. Metadata is + one example. It is provided to nodes via the WithMetadata class, and we want to reserve the field name + "metadata" for this on all nodes. `FieldKind` is used to short-circuit the field name validation logic, + allowing "metadata" for that field. + - `NodeAttribute`: The field is a node attribute. 
These are fields which are not inputs or outputs, + but which are used to store information about the node. For example, the `id` and `type` fields are node + attributes. + + The presence of this in `json_schema_extra["field_kind"]` is used when initializing node schemas on app + startup, and when generating the OpenAPI schema for the workflow editor. + """ + + Input = "input" + Output = "output" + Internal = "internal" + NodeAttribute = "node_attribute" + + +class InputFieldJSONSchemaExtra(BaseModel): + """ + Extra attributes to be added to input fields and their OpenAPI schema. Used during graph execution, + and by the workflow editor during schema parsing and UI rendering. + """ + + input: Input + orig_required: bool + field_kind: FieldKind + default: Optional[Any] = None + orig_default: Optional[Any] = None + ui_hidden: bool = False + ui_type: Optional[UIType] = None + ui_component: Optional[UIComponent] = None + ui_order: Optional[int] = None + ui_choice_labels: Optional[dict[str, str]] = None + + model_config = ConfigDict( + validate_assignment=True, + json_schema_serialization_defaults_required=True, + ) + + +class WithMetadata(BaseModel): + """ + Inherit from this class if your node needs a metadata input field. + """ + + metadata: Optional[MetadataField] = Field( + default=None, + description=FieldDescriptions.metadata, + json_schema_extra=InputFieldJSONSchemaExtra( + field_kind=FieldKind.Internal, + input=Input.Connection, + orig_required=False, + ).model_dump(exclude_none=True), + ) + + +class WithWorkflow: + workflow = None + + def __init_subclass__(cls) -> None: + logger.warning( + f"{cls.__module__.split('.')[0]}.{cls.__name__}: WithWorkflow is deprecated. Use `context.workflow` to access the workflow." + ) + super().__init_subclass__() + + +class WithBoard(BaseModel): + """ + Inherit from this class if your node needs a board input field. + """ + + board: Optional[BoardField] = Field( + default=None, + description=FieldDescriptions.board, + json_schema_extra=InputFieldJSONSchemaExtra( + field_kind=FieldKind.Internal, + input=Input.Direct, + orig_required=False, + ).model_dump(exclude_none=True), + ) + + +class OutputFieldJSONSchemaExtra(BaseModel): + """ + Extra attributes to be added to output fields and their OpenAPI schema. Used by the workflow editor + during schema parsing and UI rendering. + """ + + field_kind: FieldKind + ui_hidden: bool + ui_type: Optional[UIType] + ui_order: Optional[int] + + model_config = ConfigDict( + validate_assignment=True, + json_schema_serialization_defaults_required=True, + ) + + +def InputField( + # copied from pydantic's Field + # TODO: Can we support default_factory? + default: Any = _Unset, + default_factory: Callable[[], Any] | None = _Unset, + title: str | None = _Unset, + description: str | None = _Unset, + pattern: str | None = _Unset, + strict: bool | None = _Unset, + gt: float | None = _Unset, + ge: float | None = _Unset, + lt: float | None = _Unset, + le: float | None = _Unset, + multiple_of: float | None = _Unset, + allow_inf_nan: bool | None = _Unset, + max_digits: int | None = _Unset, + decimal_places: int | None = _Unset, + min_length: int | None = _Unset, + max_length: int | None = _Unset, + # custom + input: Input = Input.Any, + ui_type: Optional[UIType] = None, + ui_component: Optional[UIComponent] = None, + ui_hidden: bool = False, + ui_order: Optional[int] = None, + ui_choice_labels: Optional[dict[str, str]] = None, +) -> Any: + """ + Creates an input field for an invocation.
+ + This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/latest/api/fields/#pydantic.fields.Field) \ + that adds a few extra parameters to support graph execution and the node editor UI. + + :param Input input: [Input.Any] The kind of input this field requires. \ + `Input.Direct` means a value must be provided on instantiation. \ + `Input.Connection` means the value must be provided by a connection. \ + `Input.Any` means either will do. + + :param UIType ui_type: [None] Optionally provides an extra type hint for the UI. \ + In some situations, the field's type is not enough to infer the correct UI type. \ + For example, model selection fields should render a dropdown UI component to select a model. \ + Internally, there is no difference between SD-1, SD-2 and SDXL model fields, they all use \ + `MainModelField`. So to ensure the base-model-specific UI is rendered, you can use \ + `UIType.SDXLMainModelField` to indicate that the field is an SDXL main model field. + + :param UIComponent ui_component: [None] Optionally specifies a specific component to use in the UI. \ + The UI will always render a suitable component, but sometimes you want something different than the default. \ + For example, a `string` field will default to a single-line input, but you may want a multi-line textarea instead. \ + For this case, you could provide `UIComponent.Textarea`. + + :param bool ui_hidden: [False] Specifies whether or not this field should be hidden in the UI. + + :param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. + + :param dict[str, str] ui_choice_labels: [None] Specifies the labels to use for the choices in an enum field. + """ + + json_schema_extra_ = InputFieldJSONSchemaExtra( + input=input, + ui_type=ui_type, + ui_component=ui_component, + ui_hidden=ui_hidden, + ui_order=ui_order, + ui_choice_labels=ui_choice_labels, + field_kind=FieldKind.Input, + orig_required=True, + ) + + """ + There is a conflict between the typing of invocation definitions and the typing of an invocation's + `invoke()` function. + + On instantiation of a node, the invocation definition is used to create the python class. At this time, + any number of fields may be optional, because they may be provided by connections. + + On calling of `invoke()`, however, those fields may be required. + + For example, consider a ResizeImageInvocation with an `image: ImageField` field. + + `image` is required during the call to `invoke()`, but when the python class is instantiated, + the field may not be present. This is fine, because that image field will be provided by a + connection from an ancestor node, which outputs an image. + + This means we want to type the `image` field as optional for the node class definition, but required + for the `invoke()` function. + + If we use `typing.Optional` in the node class definition, the field will be typed as optional in the + `invoke()` method, and we'll have to do a lot of runtime checks to ensure the field is present - or + any static type analysis tools will complain. + + To get around this, in node class definitions, we type all fields correctly for the `invoke()` function, + but secretly make them optional in `InputField()`. We also store the original required bool and/or default + value. When we call `invoke()`, we use this stored information to do an additional check on the class.
+ """ + + if default_factory is not _Unset and default_factory is not None: + default = default_factory() + logger.warn('"default_factory" is not supported, calling it now to set "default"') + + # These are the args we may wish pass to the pydantic `Field()` function + field_args = { + "default": default, + "title": title, + "description": description, + "pattern": pattern, + "strict": strict, + "gt": gt, + "ge": ge, + "lt": lt, + "le": le, + "multiple_of": multiple_of, + "allow_inf_nan": allow_inf_nan, + "max_digits": max_digits, + "decimal_places": decimal_places, + "min_length": min_length, + "max_length": max_length, + } + + # We only want to pass the args that were provided, otherwise the `Field()`` function won't work as expected + provided_args = {k: v for (k, v) in field_args.items() if v is not PydanticUndefined} + + # Because we are manually making fields optional, we need to store the original required bool for reference later + json_schema_extra_.orig_required = default is PydanticUndefined + + # Make Input.Any and Input.Connection fields optional, providing None as a default if the field doesn't already have one + if input is Input.Any or input is Input.Connection: + default_ = None if default is PydanticUndefined else default + provided_args.update({"default": default_}) + if default is not PydanticUndefined: + # Before invoking, we'll check for the original default value and set it on the field if the field has no value + json_schema_extra_.default = default + json_schema_extra_.orig_default = default + elif default is not PydanticUndefined: + default_ = default + provided_args.update({"default": default_}) + json_schema_extra_.orig_default = default_ + + return Field( + **provided_args, + json_schema_extra=json_schema_extra_.model_dump(exclude_none=True), + ) + + +def OutputField( + # copied from pydantic's Field + default: Any = _Unset, + title: str | None = _Unset, + description: str | None = _Unset, + pattern: str | None = _Unset, + strict: bool | None = _Unset, + gt: float | None = _Unset, + ge: float | None = _Unset, + lt: float | None = _Unset, + le: float | None = _Unset, + multiple_of: float | None = _Unset, + allow_inf_nan: bool | None = _Unset, + max_digits: int | None = _Unset, + decimal_places: int | None = _Unset, + min_length: int | None = _Unset, + max_length: int | None = _Unset, + # custom + ui_type: Optional[UIType] = None, + ui_hidden: bool = False, + ui_order: Optional[int] = None, +) -> Any: + """ + Creates an output field for an invocation output. + + This is a wrapper for Pydantic's [Field](https://docs.pydantic.dev/1.10/usage/schema/#field-customization) \ + that adds a few extra parameters to support graph execution and the node editor UI. + + :param UIType ui_type: [None] Optionally provides an extra type hint for the UI. \ + In some situations, the field's type is not enough to infer the correct UI type. \ + For example, model selection fields should render a dropdown UI component to select a model. \ + Internally, there is no difference between SD-1, SD-2 and SDXL model fields, they all use \ + `MainModelField`. So to ensure the base-model-specific UI is rendered, you can use \ + `UIType.SDXLMainModelField` to indicate that the field is an SDXL main model field. + + :param bool ui_hidden: [False] Specifies whether or not this field should be hidden in the UI. \ + + :param int ui_order: [None] Specifies the order in which this field should be rendered in the UI. 
\ + """ + return Field( + default=default, + title=title, + description=description, + pattern=pattern, + strict=strict, + gt=gt, + ge=ge, + lt=lt, + le=le, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + min_length=min_length, + max_length=max_length, + json_schema_extra=OutputFieldJSONSchemaExtra( + ui_type=ui_type, + ui_hidden=ui_hidden, + ui_order=ui_order, + field_kind=FieldKind.Output, + ).model_dump(exclude_none=True), + ) diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py index f729d60cdd..7755f4cc69 100644 --- a/invokeai/app/invocations/image.py +++ b/invokeai/app/invocations/image.py @@ -7,33 +7,33 @@ import cv2 import numpy from PIL import Image, ImageChops, ImageFilter, ImageOps -from invokeai.app.invocations.primitives import BoardField, ColorField, ImageField, ImageOutput -from invokeai.app.services.image_records.image_records_common import ImageCategory, ImageRecordChanges, ResourceOrigin -from invokeai.app.shared.fields import FieldDescriptions +from invokeai.app.invocations.constants import IMAGE_MODES +from invokeai.app.invocations.fields import ( + ColorField, + FieldDescriptions, + ImageField, + InputField, + WithBoard, + WithMetadata, +) +from invokeai.app.invocations.primitives import ImageOutput +from invokeai.app.services.image_records.image_records_common import ImageCategory +from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark from invokeai.backend.image_util.safety_checker import SafetyChecker -from .baseinvocation import ( - BaseInvocation, - Classification, - Input, - InputField, - InvocationContext, - WithMetadata, - invocation, -) +from .baseinvocation import BaseInvocation, Classification, invocation -@invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.0") +@invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.1") class ShowImageInvocation(BaseInvocation): """Displays a provided image using the OS image viewer, and passes it forward in the pipeline.""" image: ImageField = InputField(description="The image to show") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) - if image: - image.show() + image = context.images.get_pil(self.image.image_name) + image.show() # TODO: how to handle failure? 
@@ -49,9 +49,9 @@ class ShowImageInvocation(BaseInvocation): title="Blank Image", tags=["image"], category="image", - version="1.2.0", + version="1.2.1", ) -class BlankImageInvocation(BaseInvocation, WithMetadata): +class BlankImageInvocation(BaseInvocation, WithMetadata, WithBoard): """Creates a blank image and forwards it to the pipeline""" width: int = InputField(default=512, description="The width of the image") @@ -62,22 +62,9 @@ class BlankImageInvocation(BaseInvocation, WithMetadata): def invoke(self, context: InvocationContext) -> ImageOutput: image = Image.new(mode=self.mode, size=(self.width, self.height), color=self.color.tuple()) - image_dto = context.services.images.create( - image=image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -85,9 +72,9 @@ class BlankImageInvocation(BaseInvocation, WithMetadata): title="Crop Image", tags=["image", "crop"], category="image", - version="1.2.0", + version="1.2.1", ) -class ImageCropInvocation(BaseInvocation, WithMetadata): +class ImageCropInvocation(BaseInvocation, WithMetadata, WithBoard): """Crops an image to a specified box. The box can be outside of the image.""" image: ImageField = InputField(description="The image to crop") @@ -97,27 +84,14 @@ class ImageCropInvocation(BaseInvocation, WithMetadata): height: int = InputField(default=512, gt=0, description="The height of the crop rectangle") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) image_crop = Image.new(mode="RGBA", size=(self.width, self.height), color=(0, 0, 0, 0)) image_crop.paste(image, (-self.x, -self.y)) - image_dto = context.services.images.create( - image=image_crop, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=image_crop) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -149,7 +123,7 @@ class CenterPadCropInvocation(BaseInvocation): ) def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) # Calculate and create new image dimensions new_width = image.width + self.right + self.left @@ -159,20 +133,9 @@ class CenterPadCropInvocation(BaseInvocation): # Paste new image onto input image_crop.paste(image, (self.left, self.top)) - image_dto = context.services.images.create( - image=image_crop, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - ) + image_dto = context.images.save(image=image_crop) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - 
width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -180,9 +143,9 @@ class CenterPadCropInvocation(BaseInvocation): title="Paste Image", tags=["image", "paste"], category="image", - version="1.2.0", + version="1.2.1", ) -class ImagePasteInvocation(BaseInvocation, WithMetadata): +class ImagePasteInvocation(BaseInvocation, WithMetadata, WithBoard): """Pastes an image into another image.""" base_image: ImageField = InputField(description="The base image") @@ -196,11 +159,11 @@ class ImagePasteInvocation(BaseInvocation, WithMetadata): crop: bool = InputField(default=False, description="Crop to base image dimensions") def invoke(self, context: InvocationContext) -> ImageOutput: - base_image = context.services.images.get_pil_image(self.base_image.image_name) - image = context.services.images.get_pil_image(self.image.image_name) + base_image = context.images.get_pil(self.base_image.image_name) + image = context.images.get_pil(self.image.image_name) mask = None if self.mask is not None: - mask = context.services.images.get_pil_image(self.mask.image_name) + mask = context.images.get_pil(self.mask.image_name) mask = ImageOps.invert(mask.convert("L")) # TODO: probably shouldn't invert mask here... should user be required to do it? @@ -217,22 +180,9 @@ class ImagePasteInvocation(BaseInvocation, WithMetadata): base_w, base_h = base_image.size new_image = new_image.crop((abs(min_x), abs(min_y), abs(min_x) + base_w, abs(min_y) + base_h)) - image_dto = context.services.images.create( - image=new_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=new_image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -240,37 +190,24 @@ class ImagePasteInvocation(BaseInvocation, WithMetadata): title="Mask from Alpha", tags=["image", "mask"], category="image", - version="1.2.0", + version="1.2.1", ) -class MaskFromAlphaInvocation(BaseInvocation, WithMetadata): +class MaskFromAlphaInvocation(BaseInvocation, WithMetadata, WithBoard): """Extracts the alpha channel of an image as a mask.""" image: ImageField = InputField(description="The image to create the mask from") invert: bool = InputField(default=False, description="Whether or not to invert the mask") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) image_mask = image.split()[-1] if self.invert: image_mask = ImageOps.invert(image_mask) - image_dto = context.services.images.create( - image=image_mask, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.MASK, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=image_mask, image_category=ImageCategory.MASK) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -278,36 +215,23 @@ class MaskFromAlphaInvocation(BaseInvocation, WithMetadata): title="Multiply Images", 
tags=["image", "multiply"], category="image", - version="1.2.0", + version="1.2.1", ) -class ImageMultiplyInvocation(BaseInvocation, WithMetadata): +class ImageMultiplyInvocation(BaseInvocation, WithMetadata, WithBoard): """Multiplies two images together using `PIL.ImageChops.multiply()`.""" image1: ImageField = InputField(description="The first image to multiply") image2: ImageField = InputField(description="The second image to multiply") def invoke(self, context: InvocationContext) -> ImageOutput: - image1 = context.services.images.get_pil_image(self.image1.image_name) - image2 = context.services.images.get_pil_image(self.image2.image_name) + image1 = context.images.get_pil(self.image1.image_name) + image2 = context.images.get_pil(self.image2.image_name) multiply_image = ImageChops.multiply(image1, image2) - image_dto = context.services.images.create( - image=multiply_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=multiply_image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) IMAGE_CHANNELS = Literal["A", "R", "G", "B"] @@ -318,38 +242,22 @@ IMAGE_CHANNELS = Literal["A", "R", "G", "B"] title="Extract Image Channel", tags=["image", "channel"], category="image", - version="1.2.0", + version="1.2.1", ) -class ImageChannelInvocation(BaseInvocation, WithMetadata): +class ImageChannelInvocation(BaseInvocation, WithMetadata, WithBoard): """Gets a channel from an image.""" image: ImageField = InputField(description="The image to get the channel from") channel: IMAGE_CHANNELS = InputField(default="A", description="The channel to get") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) channel_image = image.getchannel(self.channel) - image_dto = context.services.images.create( - image=channel_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=channel_image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) - - -IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"] + return ImageOutput.build(image_dto) @invocation( @@ -357,35 +265,22 @@ IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F title="Convert Image Mode", tags=["image", "convert"], category="image", - version="1.2.0", + version="1.2.1", ) -class ImageConvertInvocation(BaseInvocation, WithMetadata): +class ImageConvertInvocation(BaseInvocation, WithMetadata, WithBoard): """Converts an image to a different mode.""" image: ImageField = InputField(description="The image to convert") mode: IMAGE_MODES = InputField(default="L", description="The mode to convert to") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) converted_image = 
image.convert(self.mode) - image_dto = context.services.images.create( - image=converted_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=converted_image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -393,9 +288,9 @@ class ImageConvertInvocation(BaseInvocation, WithMetadata): title="Blur Image", tags=["image", "blur"], category="image", - version="1.2.0", + version="1.2.1", ) -class ImageBlurInvocation(BaseInvocation, WithMetadata): +class ImageBlurInvocation(BaseInvocation, WithMetadata, WithBoard): """Blurs an image""" image: ImageField = InputField(description="The image to blur") @@ -404,29 +299,16 @@ class ImageBlurInvocation(BaseInvocation, WithMetadata): blur_type: Literal["gaussian", "box"] = InputField(default="gaussian", description="The type of blur") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) blur = ( ImageFilter.GaussianBlur(self.radius) if self.blur_type == "gaussian" else ImageFilter.BoxBlur(self.radius) ) blur_image = image.filter(blur) - image_dto = context.services.images.create( - image=blur_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=blur_image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -434,10 +316,10 @@ class ImageBlurInvocation(BaseInvocation, WithMetadata): title="Unsharp Mask", tags=["image", "unsharp_mask"], category="image", - version="1.2.0", + version="1.2.1", classification=Classification.Beta, ) -class UnsharpMaskInvocation(BaseInvocation, WithMetadata): +class UnsharpMaskInvocation(BaseInvocation, WithMetadata, WithBoard): """Applies an unsharp mask filter to an image""" image: ImageField = InputField(description="The image to use") @@ -451,7 +333,7 @@ class UnsharpMaskInvocation(BaseInvocation, WithMetadata): return numpy.array(img) / 255 def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) mode = image.mode alpha_channel = image.getchannel("A") if mode == "RGBA" else None @@ -469,16 +351,7 @@ class UnsharpMaskInvocation(BaseInvocation, WithMetadata): if alpha_channel is not None: image.putalpha(alpha_channel) - image_dto = context.services.images.create( - image=image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=image) return ImageOutput( image=ImageField(image_name=image_dto.image_name), @@ -512,9 +385,9 @@ PIL_RESAMPLING_MAP = { title="Resize Image", tags=["image", "resize"], 
category="image", - version="1.2.0", + version="1.2.1", ) -class ImageResizeInvocation(BaseInvocation, WithMetadata): +class ImageResizeInvocation(BaseInvocation, WithMetadata, WithBoard): """Resizes an image to specific dimensions""" image: ImageField = InputField(description="The image to resize") @@ -523,7 +396,7 @@ class ImageResizeInvocation(BaseInvocation, WithMetadata): resample_mode: PIL_RESAMPLING_MODES = InputField(default="bicubic", description="The resampling mode") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) resample_mode = PIL_RESAMPLING_MAP[self.resample_mode] @@ -532,22 +405,9 @@ class ImageResizeInvocation(BaseInvocation, WithMetadata): resample=resample_mode, ) - image_dto = context.services.images.create( - image=resize_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=resize_image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -555,9 +415,9 @@ class ImageResizeInvocation(BaseInvocation, WithMetadata): title="Scale Image", tags=["image", "scale"], category="image", - version="1.2.0", + version="1.2.1", ) -class ImageScaleInvocation(BaseInvocation, WithMetadata): +class ImageScaleInvocation(BaseInvocation, WithMetadata, WithBoard): """Scales an image by a factor""" image: ImageField = InputField(description="The image to scale") @@ -569,7 +429,7 @@ class ImageScaleInvocation(BaseInvocation, WithMetadata): resample_mode: PIL_RESAMPLING_MODES = InputField(default="bicubic", description="The resampling mode") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) resample_mode = PIL_RESAMPLING_MAP[self.resample_mode] width = int(image.width * self.scale_factor) @@ -580,22 +440,9 @@ class ImageScaleInvocation(BaseInvocation, WithMetadata): resample=resample_mode, ) - image_dto = context.services.images.create( - image=resize_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=resize_image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -603,9 +450,9 @@ class ImageScaleInvocation(BaseInvocation, WithMetadata): title="Lerp Image", tags=["image", "lerp"], category="image", - version="1.2.0", + version="1.2.1", ) -class ImageLerpInvocation(BaseInvocation, WithMetadata): +class ImageLerpInvocation(BaseInvocation, WithMetadata, WithBoard): """Linear interpolation of all pixels of an image""" image: ImageField = InputField(description="The image to lerp") @@ -613,29 +460,16 @@ class ImageLerpInvocation(BaseInvocation, WithMetadata): max: int = InputField(default=255, ge=0, le=255, description="The maximum output value") def invoke(self, context: InvocationContext) -> 
ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) image_arr = numpy.asarray(image, dtype=numpy.float32) / 255 image_arr = image_arr * (self.max - self.min) + self.min lerp_image = Image.fromarray(numpy.uint8(image_arr)) - image_dto = context.services.images.create( - image=lerp_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=lerp_image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -643,9 +477,9 @@ class ImageLerpInvocation(BaseInvocation, WithMetadata): title="Inverse Lerp Image", tags=["image", "ilerp"], category="image", - version="1.2.0", + version="1.2.1", ) -class ImageInverseLerpInvocation(BaseInvocation, WithMetadata): +class ImageInverseLerpInvocation(BaseInvocation, WithMetadata, WithBoard): """Inverse linear interpolation of all pixels of an image""" image: ImageField = InputField(description="The image to lerp") @@ -653,29 +487,16 @@ class ImageInverseLerpInvocation(BaseInvocation, WithMetadata): max: int = InputField(default=255, ge=0, le=255, description="The maximum input value") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) image_arr = numpy.asarray(image, dtype=numpy.float32) image_arr = numpy.minimum(numpy.maximum(image_arr - self.min, 0) / float(self.max - self.min), 1) * 255 # type: ignore [assignment] ilerp_image = Image.fromarray(numpy.uint8(image_arr)) - image_dto = context.services.images.create( - image=ilerp_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=ilerp_image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -683,17 +504,17 @@ class ImageInverseLerpInvocation(BaseInvocation, WithMetadata): title="Blur NSFW Image", tags=["image", "nsfw"], category="image", - version="1.2.0", + version="1.2.1", ) -class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata): +class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithBoard): """Add blur to NSFW-flagged images""" image: ImageField = InputField(description="The image to check") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) - logger = context.services.logger + logger = context.logger logger.debug("Running NSFW checker") if SafetyChecker.has_nsfw_concept(image): logger.info("A potentially NSFW image has been detected. 
Image will be blurred.") @@ -702,22 +523,9 @@ class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata): blurry_image.paste(caution, (0, 0), caution) image = blurry_image - image_dto = context.services.images.create( - image=image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) def _get_caution_img(self) -> Image.Image: import invokeai.app.assets.images as image_assets @@ -731,33 +539,20 @@ class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata): title="Add Invisible Watermark", tags=["image", "watermark"], category="image", - version="1.2.0", + version="1.2.1", ) -class ImageWatermarkInvocation(BaseInvocation, WithMetadata): +class ImageWatermarkInvocation(BaseInvocation, WithMetadata, WithBoard): """Add an invisible watermark to an image""" image: ImageField = InputField(description="The image to check") text: str = InputField(default="InvokeAI", description="Watermark text") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) new_image = InvisibleWatermark.add_watermark(image, self.text) - image_dto = context.services.images.create( - image=new_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=new_image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -765,9 +560,9 @@ class ImageWatermarkInvocation(BaseInvocation, WithMetadata): title="Mask Edge", tags=["image", "mask", "inpaint"], category="image", - version="1.2.0", + version="1.2.1", ) -class MaskEdgeInvocation(BaseInvocation, WithMetadata): +class MaskEdgeInvocation(BaseInvocation, WithMetadata, WithBoard): """Applies an edge mask to an image""" image: ImageField = InputField(description="The image to apply the mask to") @@ -779,7 +574,7 @@ class MaskEdgeInvocation(BaseInvocation, WithMetadata): ) def invoke(self, context: InvocationContext) -> ImageOutput: - mask = context.services.images.get_pil_image(self.image.image_name).convert("L") + mask = context.images.get_pil(self.image.image_name).convert("L") npimg = numpy.asarray(mask, dtype=numpy.uint8) npgradient = numpy.uint8(255 * (1.0 - numpy.floor(numpy.abs(0.5 - numpy.float32(npimg) / 255.0) * 2.0))) @@ -794,22 +589,9 @@ class MaskEdgeInvocation(BaseInvocation, WithMetadata): new_mask = ImageOps.invert(new_mask) - image_dto = context.services.images.create( - image=new_mask, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.MASK, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=new_mask, image_category=ImageCategory.MASK) - return ImageOutput( - 
image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -817,36 +599,23 @@ class MaskEdgeInvocation(BaseInvocation, WithMetadata): title="Combine Masks", tags=["image", "mask", "multiply"], category="image", - version="1.2.0", + version="1.2.1", ) -class MaskCombineInvocation(BaseInvocation, WithMetadata): +class MaskCombineInvocation(BaseInvocation, WithMetadata, WithBoard): """Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`.""" mask1: ImageField = InputField(description="The first mask to combine") mask2: ImageField = InputField(description="The second image to combine") def invoke(self, context: InvocationContext) -> ImageOutput: - mask1 = context.services.images.get_pil_image(self.mask1.image_name).convert("L") - mask2 = context.services.images.get_pil_image(self.mask2.image_name).convert("L") + mask1 = context.images.get_pil(self.mask1.image_name).convert("L") + mask2 = context.images.get_pil(self.mask2.image_name).convert("L") combined_mask = ImageChops.multiply(mask1, mask2) - image_dto = context.services.images.create( - image=combined_mask, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=combined_mask, image_category=ImageCategory.MASK) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -854,9 +623,9 @@ class MaskCombineInvocation(BaseInvocation, WithMetadata): title="Color Correct", tags=["image", "color"], category="image", - version="1.2.0", + version="1.2.1", ) -class ColorCorrectInvocation(BaseInvocation, WithMetadata): +class ColorCorrectInvocation(BaseInvocation, WithMetadata, WithBoard): """ Shifts the colors of a target image to match the reference image, optionally using a mask to only color-correct certain regions of the target image. 
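Two conventions from the hunks above are worth calling out. First, saves that produce masks now pass `image_category=ImageCategory.MASK`; everything else relies on the context's defaults. Second, every node class gains the `WithBoard` mixin from the new `fields.py`, which contributes the optional `board` input, so nodes no longer declare a `board` field themselves (the Save Image hunk below drops exactly that field). A minimal sketch, assuming `context.images.save` picks the destination board up from the mixin field (the node id and class name are hypothetical):

```python
from PIL import Image

from invokeai.app.invocations.fields import InputField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext

from .baseinvocation import BaseInvocation, invocation


@invocation("solid_gray", title="Solid Gray", tags=["image"], category="image", version="1.0.0")
class SolidGrayInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Outputs a solid gray image (hypothetical example node)."""

    width: int = InputField(default=512, gt=0, description="The width of the image")
    height: int = InputField(default=512, gt=0, description="The height of the image")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = Image.new(mode="RGB", size=(self.width, self.height), color=(127, 127, 127))
        # No board_id argument: the `board` input arrives via the WithBoard mixin,
        # and the context is assumed to route the saved image to it.
        image_dto = context.images.save(image=image)
        return ImageOutput.build(image_dto)
```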
@@ -870,11 +639,11 @@ class ColorCorrectInvocation(BaseInvocation, WithMetadata): def invoke(self, context: InvocationContext) -> ImageOutput: pil_init_mask = None if self.mask is not None: - pil_init_mask = context.services.images.get_pil_image(self.mask.image_name).convert("L") + pil_init_mask = context.images.get_pil(self.mask.image_name).convert("L") - init_image = context.services.images.get_pil_image(self.reference.image_name) + init_image = context.images.get_pil(self.reference.image_name) - result = context.services.images.get_pil_image(self.image.image_name).convert("RGBA") + result = context.images.get_pil(self.image.image_name).convert("RGBA") # if init_image is None or init_mask is None: # return result @@ -948,22 +717,9 @@ class ColorCorrectInvocation(BaseInvocation, WithMetadata): # Paste original on color-corrected generation (using blurred mask) matched_result.paste(init_image, (0, 0), mask=multiplied_blurred_init_mask) - image_dto = context.services.images.create( - image=matched_result, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=matched_result) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -971,16 +727,16 @@ class ColorCorrectInvocation(BaseInvocation, WithMetadata): title="Adjust Image Hue", tags=["image", "hue"], category="image", - version="1.2.0", + version="1.2.1", ) -class ImageHueAdjustmentInvocation(BaseInvocation, WithMetadata): +class ImageHueAdjustmentInvocation(BaseInvocation, WithMetadata, WithBoard): """Adjusts the Hue of an image.""" image: ImageField = InputField(description="The image to adjust") hue: int = InputField(default=0, description="The degrees by which to rotate the hue, 0-360") def invoke(self, context: InvocationContext) -> ImageOutput: - pil_image = context.services.images.get_pil_image(self.image.image_name) + pil_image = context.images.get_pil(self.image.image_name) # Convert image to HSV color space hsv_image = numpy.array(pil_image.convert("HSV")) @@ -994,24 +750,9 @@ class ImageHueAdjustmentInvocation(BaseInvocation, WithMetadata): # Convert back to PIL format and to original color mode pil_image = Image.fromarray(hsv_image, mode="HSV").convert("RGBA") - image_dto = context.services.images.create( - image=pil_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - is_intermediate=self.is_intermediate, - session_id=context.graph_execution_state_id, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=pil_image) - return ImageOutput( - image=ImageField( - image_name=image_dto.image_name, - ), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) COLOR_CHANNELS = Literal[ @@ -1075,9 +816,9 @@ CHANNEL_FORMATS = { "value", ], category="image", - version="1.2.0", + version="1.2.1", ) -class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata): +class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard): """Add or subtract a value from a specific color channel of an image.""" image: ImageField = InputField(description="The image to adjust") @@ -1085,7 +826,7 @@ class ImageChannelOffsetInvocation(BaseInvocation, 
WithMetadata): offset: int = InputField(default=0, ge=-255, le=255, description="The amount to adjust the channel by") def invoke(self, context: InvocationContext) -> ImageOutput: - pil_image = context.services.images.get_pil_image(self.image.image_name) + pil_image = context.images.get_pil(self.image.image_name) # extract the channel and mode from the input and reference tuple mode = CHANNEL_FORMATS[self.channel][0] @@ -1104,24 +845,9 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata): # Convert back to RGBA format and output pil_image = Image.fromarray(converted_image.astype(numpy.uint8), mode=mode).convert("RGBA") - image_dto = context.services.images.create( - image=pil_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - is_intermediate=self.is_intermediate, - session_id=context.graph_execution_state_id, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=pil_image) - return ImageOutput( - image=ImageField( - image_name=image_dto.image_name, - ), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -1146,9 +872,9 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata): "value", ], category="image", - version="1.2.0", + version="1.2.1", ) -class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata): +class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata, WithBoard): """Scale a specific color channel of an image.""" image: ImageField = InputField(description="The image to adjust") @@ -1157,7 +883,7 @@ class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata): invert_channel: bool = InputField(default=False, description="Invert the channel after scaling") def invoke(self, context: InvocationContext) -> ImageOutput: - pil_image = context.services.images.get_pil_image(self.image.image_name) + pil_image = context.images.get_pil(self.image.image_name) # extract the channel and mode from the input and reference tuple mode = CHANNEL_FORMATS[self.channel][0] @@ -1180,24 +906,9 @@ class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata): # Convert back to RGBA format and output pil_image = Image.fromarray(converted_image.astype(numpy.uint8), mode=mode).convert("RGBA") - image_dto = context.services.images.create( - image=pil_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - is_intermediate=self.is_intermediate, - session_id=context.graph_execution_state_id, - workflow=context.workflow, - metadata=self.metadata, - ) + image_dto = context.images.save(image=pil_image) - return ImageOutput( - image=ImageField( - image_name=image_dto.image_name, - ), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( @@ -1205,64 +916,54 @@ class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata): title="Save Image", tags=["primitives", "image"], category="primitives", - version="1.2.0", + version="1.2.1", use_cache=False, ) -class SaveImageInvocation(BaseInvocation, WithMetadata): +class SaveImageInvocation(BaseInvocation, WithMetadata, WithBoard): """Saves an image. 
Unlike an image primitive, this invocation stores a copy of the image.""" image: ImageField = InputField(description=FieldDescriptions.image) - board: BoardField = InputField(default=None, description=FieldDescriptions.board, input=Input.Direct) def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) - image_dto = context.services.images.create( - image=image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - board_id=self.board.board_id if self.board else None, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( - "linear_ui_output", - title="Linear UI Image Output", - tags=["primitives", "image"], - category="primitives", - version="1.0.1", - use_cache=False, + "canvas_paste_back", + title="Canvas Paste Back", + tags=["image", "combine"], + category="image", + version="1.0.0", ) -class LinearUIOutputInvocation(BaseInvocation, WithMetadata): - """Handles Linear UI Image Outputting tasks.""" +class CanvasPasteBackInvocation(BaseInvocation, WithMetadata, WithBoard): + """Combines two images by using the mask provided. Intended for use on the Unified Canvas.""" - image: ImageField = InputField(description=FieldDescriptions.image) - board: Optional[BoardField] = InputField(default=None, description=FieldDescriptions.board, input=Input.Direct) + source_image: ImageField = InputField(description="The source image") + target_image: ImageField = InputField(description="The target image") + mask: ImageField = InputField( + description="The mask to use when pasting", + ) + mask_blur: int = InputField(default=0, ge=0, description="The amount to blur the mask by") + + def _prepare_mask(self, mask: Image.Image) -> Image.Image: + mask_array = numpy.array(mask) + kernel = numpy.ones((self.mask_blur, self.mask_blur), numpy.uint8) + eroded_mask_array = cv2.erode(mask_array, kernel, iterations=3) + eroded_mask = Image.fromarray(eroded_mask_array) + if self.mask_blur > 0: + mask = eroded_mask.filter(ImageFilter.GaussianBlur(self.mask_blur)) + return ImageOps.invert(mask.convert("L")) def invoke(self, context: InvocationContext) -> ImageOutput: - image_dto = context.services.images.get_dto(self.image.image_name) + source_image = context.images.get_pil(self.source_image.image_name) + target_image = context.images.get_pil(self.target_image.image_name) + mask = self._prepare_mask(context.images.get_pil(self.mask.image_name)) - if self.board: - context.services.board_images.add_image_to_board(self.board.board_id, self.image.image_name) + source_image.paste(target_image, (0, 0), mask) - if image_dto.is_intermediate != self.is_intermediate: - context.services.images.update( - self.image.image_name, changes=ImageRecordChanges(is_intermediate=self.is_intermediate) - ) - - return ImageOutput( - image=ImageField(image_name=self.image.image_name), - width=image_dto.width, - height=image_dto.height, - ) + image_dto = context.images.save(image=source_image) + return ImageOutput.build(image_dto) diff --git a/invokeai/app/invocations/infill.py b/invokeai/app/invocations/infill.py index c3d00bb133..53f6f4732f
100644 --- a/invokeai/app/invocations/infill.py +++ b/invokeai/app/invocations/infill.py @@ -6,14 +6,16 @@ from typing import Literal, Optional, get_args import numpy as np from PIL import Image, ImageOps -from invokeai.app.invocations.primitives import ColorField, ImageField, ImageOutput -from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin +from invokeai.app.invocations.fields import ColorField, ImageField +from invokeai.app.invocations.primitives import ImageOutput +from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.util.misc import SEED_MAX from invokeai.backend.image_util.cv2_inpaint import cv2_inpaint from invokeai.backend.image_util.lama import LaMA from invokeai.backend.image_util.patchmatch import PatchMatch -from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, invocation +from .baseinvocation import BaseInvocation, invocation +from .fields import InputField, WithBoard, WithMetadata from .image import PIL_RESAMPLING_MAP, PIL_RESAMPLING_MODES @@ -118,8 +120,8 @@ def tile_fill_missing(im: Image.Image, tile_size: int = 16, seed: Optional[int] return si -@invocation("infill_rgba", title="Solid Color Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.0") -class InfillColorInvocation(BaseInvocation, WithMetadata): +@invocation("infill_rgba", title="Solid Color Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.1") +class InfillColorInvocation(BaseInvocation, WithMetadata, WithBoard): """Infills transparent areas of an image with a solid color""" image: ImageField = InputField(description="The image to infill") @@ -129,33 +131,20 @@ class InfillColorInvocation(BaseInvocation, WithMetadata): ) def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) solid_bg = Image.new("RGBA", image.size, self.color.tuple()) infilled = Image.alpha_composite(solid_bg, image.convert("RGBA")) infilled.paste(image, (0, 0), image.split()[-1]) - image_dto = context.services.images.create( - image=infilled, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=infilled) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) -@invocation("infill_tile", title="Tile Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.1") -class InfillTileInvocation(BaseInvocation, WithMetadata): +@invocation("infill_tile", title="Tile Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.2") +class InfillTileInvocation(BaseInvocation, WithMetadata, WithBoard): """Infills transparent areas of an image with tiles of the image""" image: ImageField = InputField(description="The image to infill") @@ -168,33 +157,20 @@ class InfillTileInvocation(BaseInvocation, WithMetadata): ) def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) infilled = tile_fill_missing(image.copy(), seed=self.seed, tile_size=self.tile_size) infilled.paste(image, (0, 0), 
image.split()[-1]) - image_dto = context.services.images.create( - image=infilled, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=infilled) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) @invocation( - "infill_patchmatch", title="PatchMatch Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.0" + "infill_patchmatch", title="PatchMatch Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.1" ) -class InfillPatchMatchInvocation(BaseInvocation, WithMetadata): +class InfillPatchMatchInvocation(BaseInvocation, WithMetadata, WithBoard): """Infills transparent areas of an image using the PatchMatch algorithm""" image: ImageField = InputField(description="The image to infill") @@ -202,7 +178,7 @@ class InfillPatchMatchInvocation(BaseInvocation, WithMetadata): resample_mode: PIL_RESAMPLING_MODES = InputField(default="bicubic", description="The resampling mode") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name).convert("RGBA") + image = context.images.get_pil(self.image.image_name).convert("RGBA") resample_mode = PIL_RESAMPLING_MAP[self.resample_mode] @@ -227,77 +203,38 @@ class InfillPatchMatchInvocation(BaseInvocation, WithMetadata): infilled.paste(image, (0, 0), mask=image.split()[-1]) # image.paste(infilled, (0, 0), mask=image.split()[-1]) - image_dto = context.services.images.create( - image=infilled, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=infilled) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) -@invocation("infill_lama", title="LaMa Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.0") -class LaMaInfillInvocation(BaseInvocation, WithMetadata): +@invocation("infill_lama", title="LaMa Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.1") +class LaMaInfillInvocation(BaseInvocation, WithMetadata, WithBoard): """Infills transparent areas of an image using the LaMa model""" image: ImageField = InputField(description="The image to infill") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) infilled = infill_lama(image.copy()) - image_dto = context.services.images.create( - image=infilled, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=infilled) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) -@invocation("infill_cv2", title="CV2 Infill", 
tags=["image", "inpaint"], category="inpaint", version="1.2.0") -class CV2InfillInvocation(BaseInvocation, WithMetadata): +@invocation("infill_cv2", title="CV2 Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.1") +class CV2InfillInvocation(BaseInvocation, WithMetadata, WithBoard): """Infills transparent areas of an image using OpenCV Inpainting""" image: ImageField = InputField(description="The image to infill") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) infilled = infill_cv2(image.copy()) - image_dto = context.services.images.create( - image=infilled, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=infilled) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) diff --git a/invokeai/app/invocations/ip_adapter.py b/invokeai/app/invocations/ip_adapter.py index 6bd2889624..bebdc29b86 100644 --- a/invokeai/app/invocations/ip_adapter.py +++ b/invokeai/app/invocations/ip_adapter.py @@ -1,38 +1,29 @@ -import os from builtins import float from typing import List, Union -from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator +from pydantic import BaseModel, Field, field_validator, model_validator +from typing_extensions import Self from invokeai.app.invocations.baseinvocation import ( BaseInvocation, BaseInvocationOutput, - Input, - InputField, - InvocationContext, - OutputField, invocation, invocation_output, ) +from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField from invokeai.app.invocations.primitives import ImageField from invokeai.app.invocations.util import validate_begin_end_step, validate_weights -from invokeai.app.shared.fields import FieldDescriptions -from invokeai.backend.model_management.models.base import BaseModelType, ModelType -from invokeai.backend.model_management.models.ip_adapter import get_ip_adapter_image_encoder_model_id +from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.model_manager.config import BaseModelType, ModelType +# LS: Consider moving these two classes into model.py class IPAdapterModelField(BaseModel): - model_name: str = Field(description="Name of the IP-Adapter model") - base_model: BaseModelType = Field(description="Base model") - - model_config = ConfigDict(protected_namespaces=()) + key: str = Field(description="Key to the IP-Adapter model") class CLIPVisionModelField(BaseModel): - model_name: str = Field(description="Name of the CLIP Vision image encoder model") - base_model: BaseModelType = Field(description="Base model (usually 'Any')") - - model_config = ConfigDict(protected_namespaces=()) + key: str = Field(description="Key to the CLIP Vision image encoder model") class IPAdapterField(BaseModel): @@ -49,12 +40,12 @@ class IPAdapterField(BaseModel): @field_validator("weight") @classmethod - def validate_ip_adapter_weight(cls, v): + def validate_ip_adapter_weight(cls, v: float) -> float: validate_weights(v) return v @model_validator(mode="after") - def validate_begin_end_step_percent(self): + def 
validate_begin_end_step_percent(self) -> Self:
         validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
         return self

@@ -65,7 +56,7 @@ class IPAdapterOutput(BaseInvocationOutput):
     ip_adapter: IPAdapterField = OutputField(description=FieldDescriptions.ip_adapter, title="IP-Adapter")

-@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.1.1")
+@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.1.2")
 class IPAdapterInvocation(BaseInvocation):
     """Collects IP-Adapter info to pass to other nodes."""

@@ -87,33 +78,25 @@ class IPAdapterInvocation(BaseInvocation):
     @field_validator("weight")
     @classmethod
-    def validate_ip_adapter_weight(cls, v):
+    def validate_ip_adapter_weight(cls, v: float) -> float:
         validate_weights(v)
         return v

     @model_validator(mode="after")
-    def validate_begin_end_step_percent(self):
+    def validate_begin_end_step_percent(self) -> Self:
         validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
         return self

     def invoke(self, context: InvocationContext) -> IPAdapterOutput:
         # Lookup the CLIP Vision encoder that is intended to be used with the IP-Adapter model.
-        ip_adapter_info = context.services.model_manager.model_info(
-            self.ip_adapter_model.model_name, self.ip_adapter_model.base_model, ModelType.IPAdapter
-        )
-        # HACK(ryand): This is bad for a couple of reasons: 1) we are bypassing the model manager to read the model
-        # directly, and 2) we are reading from disk every time this invocation is called without caching the result.
-        # A better solution would be to store the image encoder model reference in the IP-Adapter model info, but this
-        # is currently messy due to differences between how the model info is generated when installing a model from
-        # disk vs. downloading the model.
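The replacement below does what the removed HACK comment asks for: the CLIP Vision encoder reference now lives on the IP-Adapter's stored config record, so nothing is re-read from disk per invocation. A minimal sketch of the name derivation the new lookup relies on, assuming a HuggingFace-style "owner/name" id (the helper name is illustrative, not part of the change):

    def image_encoder_name_from_id(image_encoder_model_id: str) -> str:
        # e.g. "InvokeAI/ip_adapter_sd_image_encoder" -> "ip_adapter_sd_image_encoder";
        # only the name part is needed for the search against installed model records.
        return image_encoder_model_id.split("/")[-1].strip()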
- image_encoder_model_id = get_ip_adapter_image_encoder_model_id( - os.path.join(context.services.configuration.get_config().models_path, ip_adapter_info["path"]) - ) + ip_adapter_info = context.models.get_config(self.ip_adapter_model.key) + image_encoder_model_id = ip_adapter_info.image_encoder_model_id image_encoder_model_name = image_encoder_model_id.split("/")[-1].strip() - image_encoder_model = CLIPVisionModelField( - model_name=image_encoder_model_name, - base_model=BaseModelType.Any, + image_encoder_models = context.models.search_by_attrs( + name=image_encoder_model_name, base=BaseModelType.Any, type=ModelType.CLIPVision ) + assert len(image_encoder_models) == 1 + image_encoder_model = CLIPVisionModelField(key=image_encoder_models[0].key) return IPAdapterOutput( ip_adapter=IPAdapterField( image=self.image, diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 5dc0aea927..f414fb7770 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -3,14 +3,16 @@ import inspect import math from contextlib import ExitStack from functools import singledispatchmethod -from typing import List, Literal, Optional, Union +from typing import Any, Iterator, List, Literal, Optional, Tuple, Union import einops import numpy as np +import numpy.typing as npt import torch import torchvision import torchvision.transforms as T from diffusers import AutoencoderKL, AutoencoderTiny +from diffusers.configuration_utils import ConfigMixin from diffusers.image_processor import VaeImageProcessor from diffusers.models.adapter import T2IAdapter from diffusers.models.attention_processor import ( @@ -19,28 +21,42 @@ from diffusers.models.attention_processor import ( LoRAXFormersAttnProcessor, XFormersAttnProcessor, ) +from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel from diffusers.schedulers import DPMSolverSDEScheduler from diffusers.schedulers import SchedulerMixin as Scheduler +from PIL import Image, ImageFilter from pydantic import field_validator from torchvision.transforms.functional import resize as tv_resize +from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR, SCHEDULER_NAME_VALUES +from invokeai.app.invocations.fields import ( + ConditioningField, + DenoiseMaskField, + FieldDescriptions, + ImageField, + Input, + InputField, + LatentsField, + OutputField, + UIType, + WithBoard, + WithMetadata, +) from invokeai.app.invocations.ip_adapter import IPAdapterField from invokeai.app.invocations.primitives import ( - DenoiseMaskField, DenoiseMaskOutput, - ImageField, ImageOutput, - LatentsField, LatentsOutput, - build_latents_output, ) from invokeai.app.invocations.t2i_adapter import T2IAdapterField -from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin -from invokeai.app.shared.fields import FieldDescriptions +from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.util.controlnet_utils import prepare_control_image from invokeai.app.util.step_callback import stable_diffusion_step_callback from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus -from invokeai.backend.model_management.models import ModelType, SilenceWarnings +from invokeai.backend.lora import LoRAModelRaw +from invokeai.backend.model_manager import BaseModelType, LoadedModel +from invokeai.backend.model_patcher import ModelPatcher +from invokeai.backend.stable_diffusion import PipelineIntermediateState, set_seamless from 
invokeai.backend.stable_diffusion.diffusion.conditioning_data import ( BasicConditioningInfo, IPAdapterConditioningInfo, @@ -49,11 +65,8 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ( TextConditioningData, TextConditioningRegions, ) +from invokeai.backend.util.silence_warnings import SilenceWarnings -from ...backend.model_management.lora import ModelPatcher -from ...backend.model_management.models import BaseModelType -from ...backend.model_management.seamless import set_seamless -from ...backend.stable_diffusion import PipelineIntermediateState from ...backend.stable_diffusion.diffusers_pipeline import ( ControlNetData, IPAdapterData, @@ -66,16 +79,9 @@ from ...backend.util.devices import choose_precision, choose_torch_device from .baseinvocation import ( BaseInvocation, BaseInvocationOutput, - Input, - InputField, - InvocationContext, - OutputField, - UIType, - WithMetadata, invocation, invocation_output, ) -from .compel import ConditioningField from .controlnet_image_processors import ControlField from .model import ModelInfo, UNetField, VaeField @@ -84,18 +90,10 @@ if choose_torch_device() == torch.device("mps"): DEFAULT_PRECISION = choose_precision(choose_torch_device()) -SAMPLER_NAME_VALUES = Literal[tuple(SCHEDULER_MAP.keys())] - -# HACK: Many nodes are currently hard-coded to use a fixed latent scale factor of 8. This is fragile, and will need to -# be addressed if future models use a different latent scale factor. Also, note that there may be places where the scale -# factor is hard-coded to a literal '8' rather than using this constant. -# The ratio of image:latent dimensions is LATENT_SCALE_FACTOR:1, or 8:1. -LATENT_SCALE_FACTOR = 8 - @invocation_output("scheduler_output") class SchedulerOutput(BaseInvocationOutput): - scheduler: SAMPLER_NAME_VALUES = OutputField(description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler) + scheduler: SCHEDULER_NAME_VALUES = OutputField(description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler) @invocation( @@ -108,7 +106,7 @@ class SchedulerOutput(BaseInvocationOutput): class SchedulerInvocation(BaseInvocation): """Selects a scheduler.""" - scheduler: SAMPLER_NAME_VALUES = InputField( + scheduler: SCHEDULER_NAME_VALUES = InputField( default="euler", description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler, @@ -123,7 +121,7 @@ class SchedulerInvocation(BaseInvocation): title="Create Denoise Mask", tags=["mask", "denoise"], category="latents", - version="1.0.0", + version="1.0.1", ) class CreateDenoiseMaskInvocation(BaseInvocation): """Creates mask for denoising model run.""" @@ -138,10 +136,10 @@ class CreateDenoiseMaskInvocation(BaseInvocation): ui_order=4, ) - def prep_mask_tensor(self, mask_image): + def prep_mask_tensor(self, mask_image: Image.Image) -> torch.Tensor: if mask_image.mode != "L": mask_image = mask_image.convert("L") - mask_tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False) + mask_tensor: torch.Tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False) if mask_tensor.dim() == 3: mask_tensor = mask_tensor.unsqueeze(0) # if shape is not None: @@ -151,41 +149,90 @@ class CreateDenoiseMaskInvocation(BaseInvocation): @torch.no_grad() def invoke(self, context: InvocationContext) -> DenoiseMaskOutput: if self.image is not None: - image = context.services.images.get_pil_image(self.image.image_name) - image = image_resized_to_grid_as_tensor(image.convert("RGB")) - if image.dim() == 3: - image = image.unsqueeze(0) + image = 
context.images.get_pil(self.image.image_name) + image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB")) + if image_tensor.dim() == 3: + image_tensor = image_tensor.unsqueeze(0) else: - image = None + image_tensor = None mask = self.prep_mask_tensor( - context.services.images.get_pil_image(self.mask.image_name), + context.images.get_pil(self.mask.image_name), ) - if image is not None: - vae_info = context.services.model_manager.get_model( - **self.vae.vae.model_dump(), - context=context, - ) + if image_tensor is not None: + vae_info = context.models.load(**self.vae.vae.model_dump()) - img_mask = tv_resize(mask, image.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False) - masked_image = image * torch.where(img_mask < 0.5, 0.0, 1.0) + img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False) + masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0) # TODO: masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone()) - masked_latents_name = f"{context.graph_execution_state_id}__{self.id}_masked_latents" - context.services.latents.save(masked_latents_name, masked_latents) + masked_latents_name = context.tensors.save(tensor=masked_latents) else: masked_latents_name = None - mask_name = f"{context.graph_execution_state_id}__{self.id}_mask" - context.services.latents.save(mask_name, mask) + mask_name = context.tensors.save(tensor=mask) - return DenoiseMaskOutput( - denoise_mask=DenoiseMaskField( - mask_name=mask_name, - masked_latents_name=masked_latents_name, - ), + return DenoiseMaskOutput.build( + mask_name=mask_name, + masked_latents_name=masked_latents_name, + gradient=False, + ) + + +@invocation( + "create_gradient_mask", + title="Create Gradient Mask", + tags=["mask", "denoise"], + category="latents", + version="1.0.0", +) +class CreateGradientMaskInvocation(BaseInvocation): + """Creates mask for denoising model run.""" + + mask: ImageField = InputField(default=None, description="Image which will be masked", ui_order=1) + edge_radius: int = InputField( + default=16, ge=0, description="How far to blur/expand the edges of the mask", ui_order=2 + ) + coherence_mode: Literal["Gaussian Blur", "Box Blur", "Staged"] = InputField(default="Gaussian Blur", ui_order=3) + minimum_denoise: float = InputField( + default=0.0, ge=0, le=1, description="Minimum denoise level for the coherence region", ui_order=4 + ) + + @torch.no_grad() + def invoke(self, context: InvocationContext) -> DenoiseMaskOutput: + mask_image = context.images.get_pil(self.mask.image_name, mode="L") + if self.coherence_mode == "Box Blur": + blur_mask = mask_image.filter(ImageFilter.BoxBlur(self.edge_radius)) + else: # Gaussian Blur OR Staged + # Gaussian Blur uses standard deviation. 
1/2 radius is a good approximation + blur_mask = mask_image.filter(ImageFilter.GaussianBlur(self.edge_radius / 2)) + + mask_tensor: torch.Tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False) + blur_tensor: torch.Tensor = image_resized_to_grid_as_tensor(blur_mask, normalize=False) + + # redistribute blur so that the edges are 0 and blur out to 1 + blur_tensor = (blur_tensor - 0.5) * 2 + + threshold = 1 - self.minimum_denoise + + if self.coherence_mode == "Staged": + # wherever the blur_tensor is masked to any degree, convert it to threshold + blur_tensor = torch.where((blur_tensor < 1), threshold, blur_tensor) + else: + # wherever the blur_tensor is above threshold but less than 1, drop it to threshold + blur_tensor = torch.where((blur_tensor > threshold) & (blur_tensor < 1), threshold, blur_tensor) + + # multiply original mask to force actually masked regions to 0 + blur_tensor = mask_tensor * blur_tensor + + mask_name = context.tensors.save(tensor=blur_tensor.unsqueeze(1)) + + return DenoiseMaskOutput.build( + mask_name=mask_name, + masked_latents_name=None, + gradient=True, ) @@ -196,10 +243,7 @@ def get_scheduler( seed: int, ) -> Scheduler: scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP["ddim"]) - orig_scheduler_info = context.services.model_manager.get_model( - **scheduler_info.model_dump(), - context=context, - ) + orig_scheduler_info = context.models.load(**scheduler_info.model_dump()) with orig_scheduler_info as orig_scheduler: scheduler_config = orig_scheduler.config @@ -207,7 +251,7 @@ def get_scheduler( scheduler_config = scheduler_config["_backup"] scheduler_config = { **scheduler_config, - **scheduler_extra_config, + **scheduler_extra_config, # FIXME "_backup": scheduler_config, } @@ -220,6 +264,7 @@ def get_scheduler( # hack copied over from generate.py if not hasattr(scheduler, "uses_inpainting_model"): scheduler.uses_inpainting_model = lambda: False + assert isinstance(scheduler, Scheduler) return scheduler @@ -228,7 +273,7 @@ def get_scheduler( title="Denoise Latents", tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"], category="latents", - version="1.5.1", + version="1.5.2", ) class DenoiseLatentsInvocation(BaseInvocation): """Denoises noisy latents to decodable images""" @@ -256,7 +301,7 @@ class DenoiseLatentsInvocation(BaseInvocation): description=FieldDescriptions.denoising_start, ) denoising_end: float = InputField(default=1.0, ge=0, le=1, description=FieldDescriptions.denoising_end) - scheduler: SAMPLER_NAME_VALUES = InputField( + scheduler: SCHEDULER_NAME_VALUES = InputField( default="euler", description=FieldDescriptions.scheduler, ui_type=UIType.Scheduler, @@ -303,7 +348,7 @@ class DenoiseLatentsInvocation(BaseInvocation): ) @field_validator("cfg_scale") - def ge_one(cls, v): + def ge_one(cls, v: Union[List[float], float]) -> Union[List[float], float]: """validate that all cfg_scale values are >= 1""" if isinstance(v, list): for i in v: @@ -346,12 +391,12 @@ class DenoiseLatentsInvocation(BaseInvocation): text_embeddings: Union[list[BasicConditioningInfo], list[SDXLConditioningInfo]] = [] text_embeddings_masks: list[Optional[torch.Tensor]] = [] for cond in cond_list: - cond_data = context.services.latents.get(cond.conditioning_name) + cond_data = context.conditioning.load(cond.conditioning_name) text_embeddings.append(cond_data.conditionings[0].to(device=device, dtype=dtype)) mask = cond.mask if mask is not None: - mask = context.services.latents.get(mask.mask_name) + mask = 
context.tensors.load(mask.mask_name) text_embeddings_masks.append(mask) return text_embeddings, text_embeddings_masks @@ -494,13 +539,12 @@ class DenoiseLatentsInvocation(BaseInvocation): guidance_scale=self.cfg_scale, guidance_rescale_multiplier=self.cfg_rescale_multiplier, ) - return conditioning_data def create_pipeline( self, - unet, - scheduler, + unet: UNet2DConditionModel, + scheduler: Scheduler, ) -> StableDiffusionGeneratorPipeline: # TODO: # configure_model_padding( @@ -511,10 +555,10 @@ class DenoiseLatentsInvocation(BaseInvocation): class FakeVae: class FakeVaeConfig: - def __init__(self): + def __init__(self) -> None: self.block_out_channels = [0] - def __init__(self): + def __init__(self) -> None: self.config = FakeVae.FakeVaeConfig() return StableDiffusionGeneratorPipeline( @@ -531,11 +575,11 @@ class DenoiseLatentsInvocation(BaseInvocation): def prep_control_data( self, context: InvocationContext, - control_input: Union[ControlField, List[ControlField]], + control_input: Optional[Union[ControlField, List[ControlField]]], latents_shape: List[int], exit_stack: ExitStack, do_classifier_free_guidance: bool = True, - ) -> List[ControlNetData]: + ) -> Optional[List[ControlNetData]]: # Assuming fixed dimensional scaling of LATENT_SCALE_FACTOR. control_height_resize = latents_shape[2] * LATENT_SCALE_FACTOR control_width_resize = latents_shape[3] * LATENT_SCALE_FACTOR @@ -557,18 +601,11 @@ class DenoiseLatentsInvocation(BaseInvocation): # and if weight is None, populate with default 1.0? controlnet_data = [] for control_info in control_list: - control_model = exit_stack.enter_context( - context.services.model_manager.get_model( - model_name=control_info.control_model.model_name, - model_type=ModelType.ControlNet, - base_model=control_info.control_model.base_model, - context=context, - ) - ) + control_model = exit_stack.enter_context(context.models.load(key=control_info.control_model.key)) # control_models.append(control_model) control_image_field = control_info.image - input_image = context.services.images.get_pil_image(control_image_field.image_name) + input_image = context.images.get_pil(control_image_field.image_name) # self.image.image_type, self.image.image_name # FIXME: still need to test with different widths, heights, devices, dtypes # and add in batch_size, num_images_per_prompt? @@ -624,27 +661,17 @@ class DenoiseLatentsInvocation(BaseInvocation): ip_adapter_data_list = [] for single_ip_adapter in ip_adapter: ip_adapter_model: Union[IPAdapter, IPAdapterPlus] = exit_stack.enter_context( - context.services.model_manager.get_model( - model_name=single_ip_adapter.ip_adapter_model.model_name, - model_type=ModelType.IPAdapter, - base_model=single_ip_adapter.ip_adapter_model.base_model, - context=context, - ) + context.models.load(key=single_ip_adapter.ip_adapter_model.key) ) - image_encoder_model_info = context.services.model_manager.get_model( - model_name=single_ip_adapter.image_encoder_model.model_name, - model_type=ModelType.CLIPVision, - base_model=single_ip_adapter.image_encoder_model.base_model, - context=context, - ) + image_encoder_model_info = context.models.load(key=single_ip_adapter.image_encoder_model.key) # `single_ip_adapter.image` could be a list or a single ImageField. Normalize to a list here. 
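As the comment above says, `single_ip_adapter.image` may be one ImageField or a list of them. A tiny standalone illustration of the normalize-then-fetch pattern used below (hypothetical helper, not part of the diff):

    from typing import List, TypeVar, Union

    T = TypeVar("T")

    def ensure_list(value: Union[T, List[T]]) -> List[T]:
        # Accept one-or-many and hand downstream code a single, predictable shape.
        return value if isinstance(value, list) else [value]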
- single_ipa_images = single_ip_adapter.image - if not isinstance(single_ipa_images, list): - single_ipa_images = [single_ipa_images] + single_ipa_image_fields = single_ip_adapter.image + if not isinstance(single_ipa_image_fields, list): + single_ipa_image_fields = [single_ipa_image_fields] - single_ipa_images = [context.services.images.get_pil_image(image.image_name) for image in single_ipa_images] + single_ipa_images = [context.images.get_pil(image.image_name) for image in single_ipa_image_fields] # TODO(ryand): With some effort, the step of running the CLIP Vision encoder could be done before any other # models are needed in memory. This would help to reduce peak memory utilization in low-memory environments. @@ -685,26 +712,20 @@ class DenoiseLatentsInvocation(BaseInvocation): t2i_adapter_data = [] for t2i_adapter_field in t2i_adapter: - t2i_adapter_model_info = context.services.model_manager.get_model( - model_name=t2i_adapter_field.t2i_adapter_model.model_name, - model_type=ModelType.T2IAdapter, - base_model=t2i_adapter_field.t2i_adapter_model.base_model, - context=context, - ) - image = context.services.images.get_pil_image(t2i_adapter_field.image.image_name) + t2i_adapter_model_config = context.models.get_config(key=t2i_adapter_field.t2i_adapter_model.key) + t2i_adapter_loaded_model = context.models.load(key=t2i_adapter_field.t2i_adapter_model.key) + image = context.images.get_pil(t2i_adapter_field.image.image_name) # The max_unet_downscale is the maximum amount that the UNet model downscales the latent image internally. - if t2i_adapter_field.t2i_adapter_model.base_model == BaseModelType.StableDiffusion1: + if t2i_adapter_model_config.base == BaseModelType.StableDiffusion1: max_unet_downscale = 8 - elif t2i_adapter_field.t2i_adapter_model.base_model == BaseModelType.StableDiffusionXL: + elif t2i_adapter_model_config.base == BaseModelType.StableDiffusionXL: max_unet_downscale = 4 else: - raise ValueError( - f"Unexpected T2I-Adapter base model type: '{t2i_adapter_field.t2i_adapter_model.base_model}'." - ) + raise ValueError(f"Unexpected T2I-Adapter base model type: '{t2i_adapter_model_config.base}'.") t2i_adapter_model: T2IAdapter - with t2i_adapter_model_info as t2i_adapter_model: + with t2i_adapter_loaded_model as t2i_adapter_model: total_downscale_factor = t2i_adapter_model.total_downscale_factor # Resize the T2I-Adapter input image. 
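The elided resize step after this point has to size the adapter's pixel input so that its deepest feature map lines up with the UNet's most-downscaled latent map. A worked example of that proportion, assuming a 512x512 source (64x64 latents) and a hypothetical adapter reporting a total pixel-to-feature downscale of 64:

    latent_h = latent_w = 64     # 512 px // LATENT_SCALE_FACTOR (8)
    max_unet_downscale = 8       # SD1 branch above; SDXL uses 4
    total_downscale_factor = 64  # hypothetical value from the loaded T2IAdapter
    # The deepest UNet latent map is 64 // 8 = 8 on a side, so the adapter input needs
    # 8 * 64 = 512 px for its deepest feature map to match:
    t2i_input_h = latent_h // max_unet_downscale * total_downscale_factor  # 512
    t2i_input_w = latent_w // max_unet_downscale * total_downscale_factor  # 512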
@@ -724,7 +745,7 @@ class DenoiseLatentsInvocation(BaseInvocation): do_classifier_free_guidance=False, width=t2i_input_width, height=t2i_input_height, - num_channels=t2i_adapter_model.config.in_channels, + num_channels=t2i_adapter_model.config["in_channels"], # mypy treats this as a FrozenDict device=t2i_adapter_model.device, dtype=t2i_adapter_model.dtype, resize_mode=t2i_adapter_field.resize_mode, @@ -749,7 +770,16 @@ class DenoiseLatentsInvocation(BaseInvocation): # original idea by https://github.com/AmericanPresidentJimmyCarter # TODO: research more for second order schedulers timesteps - def init_scheduler(self, scheduler, device, steps, denoising_start, denoising_end, seed: int): + def init_scheduler( + self, + scheduler: Union[Scheduler, ConfigMixin], + device: torch.device, + steps: int, + denoising_start: float, + denoising_end: float, + seed: int, + ) -> Tuple[int, List[int], int]: + assert isinstance(scheduler, ConfigMixin) if scheduler.config.get("cpu_only", False): scheduler.set_timesteps(steps, device="cpu") timesteps = scheduler.timesteps.to(device=device) @@ -761,11 +791,11 @@ class DenoiseLatentsInvocation(BaseInvocation): _timesteps = timesteps[:: scheduler.order] # get start timestep index - t_start_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_start))) + t_start_val = int(round(scheduler.config["num_train_timesteps"] * (1 - denoising_start))) t_start_idx = len(list(filter(lambda ts: ts >= t_start_val, _timesteps))) # get end timestep index - t_end_val = int(round(scheduler.config.num_train_timesteps * (1 - denoising_end))) + t_end_val = int(round(scheduler.config["num_train_timesteps"] * (1 - denoising_end))) t_end_idx = len(list(filter(lambda ts: ts >= t_end_val, _timesteps[t_start_idx:]))) # apply order to indexes @@ -786,18 +816,20 @@ class DenoiseLatentsInvocation(BaseInvocation): return num_inference_steps, timesteps, init_timestep, scheduler_step_kwargs - def prep_inpaint_mask(self, context, latents): + def prep_inpaint_mask( + self, context: InvocationContext, latents: torch.Tensor + ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], bool]: if self.denoise_mask is None: - return None, None + return None, None, False - mask = context.services.latents.get(self.denoise_mask.mask_name) + mask = context.tensors.load(self.denoise_mask.mask_name) mask = tv_resize(mask, latents.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False) if self.denoise_mask.masked_latents_name is not None: - masked_latents = context.services.latents.get(self.denoise_mask.masked_latents_name) + masked_latents = context.tensors.load(self.denoise_mask.masked_latents_name) else: masked_latents = None - return 1 - mask, masked_latents + return 1 - mask, masked_latents, self.denoise_mask.gradient @torch.no_grad() def invoke(self, context: InvocationContext) -> LatentsOutput: @@ -805,11 +837,11 @@ class DenoiseLatentsInvocation(BaseInvocation): seed = None noise = None if self.noise is not None: - noise = context.services.latents.get(self.noise.latents_name) + noise = context.tensors.load(self.noise.latents_name) seed = self.noise.seed if self.latents is not None: - latents = context.services.latents.get(self.latents.latents_name) + latents = context.tensors.load(self.latents.latents_name) if seed is None: seed = self.latents.seed @@ -824,7 +856,7 @@ class DenoiseLatentsInvocation(BaseInvocation): if seed is None: seed = 0 - mask, masked_latents = self.prep_inpaint_mask(context, latents) + mask, masked_latents, gradient_mask = self.prep_inpaint_mask(context, 
latents) # TODO(ryand): I have hard-coded `do_classifier_free_guidance=True` to mirror the behaviour of ControlNets, # below. Investigate whether this is appropriate. @@ -835,35 +867,30 @@ class DenoiseLatentsInvocation(BaseInvocation): do_classifier_free_guidance=True, ) - # Get the source node id (we are invoking the prepared node) - graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id) - source_node_id = graph_execution_state.prepared_source_mapping[self.id] + # get the unet's config so that we can pass the base to dispatch_progress() + unet_config = context.models.get_config(self.unet.unet.key) - def step_callback(state: PipelineIntermediateState): - self.dispatch_progress(context, source_node_id, state, self.unet.unet.base_model) + def step_callback(state: PipelineIntermediateState) -> None: + context.util.sd_step_callback(state, unet_config.base) - def _lora_loader(): + def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]: for lora in self.unet.loras: - lora_info = context.services.model_manager.get_model( - **lora.model_dump(exclude={"weight"}), - context=context, - ) - yield (lora_info.context.model, lora.weight) + lora_info = context.models.load(**lora.model_dump(exclude={"weight"})) + yield (lora_info.model, lora.weight) del lora_info return - unet_info = context.services.model_manager.get_model( - **self.unet.unet.model_dump(), - context=context, - ) + unet_info = context.models.load(**self.unet.unet.model_dump()) + assert isinstance(unet_info.model, UNet2DConditionModel) with ( ExitStack() as exit_stack, - ModelPatcher.apply_freeu(unet_info.context.model, self.unet.freeu_config), - set_seamless(unet_info.context.model, self.unet.seamless_axes), + ModelPatcher.apply_freeu(unet_info.model, self.unet.freeu_config), + set_seamless(unet_info.model, self.unet.seamless_axes), # FIXME unet_info as unet, # Apply the LoRA after unet has been moved to its target device for faster patching. 
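            # (Note: `_lora_loader` above is a generator yielding (LoRAModelRaw, weight)
            # pairs, so each LoRA is materialized only when ModelPatcher consumes it inside
            # this context stack, after the UNet is already on its execution device.)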
ModelPatcher.apply_lora_unet(unet, _lora_loader()), ): + assert isinstance(unet, UNet2DConditionModel) latents = latents.to(device=unet.device, dtype=unet.dtype) if noise is not None: noise = noise.to(device=unet.device, dtype=unet.dtype) @@ -917,6 +944,7 @@ class DenoiseLatentsInvocation(BaseInvocation): seed=seed, mask=mask, masked_latents=masked_latents, + gradient_mask=gradient_mask, num_inference_steps=num_inference_steps, scheduler_step_kwargs=scheduler_step_kwargs, conditioning_data=conditioning_data, @@ -932,9 +960,8 @@ class DenoiseLatentsInvocation(BaseInvocation): if choose_torch_device() == torch.device("mps"): mps.empty_cache() - name = f"{context.graph_execution_state_id}__{self.id}" - context.services.latents.save(name, result_latents) - return build_latents_output(latents_name=name, latents=result_latents, seed=seed) + name = context.tensors.save(tensor=result_latents) + return LatentsOutput.build(latents_name=name, latents=result_latents, seed=seed) @invocation( @@ -942,9 +969,9 @@ class DenoiseLatentsInvocation(BaseInvocation): title="Latents to Image", tags=["latents", "image", "vae", "l2i"], category="latents", - version="1.2.0", + version="1.2.1", ) -class LatentsToImageInvocation(BaseInvocation, WithMetadata): +class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard): """Generates an image from latents.""" latents: LatentsField = InputField( @@ -960,14 +987,12 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata): @torch.no_grad() def invoke(self, context: InvocationContext) -> ImageOutput: - latents = context.services.latents.get(self.latents.latents_name) + latents = context.tensors.load(self.latents.latents_name) - vae_info = context.services.model_manager.get_model( - **self.vae.vae.model_dump(), - context=context, - ) + vae_info = context.models.load(**self.vae.vae.model_dump()) - with set_seamless(vae_info.context.model, self.vae.seamless_axes), vae_info as vae: + with set_seamless(vae_info.model, self.vae.seamless_axes), vae_info as vae: + assert isinstance(vae, torch.nn.Module) latents = latents.to(vae.device) if self.fp32: vae.to(dtype=torch.float32) @@ -994,7 +1019,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata): vae.to(dtype=torch.float16) latents = latents.half() - if self.tiled or context.services.configuration.tiled_decode: + if self.tiled or context.config.get().tiled_decode: vae.enable_tiling() else: vae.disable_tiling() @@ -1018,22 +1043,9 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata): if choose_torch_device() == torch.device("mps"): mps.empty_cache() - image_dto = context.services.images.create( - image=image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) LATENTS_INTERPOLATION_MODE = Literal["nearest", "linear", "bilinear", "bicubic", "trilinear", "area", "nearest-exact"] @@ -1044,7 +1056,7 @@ LATENTS_INTERPOLATION_MODE = Literal["nearest", "linear", "bilinear", "bicubic", title="Resize Latents", tags=["latents", "resize"], category="latents", - version="1.0.0", + version="1.0.1", ) class ResizeLatentsInvocation(BaseInvocation): """Resizes latents to explicit width/height (in 
pixels). Provided dimensions are floor-divided by 8.""" @@ -1067,7 +1079,7 @@ class ResizeLatentsInvocation(BaseInvocation): antialias: bool = InputField(default=False, description=FieldDescriptions.torch_antialias) def invoke(self, context: InvocationContext) -> LatentsOutput: - latents = context.services.latents.get(self.latents.latents_name) + latents = context.tensors.load(self.latents.latents_name) # TODO: device = choose_torch_device() @@ -1085,10 +1097,8 @@ class ResizeLatentsInvocation(BaseInvocation): if device == torch.device("mps"): mps.empty_cache() - name = f"{context.graph_execution_state_id}__{self.id}" - # context.services.latents.set(name, resized_latents) - context.services.latents.save(name, resized_latents) - return build_latents_output(latents_name=name, latents=resized_latents, seed=self.latents.seed) + name = context.tensors.save(tensor=resized_latents) + return LatentsOutput.build(latents_name=name, latents=resized_latents, seed=self.latents.seed) @invocation( @@ -1096,7 +1106,7 @@ class ResizeLatentsInvocation(BaseInvocation): title="Scale Latents", tags=["latents", "resize"], category="latents", - version="1.0.0", + version="1.0.1", ) class ScaleLatentsInvocation(BaseInvocation): """Scales latents by a given factor.""" @@ -1110,7 +1120,7 @@ class ScaleLatentsInvocation(BaseInvocation): antialias: bool = InputField(default=False, description=FieldDescriptions.torch_antialias) def invoke(self, context: InvocationContext) -> LatentsOutput: - latents = context.services.latents.get(self.latents.latents_name) + latents = context.tensors.load(self.latents.latents_name) # TODO: device = choose_torch_device() @@ -1129,10 +1139,8 @@ class ScaleLatentsInvocation(BaseInvocation): if device == torch.device("mps"): mps.empty_cache() - name = f"{context.graph_execution_state_id}__{self.id}" - # context.services.latents.set(name, resized_latents) - context.services.latents.save(name, resized_latents) - return build_latents_output(latents_name=name, latents=resized_latents, seed=self.latents.seed) + name = context.tensors.save(tensor=resized_latents) + return LatentsOutput.build(latents_name=name, latents=resized_latents, seed=self.latents.seed) @invocation( @@ -1140,7 +1148,7 @@ class ScaleLatentsInvocation(BaseInvocation): title="Image to Latents", tags=["latents", "image", "vae", "i2l"], category="latents", - version="1.0.0", + version="1.0.1", ) class ImageToLatentsInvocation(BaseInvocation): """Encodes an image into latents.""" @@ -1156,8 +1164,9 @@ class ImageToLatentsInvocation(BaseInvocation): fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32) @staticmethod - def vae_encode(vae_info, upcast, tiled, image_tensor): + def vae_encode(vae_info: LoadedModel, upcast: bool, tiled: bool, image_tensor: torch.Tensor) -> torch.Tensor: with vae_info as vae: + assert isinstance(vae, torch.nn.Module) orig_dtype = vae.dtype if upcast: vae.to(dtype=torch.float32) @@ -1201,12 +1210,9 @@ class ImageToLatentsInvocation(BaseInvocation): @torch.no_grad() def invoke(self, context: InvocationContext) -> LatentsOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) - vae_info = context.services.model_manager.get_model( - **self.vae.vae.model_dump(), - context=context, - ) + vae_info = context.models.load(**self.vae.vae.model_dump()) image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB")) if image_tensor.dim() == 3: @@ -1214,22 +1220,26 @@ class 
ImageToLatentsInvocation(BaseInvocation): latents = self.vae_encode(vae_info, self.fp32, self.tiled, image_tensor) - name = f"{context.graph_execution_state_id}__{self.id}" latents = latents.to("cpu") - context.services.latents.save(name, latents) - return build_latents_output(latents_name=name, latents=latents, seed=None) + name = context.tensors.save(tensor=latents) + return LatentsOutput.build(latents_name=name, latents=latents, seed=None) @singledispatchmethod @staticmethod def _encode_to_tensor(vae: AutoencoderKL, image_tensor: torch.FloatTensor) -> torch.FloatTensor: + assert isinstance(vae, torch.nn.Module) image_tensor_dist = vae.encode(image_tensor).latent_dist - latents = image_tensor_dist.sample().to(dtype=vae.dtype) # FIXME: uses torch.randn. make reproducible! + latents: torch.Tensor = image_tensor_dist.sample().to( + dtype=vae.dtype + ) # FIXME: uses torch.randn. make reproducible! return latents @_encode_to_tensor.register @staticmethod def _(vae: AutoencoderTiny, image_tensor: torch.FloatTensor) -> torch.FloatTensor: - return vae.encode(image_tensor).latents + assert isinstance(vae, torch.nn.Module) + latents: torch.FloatTensor = vae.encode(image_tensor).latents + return latents @invocation( @@ -1237,7 +1247,7 @@ class ImageToLatentsInvocation(BaseInvocation): title="Blend Latents", tags=["latents", "blend"], category="latents", - version="1.0.0", + version="1.0.1", ) class BlendLatentsInvocation(BaseInvocation): """Blend two latents using a given alpha. Latents must have same size.""" @@ -1253,8 +1263,8 @@ class BlendLatentsInvocation(BaseInvocation): alpha: float = InputField(default=0.5, description=FieldDescriptions.blend_alpha) def invoke(self, context: InvocationContext) -> LatentsOutput: - latents_a = context.services.latents.get(self.latents_a.latents_name) - latents_b = context.services.latents.get(self.latents_b.latents_name) + latents_a = context.tensors.load(self.latents_a.latents_name) + latents_b = context.tensors.load(self.latents_b.latents_name) if latents_a.shape != latents_b.shape: raise Exception("Latents to blend must be the same size.") @@ -1262,7 +1272,12 @@ class BlendLatentsInvocation(BaseInvocation): # TODO: device = choose_torch_device() - def slerp(t, v0, v1, DOT_THRESHOLD=0.9995): + def slerp( + t: Union[float, npt.NDArray[Any]], # FIXME: maybe use np.float32 here? 
+ v0: Union[torch.Tensor, npt.NDArray[Any]], + v1: Union[torch.Tensor, npt.NDArray[Any]], + DOT_THRESHOLD: float = 0.9995, + ) -> Union[torch.Tensor, npt.NDArray[Any]]: """ Spherical linear interpolation Args: @@ -1295,12 +1310,16 @@ class BlendLatentsInvocation(BaseInvocation): v2 = s0 * v0 + s1 * v1 if inputs_are_torch: - v2 = torch.from_numpy(v2).to(device) - - return v2 + v2_torch: torch.Tensor = torch.from_numpy(v2).to(device) + return v2_torch + else: + assert isinstance(v2, np.ndarray) + return v2 # blend - blended_latents = slerp(self.alpha, latents_a, latents_b) + bl = slerp(self.alpha, latents_a, latents_b) + assert isinstance(bl, torch.Tensor) + blended_latents: torch.Tensor = bl # for type checking convenience # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699 blended_latents = blended_latents.to("cpu") @@ -1308,10 +1327,8 @@ class BlendLatentsInvocation(BaseInvocation): if device == torch.device("mps"): mps.empty_cache() - name = f"{context.graph_execution_state_id}__{self.id}" - # context.services.latents.set(name, resized_latents) - context.services.latents.save(name, blended_latents) - return build_latents_output(latents_name=name, latents=blended_latents) + name = context.tensors.save(tensor=blended_latents) + return LatentsOutput.build(latents_name=name, latents=blended_latents) # The Crop Latents node was copied from @skunkworxdark's implementation here: @@ -1321,7 +1338,7 @@ class BlendLatentsInvocation(BaseInvocation): title="Crop Latents", tags=["latents", "crop"], category="latents", - version="1.0.0", + version="1.0.1", ) # TODO(ryand): Named `CropLatentsCoreInvocation` to prevent a conflict with custom node `CropLatentsInvocation`. # Currently, if the class names conflict then 'GET /openapi.json' fails. 
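The crop in the hunk below operates on latents, so pixel coordinates and sizes are floor-divided by LATENT_SCALE_FACTOR (8). A quick worked example with illustrative values, assuming a 128x128 px crop anchored at pixel (64, 32):

    LATENT_SCALE_FACTOR = 8
    x, y, width, height = 64, 32, 128, 128                       # pixel-space crop
    x1, y1 = x // LATENT_SCALE_FACTOR, y // LATENT_SCALE_FACTOR  # (8, 4)
    x2 = x1 + width // LATENT_SCALE_FACTOR                       # 24
    y2 = y1 + height // LATENT_SCALE_FACTOR                      # 20
    # latents[..., y1:y2, x1:x2] -> a 16x16 latent tile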
@@ -1356,7 +1373,7 @@ class CropLatentsCoreInvocation(BaseInvocation): ) def invoke(self, context: InvocationContext) -> LatentsOutput: - latents = context.services.latents.get(self.latents.latents_name) + latents = context.tensors.load(self.latents.latents_name) x1 = self.x // LATENT_SCALE_FACTOR y1 = self.y // LATENT_SCALE_FACTOR @@ -1365,10 +1382,9 @@ class CropLatentsCoreInvocation(BaseInvocation): cropped_latents = latents[..., y1:y2, x1:x2] - name = f"{context.graph_execution_state_id}__{self.id}" - context.services.latents.save(name, cropped_latents) + name = context.tensors.save(tensor=cropped_latents) - return build_latents_output(latents_name=name, latents=cropped_latents) + return LatentsOutput.build(latents_name=name, latents=cropped_latents) @invocation_output("ideal_size_output") @@ -1396,15 +1412,16 @@ class IdealSizeInvocation(BaseInvocation): description="Amount to multiply the model's dimensions by when calculating the ideal size (may result in initial generation artifacts if too large)", ) - def trim_to_multiple_of(self, *args, multiple_of=LATENT_SCALE_FACTOR): + def trim_to_multiple_of(self, *args: int, multiple_of: int = LATENT_SCALE_FACTOR) -> Tuple[int, ...]: return tuple((x - x % multiple_of) for x in args) def invoke(self, context: InvocationContext) -> IdealSizeOutput: + unet_config = context.models.get_config(**self.unet.unet.model_dump()) aspect = self.width / self.height - dimension = 512 - if self.unet.unet.base_model == BaseModelType.StableDiffusion2: + dimension: float = 512 + if unet_config.base == BaseModelType.StableDiffusion2: dimension = 768 - elif self.unet.unet.base_model == BaseModelType.StableDiffusionXL: + elif unet_config.base == BaseModelType.StableDiffusionXL: dimension = 1024 dimension = dimension * self.multiplier min_dimension = math.floor(dimension * 0.5) diff --git a/invokeai/app/invocations/math.py b/invokeai/app/invocations/math.py index defc61275f..83a092be69 100644 --- a/invokeai/app/invocations/math.py +++ b/invokeai/app/invocations/math.py @@ -5,10 +5,11 @@ from typing import Literal import numpy as np from pydantic import ValidationInfo, field_validator +from invokeai.app.invocations.fields import FieldDescriptions, InputField from invokeai.app.invocations.primitives import FloatOutput, IntegerOutput -from invokeai.app.shared.fields import FieldDescriptions +from invokeai.app.services.shared.invocation_context import InvocationContext -from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation +from .baseinvocation import BaseInvocation, invocation @invocation("add", title="Add Integers", tags=["math", "add"], category="math", version="1.0.0") diff --git a/invokeai/app/invocations/metadata.py b/invokeai/app/invocations/metadata.py index 14d66f8ef6..bec1b0d9d5 100644 --- a/invokeai/app/invocations/metadata.py +++ b/invokeai/app/invocations/metadata.py @@ -5,20 +5,22 @@ from pydantic import BaseModel, ConfigDict, Field from invokeai.app.invocations.baseinvocation import ( BaseInvocation, BaseInvocationOutput, - InputField, - InvocationContext, - MetadataField, - OutputField, - UIType, invocation, invocation_output, ) from invokeai.app.invocations.controlnet_image_processors import ControlField +from invokeai.app.invocations.fields import ( + FieldDescriptions, + ImageField, + InputField, + MetadataField, + OutputField, + UIType, +) from invokeai.app.invocations.ip_adapter import IPAdapterModelField from invokeai.app.invocations.model import LoRAModelField, MainModelField, VAEModelField -from 
invokeai.app.invocations.primitives import ImageField from invokeai.app.invocations.t2i_adapter import T2IAdapterField -from invokeai.app.shared.fields import FieldDescriptions +from invokeai.app.services.shared.invocation_context import InvocationContext from ...version import __version__ @@ -31,7 +33,7 @@ class MetadataItemField(BaseModel): class LoRAMetadataField(BaseModel): """LoRA Metadata Field""" - lora: LoRAModelField = Field(description=FieldDescriptions.lora_model) + model: LoRAModelField = Field(description=FieldDescriptions.lora_model) weight: float = Field(description=FieldDescriptions.lora_weight) @@ -112,7 +114,7 @@ GENERATION_MODES = Literal[ ] -@invocation("core_metadata", title="Core Metadata", tags=["metadata"], category="metadata", version="1.0.1") +@invocation("core_metadata", title="Core Metadata", tags=["metadata"], category="metadata", version="1.1.1") class CoreMetadataInvocation(BaseInvocation): """Collects core generation metadata into a MetadataField""" diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py index 99dcc72999..6087bc82db 100644 --- a/invokeai/app/invocations/model.py +++ b/invokeai/app/invocations/model.py @@ -1,31 +1,24 @@ import copy from typing import List, Optional -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, Field -from invokeai.app.shared.fields import FieldDescriptions +from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField +from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.shared.models import FreeUConfig -from ...backend.model_management import BaseModelType, ModelType, SubModelType +from ...backend.model_manager import SubModelType from .baseinvocation import ( BaseInvocation, BaseInvocationOutput, - Input, - InputField, - InvocationContext, - OutputField, invocation, invocation_output, ) class ModelInfo(BaseModel): - model_name: str = Field(description="Info to load submodel") - base_model: BaseModelType = Field(description="Base model") - model_type: ModelType = Field(description="Info to load submodel") - submodel: Optional[SubModelType] = Field(default=None, description="Info to load submodel") - - model_config = ConfigDict(protected_namespaces=()) + key: str = Field(description="Key of model as returned by ModelRecordServiceBase.get_model()") + submodel_type: Optional[SubModelType] = Field(default=None, description="Info to load submodel") class LoraInfo(ModelInfo): @@ -55,7 +48,7 @@ class VaeField(BaseModel): @invocation_output("unet_output") class UNetOutput(BaseInvocationOutput): - """Base class for invocations that output a UNet field""" + """Base class for invocations that output a UNet field.""" unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet") @@ -84,20 +77,13 @@ class ModelLoaderOutput(UNetOutput, CLIPOutput, VAEOutput): class MainModelField(BaseModel): """Main model field""" - model_name: str = Field(description="Name of the model") - base_model: BaseModelType = Field(description="Base model") - model_type: ModelType = Field(description="Model Type") - - model_config = ConfigDict(protected_namespaces=()) + key: str = Field(description="Model key") class LoRAModelField(BaseModel): """LoRA model field""" - model_name: str = Field(description="Name of the LoRA model") - base_model: BaseModelType = Field(description="Base model") - - model_config = ConfigDict(protected_namespaces=()) + key: str = Field(description="LoRA model key") @invocation( @@ 
-105,7 +91,7 @@ class LoRAModelField(BaseModel): title="Main Model", tags=["model"], category="model", - version="1.0.0", + version="1.0.1", ) class MainModelLoaderInvocation(BaseInvocation): """Loads a main model, outputting its submodels.""" @@ -114,85 +100,40 @@ class MainModelLoaderInvocation(BaseInvocation): # TODO: precision? def invoke(self, context: InvocationContext) -> ModelLoaderOutput: - base_model = self.model.base_model - model_name = self.model.model_name - model_type = ModelType.Main + key = self.model.key # TODO: not found exceptions - if not context.services.model_manager.model_exists( - model_name=model_name, - base_model=base_model, - model_type=model_type, - ): - raise Exception(f"Unknown {base_model} {model_type} model: {model_name}") - - """ - if not context.services.model_manager.model_exists( - model_name=self.model_name, - model_type=SDModelType.Diffusers, - submodel=SDModelType.Tokenizer, - ): - raise Exception( - f"Failed to find tokenizer submodel in {self.model_name}! Check if model corrupted" - ) - - if not context.services.model_manager.model_exists( - model_name=self.model_name, - model_type=SDModelType.Diffusers, - submodel=SDModelType.TextEncoder, - ): - raise Exception( - f"Failed to find text_encoder submodel in {self.model_name}! Check if model corrupted" - ) - - if not context.services.model_manager.model_exists( - model_name=self.model_name, - model_type=SDModelType.Diffusers, - submodel=SDModelType.UNet, - ): - raise Exception( - f"Failed to find unet submodel from {self.model_name}! Check if model corrupted" - ) - """ + if not context.models.exists(key): + raise Exception(f"Unknown model {key}") return ModelLoaderOutput( unet=UNetField( unet=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.UNet, + key=key, + submodel_type=SubModelType.UNet, ), scheduler=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.Scheduler, + key=key, + submodel_type=SubModelType.Scheduler, ), loras=[], ), clip=ClipField( tokenizer=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.Tokenizer, + key=key, + submodel_type=SubModelType.Tokenizer, ), text_encoder=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.TextEncoder, + key=key, + submodel_type=SubModelType.TextEncoder, ), loras=[], skipped_layers=0, ), vae=VaeField( vae=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.Vae, + key=key, + submodel_type=SubModelType.Vae, ), ), ) @@ -206,7 +147,7 @@ class LoraLoaderOutput(BaseInvocationOutput): clip: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP") -@invocation("lora_loader", title="LoRA", tags=["model"], category="model", version="1.0.0") +@invocation("lora_loader", title="LoRA", tags=["model"], category="model", version="1.0.1") class LoraLoaderInvocation(BaseInvocation): """Apply selected lora to unet and text_encoder.""" @@ -229,21 +170,16 @@ class LoraLoaderInvocation(BaseInvocation): if self.lora is None: raise Exception("No LoRA provided") - base_model = self.lora.base_model - lora_name = self.lora.model_name + lora_key = self.lora.key - if not context.services.model_manager.model_exists( - base_model=base_model, - model_name=lora_name, - model_type=ModelType.Lora, - ): - raise Exception(f"Unkown lora name: {lora_name}!") + if 
not context.models.exists(lora_key): + raise Exception(f"Unknown lora: {lora_key}!") - if self.unet is not None and any(lora.model_name == lora_name for lora in self.unet.loras): - raise Exception(f'Lora "{lora_name}" already applied to unet') + if self.unet is not None and any(lora.key == lora_key for lora in self.unet.loras): + raise Exception(f'Lora "{lora_key}" already applied to unet') - if self.clip is not None and any(lora.model_name == lora_name for lora in self.clip.loras): - raise Exception(f'Lora "{lora_name}" already applied to clip') + if self.clip is not None and any(lora.key == lora_key for lora in self.clip.loras): + raise Exception(f'Lora "{lora_key}" already applied to clip') output = LoraLoaderOutput() @@ -251,10 +187,8 @@ class LoraLoaderInvocation(BaseInvocation): output.unet = copy.deepcopy(self.unet) output.unet.loras.append( LoraInfo( - base_model=base_model, - model_name=lora_name, - model_type=ModelType.Lora, - submodel=None, + key=lora_key, + submodel_type=None, weight=self.weight, ) ) @@ -263,10 +197,8 @@ class LoraLoaderInvocation(BaseInvocation): output.clip = copy.deepcopy(self.clip) output.clip.loras.append( LoraInfo( - base_model=base_model, - model_name=lora_name, - model_type=ModelType.Lora, - submodel=None, + key=lora_key, + submodel_type=None, weight=self.weight, ) ) @@ -288,7 +220,7 @@ class SDXLLoraLoaderOutput(BaseInvocationOutput): title="SDXL LoRA", tags=["lora", "model"], category="model", - version="1.0.0", + version="1.0.1", ) class SDXLLoraLoaderInvocation(BaseInvocation): """Apply selected lora to unet and text_encoder.""" @@ -318,24 +250,19 @@ class SDXLLoraLoaderInvocation(BaseInvocation): if self.lora is None: raise Exception("No LoRA provided") - base_model = self.lora.base_model - lora_name = self.lora.model_name + lora_key = self.lora.key - if not context.services.model_manager.model_exists( - base_model=base_model, - model_name=lora_name, - model_type=ModelType.Lora, - ): - raise Exception(f"Unknown lora name: {lora_name}!") + if not context.models.exists(lora_key): + raise Exception(f"Unknown lora: {lora_key}!") - if self.unet is not None and any(lora.model_name == lora_name for lora in self.unet.loras): - raise Exception(f'Lora "{lora_name}" already applied to unet') + if self.unet is not None and any(lora.key == lora_key for lora in self.unet.loras): + raise Exception(f'Lora "{lora_key}" already applied to unet') - if self.clip is not None and any(lora.model_name == lora_name for lora in self.clip.loras): - raise Exception(f'Lora "{lora_name}" already applied to clip') + if self.clip is not None and any(lora.key == lora_key for lora in self.clip.loras): + raise Exception(f'Lora "{lora_key}" already applied to clip') - if self.clip2 is not None and any(lora.model_name == lora_name for lora in self.clip2.loras): - raise Exception(f'Lora "{lora_name}" already applied to clip2') + if self.clip2 is not None and any(lora.key == lora_key for lora in self.clip2.loras): + raise Exception(f'Lora "{lora_key}" already applied to clip2') output = SDXLLoraLoaderOutput() @@ -343,10 +270,8 @@ class SDXLLoraLoaderInvocation(BaseInvocation): output.unet = copy.deepcopy(self.unet) output.unet.loras.append( LoraInfo( - base_model=base_model, - model_name=lora_name, - model_type=ModelType.Lora, - submodel=None, + key=lora_key, + submodel_type=None, weight=self.weight, ) ) @@ -355,10 +280,8 @@ class SDXLLoraLoaderInvocation(BaseInvocation): output.clip = copy.deepcopy(self.clip) output.clip.loras.append( LoraInfo( - base_model=base_model, - 
model_name=lora_name, - model_type=ModelType.Lora, - submodel=None, + key=lora_key, + submodel_type=None, weight=self.weight, ) ) @@ -367,10 +290,8 @@ class SDXLLoraLoaderInvocation(BaseInvocation): output.clip2 = copy.deepcopy(self.clip2) output.clip2.loras.append( LoraInfo( - base_model=base_model, - model_name=lora_name, - model_type=ModelType.Lora, - submodel=None, + key=lora_key, + submodel_type=None, weight=self.weight, ) ) @@ -381,13 +302,10 @@ class SDXLLoraLoaderInvocation(BaseInvocation): class VAEModelField(BaseModel): """Vae model field""" - model_name: str = Field(description="Name of the model") - base_model: BaseModelType = Field(description="Base model") - - model_config = ConfigDict(protected_namespaces=()) + key: str = Field(description="Model's key") -@invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.0") +@invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.1") class VaeLoaderInvocation(BaseInvocation): """Loads a VAE model, outputting a VaeLoaderOutput""" @@ -398,25 +316,12 @@ class VaeLoaderInvocation(BaseInvocation): ) def invoke(self, context: InvocationContext) -> VAEOutput: - base_model = self.vae_model.base_model - model_name = self.vae_model.model_name - model_type = ModelType.Vae + key = self.vae_model.key - if not context.services.model_manager.model_exists( - base_model=base_model, - model_name=model_name, - model_type=model_type, - ): - raise Exception(f"Unkown vae name: {model_name}!") - return VAEOutput( - vae=VaeField( - vae=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - ) - ) - ) + if not context.models.exists(key): + raise Exception(f"Unknown vae: {key}!") + + return VAEOutput(vae=VaeField(vae=ModelInfo(key=key))) @invocation_output("seamless_output") diff --git a/invokeai/app/invocations/noise.py b/invokeai/app/invocations/noise.py index b1ee91e1cd..335d3df292 100644 --- a/invokeai/app/invocations/noise.py +++ b/invokeai/app/invocations/noise.py @@ -4,17 +4,15 @@ import torch from pydantic import field_validator -from invokeai.app.invocations.latent import LatentsField -from invokeai.app.shared.fields import FieldDescriptions +from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR +from invokeai.app.invocations.fields import FieldDescriptions, InputField, LatentsField, OutputField +from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.util.misc import SEED_MAX from ...backend.util.devices import choose_torch_device, torch_dtype from .baseinvocation import ( BaseInvocation, BaseInvocationOutput, - InputField, - InvocationContext, - OutputField, invocation, invocation_output, ) @@ -69,13 +67,13 @@ class NoiseOutput(BaseInvocationOutput): width: int = OutputField(description=FieldDescriptions.width) height: int = OutputField(description=FieldDescriptions.height) - -def build_noise_output(latents_name: str, latents: torch.Tensor, seed: int): - return NoiseOutput( - noise=LatentsField(latents_name=latents_name, seed=seed), - width=latents.size()[3] * 8, - height=latents.size()[2] * 8, - ) + @classmethod + def build(cls, latents_name: str, latents: torch.Tensor, seed: int) -> "NoiseOutput": + return cls( + noise=LatentsField(latents_name=latents_name, seed=seed), + width=latents.size()[3] * LATENT_SCALE_FACTOR, + height=latents.size()[2] * LATENT_SCALE_FACTOR, + ) @invocation( @@ -96,13 +94,13 @@ class NoiseInvocation(BaseInvocation): ) width: int = InputField( default=512, - 
multiple_of=8, + multiple_of=LATENT_SCALE_FACTOR, gt=0, description=FieldDescriptions.width, ) height: int = InputField( default=512, - multiple_of=8, + multiple_of=LATENT_SCALE_FACTOR, gt=0, description=FieldDescriptions.height, ) @@ -124,6 +122,5 @@ class NoiseInvocation(BaseInvocation): seed=self.seed, use_cpu=self.use_cpu, ) - name = f"{context.graph_execution_state_id}__{self.id}" - context.services.latents.save(name, noise) - return build_noise_output(latents_name=name, latents=noise, seed=self.seed) + name = context.tensors.save(tensor=noise) + return NoiseOutput.build(latents_name=name, latents=noise, seed=self.seed) diff --git a/invokeai/app/invocations/onnx.py b/invokeai/app/invocations/onnx.py deleted file mode 100644 index 759cfde700..0000000000 --- a/invokeai/app/invocations/onnx.py +++ /dev/null @@ -1,508 +0,0 @@ -# Copyright (c) 2023 Borisov Sergey (https://github.com/StAlKeR7779) - -import inspect - -# from contextlib import ExitStack -from typing import List, Literal, Union - -import numpy as np -import torch -from diffusers.image_processor import VaeImageProcessor -from pydantic import BaseModel, ConfigDict, Field, field_validator -from tqdm import tqdm - -from invokeai.app.invocations.primitives import ConditioningField, ConditioningOutput, ImageField, ImageOutput -from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin -from invokeai.app.shared.fields import FieldDescriptions -from invokeai.app.util.step_callback import stable_diffusion_step_callback -from invokeai.backend import BaseModelType, ModelType, SubModelType - -from ...backend.model_management import ONNXModelPatcher -from ...backend.stable_diffusion import PipelineIntermediateState -from ...backend.util import choose_torch_device -from ..util.ti_utils import extract_ti_triggers_from_prompt -from .baseinvocation import ( - BaseInvocation, - BaseInvocationOutput, - Input, - InputField, - InvocationContext, - OutputField, - UIComponent, - UIType, - WithMetadata, - invocation, - invocation_output, -) -from .controlnet_image_processors import ControlField -from .latent import SAMPLER_NAME_VALUES, LatentsField, LatentsOutput, build_latents_output, get_scheduler -from .model import ClipField, ModelInfo, UNetField, VaeField - -ORT_TO_NP_TYPE = { - "tensor(bool)": np.bool_, - "tensor(int8)": np.int8, - "tensor(uint8)": np.uint8, - "tensor(int16)": np.int16, - "tensor(uint16)": np.uint16, - "tensor(int32)": np.int32, - "tensor(uint32)": np.uint32, - "tensor(int64)": np.int64, - "tensor(uint64)": np.uint64, - "tensor(float16)": np.float16, - "tensor(float)": np.float32, - "tensor(double)": np.float64, -} - -PRECISION_VALUES = Literal[tuple(ORT_TO_NP_TYPE.keys())] - - -@invocation("prompt_onnx", title="ONNX Prompt (Raw)", tags=["prompt", "onnx"], category="conditioning", version="1.0.0") -class ONNXPromptInvocation(BaseInvocation): - prompt: str = InputField(default="", description=FieldDescriptions.raw_prompt, ui_component=UIComponent.Textarea) - clip: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection) - - def invoke(self, context: InvocationContext) -> ConditioningOutput: - tokenizer_info = context.services.model_manager.get_model( - **self.clip.tokenizer.model_dump(), - ) - text_encoder_info = context.services.model_manager.get_model( - **self.clip.text_encoder.model_dump(), - ) - with tokenizer_info as orig_tokenizer, text_encoder_info as text_encoder: # , ExitStack() as stack: - loras = [ - ( - 
context.services.model_manager.get_model(**lora.model_dump(exclude={"weight"})).context.model, - lora.weight, - ) - for lora in self.clip.loras - ] - - ti_list = [] - for trigger in extract_ti_triggers_from_prompt(self.prompt): - name = trigger[1:-1] - try: - ti_list.append( - ( - name, - context.services.model_manager.get_model( - model_name=name, - base_model=self.clip.text_encoder.base_model, - model_type=ModelType.TextualInversion, - ).context.model, - ) - ) - except Exception: - # print(e) - # import traceback - # print(traceback.format_exc()) - print(f'Warn: trigger: "{trigger}" not found') - if loras or ti_list: - text_encoder.release_session() - with ( - ONNXModelPatcher.apply_lora_text_encoder(text_encoder, loras), - ONNXModelPatcher.apply_ti(orig_tokenizer, text_encoder, ti_list) as (tokenizer, ti_manager), - ): - text_encoder.create_session() - - # copy from - # https://github.com/huggingface/diffusers/blob/3ebbaf7c96801271f9e6c21400033b6aa5ffcf29/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py#L153 - text_inputs = tokenizer( - self.prompt, - padding="max_length", - max_length=tokenizer.model_max_length, - truncation=True, - return_tensors="np", - ) - text_input_ids = text_inputs.input_ids - """ - untruncated_ids = tokenizer(prompt, padding="max_length", return_tensors="np").input_ids - - if not np.array_equal(text_input_ids, untruncated_ids): - removed_text = self.tokenizer.batch_decode( - untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] - ) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - """ - - prompt_embeds = text_encoder(input_ids=text_input_ids.astype(np.int32))[0] - - conditioning_name = f"{context.graph_execution_state_id}_{self.id}_conditioning" - - # TODO: hacky but works ;D maybe rename latents somehow? 
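
The noise.py hunk above replaces the free function build_noise_output() with a NoiseOutput.build() classmethod and swaps the hard-coded 8 for LATENT_SCALE_FACTOR. A minimal sketch of that width/height arithmetic, assuming LATENT_SCALE_FACTOR == 8 (the literal it replaces; Stable Diffusion latents are 1/8 of the output resolution in each spatial dimension):

    import torch

    LATENT_SCALE_FACTOR = 8  # assumed value of invokeai.app.invocations.constants.LATENT_SCALE_FACTOR

    latents = torch.zeros(1, 4, 64, 64)  # (batch, channels, height / 8, width / 8)
    width = latents.size()[3] * LATENT_SCALE_FACTOR   # 64 * 8 = 512 px
    height = latents.size()[2] * LATENT_SCALE_FACTOR  # 64 * 8 = 512 px
    assert (width, height) == (512, 512)
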
- context.services.latents.save(conditioning_name, (prompt_embeds, None)) - - return ConditioningOutput( - conditioning=ConditioningField( - conditioning_name=conditioning_name, - ), - ) - - -# Text to image -@invocation( - "t2l_onnx", - title="ONNX Text to Latents", - tags=["latents", "inference", "txt2img", "onnx"], - category="latents", - version="1.0.0", -) -class ONNXTextToLatentsInvocation(BaseInvocation): - """Generates latents from conditionings.""" - - positive_conditioning: ConditioningField = InputField( - description=FieldDescriptions.positive_cond, - input=Input.Connection, - ) - negative_conditioning: ConditioningField = InputField( - description=FieldDescriptions.negative_cond, - input=Input.Connection, - ) - noise: LatentsField = InputField( - description=FieldDescriptions.noise, - input=Input.Connection, - ) - steps: int = InputField(default=10, gt=0, description=FieldDescriptions.steps) - cfg_scale: Union[float, List[float]] = InputField( - default=7.5, - ge=1, - description=FieldDescriptions.cfg_scale, - ) - scheduler: SAMPLER_NAME_VALUES = InputField( - default="euler", description=FieldDescriptions.scheduler, input=Input.Direct, ui_type=UIType.Scheduler - ) - precision: PRECISION_VALUES = InputField(default="tensor(float16)", description=FieldDescriptions.precision) - unet: UNetField = InputField( - description=FieldDescriptions.unet, - input=Input.Connection, - ) - control: Union[ControlField, list[ControlField]] = InputField( - default=None, - description=FieldDescriptions.control, - ) - # seamless: bool = InputField(default=False, description="Whether or not to generate an image that can tile without seams", ) - # seamless_axes: str = InputField(default="", description="The axes to tile the image on, 'x' and/or 'y'") - - @field_validator("cfg_scale") - def ge_one(cls, v): - """validate that all cfg_scale values are >= 1""" - if isinstance(v, list): - for i in v: - if i < 1: - raise ValueError("cfg_scale must be greater than 1") - else: - if v < 1: - raise ValueError("cfg_scale must be greater than 1") - return v - - # based on - # https://github.com/huggingface/diffusers/blob/3ebbaf7c96801271f9e6c21400033b6aa5ffcf29/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py#L375 - def invoke(self, context: InvocationContext) -> LatentsOutput: - c, _ = context.services.latents.get(self.positive_conditioning.conditioning_name) - uc, _ = context.services.latents.get(self.negative_conditioning.conditioning_name) - graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id) - source_node_id = graph_execution_state.prepared_source_mapping[self.id] - if isinstance(c, torch.Tensor): - c = c.cpu().numpy() - if isinstance(uc, torch.Tensor): - uc = uc.cpu().numpy() - device = torch.device(choose_torch_device()) - prompt_embeds = np.concatenate([uc, c]) - - latents = context.services.latents.get(self.noise.latents_name) - if isinstance(latents, torch.Tensor): - latents = latents.cpu().numpy() - - # TODO: better execution device handling - latents = latents.astype(ORT_TO_NP_TYPE[self.precision]) - - # get the initial random noise unless the user supplied it - do_classifier_free_guidance = True - # latents_dtype = prompt_embeds.dtype - # latents_shape = (batch_size * num_images_per_prompt, 4, height // 8, width // 8) - # if latents.shape != latents_shape: - # raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") - - scheduler = get_scheduler( - context=context, - 
scheduler_info=self.unet.scheduler, - scheduler_name=self.scheduler, - seed=0, # TODO: refactor this node - ) - - def torch2numpy(latent: torch.Tensor): - return latent.cpu().numpy() - - def numpy2torch(latent, device): - return torch.from_numpy(latent).to(device) - - def dispatch_progress( - self, context: InvocationContext, source_node_id: str, intermediate_state: PipelineIntermediateState - ) -> None: - stable_diffusion_step_callback( - context=context, - intermediate_state=intermediate_state, - node=self.model_dump(), - source_node_id=source_node_id, - ) - - scheduler.set_timesteps(self.steps) - latents = latents * np.float64(scheduler.init_noise_sigma) - - extra_step_kwargs = {} - if "eta" in set(inspect.signature(scheduler.step).parameters.keys()): - extra_step_kwargs.update( - eta=0.0, - ) - - unet_info = context.services.model_manager.get_model(**self.unet.unet.model_dump()) - - with unet_info as unet: # , ExitStack() as stack: - # loras = [(stack.enter_context(context.services.model_manager.get_model(**lora.dict(exclude={"weight"}))), lora.weight) for lora in self.unet.loras] - loras = [ - ( - context.services.model_manager.get_model(**lora.model_dump(exclude={"weight"})).context.model, - lora.weight, - ) - for lora in self.unet.loras - ] - - if loras: - unet.release_session() - with ONNXModelPatcher.apply_lora_unet(unet, loras): - # TODO: - _, _, h, w = latents.shape - unet.create_session(h, w) - - timestep_dtype = next( - (input.type for input in unet.session.get_inputs() if input.name == "timestep"), "tensor(float16)" - ) - timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] - for i in tqdm(range(len(scheduler.timesteps))): - t = scheduler.timesteps[i] - # expand the latents if we are doing classifier free guidance - latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = scheduler.scale_model_input(numpy2torch(latent_model_input, device), t) - latent_model_input = latent_model_input.cpu().numpy() - - # predict the noise residual - timestep = np.array([t], dtype=timestep_dtype) - noise_pred = unet(sample=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds) - noise_pred = noise_pred[0] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) - noise_pred = noise_pred_uncond + self.cfg_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - scheduler_output = scheduler.step( - numpy2torch(noise_pred, device), t, numpy2torch(latents, device), **extra_step_kwargs - ) - latents = torch2numpy(scheduler_output.prev_sample) - - state = PipelineIntermediateState( - run_id="test", step=i, timestep=timestep, latents=scheduler_output.prev_sample - ) - dispatch_progress(self, context=context, source_node_id=source_node_id, intermediate_state=state) - - # call the callback, if provided - # if callback is not None and i % callback_steps == 0: - # callback(i, t, latents) - - torch.cuda.empty_cache() - - name = f"{context.graph_execution_state_id}__{self.id}" - context.services.latents.save(name, latents) - return build_latents_output(latents_name=name, latents=torch.from_numpy(latents)) - - -# Latent to image -@invocation( - "l2i_onnx", - title="ONNX Latents to Image", - tags=["latents", "image", "vae", "onnx"], - category="image", - version="1.2.0", -) -class ONNXLatentsToImageInvocation(BaseInvocation, WithMetadata): - """Generates an image from latents.""" - - latents: LatentsField = InputField( - 
description=FieldDescriptions.denoised_latents, - input=Input.Connection, - ) - vae: VaeField = InputField( - description=FieldDescriptions.vae, - input=Input.Connection, - ) - # tiled: bool = InputField(default=False, description="Decode latents by overlaping tiles(less memory consumption)") - - def invoke(self, context: InvocationContext) -> ImageOutput: - latents = context.services.latents.get(self.latents.latents_name) - - if self.vae.vae.submodel != SubModelType.VaeDecoder: - raise Exception(f"Expected vae_decoder, found: {self.vae.vae.model_type}") - - vae_info = context.services.model_manager.get_model( - **self.vae.vae.model_dump(), - ) - - # clear memory as vae decode can request a lot - torch.cuda.empty_cache() - - with vae_info as vae: - vae.create_session() - - # copied from - # https://github.com/huggingface/diffusers/blob/3ebbaf7c96801271f9e6c21400033b6aa5ffcf29/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py#L427 - latents = 1 / 0.18215 * latents - # image = self.vae_decoder(latent_sample=latents)[0] - # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1 - image = np.concatenate([vae(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])]) - - image = np.clip(image / 2 + 0.5, 0, 1) - image = image.transpose((0, 2, 3, 1)) - image = VaeImageProcessor.numpy_to_pil(image)[0] - - torch.cuda.empty_cache() - - image_dto = context.services.images.create( - image=image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) - - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) - - -@invocation_output("model_loader_output_onnx") -class ONNXModelLoaderOutput(BaseInvocationOutput): - """Model loader output""" - - unet: UNetField = OutputField(default=None, description=FieldDescriptions.unet, title="UNet") - clip: ClipField = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP") - vae_decoder: VaeField = OutputField(default=None, description=FieldDescriptions.vae, title="VAE Decoder") - vae_encoder: VaeField = OutputField(default=None, description=FieldDescriptions.vae, title="VAE Encoder") - - -class OnnxModelField(BaseModel): - """Onnx model field""" - - model_name: str = Field(description="Name of the model") - base_model: BaseModelType = Field(description="Base model") - model_type: ModelType = Field(description="Model Type") - - model_config = ConfigDict(protected_namespaces=()) - - -@invocation("onnx_model_loader", title="ONNX Main Model", tags=["onnx", "model"], category="model", version="1.0.0") -class OnnxModelLoaderInvocation(BaseInvocation): - """Loads a main model, outputting its submodels.""" - - model: OnnxModelField = InputField( - description=FieldDescriptions.onnx_main_model, input=Input.Direct, ui_type=UIType.ONNXModel - ) - - def invoke(self, context: InvocationContext) -> ONNXModelLoaderOutput: - base_model = self.model.base_model - model_name = self.model.model_name - model_type = ModelType.ONNX - - # TODO: not found exceptions - if not context.services.model_manager.model_exists( - model_name=model_name, - base_model=base_model, - model_type=model_type, - ): - raise Exception(f"Unknown {base_model} {model_type} model: {model_name}") - - """ - if not context.services.model_manager.model_exists( - 
model_name=self.model_name, - model_type=SDModelType.Diffusers, - submodel=SDModelType.Tokenizer, - ): - raise Exception( - f"Failed to find tokenizer submodel in {self.model_name}! Check if model corrupted" - ) - - if not context.services.model_manager.model_exists( - model_name=self.model_name, - model_type=SDModelType.Diffusers, - submodel=SDModelType.TextEncoder, - ): - raise Exception( - f"Failed to find text_encoder submodel in {self.model_name}! Check if model corrupted" - ) - - if not context.services.model_manager.model_exists( - model_name=self.model_name, - model_type=SDModelType.Diffusers, - submodel=SDModelType.UNet, - ): - raise Exception( - f"Failed to find unet submodel from {self.model_name}! Check if model corrupted" - ) - """ - - return ONNXModelLoaderOutput( - unet=UNetField( - unet=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.UNet, - ), - scheduler=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.Scheduler, - ), - loras=[], - ), - clip=ClipField( - tokenizer=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.Tokenizer, - ), - text_encoder=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.TextEncoder, - ), - loras=[], - skipped_layers=0, - ), - vae_decoder=VaeField( - vae=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.VaeDecoder, - ), - ), - vae_encoder=VaeField( - vae=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.VaeEncoder, - ), - ), - ) diff --git a/invokeai/app/invocations/param_easing.py b/invokeai/app/invocations/param_easing.py index dccd18f754..6845637de9 100644 --- a/invokeai/app/invocations/param_easing.py +++ b/invokeai/app/invocations/param_easing.py @@ -40,8 +40,10 @@ from easing_functions import ( from matplotlib.ticker import MaxNLocator from invokeai.app.invocations.primitives import FloatCollectionOutput +from invokeai.app.services.shared.invocation_context import InvocationContext -from .baseinvocation import BaseInvocation, InputField, InvocationContext, invocation +from .baseinvocation import BaseInvocation, invocation +from .fields import InputField @invocation( @@ -109,7 +111,7 @@ EASING_FUNCTION_KEYS = Literal[tuple(EASING_FUNCTIONS_MAP.keys())] title="Step Param Easing", tags=["step", "easing"], category="step", - version="1.0.0", + version="1.0.1", ) class StepParamEasingInvocation(BaseInvocation): """Experimental per-step parameter easing for denoising steps""" @@ -148,19 +150,19 @@ class StepParamEasingInvocation(BaseInvocation): postlist = list(num_poststeps * [self.post_end_value]) if log_diagnostics: - context.services.logger.debug("start_step: " + str(start_step)) - context.services.logger.debug("end_step: " + str(end_step)) - context.services.logger.debug("num_easing_steps: " + str(num_easing_steps)) - context.services.logger.debug("num_presteps: " + str(num_presteps)) - context.services.logger.debug("num_poststeps: " + str(num_poststeps)) - context.services.logger.debug("prelist size: " + str(len(prelist))) - context.services.logger.debug("postlist size: " + str(len(postlist))) - context.services.logger.debug("prelist: " + str(prelist)) - context.services.logger.debug("postlist: " + str(postlist)) + context.logger.debug("start_step: " + str(start_step)) + 
context.logger.debug("end_step: " + str(end_step)) + context.logger.debug("num_easing_steps: " + str(num_easing_steps)) + context.logger.debug("num_presteps: " + str(num_presteps)) + context.logger.debug("num_poststeps: " + str(num_poststeps)) + context.logger.debug("prelist size: " + str(len(prelist))) + context.logger.debug("postlist size: " + str(len(postlist))) + context.logger.debug("prelist: " + str(prelist)) + context.logger.debug("postlist: " + str(postlist)) easing_class = EASING_FUNCTIONS_MAP[self.easing] if log_diagnostics: - context.services.logger.debug("easing class: " + str(easing_class)) + context.logger.debug("easing class: " + str(easing_class)) easing_list = [] if self.mirror: # "expected" mirroring # if number of steps is even, squeeze duration down to (number_of_steps)/2 @@ -171,7 +173,7 @@ class StepParamEasingInvocation(BaseInvocation): base_easing_duration = int(np.ceil(num_easing_steps / 2.0)) if log_diagnostics: - context.services.logger.debug("base easing duration: " + str(base_easing_duration)) + context.logger.debug("base easing duration: " + str(base_easing_duration)) even_num_steps = num_easing_steps % 2 == 0 # even number of steps easing_function = easing_class( start=self.start_value, @@ -183,14 +185,14 @@ class StepParamEasingInvocation(BaseInvocation): easing_val = easing_function.ease(step_index) base_easing_vals.append(easing_val) if log_diagnostics: - context.services.logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(easing_val)) + context.logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(easing_val)) if even_num_steps: mirror_easing_vals = list(reversed(base_easing_vals)) else: mirror_easing_vals = list(reversed(base_easing_vals[0:-1])) if log_diagnostics: - context.services.logger.debug("base easing vals: " + str(base_easing_vals)) - context.services.logger.debug("mirror easing vals: " + str(mirror_easing_vals)) + context.logger.debug("base easing vals: " + str(base_easing_vals)) + context.logger.debug("mirror easing vals: " + str(mirror_easing_vals)) easing_list = base_easing_vals + mirror_easing_vals # FIXME: add alt_mirror option (alternative to default or mirror), or remove entirely @@ -225,12 +227,12 @@ class StepParamEasingInvocation(BaseInvocation): step_val = easing_function.ease(step_index) easing_list.append(step_val) if log_diagnostics: - context.services.logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(step_val)) + context.logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(step_val)) if log_diagnostics: - context.services.logger.debug("prelist size: " + str(len(prelist))) - context.services.logger.debug("easing_list size: " + str(len(easing_list))) - context.services.logger.debug("postlist size: " + str(len(postlist))) + context.logger.debug("prelist size: " + str(len(prelist))) + context.logger.debug("easing_list size: " + str(len(easing_list))) + context.logger.debug("postlist size: " + str(len(postlist))) param_list = prelist + easing_list + postlist diff --git a/invokeai/app/invocations/primitives.py b/invokeai/app/invocations/primitives.py index 6ec77dd0ec..0b1dea7ff3 100644 --- a/invokeai/app/invocations/primitives.py +++ b/invokeai/app/invocations/primitives.py @@ -1,20 +1,29 @@ # Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) -from typing import Optional, Tuple +from typing import Optional import torch -from pydantic import BaseModel, Field -from invokeai.app.shared.fields import FieldDescriptions +from 
invokeai.app.invocations.constants import LATENT_SCALE_FACTOR +from invokeai.app.invocations.fields import ( + ColorField, + ConditioningField, + DenoiseMaskField, + FieldDescriptions, + ImageField, + Input, + InputField, + LatentsField, + MaskField, + OutputField, + UIComponent, +) +from invokeai.app.services.images.images_common import ImageDTO +from invokeai.app.services.shared.invocation_context import InvocationContext from .baseinvocation import ( BaseInvocation, BaseInvocationOutput, - Input, - InputField, - InvocationContext, - OutputField, - UIComponent, invocation, invocation_output, ) @@ -221,24 +230,6 @@ class StringCollectionInvocation(BaseInvocation): # region Image -class ImageField(BaseModel): - """An image primitive field""" - - image_name: str = Field(description="The name of the image") - - -class BoardField(BaseModel): - """A board primitive field""" - - board_id: str = Field(description="The id of the board") - - -class MaskField(BaseModel): - """A mask primitive field.""" - - mask_name: str = Field(description="The name of the mask.") - - @invocation_output("mask_output") class MaskOutput(BaseInvocationOutput): """A torch mask tensor. @@ -259,6 +250,14 @@ class ImageOutput(BaseInvocationOutput): width: int = OutputField(description="The width of the image in pixels") height: int = OutputField(description="The height of the image in pixels") + @classmethod + def build(cls, image_dto: ImageDTO) -> "ImageOutput": + return cls( + image=ImageField(image_name=image_dto.image_name), + width=image_dto.width, + height=image_dto.height, + ) + @invocation_output("image_collection_output") class ImageCollectionOutput(BaseInvocationOutput): @@ -269,16 +268,14 @@ class ImageCollectionOutput(BaseInvocationOutput): ) -@invocation("image", title="Image Primitive", tags=["primitives", "image"], category="primitives", version="1.0.0") -class ImageInvocation( - BaseInvocation, -): +@invocation("image", title="Image Primitive", tags=["primitives", "image"], category="primitives", version="1.0.1") +class ImageInvocation(BaseInvocation): """An image primitive value""" image: ImageField = InputField(description="The image to load") def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) + image = context.images.get_pil(self.image.image_name) return ImageOutput( image=ImageField(image_name=self.image.image_name), @@ -308,42 +305,44 @@ class ImageCollectionInvocation(BaseInvocation): # region DenoiseMask -class DenoiseMaskField(BaseModel): - """An inpaint mask field""" - - mask_name: str = Field(description="The name of the mask image") - masked_latents_name: Optional[str] = Field(default=None, description="The name of the masked image latents") - - @invocation_output("denoise_mask_output") class DenoiseMaskOutput(BaseInvocationOutput): """Base class for nodes that output a single image""" denoise_mask: DenoiseMaskField = OutputField(description="Mask for denoise model run") + @classmethod + def build( + cls, mask_name: str, masked_latents_name: Optional[str] = None, gradient: bool = False + ) -> "DenoiseMaskOutput": + return cls( + denoise_mask=DenoiseMaskField( + mask_name=mask_name, masked_latents_name=masked_latents_name, gradient=gradient + ), + ) + # endregion # region Latents -class LatentsField(BaseModel): - """A latents tensor primitive field""" - - latents_name: str = Field(description="The name of the latents") - seed: Optional[int] = Field(default=None, description="Seed used to generate this latents") - - 
@invocation_output("latents_output") class LatentsOutput(BaseInvocationOutput): """Base class for nodes that output a single latents tensor""" - latents: LatentsField = OutputField( - description=FieldDescriptions.latents, - ) + latents: LatentsField = OutputField(description=FieldDescriptions.latents) width: int = OutputField(description=FieldDescriptions.width) height: int = OutputField(description=FieldDescriptions.height) + @classmethod + def build(cls, latents_name: str, latents: torch.Tensor, seed: Optional[int] = None) -> "LatentsOutput": + return cls( + latents=LatentsField(latents_name=latents_name, seed=seed), + width=latents.size()[3] * LATENT_SCALE_FACTOR, + height=latents.size()[2] * LATENT_SCALE_FACTOR, + ) + @invocation_output("latents_collection_output") class LatentsCollectionOutput(BaseInvocationOutput): @@ -355,7 +354,7 @@ class LatentsCollectionOutput(BaseInvocationOutput): @invocation( - "latents", title="Latents Primitive", tags=["primitives", "latents"], category="primitives", version="1.0.0" + "latents", title="Latents Primitive", tags=["primitives", "latents"], category="primitives", version="1.0.1" ) class LatentsInvocation(BaseInvocation): """A latents tensor primitive value""" @@ -363,9 +362,9 @@ class LatentsInvocation(BaseInvocation): latents: LatentsField = InputField(description="The latents tensor", input=Input.Connection) def invoke(self, context: InvocationContext) -> LatentsOutput: - latents = context.services.latents.get(self.latents.latents_name) + latents = context.tensors.load(self.latents.latents_name) - return build_latents_output(self.latents.latents_name, latents) + return LatentsOutput.build(self.latents.latents_name, latents) @invocation( @@ -386,31 +385,11 @@ class LatentsCollectionInvocation(BaseInvocation): return LatentsCollectionOutput(collection=self.collection) -def build_latents_output(latents_name: str, latents: torch.Tensor, seed: Optional[int] = None): - return LatentsOutput( - latents=LatentsField(latents_name=latents_name, seed=seed), - width=latents.size()[3] * 8, - height=latents.size()[2] * 8, - ) - - # endregion # region Color -class ColorField(BaseModel): - """A color primitive field""" - - r: int = Field(ge=0, le=255, description="The red component") - g: int = Field(ge=0, le=255, description="The green component") - b: int = Field(ge=0, le=255, description="The blue component") - a: int = Field(ge=0, le=255, description="The alpha component") - - def tuple(self) -> Tuple[int, int, int, int]: - return (self.r, self.g, self.b, self.a) - - @invocation_output("color_output") class ColorOutput(BaseInvocationOutput): """Base class for nodes that output a single color""" @@ -442,23 +421,16 @@ class ColorInvocation(BaseInvocation): # region Conditioning -class ConditioningField(BaseModel): - """A conditioning tensor primitive value""" - - conditioning_name: str = Field(description="The name of conditioning tensor") - mask: Optional[MaskField] = Field( - default=None, - description="The mask associated with this conditioning tensor. 
Excluded regions should be set to False, " - "included regions should be set to 1.", - ) - - @invocation_output("conditioning_output") class ConditioningOutput(BaseInvocationOutput): """Base class for nodes that output a single conditioning tensor""" conditioning: ConditioningField = OutputField(description=FieldDescriptions.cond) + @classmethod + def build(cls, conditioning_name: str) -> "ConditioningOutput": + return cls(conditioning=ConditioningField(conditioning_name=conditioning_name)) + @invocation_output("conditioning_collection_output") class ConditioningCollectionOutput(BaseInvocationOutput): diff --git a/invokeai/app/invocations/prompt.py b/invokeai/app/invocations/prompt.py index 4778d98077..234743a003 100644 --- a/invokeai/app/invocations/prompt.py +++ b/invokeai/app/invocations/prompt.py @@ -6,8 +6,10 @@ from dynamicprompts.generators import CombinatorialPromptGenerator, RandomPrompt from pydantic import field_validator from invokeai.app.invocations.primitives import StringCollectionOutput +from invokeai.app.services.shared.invocation_context import InvocationContext -from .baseinvocation import BaseInvocation, InputField, InvocationContext, UIComponent, invocation +from .baseinvocation import BaseInvocation, invocation +from .fields import InputField, UIComponent @invocation( diff --git a/invokeai/app/invocations/sdxl.py b/invokeai/app/invocations/sdxl.py index 68076fdfeb..0df27c0011 100644 --- a/invokeai/app/invocations/sdxl.py +++ b/invokeai/app/invocations/sdxl.py @@ -1,14 +1,10 @@ -from invokeai.app.shared.fields import FieldDescriptions +from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType +from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.model_manager import SubModelType -from ...backend.model_management import ModelType, SubModelType from .baseinvocation import ( BaseInvocation, BaseInvocationOutput, - Input, - InputField, - InvocationContext, - OutputField, - UIType, invocation, invocation_output, ) @@ -34,7 +30,7 @@ class SDXLRefinerModelLoaderOutput(BaseInvocationOutput): vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE") -@invocation("sdxl_model_loader", title="SDXL Main Model", tags=["model", "sdxl"], category="model", version="1.0.0") +@invocation("sdxl_model_loader", title="SDXL Main Model", tags=["model", "sdxl"], category="model", version="1.0.1") class SDXLModelLoaderInvocation(BaseInvocation): """Loads an sdxl base model, outputting its submodels.""" @@ -44,72 +40,52 @@ class SDXLModelLoaderInvocation(BaseInvocation): # TODO: precision? 
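
The primitives.py hunk above moves output construction onto the output classes themselves (ImageOutput.build, DenoiseMaskOutput.build, LatentsOutput.build, ConditioningOutput.build). A minimal sketch of the intended call pattern, mirroring the tiles.py and upscale.py hunks later in this section; pil_image is a hypothetical PIL image produced by the node, not a name from this diff:

    def invoke(self, context: InvocationContext) -> ImageOutput:
        # save() persists the image and returns an ImageDTO carrying name, width and height
        image_dto = context.images.save(image=pil_image)
        # build() replaces the old hand-rolled ImageOutput(image=..., width=..., height=...)
        return ImageOutput.build(image_dto)
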
def invoke(self, context: InvocationContext) -> SDXLModelLoaderOutput: - base_model = self.model.base_model - model_name = self.model.model_name - model_type = ModelType.Main + model_key = self.model.key # TODO: not found exceptions - if not context.services.model_manager.model_exists( - model_name=model_name, - base_model=base_model, - model_type=model_type, - ): - raise Exception(f"Unknown {base_model} {model_type} model: {model_name}") + if not context.models.exists(model_key): + raise Exception(f"Unknown model: {model_key}") return SDXLModelLoaderOutput( unet=UNetField( unet=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.UNet, + key=model_key, + submodel_type=SubModelType.UNet, ), scheduler=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.Scheduler, + key=model_key, + submodel_type=SubModelType.Scheduler, ), loras=[], ), clip=ClipField( tokenizer=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.Tokenizer, + key=model_key, + submodel_type=SubModelType.Tokenizer, ), text_encoder=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.TextEncoder, + key=model_key, + submodel_type=SubModelType.TextEncoder, ), loras=[], skipped_layers=0, ), clip2=ClipField( tokenizer=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.Tokenizer2, + key=model_key, + submodel_type=SubModelType.Tokenizer2, ), text_encoder=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.TextEncoder2, + key=model_key, + submodel_type=SubModelType.TextEncoder2, ), loras=[], skipped_layers=0, ), vae=VaeField( vae=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.Vae, + key=model_key, + submodel_type=SubModelType.Vae, ), ), ) @@ -120,7 +96,7 @@ class SDXLModelLoaderInvocation(BaseInvocation): title="SDXL Refiner Model", tags=["model", "sdxl", "refiner"], category="model", - version="1.0.0", + version="1.0.1", ) class SDXLRefinerModelLoaderInvocation(BaseInvocation): """Loads an sdxl refiner model, outputting its submodels.""" @@ -133,56 +109,40 @@ class SDXLRefinerModelLoaderInvocation(BaseInvocation): # TODO: precision? 
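
The model.py and sdxl.py hunks all converge on the same single-key lookup: one existence check against the model-record key, then ModelInfo values addressed by key plus SubModelType, instead of the old (model_name, base_model, model_type, submodel) tuple. An illustrative fragment of that pattern, assuming model_key holds a valid record key:

    if not context.models.exists(model_key):
        raise Exception(f"Unknown model: {model_key}")

    # each submodel is now addressed by the same key plus a SubModelType
    unet = ModelInfo(key=model_key, submodel_type=SubModelType.UNet)
    scheduler = ModelInfo(key=model_key, submodel_type=SubModelType.Scheduler)
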
def invoke(self, context: InvocationContext) -> SDXLRefinerModelLoaderOutput: - base_model = self.model.base_model - model_name = self.model.model_name - model_type = ModelType.Main + model_key = self.model.key # TODO: not found exceptions - if not context.services.model_manager.model_exists( - model_name=model_name, - base_model=base_model, - model_type=model_type, - ): - raise Exception(f"Unknown {base_model} {model_type} model: {model_name}") + if not context.models.exists(model_key): + raise Exception(f"Unknown model: {model_key}") return SDXLRefinerModelLoaderOutput( unet=UNetField( unet=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.UNet, + key=model_key, + submodel_type=SubModelType.UNet, ), scheduler=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.Scheduler, + key=model_key, + submodel_type=SubModelType.Scheduler, ), loras=[], ), clip2=ClipField( tokenizer=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.Tokenizer2, + key=model_key, + submodel_type=SubModelType.Tokenizer2, ), text_encoder=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.TextEncoder2, + key=model_key, + submodel_type=SubModelType.TextEncoder2, ), loras=[], skipped_layers=0, ), vae=VaeField( vae=ModelInfo( - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=SubModelType.Vae, + key=model_key, + submodel_type=SubModelType.Vae, ), ), ) diff --git a/invokeai/app/invocations/strings.py b/invokeai/app/invocations/strings.py index 3466206b37..182c976cd7 100644 --- a/invokeai/app/invocations/strings.py +++ b/invokeai/app/invocations/strings.py @@ -2,16 +2,15 @@ import re +from invokeai.app.services.shared.invocation_context import InvocationContext + from .baseinvocation import ( BaseInvocation, BaseInvocationOutput, - InputField, - InvocationContext, - OutputField, - UIComponent, invocation, invocation_output, ) +from .fields import InputField, OutputField, UIComponent from .primitives import StringOutput diff --git a/invokeai/app/invocations/t2i_adapter.py b/invokeai/app/invocations/t2i_adapter.py index e055d23903..0f1e251bb3 100644 --- a/invokeai/app/invocations/t2i_adapter.py +++ b/invokeai/app/invocations/t2i_adapter.py @@ -1,29 +1,21 @@ from typing import Union -from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator +from pydantic import BaseModel, Field, field_validator, model_validator from invokeai.app.invocations.baseinvocation import ( BaseInvocation, BaseInvocationOutput, - Input, - InputField, - InvocationContext, - OutputField, invocation, invocation_output, ) from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES -from invokeai.app.invocations.primitives import ImageField +from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField from invokeai.app.invocations.util import validate_begin_end_step, validate_weights -from invokeai.app.shared.fields import FieldDescriptions -from invokeai.backend.model_management.models.base import BaseModelType +from invokeai.app.services.shared.invocation_context import InvocationContext class T2IAdapterModelField(BaseModel): - model_name: str = Field(description="Name of the T2I-Adapter model") - base_model: BaseModelType = Field(description="Base model") - - model_config = 
ConfigDict(protected_namespaces=()) + key: str = Field(description="Model record key for the T2I-Adapter model") class T2IAdapterField(BaseModel): diff --git a/invokeai/app/invocations/tiles.py b/invokeai/app/invocations/tiles.py index e51f891a8d..cb5373bbf7 100644 --- a/invokeai/app/invocations/tiles.py +++ b/invokeai/app/invocations/tiles.py @@ -8,16 +8,12 @@ from invokeai.app.invocations.baseinvocation import ( BaseInvocation, BaseInvocationOutput, Classification, - Input, - InputField, - InvocationContext, - OutputField, - WithMetadata, invocation, invocation_output, ) -from invokeai.app.invocations.primitives import ImageField, ImageOutput -from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin +from invokeai.app.invocations.fields import ImageField, Input, InputField, OutputField, WithBoard, WithMetadata +from invokeai.app.invocations.primitives import ImageOutput +from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.tiles.tiles import ( calc_tiles_even_split, calc_tiles_min_overlap, @@ -236,7 +232,7 @@ BLEND_MODES = Literal["Linear", "Seam"] version="1.1.0", classification=Classification.Beta, ) -class MergeTilesToImageInvocation(BaseInvocation, WithMetadata): +class MergeTilesToImageInvocation(BaseInvocation, WithMetadata, WithBoard): """Merge multiple tile images into a single image.""" # Inputs @@ -268,7 +264,7 @@ class MergeTilesToImageInvocation(BaseInvocation, WithMetadata): # existed in memory at an earlier point in the graph. tile_np_images: list[np.ndarray] = [] for image in images: - pil_image = context.services.images.get_pil_image(image.image_name) + pil_image = context.images.get_pil(image.image_name) pil_image = pil_image.convert("RGB") tile_np_images.append(np.array(pil_image)) @@ -291,18 +287,5 @@ class MergeTilesToImageInvocation(BaseInvocation, WithMetadata): # Convert into a PIL image and save pil_image = Image.fromarray(np_image) - image_dto = context.services.images.create( - image=pil_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + image_dto = context.images.save(image=pil_image) + return ImageOutput.build(image_dto) diff --git a/invokeai/app/invocations/upscale.py b/invokeai/app/invocations/upscale.py index 5f715c1a7e..2e2a6ce881 100644 --- a/invokeai/app/invocations/upscale.py +++ b/invokeai/app/invocations/upscale.py @@ -8,13 +8,15 @@ import torch from PIL import Image from pydantic import ConfigDict -from invokeai.app.invocations.primitives import ImageField, ImageOutput -from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin +from invokeai.app.invocations.fields import ImageField +from invokeai.app.invocations.primitives import ImageOutput +from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet from invokeai.backend.image_util.realesrgan.realesrgan import RealESRGAN from invokeai.backend.util.devices import choose_torch_device -from .baseinvocation import BaseInvocation, InputField, InvocationContext, WithMetadata, invocation +from .baseinvocation import BaseInvocation, invocation +from .fields import 
InputField, WithBoard, WithMetadata # TODO: Populate this from disk? # TODO: Use model manager to load? @@ -29,8 +31,8 @@ if choose_torch_device() == torch.device("mps"): from torch import mps -@invocation("esrgan", title="Upscale (RealESRGAN)", tags=["esrgan", "upscale"], category="esrgan", version="1.3.0") -class ESRGANInvocation(BaseInvocation, WithMetadata): +@invocation("esrgan", title="Upscale (RealESRGAN)", tags=["esrgan", "upscale"], category="esrgan", version="1.3.1") +class ESRGANInvocation(BaseInvocation, WithMetadata, WithBoard): """Upscales an image using RealESRGAN.""" image: ImageField = InputField(description="The input image") @@ -42,8 +44,8 @@ class ESRGANInvocation(BaseInvocation, WithMetadata): model_config = ConfigDict(protected_namespaces=()) def invoke(self, context: InvocationContext) -> ImageOutput: - image = context.services.images.get_pil_image(self.image.image_name) - models_path = context.services.configuration.models_path + image = context.images.get_pil(self.image.image_name) + models_path = context.config.get().models_path rrdbnet_model = None netscale = None @@ -87,7 +89,7 @@ class ESRGANInvocation(BaseInvocation, WithMetadata): netscale = 2 else: msg = f"Invalid RealESRGAN model: {self.model_name}" - context.services.logger.error(msg) + context.logger.error(msg) raise ValueError(msg) esrgan_model_path = Path(f"core/upscaling/realesrgan/{self.model_name}") @@ -110,19 +112,6 @@ class ESRGANInvocation(BaseInvocation, WithMetadata): if choose_torch_device() == torch.device("mps"): mps.empty_cache() - image_dto = context.services.images.create( - image=pil_image, - image_origin=ResourceOrigin.INTERNAL, - image_category=ImageCategory.GENERAL, - node_id=self.id, - session_id=context.graph_execution_state_id, - is_intermediate=self.is_intermediate, - metadata=self.metadata, - workflow=context.workflow, - ) + image_dto = context.images.save(image=pil_image) - return ImageOutput( - image=ImageField(image_name=image_dto.image_name), - width=image_dto.width, - height=image_dto.height, - ) + return ImageOutput.build(image_dto) diff --git a/invokeai/app/services/invocation_queue/__init__.py b/invokeai/app/services/bulk_download/__init__.py similarity index 100% rename from invokeai/app/services/invocation_queue/__init__.py rename to invokeai/app/services/bulk_download/__init__.py diff --git a/invokeai/app/services/bulk_download/bulk_download_base.py b/invokeai/app/services/bulk_download/bulk_download_base.py new file mode 100644 index 0000000000..617b611f56 --- /dev/null +++ b/invokeai/app/services/bulk_download/bulk_download_base.py @@ -0,0 +1,44 @@ +from abc import ABC, abstractmethod +from typing import Optional + + +class BulkDownloadBase(ABC): + """Responsible for creating a zip file containing the images specified by the given image names or board id.""" + + @abstractmethod + def handler( + self, image_names: Optional[list[str]], board_id: Optional[str], bulk_download_item_id: Optional[str] + ) -> None: + """ + Create a zip file containing the images specified by the given image names or board id. + + :param image_names: A list of image names to include in the zip file. + :param board_id: The ID of the board. If provided, all images associated with the board will be included in the zip file. + :param bulk_download_item_id: The bulk_download_item_id that will be used to retrieve the bulk download item when it is prepared, if none is provided a uuid will be generated. 
+ """ + + @abstractmethod + def get_path(self, bulk_download_item_name: str) -> str: + """ + Get the path to the bulk download file. + + :param bulk_download_item_name: The name of the bulk download item. + :return: The path to the bulk download file. + """ + + @abstractmethod + def generate_item_id(self, board_id: Optional[str]) -> str: + """ + Generate an item ID for a bulk download item. + + :param board_id: The ID of the board whose name is to be included in the item id. + :return: The generated item ID. + """ + + @abstractmethod + def delete(self, bulk_download_item_name: str) -> None: + """ + Delete the bulk download file. + + :param bulk_download_item_name: The name of the bulk download item. + """ diff --git a/invokeai/app/services/bulk_download/bulk_download_common.py b/invokeai/app/services/bulk_download/bulk_download_common.py new file mode 100644 index 0000000000..68724eb228 --- /dev/null +++ b/invokeai/app/services/bulk_download/bulk_download_common.py @@ -0,0 +1,25 @@ +DEFAULT_BULK_DOWNLOAD_ID = "default" + + +class BulkDownloadException(Exception): + """Exception raised when a bulk download fails.""" + + def __init__(self, message="Bulk download failed"): + super().__init__(message) + self.message = message + + +class BulkDownloadTargetException(BulkDownloadException): + """Exception raised when a bulk download target is not found.""" + + def __init__(self, message="The bulk download target was not found"): + super().__init__(message) + self.message = message + + +class BulkDownloadParametersException(BulkDownloadException): + """Exception raised when a bulk download parameter is invalid.""" + + def __init__(self, message="No image names or board ID provided"): + super().__init__(message) + self.message = message diff --git a/invokeai/app/services/bulk_download/bulk_download_default.py b/invokeai/app/services/bulk_download/bulk_download_default.py new file mode 100644 index 0000000000..04cec928f4 --- /dev/null +++ b/invokeai/app/services/bulk_download/bulk_download_default.py @@ -0,0 +1,157 @@ +from pathlib import Path +from tempfile import TemporaryDirectory +from typing import Optional, Union +from zipfile import ZipFile + +from invokeai.app.services.board_records.board_records_common import BoardRecordNotFoundException +from invokeai.app.services.bulk_download.bulk_download_common import ( + DEFAULT_BULK_DOWNLOAD_ID, + BulkDownloadException, + BulkDownloadParametersException, + BulkDownloadTargetException, +) +from invokeai.app.services.image_records.image_records_common import ImageRecordNotFoundException +from invokeai.app.services.images.images_common import ImageDTO +from invokeai.app.services.invoker import Invoker +from invokeai.app.util.misc import uuid_string + +from .bulk_download_base import BulkDownloadBase + + +class BulkDownloadService(BulkDownloadBase): + def start(self, invoker: Invoker) -> None: + self._invoker = invoker + + def __init__(self): + self._temp_directory = TemporaryDirectory() + self._bulk_downloads_folder = Path(self._temp_directory.name) / "bulk_downloads" + self._bulk_downloads_folder.mkdir(parents=True, exist_ok=True) + + def handler( + self, image_names: Optional[list[str]], board_id: Optional[str], bulk_download_item_id: Optional[str] + ) -> None: + bulk_download_id: str = DEFAULT_BULK_DOWNLOAD_ID + bulk_download_item_id = bulk_download_item_id or uuid_string() + bulk_download_item_name = bulk_download_item_id + ".zip" + + self._signal_job_started(bulk_download_id, bulk_download_item_id, bulk_download_item_name) + + try: + image_dtos: 
list[ImageDTO] = [] + + if board_id: + image_dtos = self._board_handler(board_id) + elif image_names: + image_dtos = self._image_handler(image_names) + else: + raise BulkDownloadParametersException() + + bulk_download_item_name: str = self._create_zip_file(image_dtos, bulk_download_item_id) + self._signal_job_completed(bulk_download_id, bulk_download_item_id, bulk_download_item_name) + except ( + ImageRecordNotFoundException, + BoardRecordNotFoundException, + BulkDownloadException, + BulkDownloadParametersException, + ) as e: + self._signal_job_failed(bulk_download_id, bulk_download_item_id, bulk_download_item_name, e) + except Exception as e: + self._signal_job_failed(bulk_download_id, bulk_download_item_id, bulk_download_item_name, e) + self._invoker.services.logger.error("Problem bulk downloading images.") + raise e + + def _image_handler(self, image_names: list[str]) -> list[ImageDTO]: + return [self._invoker.services.images.get_dto(image_name) for image_name in image_names] + + def _board_handler(self, board_id: str) -> list[ImageDTO]: + image_names = self._invoker.services.board_image_records.get_all_board_image_names_for_board(board_id) + return self._image_handler(image_names) + + def generate_item_id(self, board_id: Optional[str]) -> str: + return uuid_string() if board_id is None else self._get_clean_board_name(board_id) + "_" + uuid_string() + + def _get_clean_board_name(self, board_id: str) -> str: + if board_id == "none": + return "Uncategorized" + + return self._clean_string_to_path_safe(self._invoker.services.board_records.get(board_id).board_name) + + def _create_zip_file(self, image_dtos: list[ImageDTO], bulk_download_item_id: str) -> str: + """ + Create a zip file containing the images specified by the given image names or board id. + If a download with the same bulk_download_item_id already exists, it will be overwritten. + + :return: The name of the zip file.
+ """ + zip_file_name = bulk_download_item_id + ".zip" + zip_file_path = self._bulk_downloads_folder / (zip_file_name) + + with ZipFile(zip_file_path, "w") as zip_file: + for image_dto in image_dtos: + image_zip_path = Path(image_dto.image_category.value) / image_dto.image_name + image_disk_path = self._invoker.services.images.get_path(image_dto.image_name) + zip_file.write(image_disk_path, arcname=image_zip_path) + + return str(zip_file_name) + + # from https://stackoverflow.com/questions/7406102/create-sane-safe-filename-from-any-unsafe-string + def _clean_string_to_path_safe(self, s: str) -> str: + """Clean a string to be path safe.""" + return "".join([c for c in s if c.isalpha() or c.isdigit() or c == " " or c == "_" or c == "-"]).rstrip() + + def _signal_job_started( + self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str + ) -> None: + """Signal that a bulk download job has started.""" + if self._invoker: + assert bulk_download_id is not None + self._invoker.services.events.emit_bulk_download_started( + bulk_download_id=bulk_download_id, + bulk_download_item_id=bulk_download_item_id, + bulk_download_item_name=bulk_download_item_name, + ) + + def _signal_job_completed( + self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str + ) -> None: + """Signal that a bulk download job has completed.""" + if self._invoker: + assert bulk_download_id is not None + assert bulk_download_item_name is not None + self._invoker.services.events.emit_bulk_download_completed( + bulk_download_id=bulk_download_id, + bulk_download_item_id=bulk_download_item_id, + bulk_download_item_name=bulk_download_item_name, + ) + + def _signal_job_failed( + self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str, exception: Exception + ) -> None: + """Signal that a bulk download job has failed.""" + if self._invoker: + assert bulk_download_id is not None + assert exception is not None + self._invoker.services.events.emit_bulk_download_failed( + bulk_download_id=bulk_download_id, + bulk_download_item_id=bulk_download_item_id, + bulk_download_item_name=bulk_download_item_name, + error=str(exception), + ) + + def stop(self, *args, **kwargs): + self._temp_directory.cleanup() + + def delete(self, bulk_download_item_name: str) -> None: + path = self.get_path(bulk_download_item_name) + Path(path).unlink() + + def get_path(self, bulk_download_item_name: str) -> str: + path = str(self._bulk_downloads_folder / bulk_download_item_name) + if not self._is_valid_path(path): + raise BulkDownloadTargetException() + return path + + def _is_valid_path(self, path: Union[str, Path]) -> bool: + """Validates the path given for a bulk download.""" + path = path if isinstance(path, Path) else Path(path) + return path.exists()
Optional[List[str]] = sys.argv[1:]) -> None: """Call to parse command-line arguments.""" parser = self.get_parser() opt, unknown_opts = parser.parse_known_args(argv) @@ -68,7 +68,7 @@ class InvokeAISettings(BaseSettings): return OmegaConf.to_yaml(conf) @classmethod - def add_parser_arguments(cls, parser): + def add_parser_arguments(cls, parser: ArgumentParser) -> None: """Dynamically create arguments for a settings parser.""" if "type" in get_type_hints(cls): settings_stanza = get_args(get_type_hints(cls)["type"])[0] @@ -117,7 +117,8 @@ class InvokeAISettings(BaseSettings): """Return the category of a setting.""" hints = get_type_hints(cls) if command_field in hints: - return get_args(hints[command_field])[0] + result: str = get_args(hints[command_field])[0] + return result else: return "Uncategorized" @@ -155,10 +156,11 @@ class InvokeAISettings(BaseSettings): "lora_dir", "embedding_dir", "controlnet_dir", + "conf_path", ] @classmethod - def add_field_argument(cls, command_parser, name: str, field, default_override=None): + def add_field_argument(cls, command_parser, name: str, field, default_override=None) -> None: """Add the argparse arguments for a setting parser.""" field_type = get_type_hints(cls).get(name) default = ( diff --git a/invokeai/app/services/config/config_common.py b/invokeai/app/services/config/config_common.py index d11bcabcf9..27a0f859c2 100644 --- a/invokeai/app/services/config/config_common.py +++ b/invokeai/app/services/config/config_common.py @@ -21,7 +21,7 @@ class PagingArgumentParser(argparse.ArgumentParser): It also supports reading defaults from an init file. """ - def print_help(self, file=None): + def print_help(self, file=None) -> None: text = self.format_help() pydoc.pager(text) diff --git a/invokeai/app/services/config/config_default.py b/invokeai/app/services/config/config_default.py index 132afc2272..64d464002b 100644 --- a/invokeai/app/services/config/config_default.py +++ b/invokeai/app/services/config/config_default.py @@ -30,7 +30,6 @@ InvokeAI: lora_dir: null embedding_dir: null controlnet_dir: null - conf_path: configs/models.yaml models_dir: models legacy_conf_dir: configs/stable-diffusion db_dir: databases @@ -123,7 +122,6 @@ a Path object: root_path - path to InvokeAI root output_path - path to default outputs directory - model_conf_path - path to models.yaml conf - alias for the above embedding_path - path to the embeddings directory lora_path - path to the LoRA directory @@ -163,17 +161,17 @@ two configs are kept in separate sections of the config file: InvokeAI: Paths: root: /home/lstein/invokeai-main - conf_path: configs/models.yaml legacy_conf_dir: configs/stable-diffusion outdir: outputs ... 
""" + from __future__ import annotations import os from pathlib import Path -from typing import Any, ClassVar, Dict, List, Literal, Optional, Union +from typing import Any, ClassVar, Dict, List, Literal, Optional from omegaconf import DictConfig, OmegaConf from pydantic import Field @@ -185,7 +183,9 @@ from .config_base import InvokeAISettings INIT_FILE = Path("invokeai.yaml") DB_FILE = Path("invokeai.db") LEGACY_INIT_FILE = Path("invokeai.init") -DEFAULT_MAX_VRAM = 0.5 +DEFAULT_RAM_CACHE = 10.0 +DEFAULT_VRAM_CACHE = 0.25 +DEFAULT_CONVERT_CACHE = 20.0 class Categories(object): @@ -235,8 +235,8 @@ class InvokeAIAppConfig(InvokeAISettings): # PATHS root : Optional[Path] = Field(default=None, description='InvokeAI runtime root directory', json_schema_extra=Categories.Paths) autoimport_dir : Path = Field(default=Path('autoimport'), description='Path to a directory of models files to be imported on startup.', json_schema_extra=Categories.Paths) - conf_path : Path = Field(default=Path('configs/models.yaml'), description='Path to models definition file', json_schema_extra=Categories.Paths) models_dir : Path = Field(default=Path('models'), description='Path to the models directory', json_schema_extra=Categories.Paths) + convert_cache_dir : Path = Field(default=Path('models/.cache'), description='Path to the converted models cache directory', json_schema_extra=Categories.Paths) legacy_conf_dir : Path = Field(default=Path('configs/stable-diffusion'), description='Path to directory of legacy checkpoint config files', json_schema_extra=Categories.Paths) db_dir : Path = Field(default=Path('databases'), description='Path to InvokeAI databases directory', json_schema_extra=Categories.Paths) outdir : Path = Field(default=Path('outputs'), description='Default folder for output images', json_schema_extra=Categories.Paths) @@ -260,8 +260,10 @@ class InvokeAIAppConfig(InvokeAISettings): version : bool = Field(default=False, description="Show InvokeAI version and exit", json_schema_extra=Categories.Other) # CACHE - ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) - vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) + ram : float = Field(default=DEFAULT_RAM_CACHE, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) + vram : float = Field(default=DEFAULT_VRAM_CACHE, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) + convert_cache : float = Field(default=DEFAULT_CONVERT_CACHE, ge=0, description="Maximum size of on-disk converted models cache (GB)", json_schema_extra=Categories.ModelCache) + lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", json_schema_extra=Categories.ModelCache, ) log_memory_usage : bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). 
There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.", json_schema_extra=Categories.ModelCache) @@ -296,6 +298,7 @@ class InvokeAIAppConfig(InvokeAISettings): lora_dir : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Paths) embedding_dir : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Paths) controlnet_dir : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Paths) + conf_path : Path = Field(default=Path('configs/models.yaml'), description='Path to models definition file', json_schema_extra=Categories.Paths) # this is not referred to in the source code and can be removed entirely #free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance) @@ -404,6 +407,11 @@ class InvokeAIAppConfig(InvokeAISettings): """Path to the models directory.""" return self._resolve(self.models_dir) + @property + def models_convert_cache_path(self) -> Path: + """Path to the converted models cache directory.""" + return self._resolve(self.convert_cache_dir) + @property def custom_nodes_path(self) -> Path: """Path to the custom nodes directory.""" @@ -433,15 +441,20 @@ class InvokeAIAppConfig(InvokeAISettings): return True @property - def ram_cache_size(self) -> Union[Literal["auto"], float]: - """Return the ram cache size using the legacy or modern setting.""" + def ram_cache_size(self) -> float: + """Return the ram cache size using the legacy or modern setting (GB).""" return self.max_cache_size or self.ram @property - def vram_cache_size(self) -> Union[Literal["auto"], float]: - """Return the vram cache size using the legacy or modern setting.""" + def vram_cache_size(self) -> float: + """Return the vram cache size using the legacy or modern setting (GB).""" return self.max_vram_cache_size or self.vram + @property + def convert_cache_size(self) -> float: + """Return the convert cache size on disk (GB).""" + return self.convert_cache + @property def use_cpu(self) -> bool: """Return true if the device is set to CPU or the always_use_cpu flag is set.""" diff --git a/invokeai/app/services/download/__init__.py b/invokeai/app/services/download/__init__.py index 04c1dfdb1d..371c531387 100644 --- a/invokeai/app/services/download/__init__.py +++ b/invokeai/app/services/download/__init__.py @@ -1,4 +1,5 @@ """Init file for download queue.""" + from .download_base import DownloadJob, DownloadJobStatus, DownloadQueueServiceBase, UnknownJobIDException from .download_default import DownloadQueueService, TqdmProgress diff --git a/invokeai/app/services/download/download_base.py b/invokeai/app/services/download/download_base.py index f854f64f58..2ac13b825f 100644 --- a/invokeai/app/services/download/download_base.py +++ b/invokeai/app/services/download/download_base.py @@ -260,3 +260,16 @@ class DownloadQueueServiceBase(ABC): def join(self) -> None: """Wait until all jobs are off the queue.""" pass + + @abstractmethod + def wait_for_job(self, job: DownloadJob, timeout: int = 0) -> DownloadJob: + """Wait until the indicated download job has reached a terminal state.
+ + This will block until the indicated download job has completed, + been cancelled, or errored out. + + :param job: The job to wait on. + :param timeout: Wait up to the indicated number of seconds. Raise a TimeoutError if + the job hasn't completed within the indicated time. + """ + pass diff --git a/invokeai/app/services/download/download_default.py b/invokeai/app/services/download/download_default.py index 7613c0893f..843351a259 100644 --- a/invokeai/app/services/download/download_default.py +++ b/invokeai/app/services/download/download_default.py @@ -4,10 +4,11 @@ import os import re import threading +import time import traceback from pathlib import Path from queue import Empty, PriorityQueue -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Set import requests from pydantic.networks import AnyHttpUrl @@ -48,11 +49,12 @@ class DownloadQueueService(DownloadQueueServiceBase): :param max_parallel_dl: Number of simultaneous downloads allowed [5]. :param requests_session: Optional requests.sessions.Session object, for unit tests. """ - self._jobs = {} + self._jobs: Dict[int, DownloadJob] = {} self._next_job_id = 0 - self._queue = PriorityQueue() + self._queue: PriorityQueue[DownloadJob] = PriorityQueue() self._stop_event = threading.Event() - self._worker_pool = set() + self._job_completed_event = threading.Event() + self._worker_pool: Set[threading.Thread] = set() self._lock = threading.Lock() self._logger = InvokeAILogger.get_logger("DownloadQueueService") self._event_bus = event_bus @@ -188,6 +190,16 @@ class DownloadQueueService(DownloadQueueServiceBase): if not job.in_terminal_state: self.cancel_job(job) + def wait_for_job(self, job: DownloadJob, timeout: int = 0) -> DownloadJob: + """Block until the indicated job has reached a terminal state, or the timeout limit is reached.""" + start = time.time() + while not job.in_terminal_state: + if self._job_completed_event.wait(timeout=0.25): # in case we miss an event + self._job_completed_event.clear() + if timeout > 0 and time.time() - start > timeout: + raise TimeoutError("Timeout exceeded") + return job + def _start_workers(self, max_workers: int) -> None: """Start the requested number of worker threads.""" self._stop_event.clear() @@ -212,7 +224,6 @@ class DownloadQueueService(DownloadQueueServiceBase): job.job_started = get_iso_timestamp() self._do_download(job) self._signal_job_complete(job) - except (OSError, HTTPError) as excp: job.error_type = excp.__class__.__name__ + f"({str(excp)})" job.error = traceback.format_exc() @@ -223,6 +234,7 @@ class DownloadQueueService(DownloadQueueServiceBase): finally: job.job_ended = get_iso_timestamp() + self._job_completed_event.set() # signal a change to terminal state self._queue.task_done() self._logger.debug(f"Download queue worker thread {threading.current_thread().name} exiting.") @@ -407,11 +419,11 @@ class DownloadQueueService(DownloadQueueServiceBase): # Example on_progress event handler to display a TQDM status bar # Activate with: -# download_service.download('http://foo.bar/baz', '/tmp', on_progress=TqdmProgress().job_update +# download_service.download(DownloadJob('http://foo.bar/baz', '/tmp', on_progress=TqdmProgress().update)) class TqdmProgress(object): """TQDM-based progress bar object to use in on_progress handlers.""" - _bars: Dict[int, tqdm] # type: ignore _last: Dict[int, int] # last bytes downloaded def __init__(self) -> None: # noqa D107
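The `wait_for_job` implementation above pairs a shared `threading.Event` with a short poll interval, so a waiter that happens to miss a completion signal still re-checks the job regularly. A stripped-down, runnable sketch of the same pattern, with a toy `Job` class standing in for `DownloadJob`:

```python
import threading
import time

class Job:
    """Toy stand-in for DownloadJob; only the terminal-state flag matters here."""
    def __init__(self) -> None:
        self.in_terminal_state = False

job_completed_event = threading.Event()

def wait_for_job(job: Job, timeout: int = 0) -> Job:
    # Same shape as DownloadQueueService.wait_for_job above: wake on the shared
    # event, but re-check the job every 0.25 s in case the event was missed.
    start = time.time()
    while not job.in_terminal_state:
        if job_completed_event.wait(timeout=0.25):
            job_completed_event.clear()
        if timeout > 0 and time.time() - start > timeout:
            raise TimeoutError("Timeout exceeded")
    return job

def worker(job: Job) -> None:
    time.sleep(1.0)                # pretend to download something
    job.in_terminal_state = True   # reach a terminal state...
    job_completed_event.set()      # ...and signal the change

job = Job()
threading.Thread(target=worker, args=(job,)).start()
print(wait_for_job(job, timeout=5) is job)  # True after about one second
```

Because every waiter re-checks `job.in_terminal_state` at least once per poll interval, one waiter clearing the shared event cannot stall the others indefinitely.

diff --git a/invokeai/app/services/events/events_base.py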
b/invokeai/app/services/events/events_base.py index e9365f3349..b52b919edd 100644 --- a/invokeai/app/services/events/events_base.py +++ b/invokeai/app/services/events/events_base.py @@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Union -from invokeai.app.services.invocation_processor.invocation_processor_common import ProgressImage +from invokeai.app.services.session_processor.session_processor_common import ProgressImage from invokeai.app.services.session_queue.session_queue_common import ( BatchStatus, EnqueueBatchResult, @@ -11,12 +11,12 @@ from invokeai.app.services.session_queue.session_queue_common import ( SessionQueueStatus, ) from invokeai.app.util.misc import get_timestamp -from invokeai.backend.model_management.model_manager import ModelInfo -from invokeai.backend.model_management.models.base import BaseModelType, ModelType, SubModelType +from invokeai.backend.model_manager import AnyModelConfig class EventServiceBase: queue_event: str = "queue_event" + bulk_download_event: str = "bulk_download_event" download_event: str = "download_event" model_event: str = "model_event" @@ -25,6 +25,14 @@ class EventServiceBase: def dispatch(self, event_name: str, payload: Any) -> None: pass + def _emit_bulk_download_event(self, event_name: str, payload: dict) -> None: + """Bulk download events are emitted to a room with queue_id as the room name""" + payload["timestamp"] = get_timestamp() + self.dispatch( + event_name=EventServiceBase.bulk_download_event, + payload={"event": event_name, "data": payload}, + ) + def __emit_queue_event(self, event_name: str, payload: dict) -> None: """Queue events are emitted to a room with queue_id as the room name""" payload["timestamp"] = get_timestamp() @@ -55,7 +63,7 @@ class EventServiceBase: queue_item_id: int, queue_batch_id: str, graph_execution_state_id: str, - node: dict, + node_id: str, source_node_id: str, progress_image: Optional[ProgressImage], step: int, @@ -70,7 +78,7 @@ class EventServiceBase: "queue_item_id": queue_item_id, "queue_batch_id": queue_batch_id, "graph_execution_state_id": graph_execution_state_id, - "node_id": node.get("id"), + "node_id": node_id, "source_node_id": source_node_id, "progress_image": progress_image.model_dump() if progress_image is not None else None, "step": step, @@ -171,10 +179,7 @@ class EventServiceBase: queue_item_id: int, queue_batch_id: str, graph_execution_state_id: str, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - submodel: SubModelType, + model_config: AnyModelConfig, ) -> None: """Emitted when a model is requested""" self.__emit_queue_event( @@ -184,10 +189,7 @@ class EventServiceBase: "queue_item_id": queue_item_id, "queue_batch_id": queue_batch_id, "graph_execution_state_id": graph_execution_state_id, - "model_name": model_name, - "base_model": base_model, - "model_type": model_type, - "submodel": submodel, + "model_config": model_config.model_dump(), }, ) @@ -197,11 +199,7 @@ class EventServiceBase: queue_item_id: int, queue_batch_id: str, graph_execution_state_id: str, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - submodel: SubModelType, - model_info: ModelInfo, + model_config: AnyModelConfig, ) -> None: """Emitted when a model is correctly loaded (returns model info)""" self.__emit_queue_event( @@ -211,59 +209,7 @@ class EventServiceBase: "queue_item_id": queue_item_id, "queue_batch_id": queue_batch_id, "graph_execution_state_id": graph_execution_state_id, - "model_name": model_name, - "base_model": base_model, - "model_type": 
model_type, - "submodel": submodel, - "hash": model_info.hash, - "location": str(model_info.location), - "precision": str(model_info.precision), - }, - ) - - def emit_session_retrieval_error( - self, - queue_id: str, - queue_item_id: int, - queue_batch_id: str, - graph_execution_state_id: str, - error_type: str, - error: str, - ) -> None: - """Emitted when session retrieval fails""" - self.__emit_queue_event( - event_name="session_retrieval_error", - payload={ - "queue_id": queue_id, - "queue_item_id": queue_item_id, - "queue_batch_id": queue_batch_id, - "graph_execution_state_id": graph_execution_state_id, - "error_type": error_type, - "error": error, - }, - ) - - def emit_invocation_retrieval_error( - self, - queue_id: str, - queue_item_id: int, - queue_batch_id: str, - graph_execution_state_id: str, - node_id: str, - error_type: str, - error: str, - ) -> None: - """Emitted when invocation retrieval fails""" - self.__emit_queue_event( - event_name="invocation_retrieval_error", - payload={ - "queue_id": queue_id, - "queue_item_id": queue_item_id, - "queue_batch_id": queue_batch_id, - "graph_execution_state_id": graph_execution_state_id, - "node_id": node_id, - "error_type": error_type, - "error": error, + "model_config": model_config.model_dump(), }, ) @@ -411,6 +357,7 @@ class EventServiceBase: bytes: int, total_bytes: int, parts: List[Dict[str, Union[str, int]]], + id: int, ) -> None: """ Emit at intervals while the install job is in progress (remote models only). @@ -430,6 +377,7 @@ class EventServiceBase: "bytes": bytes, "total_bytes": total_bytes, "parts": parts, + "id": id, }, ) @@ -444,7 +392,7 @@ class EventServiceBase: payload={"source": source}, ) - def emit_model_install_completed(self, source: str, key: str, total_bytes: Optional[int] = None) -> None: + def emit_model_install_completed(self, source: str, key: str, id: int, total_bytes: Optional[int] = None) -> None: """ Emit when an install job is completed successfully. @@ -454,11 +402,7 @@ class EventServiceBase: """ self.__emit_model_event( event_name="model_install_completed", - payload={ - "source": source, - "total_bytes": total_bytes, - "key": key, - }, + payload={"source": source, "total_bytes": total_bytes, "key": key, "id": id}, ) def emit_model_install_cancelled(self, source: str) -> None: @@ -472,12 +416,7 @@ class EventServiceBase: payload={"source": source}, ) - def emit_model_install_error( - self, - source: str, - error_type: str, - error: str, - ) -> None: + def emit_model_install_error(self, source: str, error_type: str, error: str, id: int) -> None: """ Emit when an install job encounters an exception. 
@@ -487,9 +426,45 @@ class EventServiceBase: """ self.__emit_model_event( event_name="model_install_error", + payload={"source": source, "error_type": error_type, "error": error, "id": id}, + ) + + def emit_bulk_download_started( + self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str + ) -> None: + """Emitted when a bulk download starts""" + self._emit_bulk_download_event( + event_name="bulk_download_started", payload={ - "source": source, - "error_type": error_type, + "bulk_download_id": bulk_download_id, + "bulk_download_item_id": bulk_download_item_id, + "bulk_download_item_name": bulk_download_item_name, + }, + ) + + def emit_bulk_download_completed( + self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str + ) -> None: + """Emitted when a bulk download completes""" + self._emit_bulk_download_event( + event_name="bulk_download_completed", + payload={ + "bulk_download_id": bulk_download_id, + "bulk_download_item_id": bulk_download_item_id, + "bulk_download_item_name": bulk_download_item_name, + }, + ) + + def emit_bulk_download_failed( + self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str, error: str + ) -> None: + """Emitted when a bulk download fails""" + self._emit_bulk_download_event( + event_name="bulk_download_failed", + payload={ + "bulk_download_id": bulk_download_id, + "bulk_download_item_id": bulk_download_item_id, + "bulk_download_item_name": bulk_download_item_name, "error": error, }, ) diff --git a/invokeai/app/services/image_files/image_files_base.py b/invokeai/app/services/image_files/image_files_base.py index 27dd67531f..f4036277b7 100644 --- a/invokeai/app/services/image_files/image_files_base.py +++ b/invokeai/app/services/image_files/image_files_base.py @@ -4,7 +4,7 @@ from typing import Optional from PIL.Image import Image as PILImageType -from invokeai.app.invocations.baseinvocation import MetadataField +from invokeai.app.invocations.fields import MetadataField from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID diff --git a/invokeai/app/services/image_files/image_files_disk.py b/invokeai/app/services/image_files/image_files_disk.py index 0844821672..fb687973ba 100644 --- a/invokeai/app/services/image_files/image_files_disk.py +++ b/invokeai/app/services/image_files/image_files_disk.py @@ -7,7 +7,7 @@ from PIL import Image, PngImagePlugin from PIL.Image import Image as PILImageType from send2trash import send2trash -from invokeai.app.invocations.baseinvocation import MetadataField +from invokeai.app.invocations.fields import MetadataField from invokeai.app.services.invoker import Invoker from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail diff --git a/invokeai/app/services/image_records/image_records_base.py b/invokeai/app/services/image_records/image_records_base.py index 727f4977fb..7b7b261eca 100644 --- a/invokeai/app/services/image_records/image_records_base.py +++ b/invokeai/app/services/image_records/image_records_base.py @@ -2,7 +2,7 @@ from abc import ABC, abstractmethod from datetime import datetime from typing import Optional -from invokeai.app.invocations.metadata import MetadataField +from invokeai.app.invocations.fields import MetadataField from invokeai.app.services.shared.pagination import OffsetPaginatedResults from .image_records_common import ImageCategory, ImageRecord, ImageRecordChanges, 
ResourceOrigin diff --git a/invokeai/app/services/image_records/image_records_sqlite.py b/invokeai/app/services/image_records/image_records_sqlite.py index 74f82e7d84..5b37913c8f 100644 --- a/invokeai/app/services/image_records/image_records_sqlite.py +++ b/invokeai/app/services/image_records/image_records_sqlite.py @@ -3,7 +3,7 @@ import threading from datetime import datetime from typing import Optional, Union, cast -from invokeai.app.invocations.baseinvocation import MetadataField, MetadataFieldValidator +from invokeai.app.invocations.fields import MetadataField, MetadataFieldValidator from invokeai.app.services.shared.pagination import OffsetPaginatedResults from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase diff --git a/invokeai/app/services/images/images_base.py b/invokeai/app/services/images/images_base.py index df71dadb5b..42c4266774 100644 --- a/invokeai/app/services/images/images_base.py +++ b/invokeai/app/services/images/images_base.py @@ -3,7 +3,7 @@ from typing import Callable, Optional from PIL.Image import Image as PILImageType -from invokeai.app.invocations.baseinvocation import MetadataField +from invokeai.app.invocations.fields import MetadataField from invokeai.app.services.image_records.image_records_common import ( ImageCategory, ImageRecord, diff --git a/invokeai/app/services/images/images_default.py b/invokeai/app/services/images/images_default.py index 74aeeccca5..adeed73811 100644 --- a/invokeai/app/services/images/images_default.py +++ b/invokeai/app/services/images/images_default.py @@ -2,7 +2,7 @@ from typing import Optional from PIL.Image import Image as PILImageType -from invokeai.app.invocations.baseinvocation import MetadataField +from invokeai.app.invocations.fields import MetadataField from invokeai.app.services.invoker import Invoker from invokeai.app.services.shared.pagination import OffsetPaginatedResults from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID @@ -154,7 +154,7 @@ class ImageService(ImageServiceABC): self.__invoker.services.logger.error("Image record not found") raise except Exception as e: - self.__invoker.services.logger.error("Problem getting image DTO") + self.__invoker.services.logger.error("Problem getting image metadata") raise e def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]: diff --git a/invokeai/app/services/invocation_cache/invocation_cache_memory.py b/invokeai/app/services/invocation_cache/invocation_cache_memory.py index 4a503b3c6b..c700f81186 100644 --- a/invokeai/app/services/invocation_cache/invocation_cache_memory.py +++ b/invokeai/app/services/invocation_cache/invocation_cache_memory.py @@ -37,7 +37,8 @@ class MemoryInvocationCache(InvocationCacheBase): if self._max_cache_size == 0: return self._invoker.services.images.on_deleted(self._delete_by_match) - self._invoker.services.latents.on_deleted(self._delete_by_match) + self._invoker.services.tensors.on_deleted(self._delete_by_match) + self._invoker.services.conditioning.on_deleted(self._delete_by_match) def get(self, key: Union[int, str]) -> Optional[BaseInvocationOutput]: with self._lock: diff --git a/invokeai/app/services/invocation_processor/invocation_processor_base.py b/invokeai/app/services/invocation_processor/invocation_processor_base.py deleted file mode 100644 index 7947a201dd..0000000000 --- a/invokeai/app/services/invocation_processor/invocation_processor_base.py +++ /dev/null @@ -1,5 +0,0 @@ -from abc import ABC - - -class InvocationProcessorABC(ABC): # noqa: B024 - 
pass diff --git a/invokeai/app/services/invocation_processor/invocation_processor_common.py b/invokeai/app/services/invocation_processor/invocation_processor_common.py deleted file mode 100644 index 347f6c7323..0000000000 --- a/invokeai/app/services/invocation_processor/invocation_processor_common.py +++ /dev/null @@ -1,15 +0,0 @@ -from pydantic import BaseModel, Field - - -class ProgressImage(BaseModel): - """The progress image sent intermittently during processing""" - - width: int = Field(description="The effective width of the image in pixels") - height: int = Field(description="The effective height of the image in pixels") - dataURL: str = Field(description="The image data as a b64 data URL") - - -class CanceledException(Exception): - """Execution canceled by user.""" - - pass diff --git a/invokeai/app/services/invocation_processor/invocation_processor_default.py b/invokeai/app/services/invocation_processor/invocation_processor_default.py deleted file mode 100644 index 54342c0da1..0000000000 --- a/invokeai/app/services/invocation_processor/invocation_processor_default.py +++ /dev/null @@ -1,237 +0,0 @@ -import time -import traceback -from contextlib import suppress -from threading import BoundedSemaphore, Event, Thread -from typing import Optional - -import invokeai.backend.util.logging as logger -from invokeai.app.invocations.baseinvocation import InvocationContext -from invokeai.app.services.invocation_queue.invocation_queue_common import InvocationQueueItem -from invokeai.app.services.invocation_stats.invocation_stats_common import ( - GESStatsNotFoundError, -) -from invokeai.app.util.profiler import Profiler - -from ..invoker import Invoker -from .invocation_processor_base import InvocationProcessorABC -from .invocation_processor_common import CanceledException - - -class DefaultInvocationProcessor(InvocationProcessorABC): - __invoker_thread: Thread - __stop_event: Event - __invoker: Invoker - __threadLimit: BoundedSemaphore - - def start(self, invoker: Invoker) -> None: - # if we do want multithreading at some point, we could make this configurable - self.__threadLimit = BoundedSemaphore(1) - self.__invoker = invoker - self.__stop_event = Event() - self.__invoker_thread = Thread( - name="invoker_processor", - target=self.__process, - kwargs={"stop_event": self.__stop_event}, - ) - self.__invoker_thread.daemon = True # TODO: make async and do not use threads - self.__invoker_thread.start() - - def stop(self, *args, **kwargs) -> None: - self.__stop_event.set() - - def __process(self, stop_event: Event): - try: - self.__threadLimit.acquire() - queue_item: Optional[InvocationQueueItem] = None - - profiler = ( - Profiler( - logger=self.__invoker.services.logger, - output_dir=self.__invoker.services.configuration.profiles_path, - prefix=self.__invoker.services.configuration.profile_prefix, - ) - if self.__invoker.services.configuration.profile_graphs - else None - ) - - def stats_cleanup(graph_execution_state_id: str) -> None: - if profiler: - profile_path = profiler.stop() - stats_path = profile_path.with_suffix(".json") - self.__invoker.services.performance_statistics.dump_stats( - graph_execution_state_id=graph_execution_state_id, output_path=stats_path - ) - with suppress(GESStatsNotFoundError): - self.__invoker.services.performance_statistics.log_stats(graph_execution_state_id) - self.__invoker.services.performance_statistics.reset_stats(graph_execution_state_id) - - while not stop_event.is_set(): - try: - queue_item = self.__invoker.services.queue.get() - except Exception as e: - 
self.__invoker.services.logger.error("Exception while getting from queue:\n%s" % e) - - if not queue_item: # Probably stopping - # do not hammer the queue - time.sleep(0.5) - continue - - if profiler and profiler.profile_id != queue_item.graph_execution_state_id: - profiler.start(profile_id=queue_item.graph_execution_state_id) - - try: - graph_execution_state = self.__invoker.services.graph_execution_manager.get( - queue_item.graph_execution_state_id - ) - except Exception as e: - self.__invoker.services.logger.error("Exception while retrieving session:\n%s" % e) - self.__invoker.services.events.emit_session_retrieval_error( - queue_batch_id=queue_item.session_queue_batch_id, - queue_item_id=queue_item.session_queue_item_id, - queue_id=queue_item.session_queue_id, - graph_execution_state_id=queue_item.graph_execution_state_id, - error_type=e.__class__.__name__, - error=traceback.format_exc(), - ) - continue - - try: - invocation = graph_execution_state.execution_graph.get_node(queue_item.invocation_id) - except Exception as e: - self.__invoker.services.logger.error("Exception while retrieving invocation:\n%s" % e) - self.__invoker.services.events.emit_invocation_retrieval_error( - queue_batch_id=queue_item.session_queue_batch_id, - queue_item_id=queue_item.session_queue_item_id, - queue_id=queue_item.session_queue_id, - graph_execution_state_id=queue_item.graph_execution_state_id, - node_id=queue_item.invocation_id, - error_type=e.__class__.__name__, - error=traceback.format_exc(), - ) - continue - - # get the source node id to provide to clients (the prepared node id is not as useful) - source_node_id = graph_execution_state.prepared_source_mapping[invocation.id] - - # Send starting event - self.__invoker.services.events.emit_invocation_started( - queue_batch_id=queue_item.session_queue_batch_id, - queue_item_id=queue_item.session_queue_item_id, - queue_id=queue_item.session_queue_id, - graph_execution_state_id=graph_execution_state.id, - node=invocation.model_dump(), - source_node_id=source_node_id, - ) - - # Invoke - try: - graph_id = graph_execution_state.id - with self.__invoker.services.performance_statistics.collect_stats(invocation, graph_id): - # use the internal invoke_internal(), which wraps the node's invoke() method, - # which handles a few things: - # - nodes that require a value, but get it only from a connection - # - referencing the invocation cache instead of executing the node - outputs = invocation.invoke_internal( - InvocationContext( - services=self.__invoker.services, - graph_execution_state_id=graph_execution_state.id, - queue_item_id=queue_item.session_queue_item_id, - queue_id=queue_item.session_queue_id, - queue_batch_id=queue_item.session_queue_batch_id, - workflow=queue_item.workflow, - ) - ) - - # Check queue to see if this is canceled, and skip if so - if self.__invoker.services.queue.is_canceled(graph_execution_state.id): - continue - - # Save outputs and history - graph_execution_state.complete(invocation.id, outputs) - - # Save the state changes - self.__invoker.services.graph_execution_manager.set(graph_execution_state) - - # Send complete event - self.__invoker.services.events.emit_invocation_complete( - queue_batch_id=queue_item.session_queue_batch_id, - queue_item_id=queue_item.session_queue_item_id, - queue_id=queue_item.session_queue_id, - graph_execution_state_id=graph_execution_state.id, - node=invocation.model_dump(), - source_node_id=source_node_id, - result=outputs.model_dump(), - ) - - except KeyboardInterrupt: - pass - - except 
CanceledException: - stats_cleanup(graph_execution_state.id) - pass - - except Exception as e: - error = traceback.format_exc() - logger.error(error) - - # Save error - graph_execution_state.set_node_error(invocation.id, error) - - # Save the state changes - self.__invoker.services.graph_execution_manager.set(graph_execution_state) - - self.__invoker.services.logger.error("Error while invoking:\n%s" % e) - # Send error event - self.__invoker.services.events.emit_invocation_error( - queue_batch_id=queue_item.session_queue_batch_id, - queue_item_id=queue_item.session_queue_item_id, - queue_id=queue_item.session_queue_id, - graph_execution_state_id=graph_execution_state.id, - node=invocation.model_dump(), - source_node_id=source_node_id, - error_type=e.__class__.__name__, - error=error, - ) - pass - - # Check queue to see if this is canceled, and skip if so - if self.__invoker.services.queue.is_canceled(graph_execution_state.id): - continue - - # Queue any further commands if invoking all - is_complete = graph_execution_state.is_complete() - if queue_item.invoke_all and not is_complete: - try: - self.__invoker.invoke( - session_queue_batch_id=queue_item.session_queue_batch_id, - session_queue_item_id=queue_item.session_queue_item_id, - session_queue_id=queue_item.session_queue_id, - graph_execution_state=graph_execution_state, - workflow=queue_item.workflow, - invoke_all=True, - ) - except Exception as e: - self.__invoker.services.logger.error("Error while invoking:\n%s" % e) - self.__invoker.services.events.emit_invocation_error( - queue_batch_id=queue_item.session_queue_batch_id, - queue_item_id=queue_item.session_queue_item_id, - queue_id=queue_item.session_queue_id, - graph_execution_state_id=graph_execution_state.id, - node=invocation.model_dump(), - source_node_id=source_node_id, - error_type=e.__class__.__name__, - error=traceback.format_exc(), - ) - elif is_complete: - self.__invoker.services.events.emit_graph_execution_complete( - queue_batch_id=queue_item.session_queue_batch_id, - queue_item_id=queue_item.session_queue_item_id, - queue_id=queue_item.session_queue_id, - graph_execution_state_id=graph_execution_state.id, - ) - stats_cleanup(graph_execution_state.id) - - except KeyboardInterrupt: - pass # Log something? 
KeyboardInterrupt is probably not going to be seen by the processor - finally: - self.__threadLimit.release() diff --git a/invokeai/app/services/invocation_queue/invocation_queue_base.py b/invokeai/app/services/invocation_queue/invocation_queue_base.py deleted file mode 100644 index 09f4875c5f..0000000000 --- a/invokeai/app/services/invocation_queue/invocation_queue_base.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) - -from abc import ABC, abstractmethod -from typing import Optional - -from .invocation_queue_common import InvocationQueueItem - - -class InvocationQueueABC(ABC): - """Abstract base class for all invocation queues""" - - @abstractmethod - def get(self) -> InvocationQueueItem: - pass - - @abstractmethod - def put(self, item: Optional[InvocationQueueItem]) -> None: - pass - - @abstractmethod - def cancel(self, graph_execution_state_id: str) -> None: - pass - - @abstractmethod - def is_canceled(self, graph_execution_state_id: str) -> bool: - pass diff --git a/invokeai/app/services/invocation_queue/invocation_queue_common.py b/invokeai/app/services/invocation_queue/invocation_queue_common.py deleted file mode 100644 index 696f6a981d..0000000000 --- a/invokeai/app/services/invocation_queue/invocation_queue_common.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) - -import time -from typing import Optional - -from pydantic import BaseModel, Field - -from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID - - -class InvocationQueueItem(BaseModel): - graph_execution_state_id: str = Field(description="The ID of the graph execution state") - invocation_id: str = Field(description="The ID of the node being invoked") - session_queue_id: str = Field(description="The ID of the session queue from which this invocation queue item came") - session_queue_item_id: int = Field( - description="The ID of session queue item from which this invocation queue item came" - ) - session_queue_batch_id: str = Field( - description="The ID of the session batch from which this invocation queue item came" - ) - workflow: Optional[WorkflowWithoutID] = Field(description="The workflow associated with this queue item") - invoke_all: bool = Field(default=False) - timestamp: float = Field(default_factory=time.time) diff --git a/invokeai/app/services/invocation_queue/invocation_queue_memory.py b/invokeai/app/services/invocation_queue/invocation_queue_memory.py deleted file mode 100644 index 8d6fff7052..0000000000 --- a/invokeai/app/services/invocation_queue/invocation_queue_memory.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) - -import time -from queue import Queue -from typing import Optional - -from .invocation_queue_base import InvocationQueueABC -from .invocation_queue_common import InvocationQueueItem - - -class MemoryInvocationQueue(InvocationQueueABC): - __queue: Queue - __cancellations: dict[str, float] - - def __init__(self): - self.__queue = Queue() - self.__cancellations = {} - - def get(self) -> InvocationQueueItem: - item = self.__queue.get() - - while ( - isinstance(item, InvocationQueueItem) - and item.graph_execution_state_id in self.__cancellations - and self.__cancellations[item.graph_execution_state_id] > item.timestamp - ): - item = self.__queue.get() - - # Clear old items - for graph_execution_state_id in list(self.__cancellations.keys()): - if self.__cancellations[graph_execution_state_id] < 
item.timestamp: - del self.__cancellations[graph_execution_state_id] - - return item - - def put(self, item: Optional[InvocationQueueItem]) -> None: - self.__queue.put(item) - - def cancel(self, graph_execution_state_id: str) -> None: - if graph_execution_state_id not in self.__cancellations: - self.__cancellations[graph_execution_state_id] = time.time() - - def is_canceled(self, graph_execution_state_id: str) -> bool: - return graph_execution_state_id in self.__cancellations diff --git a/invokeai/app/services/invocation_services.py b/invokeai/app/services/invocation_services.py index 11a4de99d6..a560696692 100644 --- a/invokeai/app/services/invocation_services.py +++ b/invokeai/app/services/invocation_services.py @@ -3,13 +3,20 @@ from __future__ import annotations from typing import TYPE_CHECKING +from invokeai.app.services.object_serializer.object_serializer_base import ObjectSerializerBase + if TYPE_CHECKING: from logging import Logger + import torch + + from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData + from .board_image_records.board_image_records_base import BoardImageRecordStorageBase from .board_images.board_images_base import BoardImagesServiceABC from .board_records.board_records_base import BoardRecordStorageBase from .boards.boards_base import BoardServiceABC + from .bulk_download.bulk_download_base import BulkDownloadBase from .config import InvokeAIAppConfig from .download import DownloadQueueServiceBase from .events.events_base import EventServiceBase @@ -17,18 +24,11 @@ if TYPE_CHECKING: from .image_records.image_records_base import ImageRecordStorageBase from .images.images_base import ImageServiceABC from .invocation_cache.invocation_cache_base import InvocationCacheBase - from .invocation_processor.invocation_processor_base import InvocationProcessorABC - from .invocation_queue.invocation_queue_base import InvocationQueueABC from .invocation_stats.invocation_stats_base import InvocationStatsServiceBase - from .item_storage.item_storage_base import ItemStorageABC - from .latents_storage.latents_storage_base import LatentsStorageBase - from .model_install import ModelInstallServiceBase from .model_manager.model_manager_base import ModelManagerServiceBase - from .model_records import ModelRecordServiceBase from .names.names_base import NameServiceBase from .session_processor.session_processor_base import SessionProcessorBase from .session_queue.session_queue_base import SessionQueueBase - from .shared.graph import GraphExecutionState from .urls.urls_base import UrlServiceBase from .workflow_records.workflow_records_base import WorkflowRecordsStorageBase @@ -36,83 +36,50 @@ if TYPE_CHECKING: class InvocationServices: """Services that can be used by invocations""" - # TODO: Just forward-declared everything due to circular dependencies. Fix structure. 
- board_images: "BoardImagesServiceABC" - board_image_record_storage: "BoardImageRecordStorageBase" - boards: "BoardServiceABC" - board_records: "BoardRecordStorageBase" - configuration: "InvokeAIAppConfig" - events: "EventServiceBase" - graph_execution_manager: "ItemStorageABC[GraphExecutionState]" - images: "ImageServiceABC" - image_records: "ImageRecordStorageBase" - image_files: "ImageFileStorageBase" - latents: "LatentsStorageBase" - logger: "Logger" - model_manager: "ModelManagerServiceBase" - model_records: "ModelRecordServiceBase" - download_queue: "DownloadQueueServiceBase" - model_install: "ModelInstallServiceBase" - processor: "InvocationProcessorABC" - performance_statistics: "InvocationStatsServiceBase" - queue: "InvocationQueueABC" - session_queue: "SessionQueueBase" - session_processor: "SessionProcessorBase" - invocation_cache: "InvocationCacheBase" - names: "NameServiceBase" - urls: "UrlServiceBase" - workflow_records: "WorkflowRecordsStorageBase" - def __init__( self, board_images: "BoardImagesServiceABC", board_image_records: "BoardImageRecordStorageBase", boards: "BoardServiceABC", board_records: "BoardRecordStorageBase", + bulk_download: "BulkDownloadBase", configuration: "InvokeAIAppConfig", events: "EventServiceBase", - graph_execution_manager: "ItemStorageABC[GraphExecutionState]", images: "ImageServiceABC", image_files: "ImageFileStorageBase", image_records: "ImageRecordStorageBase", - latents: "LatentsStorageBase", logger: "Logger", model_manager: "ModelManagerServiceBase", - model_records: "ModelRecordServiceBase", download_queue: "DownloadQueueServiceBase", - model_install: "ModelInstallServiceBase", - processor: "InvocationProcessorABC", performance_statistics: "InvocationStatsServiceBase", - queue: "InvocationQueueABC", session_queue: "SessionQueueBase", session_processor: "SessionProcessorBase", invocation_cache: "InvocationCacheBase", names: "NameServiceBase", urls: "UrlServiceBase", workflow_records: "WorkflowRecordsStorageBase", + tensors: "ObjectSerializerBase[torch.Tensor]", + conditioning: "ObjectSerializerBase[ConditioningFieldData]", ): self.board_images = board_images self.board_image_records = board_image_records self.boards = boards self.board_records = board_records + self.bulk_download = bulk_download self.configuration = configuration self.events = events - self.graph_execution_manager = graph_execution_manager self.images = images self.image_files = image_files self.image_records = image_records - self.latents = latents self.logger = logger self.model_manager = model_manager - self.model_records = model_records self.download_queue = download_queue - self.model_install = model_install - self.processor = processor self.performance_statistics = performance_statistics - self.queue = queue self.session_queue = session_queue self.session_processor = session_processor self.invocation_cache = invocation_cache self.names = names self.urls = urls self.workflow_records = workflow_records + self.tensors = tensors + self.conditioning = conditioning
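The rewritten `InvocationServices` drops the forward-declared class attributes in favor of plain constructor injection, and, as the `__start_service` helper kept in `invoker.py` further below shows, the lifecycle hooks stay duck-typed: anything attached to the container that exposes a callable `start()` gets started. A minimal sketch of that pattern; `MiniServices`, `MiniInvoker`, and the toy services are invented names for illustration:

```python
class Logger:
    def info(self, msg: str) -> None:
        print(msg)

class TensorStorage:
    """Toy service exposing the optional start() lifecycle hook."""
    def start(self, invoker: "MiniInvoker") -> None:
        self.invoker = invoker

class MiniServices:
    """Plain constructor injection, as in InvocationServices above."""
    def __init__(self, logger: Logger, tensors: TensorStorage) -> None:
        self.logger = logger
        self.tensors = tensors

class MiniInvoker:
    def __init__(self, services: MiniServices) -> None:
        self.services = services
        for name in vars(services):                      # walk every attached service
            start_op = getattr(getattr(services, name), "start", None)
            if callable(start_op):                       # start() is duck-typed and optional
                start_op(self)

invoker = MiniInvoker(MiniServices(Logger(), TensorStorage()))
invoker.services.logger.info("all services started")
```

diff --git a/invokeai/app/services/invocation_stats/invocation_stats_base.py b/invokeai/app/services/invocation_stats/invocation_stats_base.py index 22624a6579..3266d985fe 100644 --- a/invokeai/app/services/invocation_stats/invocation_stats_base.py +++ b/invokeai/app/services/invocation_stats/invocation_stats_base.py @@ -3,7 +3,7 @@ Usage: -statistics = InvocationStatsService(graph_execution_manager) +statistics = InvocationStatsService() with statistics.collect_stats(invocation, graph_execution_state.id): ...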
execute graphs... statistics.log_stats() @@ -29,8 +29,8 @@ writes to the system log is stored in InvocationServices.performance_statistics. """ from abc import ABC, abstractmethod -from contextlib import AbstractContextManager from pathlib import Path +from typing import ContextManager from invokeai.app.invocations.baseinvocation import BaseInvocation from invokeai.app.services.invocation_stats.invocation_stats_common import InvocationStatsSummary @@ -40,18 +40,17 @@ class InvocationStatsServiceBase(ABC): "Abstract base class for recording node memory/time performance statistics" @abstractmethod - def __init__(self): + def __init__(self) -> None: """ Initialize the InvocationStatsService and reset counters to zero """ - pass @abstractmethod def collect_stats( self, invocation: BaseInvocation, graph_execution_state_id: str, - ) -> AbstractContextManager: + ) -> ContextManager[None]: """ Return a context object that will capture the statistics on the execution of the invocation. Use with: to place around the part of the code that executes the invocation. @@ -61,16 +60,12 @@ class InvocationStatsServiceBase(ABC): pass @abstractmethod - def reset_stats(self, graph_execution_state_id: str): - """ - Reset all statistics for the indicated graph. - :param graph_execution_state_id: The id of the session whose stats to reset. - :raises GESStatsNotFoundError: if the graph isn't tracked in the stats. - """ + def reset_stats(self): + """Reset all stored statistics.""" pass @abstractmethod - def log_stats(self, graph_execution_state_id: str): + def log_stats(self, graph_execution_state_id: str) -> None: """ Write out the accumulated statistics to the log or somewhere else. :param graph_execution_state_id: The id of the session whose stats to log.
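The `collect_stats` contract above asks implementations for a context manager that brackets a single invocation's execution. A minimal sketch of the timing half of that idea, written with `contextlib.contextmanager` as the default implementation below is; the real collector also snapshots process RAM via psutil and peak VRAM via torch, and keys its records by graph rather than by the simple node id used here:

```python
import time
from contextlib import contextmanager
from typing import Dict, Generator

stats: Dict[str, float] = {}

@contextmanager
def collect_stats(node_id: str) -> Generator[None, None, None]:
    """Accumulate wall-clock time for the wrapped block, keyed by node id."""
    start = time.time()
    try:
        yield
    finally:
        stats[node_id] = stats.get(node_id, 0.0) + (time.time() - start)

with collect_stats("denoise"):
    time.sleep(0.1)  # stands in for running an invocation

print(f"denoise: {stats['denoise']:.2f}s")
```

Recording in a `finally` clause keeps the numbers honest even when the wrapped invocation raises.

diff --git a/invokeai/app/services/invocation_stats/invocation_stats_default.py b/invokeai/app/services/invocation_stats/invocation_stats_default.py index be58aaad2d..5a41f1f5d6 100644 --- a/invokeai/app/services/invocation_stats/invocation_stats_default.py +++ b/invokeai/app/services/invocation_stats/invocation_stats_default.py @@ -2,6 +2,7 @@ import json import time from contextlib import contextmanager from pathlib import Path +from typing import Generator import psutil import torch @@ -9,8 +10,7 @@ import torch import invokeai.backend.util.logging as logger from invokeai.app.invocations.baseinvocation import BaseInvocation from invokeai.app.services.invoker import Invoker -from invokeai.app.services.item_storage.item_storage_common import ItemNotFoundError -from invokeai.backend.model_management.model_cache import CacheStats +from invokeai.backend.model_manager.load.model_cache import CacheStats from .invocation_stats_base import InvocationStatsServiceBase from .invocation_stats_common import ( @@ -41,22 +41,23 @@ class InvocationStatsService(InvocationStatsServiceBase): self._invoker = invoker @contextmanager - def collect_stats(self, invocation: BaseInvocation, graph_execution_state_id: str): + def collect_stats(self, invocation: BaseInvocation, graph_execution_state_id: str) -> Generator[None, None, None]: + # This is to handle the case of the model manager not being initialized, which happens + # during some tests. + services = self._invoker.services if not self._stats.get(graph_execution_state_id): # First time we're seeing this graph_execution_state_id. self._stats[graph_execution_state_id] = GraphExecutionStats() self._cache_stats[graph_execution_state_id] = CacheStats() - # Prune stale stats.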
There should be none since we're starting a new graph, but just in case. - self._prune_stale_stats() - # Record state before the invocation. start_time = time.time() start_ram = psutil.Process().memory_info().rss if torch.cuda.is_available(): torch.cuda.reset_peak_memory_stats() - if self._invoker.services.model_manager: - self._invoker.services.model_manager.collect_cache_stats(self._cache_stats[graph_execution_state_id]) + + assert services.model_manager.load is not None + services.model_manager.load.ram_cache.stats = self._cache_stats[graph_execution_state_id] try: # Let the invocation run. @@ -73,42 +74,9 @@ class InvocationStatsService(InvocationStatsServiceBase): ) self._stats[graph_execution_state_id].add_node_execution_stats(node_stats) - def _prune_stale_stats(self): - """Check all graphs being tracked and prune any that have completed/errored. - - This shouldn't be necessary, but we don't have totally robust upstream handling of graph completions/errors, so - for now we call this function periodically to prevent them from accumulating. - """ - to_prune: list[str] = [] - for graph_execution_state_id in self._stats: - try: - graph_execution_state = self._invoker.services.graph_execution_manager.get(graph_execution_state_id) - except ItemNotFoundError: - # TODO(ryand): What would cause this? Should this exception just be allowed to propagate? - logger.warning(f"Failed to get graph state for {graph_execution_state_id}.") - continue - - if not graph_execution_state.is_complete(): - # The graph is still running, don't prune it. - continue - - to_prune.append(graph_execution_state_id) - - for graph_execution_state_id in to_prune: - del self._stats[graph_execution_state_id] - del self._cache_stats[graph_execution_state_id] - - if len(to_prune) > 0: - logger.info(f"Pruned stale graph stats for {to_prune}.") - - def reset_stats(self, graph_execution_state_id: str): - try: - del self._stats[graph_execution_state_id] - del self._cache_stats[graph_execution_state_id] - except KeyError as e: - raise GESStatsNotFoundError( - f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}." - ) from e + def reset_stats(self): + self._stats = {} + self._cache_stats = {} def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary: graph_stats_summary = self._get_graph_summary(graph_execution_state_id) diff --git a/invokeai/app/services/invoker.py b/invokeai/app/services/invoker.py index a04c6f2059..527afb37f4 100644 --- a/invokeai/app/services/invoker.py +++ b/invokeai/app/services/invoker.py @@ -1,12 +1,7 @@ # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) -from typing import Optional -from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID - -from .invocation_queue.invocation_queue_common import InvocationQueueItem from .invocation_services import InvocationServices -from .shared.graph import Graph, GraphExecutionState class Invoker: @@ -18,51 +13,6 @@ class Invoker: self.services = services self._start() - def invoke( - self, - session_queue_id: str, - session_queue_item_id: int, - session_queue_batch_id: str, - graph_execution_state: GraphExecutionState, - workflow: Optional[WorkflowWithoutID] = None, - invoke_all: bool = False, - ) -> Optional[str]: - """Determines the next node to invoke and enqueues it, preparing if needed. 
- Returns the id of the queued node, or `None` if there are no nodes left to enqueue.""" - - # Get the next invocation - invocation = graph_execution_state.next() - if not invocation: - return None - - # Save the execution state - self.services.graph_execution_manager.set(graph_execution_state) - - # Queue the invocation - self.services.queue.put( - InvocationQueueItem( - session_queue_id=session_queue_id, - session_queue_item_id=session_queue_item_id, - session_queue_batch_id=session_queue_batch_id, - graph_execution_state_id=graph_execution_state.id, - invocation_id=invocation.id, - workflow=workflow, - invoke_all=invoke_all, - ) - ) - - return invocation.id - - def create_execution_state(self, graph: Optional[Graph] = None) -> GraphExecutionState: - """Creates a new execution state for the given graph""" - new_state = GraphExecutionState(graph=Graph() if graph is None else graph) - self.services.graph_execution_manager.set(new_state) - return new_state - - def cancel(self, graph_execution_state_id: str) -> None: - """Cancels the given execution state""" - self.services.queue.cancel(graph_execution_state_id) - def __start_service(self, service) -> None: # Call start() method on any services that have it start_op = getattr(service, "start", None) @@ -85,5 +35,3 @@ class Invoker: # First stop all services for service in vars(self.services): self.__stop_service(getattr(self.services, service)) - - self.services.queue.put(None) diff --git a/invokeai/app/services/item_storage/item_storage_base.py b/invokeai/app/services/item_storage/item_storage_base.py index c93edf5188..ef227ba241 100644 --- a/invokeai/app/services/item_storage/item_storage_base.py +++ b/invokeai/app/services/item_storage/item_storage_base.py @@ -30,7 +30,7 @@ class ItemStorageABC(ABC, Generic[T]): @abstractmethod def set(self, item: T) -> None: """ - Sets the item. The id will be extracted based on id_field. + Sets the item. 
:param item: the item to set """ pass diff --git a/invokeai/app/services/latents_storage/latents_storage_base.py b/invokeai/app/services/latents_storage/latents_storage_base.py deleted file mode 100644 index 9fa42b0ae6..0000000000 --- a/invokeai/app/services/latents_storage/latents_storage_base.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) - -from abc import ABC, abstractmethod -from typing import Callable - -import torch - - -class LatentsStorageBase(ABC): - """Responsible for storing and retrieving latents.""" - - _on_changed_callbacks: list[Callable[[torch.Tensor], None]] - _on_deleted_callbacks: list[Callable[[str], None]] - - def __init__(self) -> None: - self._on_changed_callbacks = [] - self._on_deleted_callbacks = [] - - @abstractmethod - def get(self, name: str) -> torch.Tensor: - pass - - @abstractmethod - def save(self, name: str, data: torch.Tensor) -> None: - pass - - @abstractmethod - def delete(self, name: str) -> None: - pass - - def on_changed(self, on_changed: Callable[[torch.Tensor], None]) -> None: - """Register a callback for when an item is changed""" - self._on_changed_callbacks.append(on_changed) - - def on_deleted(self, on_deleted: Callable[[str], None]) -> None: - """Register a callback for when an item is deleted""" - self._on_deleted_callbacks.append(on_deleted) - - def _on_changed(self, item: torch.Tensor) -> None: - for callback in self._on_changed_callbacks: - callback(item) - - def _on_deleted(self, item_id: str) -> None: - for callback in self._on_deleted_callbacks: - callback(item_id) diff --git a/invokeai/app/services/latents_storage/latents_storage_disk.py b/invokeai/app/services/latents_storage/latents_storage_disk.py deleted file mode 100644 index 9192b9147f..0000000000 --- a/invokeai/app/services/latents_storage/latents_storage_disk.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) - -from pathlib import Path -from typing import Union - -import torch - -from invokeai.app.services.invoker import Invoker - -from .latents_storage_base import LatentsStorageBase - - -class DiskLatentsStorage(LatentsStorageBase): - """Stores latents in a folder on disk without caching""" - - __output_folder: Path - - def __init__(self, output_folder: Union[str, Path]): - self.__output_folder = output_folder if isinstance(output_folder, Path) else Path(output_folder) - self.__output_folder.mkdir(parents=True, exist_ok=True) - - def start(self, invoker: Invoker) -> None: - self._invoker = invoker - self._delete_all_latents() - - def get(self, name: str) -> torch.Tensor: - latent_path = self.get_path(name) - return torch.load(latent_path) - - def save(self, name: str, data: torch.Tensor) -> None: - self.__output_folder.mkdir(parents=True, exist_ok=True) - latent_path = self.get_path(name) - torch.save(data, latent_path) - - def delete(self, name: str) -> None: - latent_path = self.get_path(name) - latent_path.unlink() - - def get_path(self, name: str) -> Path: - return self.__output_folder / name - - def _delete_all_latents(self) -> None: - """ - Deletes all latents from disk. - Must be called after we have access to `self._invoker` (e.g. in `start()`). 
- """ - deleted_latents_count = 0 - freed_space = 0 - for latents_file in Path(self.__output_folder).glob("*"): - if latents_file.is_file(): - freed_space += latents_file.stat().st_size - deleted_latents_count += 1 - latents_file.unlink() - if deleted_latents_count > 0: - freed_space_in_mb = round(freed_space / 1024 / 1024, 2) - self._invoker.services.logger.info( - f"Deleted {deleted_latents_count} latents files (freed {freed_space_in_mb}MB)" - ) diff --git a/invokeai/app/services/latents_storage/latents_storage_forward_cache.py b/invokeai/app/services/latents_storage/latents_storage_forward_cache.py deleted file mode 100644 index 6232b76a27..0000000000 --- a/invokeai/app/services/latents_storage/latents_storage_forward_cache.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) - -from queue import Queue -from typing import Dict, Optional - -import torch - -from invokeai.app.services.invoker import Invoker - -from .latents_storage_base import LatentsStorageBase - - -class ForwardCacheLatentsStorage(LatentsStorageBase): - """Caches the latest N latents in memory, writing-thorugh to and reading from underlying storage""" - - __cache: Dict[str, torch.Tensor] - __cache_ids: Queue - __max_cache_size: int - __underlying_storage: LatentsStorageBase - - def __init__(self, underlying_storage: LatentsStorageBase, max_cache_size: int = 20): - super().__init__() - self.__underlying_storage = underlying_storage - self.__cache = {} - self.__cache_ids = Queue() - self.__max_cache_size = max_cache_size - - def start(self, invoker: Invoker) -> None: - self._invoker = invoker - start_op = getattr(self.__underlying_storage, "start", None) - if callable(start_op): - start_op(invoker) - - def stop(self, invoker: Invoker) -> None: - self._invoker = invoker - stop_op = getattr(self.__underlying_storage, "stop", None) - if callable(stop_op): - stop_op(invoker) - - def get(self, name: str) -> torch.Tensor: - cache_item = self.__get_cache(name) - if cache_item is not None: - return cache_item - - latent = self.__underlying_storage.get(name) - self.__set_cache(name, latent) - return latent - - def save(self, name: str, data: torch.Tensor) -> None: - self.__underlying_storage.save(name, data) - self.__set_cache(name, data) - self._on_changed(data) - - def delete(self, name: str) -> None: - self.__underlying_storage.delete(name) - if name in self.__cache: - del self.__cache[name] - self._on_deleted(name) - - def __get_cache(self, name: str) -> Optional[torch.Tensor]: - return None if name not in self.__cache else self.__cache[name] - - def __set_cache(self, name: str, data: torch.Tensor): - if name not in self.__cache: - self.__cache[name] = data - self.__cache_ids.put(name) - if self.__cache_ids.qsize() > self.__max_cache_size: - self.__cache.pop(self.__cache_ids.get()) diff --git a/invokeai/app/services/model_install/model_install_base.py b/invokeai/app/services/model_install/model_install_base.py index 635cb154d6..4f2cdaed8e 100644 --- a/invokeai/app/services/model_install/model_install_base.py +++ b/invokeai/app/services/model_install/model_install_base.py @@ -14,11 +14,13 @@ from typing_extensions import Annotated from invokeai.app.services.config import InvokeAIAppConfig from invokeai.app.services.download import DownloadJob, DownloadQueueServiceBase -from invokeai.app.services.events import EventServiceBase +from invokeai.app.services.events.events_base import EventServiceBase from invokeai.app.services.invoker import Invoker from 
invokeai.app.services.model_records import ModelRecordServiceBase from invokeai.backend.model_manager import AnyModelConfig, ModelRepoVariant -from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata, ModelMetadataStore +from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata + +from ..model_metadata import ModelMetadataStoreBase class InstallStatus(str, Enum): @@ -26,6 +28,7 @@ class InstallStatus(str, Enum): WAITING = "waiting" # waiting to be dequeued DOWNLOADING = "downloading" # downloading of model files in process + DOWNLOADS_DONE = "downloads_done" # downloading done, waiting to run RUNNING = "running" # being processed COMPLETED = "completed" # finished running ERROR = "error" # terminated with an error message @@ -127,8 +130,8 @@ class HFModelSource(StringLikeSource): def __str__(self) -> str: """Return string version of repoid when string rep needed.""" base: str = self.repo_id + base += f":{self.variant or ''}" base += f":{self.subfolder}" if self.subfolder else "" - base += f" ({self.variant})" if self.variant else "" return base @@ -154,6 +157,7 @@ class ModelInstallJob(BaseModel): id: int = Field(description="Unique ID for this job") status: InstallStatus = Field(default=InstallStatus.WAITING, description="Current status of install process") + error_reason: Optional[str] = Field(default=None, description="Information about why the job failed") config_in: Dict[str, Any] = Field( default_factory=dict, description="Configuration information (e.g. 'description') to apply to model." ) @@ -175,6 +179,12 @@ class ModelInstallJob(BaseModel): download_parts: Set[DownloadJob] = Field( default_factory=set, description="Download jobs contributing to this install" ) + error: Optional[str] = Field( + default=None, description="On an error condition, this field will contain the text of the exception" + ) + error_traceback: Optional[str] = Field( + default=None, description="On an error condition, this field will contain the exception traceback" + ) # internal flags and transitory settings _install_tmpdir: Optional[Path] = PrivateAttr(default=None) _exception: Optional[Exception] = PrivateAttr(default=None) @@ -182,7 +192,10 @@ class ModelInstallJob(BaseModel): def set_error(self, e: Exception) -> None: """Record the error and traceback from an exception.""" self._exception = e + self.error = str(e) + self.error_traceback = self._format_error(e) self.status = InstallStatus.ERROR + self.error_reason = self._exception.__class__.__name__ if self._exception else None def cancel(self) -> None: """Call to cancel the job.""" @@ -193,10 +206,9 @@ class ModelInstallJob(BaseModel): """Class name of the exception that led to status==ERROR.""" return self._exception.__class__.__name__ if self._exception else None - @property - def error(self) -> Optional[str]: + def _format_error(self, exception: Exception) -> str: """Error traceback.""" - return "".join(traceback.format_exception(self._exception)) if self._exception else None + return "".join(traceback.format_exception(exception)) @property def cancelled(self) -> bool: @@ -218,6 +230,11 @@ class ModelInstallJob(BaseModel): """Return true if job is downloading.""" return self.status == InstallStatus.DOWNLOADING + @property + def downloads_done(self) -> bool: + """Return true if job's downloads are done.""" + return self.status == InstallStatus.DOWNLOADS_DONE + @property def running(self) -> bool: """Return true if job is running.""" @@ -243,7 +260,7 @@ class ModelInstallServiceBase(ABC): app_config: InvokeAIAppConfig,
record_store: ModelRecordServiceBase, download_queue: DownloadQueueServiceBase, - metadata_store: ModelMetadataStore, + metadata_store: ModelMetadataStoreBase, event_bus: Optional["EventServiceBase"] = None, ): """ @@ -324,6 +341,43 @@ class ModelInstallServiceBase(ABC): :returns id: The string ID of the registered model. """ + @abstractmethod + def heuristic_import( + self, + source: str, + config: Optional[Dict[str, Any]] = None, + access_token: Optional[str] = None, + ) -> ModelInstallJob: + r"""Install the indicated model using heuristics to interpret user intentions. + + :param source: String source + :param config: Optional dict. Any fields in this dict + will override corresponding autoassigned probe fields in the + model's config record as described in `import_model()`. + :param access_token: Optional access token for remote sources. + + The source can be: + 1. A local file path in posix() format (`/foo/bar` or `C:\foo\bar`) + 2. An http or https URL (`https://foo.bar/foo`) + 3. A HuggingFace repo_id (`foo/bar`, `foo/bar:fp16`, `foo/bar:fp16:vae`) + + We extend the HuggingFace repo_id syntax to include the variant and the + subfolder or path. The following are acceptable alternatives: + stabilityai/stable-diffusion-v4 + stabilityai/stable-diffusion-v4:fp16 + stabilityai/stable-diffusion-v4:fp16:vae + stabilityai/stable-diffusion-v4::/checkpoints/sd4.safetensors + stabilityai/stable-diffusion-v4:onnx:vae + + Because a local file path can look like a huggingface repo_id, the logic + first checks whether the path exists on disk, and if not, it is treated as + a parseable huggingface repo. + + The previous support for recursing into a local folder and loading all model-like files + has been removed. + """ + pass + @abstractmethod def import_model( self, @@ -385,6 +439,18 @@ class ModelInstallServiceBase(ABC): def cancel_job(self, job: ModelInstallJob) -> None: """Cancel the indicated job.""" + @abstractmethod + def wait_for_job(self, job: ModelInstallJob, timeout: int = 0) -> ModelInstallJob: + """Wait for the indicated job to reach a terminal state. + + This will block until the indicated install job has completed, + been cancelled, or errored out. + + :param job: The job to wait on. + :param timeout: Wait up to indicated number of seconds. Raise a TimeoutError if + the job hasn't completed within the indicated time. + """ + @abstractmethod def wait_for_installs(self, timeout: int = 0) -> List[ModelInstallJob]: """ @@ -394,7 +460,8 @@ class ModelInstallServiceBase(ABC): completed, been cancelled, or errored out. :param timeout: Wait up to indicated number of seconds. Raise an Exception('timeout') if - installs do not complete within the indicated time. + installs do not complete within the indicated time. A timeout of zero (the default) + will block indefinitely until the installs complete. """ @abstractmethod @@ -410,3 +477,22 @@ class ModelInstallServiceBase(ABC): @abstractmethod def sync_to_config(self) -> None: """Synchronize models on disk to those in the model record database.""" + + @abstractmethod + def download_and_cache(self, source: Union[str, AnyHttpUrl], access_token: Optional[str] = None) -> Path: + """ + Download the model file located at source to the models cache and return its Path. + + :param source: A Url or a string that can be converted into one. + :param access_token: Optional access token to access restricted resources. + + The model file will be downloaded into the system-wide model cache + (`models/.cache`) if it isn't already there. 
Note that the model cache + is periodically cleared of infrequently-used entries when the model + converter runs. + + Note that this doesn't automatically install or register the model, but is + intended for use by nodes that need access to models that aren't directly + supported by InvokeAI. The downloading process takes advantage of the download queue + to avoid interrupting other operations. + """ diff --git a/invokeai/app/services/model_install/model_install_default.py b/invokeai/app/services/model_install/model_install_default.py index 82c667f584..f522282fee 100644 --- a/invokeai/app/services/model_install/model_install_default.py +++ b/invokeai/app/services/model_install/model_install_default.py @@ -17,10 +17,10 @@ from pydantic.networks import AnyHttpUrl from requests import Session from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.app.services.download import DownloadJob, DownloadQueueServiceBase +from invokeai.app.services.download import DownloadJob, DownloadQueueServiceBase, TqdmProgress from invokeai.app.services.events.events_base import EventServiceBase from invokeai.app.services.invoker import Invoker -from invokeai.app.services.model_records import DuplicateModelException, ModelRecordServiceBase, ModelRecordServiceSQL +from invokeai.app.services.model_records import DuplicateModelException, ModelRecordServiceBase from invokeai.backend.model_manager.config import ( AnyModelConfig, BaseModelType, @@ -28,12 +28,10 @@ from invokeai.backend.model_manager.config import ( ModelRepoVariant, ModelType, ) -from invokeai.backend.model_manager.hash import FastModelHash from invokeai.backend.model_manager.metadata import ( AnyModelRepoMetadata, CivitaiMetadataFetch, HuggingFaceMetadataFetch, - ModelMetadataStore, ModelMetadataWithFiles, RemoteModelFile, ) @@ -50,6 +48,7 @@ from .model_install_base import ( ModelInstallJob, ModelInstallServiceBase, ModelSource, + StringLikeSource, URLModelSource, ) @@ -64,7 +63,6 @@ class ModelInstallService(ModelInstallServiceBase): app_config: InvokeAIAppConfig, record_store: ModelRecordServiceBase, download_queue: DownloadQueueServiceBase, - metadata_store: Optional[ModelMetadataStore] = None, event_bus: Optional[EventServiceBase] = None, session: Optional[Session] = None, ): @@ -86,19 +84,13 @@ class ModelInstallService(ModelInstallServiceBase): self._lock = threading.Lock() self._stop_event = threading.Event() self._downloads_changed_event = threading.Event() + self._install_completed_event = threading.Event() self._download_queue = download_queue self._download_cache: Dict[AnyHttpUrl, ModelInstallJob] = {} self._running = False self._session = session self._next_job_id = 0 - # There may not necessarily be a metadata store initialized - # so we create one and initialize it with the same sql database - # used by the record store service.
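For readers skimming the diff, here is a hedged sketch of how the extended repo_id grammar documented in `heuristic_import()` above is parsed. The pattern is copied from the implementation later in this hunk; the variant list is an assumption standing in for `ModelRepoVariant.__members__.values()`:

```python
import re

variants = "fp16|fp32|onnx|openvino|flax"  # assumed subset of ModelRepoVariant values
hf_repoid_re = f"^([^/:]+/[^/:]+)(?::({variants})?(?::/?([^:]+))?)?$"

for source in (
    "stabilityai/stable-diffusion-v4",
    "stabilityai/stable-diffusion-v4:fp16",
    "stabilityai/stable-diffusion-v4:fp16:vae",
    "stabilityai/stable-diffusion-v4::/checkpoints/sd4.safetensors",
):
    match = re.match(hf_repoid_re, source)
    assert match is not None
    print(match.groups())  # (repo_id, variant or None, subfolder or None)

# ('stabilityai/stable-diffusion-v4', None, None)
# ('stabilityai/stable-diffusion-v4', 'fp16', None)
# ('stabilityai/stable-diffusion-v4', 'fp16', 'vae')
# ('stabilityai/stable-diffusion-v4', None, 'checkpoints/sd4.safetensors')
```

Note that `heuristic_import` only tries this pattern after checking whether the source exists as a local path, which disambiguates paths that happen to look like repo_ids.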
- if metadata_store: - self._metadata_store = metadata_store - else: - assert isinstance(record_store, ModelRecordServiceSQL) - self._metadata_store = ModelMetadataStore(record_store.db) + self._metadata_store = record_store.metadata_store # for convenience @property def app_config(self) -> InvokeAIAppConfig: # noqa D102 @@ -145,7 +137,7 @@ class ModelInstallService(ModelInstallServiceBase): ) -> str: # noqa D102 model_path = Path(model_path) config = config or {} - if config.get("source") is None: + if not config.get("source"): config["source"] = model_path.resolve().as_posix() return self._register(model_path, config) @@ -156,20 +148,24 @@ class ModelInstallService(ModelInstallServiceBase): ) -> str: # noqa D102 model_path = Path(model_path) config = config or {} - if config.get("source") is None: + if not config.get("source"): config["source"] = model_path.resolve().as_posix() + config["key"] = config.get("key", self._create_key()) info: AnyModelConfig = self._probe_model(Path(model_path), config) - old_hash = info.original_hash - dest_path = self.app_config.models_path / info.base.value / info.type.value / model_path.name + + if preferred_name := config.get("name"): + preferred_name = Path(preferred_name).with_suffix(model_path.suffix) + + dest_path = ( + self.app_config.models_path / info.base.value / info.type.value / (preferred_name or model_path.name) + ) try: new_path = self._copy_model(model_path, dest_path) except FileExistsError as excp: raise DuplicateModelException( f"A model named {model_path.name} is already installed at {dest_path.as_posix()}" ) from excp - new_hash = FastModelHash.hash(new_path) - assert new_hash == old_hash, f"{model_path}: Model hash changed during installation, possibly corrupted." return self._register( new_path, @@ -177,7 +173,40 @@ class ModelInstallService(ModelInstallServiceBase): info, ) + def heuristic_import( + self, + source: str, + config: Optional[Dict[str, Any]] = None, + access_token: Optional[str] = None, + ) -> ModelInstallJob: + variants = "|".join(ModelRepoVariant.__members__.values()) + hf_repoid_re = f"^([^/:]+/[^/:]+)(?::({variants})?(?::/?([^:]+))?)?$" + source_obj: Optional[StringLikeSource] = None + + if Path(source).exists(): # A local file or directory + source_obj = LocalModelSource(path=Path(source)) + elif match := re.match(hf_repoid_re, source): + source_obj = HFModelSource( + repo_id=match.group(1), + variant=match.group(2) if match.group(2) else None, # pass None rather than '' + subfolder=Path(match.group(3)) if match.group(3) else None, + access_token=access_token, + ) + elif re.match(r"^https?://[^/]+", source): + source_obj = URLModelSource( + url=AnyHttpUrl(source), + access_token=access_token, + ) + else: + raise ValueError(f"Unsupported model source: '{source}'") + return self.import_model(source_obj, config) + def import_model(self, source: ModelSource, config: Optional[Dict[str, Any]] = None) -> ModelInstallJob: # noqa D102 + similar_jobs = [x for x in self.list_jobs() if x.source == source and not x.in_terminal_state] + if similar_jobs: + self._logger.warning(f"There is already an active install job for {source}. 
Not enqueuing.") + return similar_jobs[0] + if isinstance(source, LocalModelSource): install_job = self._import_local_model(source, config) self._install_queue.put(install_job) # synchronously install @@ -207,14 +236,25 @@ class ModelInstallService(ModelInstallServiceBase): assert isinstance(jobs[0], ModelInstallJob) return jobs[0] + def wait_for_job(self, job: ModelInstallJob, timeout: int = 0) -> ModelInstallJob: + """Block until the indicated job has reached terminal state, or when timeout limit reached.""" + start = time.time() + while not job.in_terminal_state: + if self._install_completed_event.wait(timeout=5): # in case we miss an event + self._install_completed_event.clear() + if timeout > 0 and time.time() - start > timeout: + raise TimeoutError("Timeout exceeded") + return job + + # TODO: Better name? Maybe wait_for_jobs()? Maybe too easily confused with above def wait_for_installs(self, timeout: int = 0) -> List[ModelInstallJob]: # noqa D102 """Block until all installation jobs are done.""" start = time.time() while len(self._download_cache) > 0: - if self._downloads_changed_event.wait(timeout=5): # in case we miss an event + if self._downloads_changed_event.wait(timeout=0.25): # in case we miss an event self._downloads_changed_event.clear() if timeout > 0 and time.time() - start > timeout: - raise Exception("Timeout exceeded") + raise TimeoutError("Timeout exceeded") self._install_queue.join() return self._install_jobs @@ -239,9 +279,9 @@ class ModelInstallService(ModelInstallServiceBase): self._logger.info("Model installer (re)initialized") def scan_directory(self, scan_dir: Path, install: bool = False) -> List[str]: # noqa D102 - self._cached_model_paths = {Path(x.path) for x in self.record_store.all_models()} + self._cached_model_paths = {Path(x.path).absolute() for x in self.record_store.all_models()} callback = self._scan_install if install else self._scan_register - search = ModelSearch(on_model_found=callback) + search = ModelSearch(on_model_found=callback, config=self._app_config) self._models_installed.clear() search.search(scan_dir) return list(self._models_installed) @@ -268,6 +308,38 @@ class ModelInstallService(ModelInstallServiceBase): path.unlink() self.unregister(key) + def download_and_cache( + self, + source: Union[str, AnyHttpUrl], + access_token: Optional[str] = None, + timeout: int = 0, + ) -> Path: + """Download the model file located at source to the models cache and return its Path.""" + model_hash = sha256(str(source).encode("utf-8")).hexdigest()[0:32] + model_path = self._app_config.models_convert_cache_path / model_hash + + # We expect the cache directory to contain one and only one downloaded file. + # We don't know the file's name in advance, as it is set by the download + # content-disposition header. 
+ if model_path.exists(): + contents = [x for x in model_path.iterdir() if x.is_file()] + if len(contents) > 0: + return contents[0] + + model_path.mkdir(parents=True, exist_ok=True) + job = self._download_queue.download( + source=AnyHttpUrl(str(source)), + dest=model_path, + access_token=access_token, + on_progress=TqdmProgress().update, + ) + self._download_queue.wait_for_job(job, timeout) + if job.complete: + assert job.download_path is not None + return job.download_path + else: + raise Exception(job.error) + # -------------------------------------------------------------------------------------------- # Internal functions that manage the installer threads # -------------------------------------------------------------------------------------------- @@ -295,11 +367,12 @@ class ModelInstallService(ModelInstallServiceBase): self._signal_job_errored(job) elif ( - job.waiting or job.downloading + job.waiting or job.downloads_done ): # local jobs will be in waiting state, remote jobs will be in downloads_done state job.total_bytes = self._stat_size(job.local_path) job.bytes = job.total_bytes self._signal_job_running(job) + job.config_in["source"] = str(job.source) if job.inplace: key = self.register_path(job.local_path, job.config_in) else: @@ -330,6 +403,7 @@ class ModelInstallService(ModelInstallServiceBase): # if this is an install of a remote file, then clean up the temporary directory if job._install_tmpdir is not None: rmtree(job._install_tmpdir) + self._install_completed_event.set() self._install_queue.task_done() self._logger.info("Install thread exiting") @@ -371,7 +445,7 @@ class ModelInstallService(ModelInstallServiceBase): installed.update(self.scan_directory(models_dir)) self._logger.info(f"{len(installed)} new models registered; {len(defunct_models)} unregistered") - def _sync_model_path(self, key: str, ignore_hash_change: bool = False) -> AnyModelConfig: + def _sync_model_path(self, key: str) -> AnyModelConfig: """ Move model into the location indicated by its basetype, type and name. @@ -392,14 +466,7 @@ class ModelInstallService(ModelInstallServiceBase): new_path = models_dir / model.base.value / model.type.value / model.name self._logger.info(f"Moving {model.name} to {new_path}.") new_path = self._move_model(old_path, new_path) - new_hash = FastModelHash.hash(new_path) model.path = new_path.relative_to(models_dir).as_posix() - if model.current_hash != new_hash: - assert ( - ignore_hash_change - ), f"{model.name}: Model hash changed during installation, model is possibly corrupted" - model.current_hash = new_hash - self._logger.info(f"Model has new hash {model.current_hash}, but will continue to be identified by {key}") self.record_store.update_model(key, model) return model @@ -465,8 +532,10 @@ class ModelInstallService(ModelInstallServiceBase): def _register( self, model_path: Path, config: Optional[Dict[str, Any]] = None, info: Optional[AnyModelConfig] = None ) -> str: + # Note that we may be passed a pre-populated AnyModelConfig object, + # in which case the key field should have been populated by the caller (e.g. in `install_path`).
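Taken together with `heuristic_import()`, the new blocking helper `wait_for_job()` introduced earlier in this file supports a simple synchronous install pattern. A hedged usage sketch; `installer` stands for a started ModelInstallService, and the source string and timeout are illustrative:

```python
# Hypothetical caller; `installer` is a running ModelInstallService.
job = installer.heuristic_import("stabilityai/stable-diffusion-v4:fp16")
job = installer.wait_for_job(job, timeout=600)  # raises TimeoutError after 600s

if job.complete:
    assert job.config_out is not None
    print(f"installed with key {job.config_out.key}")
elif job.errored:
    print(f"{job.error_reason}: {job.error}")
```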
+ config["key"] = config.get("key", self._create_key()) info = info or ModelProbe.probe(model_path, config) - key = self._create_key() model_path = model_path.absolute() if model_path.is_relative_to(self.app_config.models_path): @@ -479,8 +548,8 @@ class ModelInstallService(ModelInstallServiceBase): # make config relative to our root legacy_conf = (self.app_config.root_dir / self.app_config.legacy_conf_dir / info.config).resolve() info.config = legacy_conf.relative_to(self.app_config.root_dir).as_posix() - self.record_store.add_model(key, info) - return key + self.record_store.add_model(info.key, info) + return info.key def _next_id(self) -> int: with self._lock: @@ -489,10 +558,10 @@ class ModelInstallService(ModelInstallServiceBase): return id @staticmethod - def _guess_variant() -> ModelRepoVariant: + def _guess_variant() -> Optional[ModelRepoVariant]: """Guess the best HuggingFace variant type to download.""" precision = choose_precision(choose_torch_device()) - return ModelRepoVariant.FP16 if precision == "float16" else ModelRepoVariant.DEFAULT + return ModelRepoVariant.FP16 if precision == "float16" else None def _import_local_model(self, source: LocalModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob: return ModelInstallJob( @@ -517,7 +586,7 @@ class ModelInstallService(ModelInstallServiceBase): if not source.access_token: self._logger.info("No HuggingFace access token present; some models may not be downloadable.") - metadata = HuggingFaceMetadataFetch(self._session).from_id(source.repo_id) + metadata = HuggingFaceMetadataFetch(self._session).from_id(source.repo_id, source.variant) assert isinstance(metadata, ModelMetadataWithFiles) remote_files = metadata.download_urls( variant=source.variant or self._guess_variant(), @@ -565,6 +634,8 @@ class ModelInstallService(ModelInstallServiceBase): # TODO: Replace with tempfile.tmpdir() when multithreading is cleaned up. # Currently the tmpdir isn't automatically removed at exit because it is # being held in a daemon thread. + if len(remote_files) == 0: + raise ValueError(f"{source}: No downloadable files found") tmpdir = Path( mkdtemp( dir=self._app_config.models_path, @@ -580,6 +651,16 @@ class ModelInstallService(ModelInstallServiceBase): bytes=0, total_bytes=0, ) + # In the event that there is a subfolder specified in the source, + # we need to remove it from the destination path in order to avoid + # creating unwanted subfolders + if hasattr(source, "subfolder") and source.subfolder: + root = Path(remote_files[0].path.parts[0]) + subfolder = root / source.subfolder + else: + root = Path(".") + subfolder = Path(".") + # we remember the path up to the top of the tmpdir so that it may be # removed safely at the end of the install process. 
install_job._install_tmpdir = tmpdir @@ -589,7 +670,7 @@ class ModelInstallService(ModelInstallServiceBase): self._logger.debug(f"remote_files={remote_files}") for model_file in remote_files: url = model_file.url - path = model_file.path + path = root / model_file.path.relative_to(subfolder) self._logger.info(f"Downloading {url} => {path}") install_job.total_bytes += model_file.size assert hasattr(source, "access_token") @@ -652,13 +733,14 @@ class ModelInstallService(ModelInstallServiceBase): self._signal_job_downloading(install_job) def _download_complete_callback(self, download_job: DownloadJob) -> None: + self._logger.info(f"{download_job.source}: model download complete") with self._lock: install_job = self._download_cache[download_job.source] self._download_cache.pop(download_job.source, None) # are there any more active jobs left in this task? - if all(x.complete for x in install_job.download_parts): - # now enqueue job for actual installation into the models directory + if install_job.downloading and all(x.complete for x in install_job.download_parts): + install_job.status = InstallStatus.DOWNLOADS_DONE self._install_queue.put(install_job) # Let other threads know that the number of downloads has changed @@ -684,7 +766,7 @@ class ModelInstallService(ModelInstallServiceBase): if not install_job: return self._downloads_changed_event.set() - self._logger.warning(f"Download {download_job.source} cancelled.") + self._logger.warning(f"{download_job.source}: model download cancelled") # if install job has already registered an error, then do not replace its status with cancelled if not install_job.errored: install_job.cancel() @@ -731,6 +813,7 @@ class ModelInstallService(ModelInstallServiceBase): parts=parts, bytes=job.bytes, total_bytes=job.total_bytes, + id=job.id, ) def _signal_job_completed(self, job: ModelInstallJob) -> None: @@ -743,7 +826,7 @@ class ModelInstallService(ModelInstallServiceBase): assert job.local_path is not None assert job.config_out is not None key = job.config_out.key - self._event_bus.emit_model_install_completed(str(job.source), key) + self._event_bus.emit_model_install_completed(str(job.source), key, id=job.id) def _signal_job_errored(self, job: ModelInstallJob) -> None: self._logger.info(f"{job.source}: model installation encountered an exception: {job.error_type}\n{job.error}") @@ -752,7 +835,7 @@ class ModelInstallService(ModelInstallServiceBase): error = job.error assert error_type is not None assert error is not None - self._event_bus.emit_model_install_error(str(job.source), error_type, error) + self._event_bus.emit_model_install_error(str(job.source), error_type, error, id=job.id) def _signal_job_cancelled(self, job: ModelInstallJob) -> None: self._logger.info(f"{job.source}: model installation was cancelled") diff --git a/invokeai/app/services/model_load/__init__.py b/invokeai/app/services/model_load/__init__.py new file mode 100644 index 0000000000..b4a86e9348 --- /dev/null +++ b/invokeai/app/services/model_load/__init__.py @@ -0,0 +1,6 @@ +"""Initialization file for model load service module.""" + +from .model_load_base import ModelLoadServiceBase +from .model_load_default import ModelLoadService + +__all__ = ["ModelLoadServiceBase", "ModelLoadService"] diff --git a/invokeai/app/services/model_load/model_load_base.py b/invokeai/app/services/model_load/model_load_base.py new file mode 100644 index 0000000000..cc80333e93 --- /dev/null +++ b/invokeai/app/services/model_load/model_load_base.py @@ -0,0 +1,40 @@ +# Copyright (c) 2024 Lincoln D. 
Stein and the InvokeAI Team +"""Base class for model loader.""" + +from abc import ABC, abstractmethod +from typing import Optional + +from invokeai.app.services.shared.invocation_context import InvocationContextData +from invokeai.backend.model_manager import AnyModel, AnyModelConfig, SubModelType +from invokeai.backend.model_manager.load import LoadedModel +from invokeai.backend.model_manager.load.convert_cache import ModelConvertCacheBase +from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase + + +class ModelLoadServiceBase(ABC): + """Wrapper around AnyModelLoader.""" + + @abstractmethod + def load_model( + self, + model_config: AnyModelConfig, + submodel_type: Optional[SubModelType] = None, + context_data: Optional[InvocationContextData] = None, + ) -> LoadedModel: + """ + Given a model's configuration, load it and return the LoadedModel object. + + :param model_config: Model configuration record (as returned by ModelRecordBase.get_model()) + :param submodel_type: For main (pipeline) models, the submodel to fetch. + :param context_data: Invocation context data used for event reporting + """ + + @property + @abstractmethod + def ram_cache(self) -> ModelCacheBase[AnyModel]: + """Return the RAM cache used by this loader.""" + + @property + @abstractmethod + def convert_cache(self) -> ModelConvertCacheBase: + """Return the checkpoint convert cache used by this loader.""" diff --git a/invokeai/app/services/model_load/model_load_default.py b/invokeai/app/services/model_load/model_load_default.py new file mode 100644 index 0000000000..3ff7898c0e --- /dev/null +++ b/invokeai/app/services/model_load/model_load_default.py @@ -0,0 +1,113 @@ +# Copyright (c) 2024 Lincoln D. Stein and the InvokeAI Team +"""Implementation of model loader service.""" + +from typing import Optional, Type + +from invokeai.app.services.config import InvokeAIAppConfig +from invokeai.app.services.invoker import Invoker +from invokeai.app.services.shared.invocation_context import InvocationContextData +from invokeai.backend.model_manager import AnyModel, AnyModelConfig, SubModelType +from invokeai.backend.model_manager.load import ( + LoadedModel, + ModelLoaderRegistry, + ModelLoaderRegistryBase, +) +from invokeai.backend.model_manager.load.convert_cache import ModelConvertCacheBase +from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase +from invokeai.backend.util.logging import InvokeAILogger + +from .model_load_base import ModelLoadServiceBase + + +class ModelLoadService(ModelLoadServiceBase): + """Wrapper around ModelLoaderRegistry.""" + + def __init__( + self, + app_config: InvokeAIAppConfig, + ram_cache: ModelCacheBase[AnyModel], + convert_cache: ModelConvertCacheBase, + registry: Optional[Type[ModelLoaderRegistryBase]] = ModelLoaderRegistry, + ): + """Initialize the model load service.""" + logger = InvokeAILogger.get_logger(self.__class__.__name__) + logger.setLevel(app_config.log_level.upper()) + self._logger = logger + self._app_config = app_config + self._ram_cache = ram_cache + self._convert_cache = convert_cache + self._registry = registry + + def start(self, invoker: Invoker) -> None: + self._invoker = invoker + + @property + def ram_cache(self) -> ModelCacheBase[AnyModel]: + """Return the RAM cache used by this loader.""" + return self._ram_cache + + @property + def convert_cache(self) -> ModelConvertCacheBase: + """Return the checkpoint convert cache used by this loader.""" + return self._convert_cache + + def load_model( + self,
model_config: AnyModelConfig, + submodel_type: Optional[SubModelType] = None, + context_data: Optional[InvocationContextData] = None, + ) -> LoadedModel: + """ + Given a model's configuration, load it and return the LoadedModel object. + + :param model_config: Model configuration record (as returned by ModelRecordBase.get_model()) + :param submodel_type: For main (pipeline) models, the submodel to fetch. + :param context_data: Invocation context data used for event reporting + """ + if context_data: + self._emit_load_event( + context_data=context_data, + model_config=model_config, + ) + + implementation, model_config, submodel_type = self._registry.get_implementation(model_config, submodel_type) # type: ignore + loaded_model: LoadedModel = implementation( + app_config=self._app_config, + logger=self._logger, + ram_cache=self._ram_cache, + convert_cache=self._convert_cache, + ).load_model(model_config, submodel_type) + + if context_data: + self._emit_load_event( + context_data=context_data, + model_config=model_config, + loaded=True, + ) + return loaded_model + + def _emit_load_event( + self, + context_data: InvocationContextData, + model_config: AnyModelConfig, + loaded: Optional[bool] = False, + ) -> None: + if not self._invoker: + return + + if not loaded: + self._invoker.services.events.emit_model_load_started( + queue_id=context_data.queue_item.queue_id, + queue_item_id=context_data.queue_item.item_id, + queue_batch_id=context_data.queue_item.batch_id, + graph_execution_state_id=context_data.queue_item.session_id, + model_config=model_config, + ) + else: + self._invoker.services.events.emit_model_load_completed( + queue_id=context_data.queue_item.queue_id, + queue_item_id=context_data.queue_item.item_id, + queue_batch_id=context_data.queue_item.batch_id, + graph_execution_state_id=context_data.queue_item.session_id, + model_config=model_config, + ) diff --git a/invokeai/app/services/model_manager/__init__.py b/invokeai/app/services/model_manager/__init__.py index 3d6a9c248c..5455577266 100644 --- a/invokeai/app/services/model_manager/__init__.py +++ b/invokeai/app/services/model_manager/__init__.py @@ -1 +1,17 @@ -from .model_manager_default import ModelManagerService # noqa F401 +"""Initialization file for model manager service.""" + +from invokeai.backend.model_manager import AnyModel, AnyModelConfig, BaseModelType, ModelType, SubModelType +from invokeai.backend.model_manager.load import LoadedModel + +from .model_manager_default import ModelManagerService, ModelManagerServiceBase + +__all__ = [ + "ModelManagerServiceBase", + "ModelManagerService", + "AnyModel", + "AnyModelConfig", + "BaseModelType", + "ModelType", + "SubModelType", + "LoadedModel", +] diff --git a/invokeai/app/services/model_manager/model_manager_base.py b/invokeai/app/services/model_manager/model_manager_base.py index 4c2fc4c085..6e886df652 100644 --- a/invokeai/app/services/model_manager/model_manager_base.py +++ b/invokeai/app/services/model_manager/model_manager_base.py @@ -1,286 +1,101 @@ # Copyright (c) 2023 Lincoln D.
Stein and the InvokeAI Team -from __future__ import annotations - from abc import ABC, abstractmethod -from logging import Logger -from pathlib import Path -from typing import TYPE_CHECKING, Callable, List, Literal, Optional, Tuple, Union +from typing import Optional -from pydantic import Field +import torch +from typing_extensions import Self -from invokeai.app.services.config.config_default import InvokeAIAppConfig -from invokeai.backend.model_management import ( - AddModelResult, - BaseModelType, - MergeInterpolationMethod, - ModelInfo, - ModelType, - SchedulerPredictionType, - SubModelType, -) -from invokeai.backend.model_management.model_cache import CacheStats +from invokeai.app.services.invoker import Invoker +from invokeai.app.services.shared.invocation_context import InvocationContextData +from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType, SubModelType +from invokeai.backend.model_manager.load.load_base import LoadedModel -if TYPE_CHECKING: - from invokeai.app.invocations.baseinvocation import BaseInvocation, InvocationContext +from ..config import InvokeAIAppConfig +from ..download import DownloadQueueServiceBase +from ..events.events_base import EventServiceBase +from ..model_install import ModelInstallServiceBase +from ..model_load import ModelLoadServiceBase +from ..model_records import ModelRecordServiceBase class ModelManagerServiceBase(ABC): - """Responsible for managing models on disk and in memory""" + """Abstract base class for the model manager service.""" + # attributes: + # store: ModelRecordServiceBase = Field(description="An instance of the model record configuration service.") + # install: ModelInstallServiceBase = Field(description="An instance of the model install service.") + # load: ModelLoadServiceBase = Field(description="An instance of the model load service.") + + @classmethod @abstractmethod - def __init__( - self, - config: InvokeAIAppConfig, - logger: Logger, - ): + def build_model_manager( + cls, + app_config: InvokeAIAppConfig, + model_record_service: ModelRecordServiceBase, + download_queue: DownloadQueueServiceBase, + events: EventServiceBase, + execution_device: torch.device, + ) -> Self: """ - Initialize with the path to the models.yaml config file. - Optional parameters are the torch device type, precision, max_models, - and sequential_offload boolean. Note that the default device - type and precision are set up for a CUDA system running at half precision. + Construct the model manager service instance. + + Use it rather than the __init__ constructor. This class + method simplifies the construction considerably. 
""" pass + @property @abstractmethod - def get_model( + def store(self) -> ModelRecordServiceBase: + """Return the ModelRecordServiceBase used to store and retrieve configuration records.""" + pass + + @property + @abstractmethod + def load(self) -> ModelLoadServiceBase: + """Return the ModelLoadServiceBase used to load models from their configuration records.""" + pass + + @property + @abstractmethod + def install(self) -> ModelInstallServiceBase: + """Return the ModelInstallServiceBase used to download and manipulate model files.""" + pass + + @abstractmethod + def start(self, invoker: Invoker) -> None: + pass + + @abstractmethod + def stop(self, invoker: Invoker) -> None: + pass + + @abstractmethod + def load_model_by_config( + self, + model_config: AnyModelConfig, + submodel_type: Optional[SubModelType] = None, + context_data: Optional[InvocationContextData] = None, + ) -> LoadedModel: + pass + + @abstractmethod + def load_model_by_key( + self, + key: str, + submodel_type: Optional[SubModelType] = None, + context_data: Optional[InvocationContextData] = None, + ) -> LoadedModel: + pass + + @abstractmethod + def load_model_by_attr( self, model_name: str, base_model: BaseModelType, model_type: ModelType, submodel: Optional[SubModelType] = None, - node: Optional[BaseInvocation] = None, - context: Optional[InvocationContext] = None, - ) -> ModelInfo: - """Retrieve the indicated model with name and type. - submodel can be used to get a part (such as the vae) - of a diffusers pipeline.""" - pass - - @property - @abstractmethod - def logger(self): - pass - - @abstractmethod - def model_exists( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - ) -> bool: - pass - - @abstractmethod - def model_info(self, model_name: str, base_model: BaseModelType, model_type: ModelType) -> dict: - """ - Given a model name returns a dict-like (OmegaConf) object describing it. - Uses the exact format as the omegaconf stanza. - """ - pass - - @abstractmethod - def list_models(self, base_model: Optional[BaseModelType] = None, model_type: Optional[ModelType] = None) -> dict: - """ - Return a dict of models in the format: - { model_type1: - { model_name1: {'status': 'active'|'cached'|'not loaded', - 'model_name' : name, - 'model_type' : SDModelType, - 'description': description, - 'format': 'folder'|'safetensors'|'ckpt' - }, - model_name2: { etc } - }, - model_type2: - { model_name_n: etc - } - """ - pass - - @abstractmethod - def list_model(self, model_name: str, base_model: BaseModelType, model_type: ModelType) -> dict: - """ - Return information about the model using the same format as list_models() - """ - pass - - @abstractmethod - def model_names(self) -> List[Tuple[str, BaseModelType, ModelType]]: - """ - Returns a list of all the model names known. - """ - pass - - @abstractmethod - def add_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - model_attributes: dict, - clobber: bool = False, - ) -> AddModelResult: - """ - Update the named model with a dictionary of attributes. Will fail with an - assertion error if the name already exists. Pass clobber=True to overwrite. - On a successful update, the config will be changed in memory. Will fail - with an assertion error if provided attributes are incorrect or - the model name is missing. Call commit() to write changes to disk. 
- """ - pass - - @abstractmethod - def update_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - model_attributes: dict, - ) -> AddModelResult: - """ - Update the named model with a dictionary of attributes. Will fail with a - ModelNotFoundException if the name does not already exist. - - On a successful update, the config will be changed in memory. Will fail - with an assertion error if provided attributes are incorrect or - the model name is missing. Call commit() to write changes to disk. - """ - pass - - @abstractmethod - def del_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - ): - """ - Delete the named model from configuration. If delete_files is true, - then the underlying weight file or diffusers directory will be deleted - as well. Call commit() to write to disk. - """ - pass - - @abstractmethod - def rename_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - new_name: str, - ): - """ - Rename the indicated model. - """ - pass - - @abstractmethod - def list_checkpoint_configs(self) -> List[Path]: - """ - List the checkpoint config paths from ROOT/configs/stable-diffusion. - """ - pass - - @abstractmethod - def convert_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: Literal[ModelType.Main, ModelType.Vae], - ) -> AddModelResult: - """ - Convert a checkpoint file into a diffusers folder, deleting the cached - version and deleting the original checkpoint file if it is in the models - directory. - :param model_name: Name of the model to convert - :param base_model: Base model type - :param model_type: Type of model ['vae' or 'main'] - - This will raise a ValueError unless the model is not a checkpoint. It will - also raise a ValueError in the event that there is a similarly-named diffusers - directory already in place. - """ - pass - - @abstractmethod - def heuristic_import( - self, - items_to_import: set[str], - prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None, - ) -> dict[str, AddModelResult]: - """Import a list of paths, repo_ids or URLs. Returns the set of - successfully imported items. - :param items_to_import: Set of strings corresponding to models to be imported. - :param prediction_type_helper: A callback that receives the Path of a Stable Diffusion 2 checkpoint model and returns a SchedulerPredictionType. - - The prediction type helper is necessary to distinguish between - models based on Stable Diffusion 2 Base (requiring - SchedulerPredictionType.Epsilson) and Stable Diffusion 768 - (requiring SchedulerPredictionType.VPrediction). It is - generally impossible to do this programmatically, so the - prediction_type_helper usually asks the user to choose. - - The result is a set of successfully installed models. Each element - of the set is a dict corresponding to the newly-created OmegaConf stanza for - that model. 
- """ - pass - - @abstractmethod - def merge_models( - self, - model_names: List[str] = Field( - default=None, min_length=2, max_length=3, description="List of model names to merge" - ), - base_model: Union[BaseModelType, str] = Field( - default=None, description="Base model shared by all models to be merged" - ), - merged_model_name: str = Field(default=None, description="Name of destination model after merging"), - alpha: Optional[float] = 0.5, - interp: Optional[MergeInterpolationMethod] = None, - force: Optional[bool] = False, - merge_dest_directory: Optional[Path] = None, - ) -> AddModelResult: - """ - Merge two to three diffusrs pipeline models and save as a new model. - :param model_names: List of 2-3 models to merge - :param base_model: Base model to use for all models - :param merged_model_name: Name of destination merged model - :param alpha: Alpha strength to apply to 2d and 3d model - :param interp: Interpolation method. None (default) - :param merge_dest_directory: Save the merged model to the designated directory (with 'merged_model_name' appended) - """ - pass - - @abstractmethod - def search_for_models(self, directory: Path) -> List[Path]: - """ - Return list of all models found in the designated directory. - """ - pass - - @abstractmethod - def sync_to_config(self): - """ - Re-read models.yaml, rescan the models directory, and reimport models - in the autoimport directories. Call after making changes outside the - model manager API. - """ - pass - - @abstractmethod - def collect_cache_stats(self, cache_stats: CacheStats): - """ - Reset model cache statistics for graph with graph_id. - """ - pass - - @abstractmethod - def commit(self, conf_file: Optional[Path] = None) -> None: - """ - Write current configuration out to the indicated file. - If no conf_file is provided, then replaces the - original file/database used to initialize the object. - """ + context_data: Optional[InvocationContextData] = None, + ) -> LoadedModel: pass diff --git a/invokeai/app/services/latents_storage/__init__.py b/invokeai/app/services/model_manager/model_manager_common.py similarity index 100% rename from invokeai/app/services/latents_storage/__init__.py rename to invokeai/app/services/model_manager/model_manager_common.py diff --git a/invokeai/app/services/model_manager/model_manager_default.py b/invokeai/app/services/model_manager/model_manager_default.py index cdb3e59a91..7d4b248323 100644 --- a/invokeai/app/services/model_manager/model_manager_default.py +++ b/invokeai/app/services/model_manager/model_manager_default.py @@ -1,413 +1,155 @@ # Copyright (c) 2023 Lincoln D. 
Stein and the InvokeAI Team +"""Implementation of ModelManagerServiceBase.""" -from __future__ import annotations - -from logging import Logger -from pathlib import Path -from typing import TYPE_CHECKING, Callable, List, Literal, Optional, Tuple, Union +from typing import Optional import torch -from pydantic import Field +from typing_extensions import Self -from invokeai.app.services.config.config_default import InvokeAIAppConfig -from invokeai.app.services.invocation_processor.invocation_processor_common import CanceledException -from invokeai.backend.model_management import ( - AddModelResult, - BaseModelType, - MergeInterpolationMethod, - ModelInfo, - ModelManager, - ModelMerger, - ModelNotFoundException, - ModelType, - SchedulerPredictionType, - SubModelType, -) -from invokeai.backend.model_management.model_cache import CacheStats -from invokeai.backend.model_management.model_search import FindModels -from invokeai.backend.util import choose_precision, choose_torch_device +from invokeai.app.services.invoker import Invoker +from invokeai.app.services.shared.invocation_context import InvocationContextData +from invokeai.backend.model_manager import AnyModelConfig, BaseModelType, LoadedModel, ModelType, SubModelType +from invokeai.backend.model_manager.load import ModelCache, ModelConvertCache, ModelLoaderRegistry +from invokeai.backend.util.devices import choose_torch_device +from invokeai.backend.util.logging import InvokeAILogger +from ..config import InvokeAIAppConfig +from ..download import DownloadQueueServiceBase +from ..events.events_base import EventServiceBase +from ..model_install import ModelInstallService, ModelInstallServiceBase +from ..model_load import ModelLoadService, ModelLoadServiceBase +from ..model_records import ModelRecordServiceBase, UnknownModelException from .model_manager_base import ModelManagerServiceBase -if TYPE_CHECKING: - from invokeai.app.invocations.baseinvocation import InvocationContext - -# simple implementation class ModelManagerService(ModelManagerServiceBase): - """Responsible for managing models on disk and in memory""" + """ + The ModelManagerService handles various aspects of model installation, maintenance and loading. + + It bundles three distinct services: + model_manager.store -- Routines to manage the database of model configuration records. + model_manager.install -- Routines to install, move and delete models. + model_manager.load -- Routines to load models into memory. + """ def __init__( self, - config: InvokeAIAppConfig, - logger: Logger, + store: ModelRecordServiceBase, + install: ModelInstallServiceBase, + load: ModelLoadServiceBase, ): - """ - Initialize with the path to the models.yaml config file. - Optional parameters are the torch device type, precision, max_models, - and sequential_offload boolean. Note that the default device - type and precision are set up for a CUDA system running at half precision. 
- """ - if config.model_conf_path and config.model_conf_path.exists(): - config_file = config.model_conf_path - else: - config_file = config.root_dir / "configs/models.yaml" - - logger.debug(f"Config file={config_file}") - - device = torch.device(choose_torch_device()) - device_name = torch.cuda.get_device_name() if device == torch.device("cuda") else "" - logger.info(f"GPU device = {device} {device_name}") - - precision = config.precision - if precision == "auto": - precision = choose_precision(device) - dtype = torch.float32 if precision == "float32" else torch.float16 - - # this is transitional backward compatibility - # support for the deprecated `max_loaded_models` - # configuration value. If present, then the - # cache size is set to 2.5 GB times - # the number of max_loaded_models. Otherwise - # use new `ram_cache_size` config setting - max_cache_size = config.ram_cache_size - - logger.debug(f"Maximum RAM cache size: {max_cache_size} GiB") - - sequential_offload = config.sequential_guidance - - self.mgr = ModelManager( - config=config_file, - device_type=device, - precision=dtype, - max_cache_size=max_cache_size, - sequential_offload=sequential_offload, - logger=logger, - ) - logger.info("Model manager service initialized") - - def get_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - submodel: Optional[SubModelType] = None, - context: Optional[InvocationContext] = None, - ) -> ModelInfo: - """ - Retrieve the indicated model. submodel can be used to get a - part (such as the vae) of a diffusers mode. - """ - - # we can emit model loading events if we are executing with access to the invocation context - if context: - self._emit_load_event( - context=context, - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=submodel, - ) - - model_info = self.mgr.get_model( - model_name, - base_model, - model_type, - submodel, - ) - - if context: - self._emit_load_event( - context=context, - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=submodel, - model_info=model_info, - ) - - return model_info - - def model_exists( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - ) -> bool: - """ - Given a model name, returns True if it is a valid - identifier. - """ - return self.mgr.model_exists( - model_name, - base_model, - model_type, - ) - - def model_info(self, model_name: str, base_model: BaseModelType, model_type: ModelType) -> Union[dict, None]: - """ - Given a model name returns a dict-like (OmegaConf) object describing it. - """ - return self.mgr.model_info(model_name, base_model, model_type) - - def model_names(self) -> List[Tuple[str, BaseModelType, ModelType]]: - """ - Returns a list of all the model names known. - """ - return self.mgr.model_names() - - def list_models( - self, base_model: Optional[BaseModelType] = None, model_type: Optional[ModelType] = None - ) -> list[dict]: - """ - Return a list of models. 
- """ - return self.mgr.list_models(base_model, model_type) - - def list_model(self, model_name: str, base_model: BaseModelType, model_type: ModelType) -> Union[dict, None]: - """ - Return information about the model using the same format as list_models() - """ - return self.mgr.list_model(model_name=model_name, base_model=base_model, model_type=model_type) - - def add_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - model_attributes: dict, - clobber: bool = False, - ) -> AddModelResult: - """ - Update the named model with a dictionary of attributes. Will fail with an - assertion error if the name already exists. Pass clobber=True to overwrite. - On a successful update, the config will be changed in memory. Will fail - with an assertion error if provided attributes are incorrect or - the model name is missing. Call commit() to write changes to disk. - """ - self.logger.debug(f"add/update model {model_name}") - return self.mgr.add_model(model_name, base_model, model_type, model_attributes, clobber) - - def update_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - model_attributes: dict, - ) -> AddModelResult: - """ - Update the named model with a dictionary of attributes. Will fail with a - ModelNotFoundException exception if the name does not already exist. - On a successful update, the config will be changed in memory. Will fail - with an assertion error if provided attributes are incorrect or - the model name is missing. Call commit() to write changes to disk. - """ - self.logger.debug(f"update model {model_name}") - if not self.model_exists(model_name, base_model, model_type): - raise ModelNotFoundException(f"Unknown model {model_name}") - return self.add_model(model_name, base_model, model_type, model_attributes, clobber=True) - - def del_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - ): - """ - Delete the named model from configuration. If delete_files is true, - then the underlying weight file or diffusers directory will be deleted - as well. - """ - self.logger.debug(f"delete model {model_name}") - self.mgr.del_model(model_name, base_model, model_type) - self.mgr.commit() - - def convert_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: Literal[ModelType.Main, ModelType.Vae], - convert_dest_directory: Optional[Path] = Field( - default=None, description="Optional directory location for merged model" - ), - ) -> AddModelResult: - """ - Convert a checkpoint file into a diffusers folder, deleting the cached - version and deleting the original checkpoint file if it is in the models - directory. - :param model_name: Name of the model to convert - :param base_model: Base model type - :param model_type: Type of model ['vae' or 'main'] - :param convert_dest_directory: Save the converted model to the designated directory (`models/etc/etc` by default) - - This will raise a ValueError unless the model is not a checkpoint. It will - also raise a ValueError in the event that there is a similarly-named diffusers - directory already in place. - """ - self.logger.debug(f"convert model {model_name}") - return self.mgr.convert_model(model_name, base_model, model_type, convert_dest_directory) - - def collect_cache_stats(self, cache_stats: CacheStats): - """ - Reset model cache statistics for graph with graph_id. 
- """ - self.mgr.cache.stats = cache_stats - - def commit(self, conf_file: Optional[Path] = None): - """ - Write current configuration out to the indicated file. - If no conf_file is provided, then replaces the - original file/database used to initialize the object. - """ - return self.mgr.commit(conf_file) - - def _emit_load_event( - self, - context: InvocationContext, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - submodel: Optional[SubModelType] = None, - model_info: Optional[ModelInfo] = None, - ): - if context.services.queue.is_canceled(context.graph_execution_state_id): - raise CanceledException() - - if model_info: - context.services.events.emit_model_load_completed( - queue_id=context.queue_id, - queue_item_id=context.queue_item_id, - queue_batch_id=context.queue_batch_id, - graph_execution_state_id=context.graph_execution_state_id, - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=submodel, - model_info=model_info, - ) - else: - context.services.events.emit_model_load_started( - queue_id=context.queue_id, - queue_item_id=context.queue_item_id, - queue_batch_id=context.queue_batch_id, - graph_execution_state_id=context.graph_execution_state_id, - model_name=model_name, - base_model=base_model, - model_type=model_type, - submodel=submodel, - ) + self._store = store + self._install = install + self._load = load @property - def logger(self): - return self.mgr.logger + def store(self) -> ModelRecordServiceBase: + return self._store - def heuristic_import( + @property + def install(self) -> ModelInstallServiceBase: + return self._install + + @property + def load(self) -> ModelLoadServiceBase: + return self._load + + def start(self, invoker: Invoker) -> None: + for service in [self._store, self._install, self._load]: + if hasattr(service, "start"): + service.start(invoker) + + def stop(self, invoker: Invoker) -> None: + for service in [self._store, self._install, self._load]: + if hasattr(service, "stop"): + service.stop(invoker) + + def load_model_by_config( self, - items_to_import: set[str], - prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None, - ) -> dict[str, AddModelResult]: - """Import a list of paths, repo_ids or URLs. Returns the set of - successfully imported items. - :param items_to_import: Set of strings corresponding to models to be imported. - :param prediction_type_helper: A callback that receives the Path of a Stable Diffusion 2 checkpoint model and returns a SchedulerPredictionType. + model_config: AnyModelConfig, + submodel_type: Optional[SubModelType] = None, + context_data: Optional[InvocationContextData] = None, + ) -> LoadedModel: + return self.load.load_model(model_config, submodel_type, context_data) - The prediction type helper is necessary to distinguish between - models based on Stable Diffusion 2 Base (requiring - SchedulerPredictionType.Epsilson) and Stable Diffusion 768 - (requiring SchedulerPredictionType.VPrediction). It is - generally impossible to do this programmatically, so the - prediction_type_helper usually asks the user to choose. - - The result is a set of successfully installed models. Each element - of the set is a dict corresponding to the newly-created OmegaConf stanza for - that model. 
- """ - return self.mgr.heuristic_import(items_to_import, prediction_type_helper) - - def merge_models( + def load_model_by_key( self, - model_names: List[str] = Field( - default=None, min_length=2, max_length=3, description="List of model names to merge" - ), - base_model: Union[BaseModelType, str] = Field( - default=None, description="Base model shared by all models to be merged" - ), - merged_model_name: str = Field(default=None, description="Name of destination model after merging"), - alpha: float = 0.5, - interp: Optional[MergeInterpolationMethod] = None, - force: bool = False, - merge_dest_directory: Optional[Path] = Field( - default=None, description="Optional directory location for merged model" - ), - ) -> AddModelResult: - """ - Merge two to three diffusrs pipeline models and save as a new model. - :param model_names: List of 2-3 models to merge - :param base_model: Base model to use for all models - :param merged_model_name: Name of destination merged model - :param alpha: Alpha strength to apply to 2d and 3d model - :param interp: Interpolation method. None (default) - :param merge_dest_directory: Save the merged model to the designated directory (with 'merged_model_name' appended) - """ - merger = ModelMerger(self.mgr) - try: - result = merger.merge_diffusion_models_and_save( - model_names=model_names, - base_model=base_model, - merged_model_name=merged_model_name, - alpha=alpha, - interp=interp, - force=force, - merge_dest_directory=merge_dest_directory, - ) - except AssertionError as e: - raise ValueError(e) - return result + key: str, + submodel_type: Optional[SubModelType] = None, + context_data: Optional[InvocationContextData] = None, + ) -> LoadedModel: + config = self.store.get_model(key) + return self.load.load_model(config, submodel_type, context_data) - def search_for_models(self, directory: Path) -> List[Path]: - """ - Return list of all models found in the designated directory. - """ - search = FindModels([directory], self.logger) - return search.list_models() - - def sync_to_config(self): - """ - Re-read models.yaml, rescan the models directory, and reimport models - in the autoimport directories. Call after making changes outside the - model manager API. - """ - return self.mgr.sync_to_config() - - def list_checkpoint_configs(self) -> List[Path]: - """ - List the checkpoint config paths from ROOT/configs/stable-diffusion. - """ - config = self.mgr.app_config - conf_path = config.legacy_conf_path - root_path = config.root_path - return [(conf_path / x).relative_to(root_path) for x in conf_path.glob("**/*.yaml")] - - def rename_model( + def load_model_by_attr( self, model_name: str, base_model: BaseModelType, model_type: ModelType, - new_name: Optional[str] = None, - new_base: Optional[BaseModelType] = None, - ): + submodel: Optional[SubModelType] = None, + context_data: Optional[InvocationContextData] = None, + ) -> LoadedModel: """ - Rename the indicated model. Can provide a new name and/or a new base. - :param model_name: Current name of the model - :param base_model: Current base of the model - :param model_type: Model type (can't be changed) - :param new_name: New name for the model - :param new_base: New base for the model + Given a model's attributes, search the database for it, and if found, load and return the LoadedModel object. + + This is provided for API compatability with the get_model() method + in the original model manager. However, note that LoadedModel is + not the same as the original ModelInfo that ws returned. 
+
+        :param model_name: Name of the model to be fetched.
+        :param base_model: Base model
+        :param model_type: Type of the model
+        :param submodel: For main (pipeline models), the submodel to fetch
+        :param context_data: The invocation context data.
+
+        Exceptions: UnknownModelException -- model with this key not known
+                    NotImplementedException -- a model loader was not provided at initialization time
+                    ValueError -- more than one model matches this combination
         """
-        self.mgr.rename_model(
-            base_model=base_model,
-            model_type=model_type,
-            model_name=model_name,
-            new_name=new_name,
-            new_base=new_base,
+        configs = self.store.search_by_attr(model_name, base_model, model_type)
+        if len(configs) == 0:
+            raise UnknownModelException(f"{base_model}/{model_type}/{model_name}: Unknown model")
+        elif len(configs) > 1:
+            raise ValueError(f"{base_model}/{model_type}/{model_name}: More than one model matches.")
+        else:
+            return self.load.load_model(configs[0], submodel, context_data)
+
+    @classmethod
+    def build_model_manager(
+        cls,
+        app_config: InvokeAIAppConfig,
+        model_record_service: ModelRecordServiceBase,
+        download_queue: DownloadQueueServiceBase,
+        events: EventServiceBase,
+        execution_device: torch.device = choose_torch_device(),
+    ) -> Self:
+        """
+        Construct the model manager service instance.
+
+        For simplicity, use this class method rather than the __init__ constructor.
+        """
+        logger = InvokeAILogger.get_logger(cls.__name__)
+        logger.setLevel(app_config.log_level.upper())
+
+        ram_cache = ModelCache(
+            max_cache_size=app_config.ram_cache_size,
+            max_vram_cache_size=app_config.vram_cache_size,
+            logger=logger,
+            execution_device=execution_device,
        )
+        convert_cache = ModelConvertCache(
+            cache_path=app_config.models_convert_cache_path, max_size=app_config.convert_cache_size
+        )
+        loader = ModelLoadService(
+            app_config=app_config,
+            ram_cache=ram_cache,
+            convert_cache=convert_cache,
+            registry=ModelLoaderRegistry,
+        )
+        installer = ModelInstallService(
+            app_config=app_config,
+            record_store=model_record_service,
+            download_queue=download_queue,
+            event_bus=events,
+        )
+        return cls(store=model_record_service, install=installer, load=loader)
diff --git a/invokeai/app/services/model_metadata/__init__.py b/invokeai/app/services/model_metadata/__init__.py
new file mode 100644
index 0000000000..981c96b709
--- /dev/null
+++ b/invokeai/app/services/model_metadata/__init__.py
@@ -0,0 +1,9 @@
+"""Init file for ModelMetadataStoreService module."""
+
+from .metadata_store_base import ModelMetadataStoreBase
+from .metadata_store_sql import ModelMetadataStoreSQL
+
+__all__ = [
+    "ModelMetadataStoreBase",
+    "ModelMetadataStoreSQL",
+]
diff --git a/invokeai/app/services/model_metadata/metadata_store_base.py b/invokeai/app/services/model_metadata/metadata_store_base.py
new file mode 100644
index 0000000000..e0e4381b09
--- /dev/null
+++ b/invokeai/app/services/model_metadata/metadata_store_base.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Development Team
+"""
+Storage for Model Metadata
+"""
+
+from abc import ABC, abstractmethod
+from typing import List, Set, Tuple
+
+from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata
+
+
+class ModelMetadataStoreBase(ABC):
+    """Store, search and fetch model metadata retrieved from remote repositories."""
+
+    @abstractmethod
+    def add_metadata(self, model_key: str, metadata: AnyModelRepoMetadata) -> None:
+        """
+        Add a block of repo metadata to a model record.
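# Two sketches for the new facade API above. First, a caller migrating from
# the legacy get_model() (which returned a ModelInfo) to the compatibility
# shim load_model_by_attr() (which returns a LoadedModel); second, service
# wiring via build_model_manager(). The enum members are real; the
# surrounding variables and the model name are assumptions for illustration.

import torch

from invokeai.backend.model_manager import BaseModelType, ModelType


def fetch_vae(mm: "ModelManagerServiceBase") -> "LoadedModel":
    return mm.load_model_by_attr(
        model_name="sd-1-vae",  # hypothetical model name
        base_model=BaseModelType.StableDiffusion1,
        model_type=ModelType.Vae,
    )


def wire_model_manager(app_config, record_store, download_queue, events) -> "ModelManagerServiceBase":
    # The classmethod assembles the RAM cache, convert cache, loader, and
    # installer internally; callers only supply the shared services.
    return ModelManagerService.build_model_manager(
        app_config=app_config,
        model_record_service=record_store,
        download_queue=download_queue,
        events=events,
        execution_device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
    )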
+ + The model record config must already exist in the database with the + same key. Otherwise a FOREIGN KEY constraint exception will be raised. + + :param model_key: Existing model key in the `model_config` table + :param metadata: ModelRepoMetadata object to store + """ + + @abstractmethod + def get_metadata(self, model_key: str) -> AnyModelRepoMetadata: + """Retrieve the ModelRepoMetadata corresponding to model key.""" + + @abstractmethod + def list_all_metadata(self) -> List[Tuple[str, AnyModelRepoMetadata]]: # key, metadata + """Dump out all the metadata.""" + + @abstractmethod + def update_metadata(self, model_key: str, metadata: AnyModelRepoMetadata) -> AnyModelRepoMetadata: + """ + Update metadata corresponding to the model with the indicated key. + + :param model_key: Existing model key in the `model_config` table + :param metadata: ModelRepoMetadata object to update + """ + + @abstractmethod + def list_tags(self) -> Set[str]: + """Return all tags in the tags table.""" + + @abstractmethod + def search_by_tag(self, tags: Set[str]) -> Set[str]: + """Return the keys of models containing all of the listed tags.""" + + @abstractmethod + def search_by_author(self, author: str) -> Set[str]: + """Return the keys of models authored by the indicated author.""" + + @abstractmethod + def search_by_name(self, name: str) -> Set[str]: + """ + Return the keys of models with the indicated name. + + Note that this is the name of the model given to it by + the remote source. The user may have changed the local + name. The local name will be located in the model config + record object. + """ diff --git a/invokeai/app/services/model_metadata/metadata_store_sql.py b/invokeai/app/services/model_metadata/metadata_store_sql.py new file mode 100644 index 0000000000..afe9d2c8c6 --- /dev/null +++ b/invokeai/app/services/model_metadata/metadata_store_sql.py @@ -0,0 +1,222 @@ +# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Development Team +""" +SQL Storage for Model Metadata +""" + +import sqlite3 +from typing import List, Optional, Set, Tuple + +from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase +from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata, UnknownMetadataException +from invokeai.backend.model_manager.metadata.fetch import ModelMetadataFetchBase + +from .metadata_store_base import ModelMetadataStoreBase + + +class ModelMetadataStoreSQL(ModelMetadataStoreBase): + """Store, search and fetch model metadata retrieved from remote repositories.""" + + def __init__(self, db: SqliteDatabase): + """ + Initialize a new object from preexisting sqlite3 connection and threading lock objects. + + :param conn: sqlite3 connection object + :param lock: threading Lock object + """ + super().__init__() + self._db = db + self._cursor = self._db.conn.cursor() + + def add_metadata(self, model_key: str, metadata: AnyModelRepoMetadata) -> None: + """ + Add a block of repo metadata to a model record. + + The model record config must already exist in the database with the + same key. Otherwise a FOREIGN KEY constraint exception will be raised. 
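# To make the ABC's contract concrete, here is a minimal in-memory sketch of a
# conforming implementation. It assumes the metadata objects expose `name`,
# `author`, and `tags` attributes, as the SQL implementation below relies on.

class InMemoryMetadataStore(ModelMetadataStoreBase):
    """Illustrative, non-persistent implementation of ModelMetadataStoreBase."""

    def __init__(self) -> None:
        self._metadata: dict[str, AnyModelRepoMetadata] = {}

    def add_metadata(self, model_key: str, metadata: AnyModelRepoMetadata) -> None:
        self._metadata[model_key] = metadata

    def get_metadata(self, model_key: str) -> AnyModelRepoMetadata:
        return self._metadata[model_key]

    def list_all_metadata(self) -> list[tuple[str, AnyModelRepoMetadata]]:
        return list(self._metadata.items())

    def update_metadata(self, model_key: str, metadata: AnyModelRepoMetadata) -> AnyModelRepoMetadata:
        self._metadata[model_key] = metadata
        return metadata

    def list_tags(self) -> set[str]:
        return {tag for md in self._metadata.values() for tag in md.tags}

    def search_by_tag(self, tags: set[str]) -> set[str]:
        # A model matches only if it carries *all* of the requested tags.
        return {key for key, md in self._metadata.items() if tags.issubset(md.tags)}

    def search_by_author(self, author: str) -> set[str]:
        return {key for key, md in self._metadata.items() if md.author == author}

    def search_by_name(self, name: str) -> set[str]:
        return {key for key, md in self._metadata.items() if md.name == name}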
+ + :param model_key: Existing model key in the `model_config` table + :param metadata: ModelRepoMetadata object to store + """ + json_serialized = metadata.model_dump_json() + with self._db.lock: + try: + self._cursor.execute( + """--sql + INSERT INTO model_metadata( + id, + metadata + ) + VALUES (?,?); + """, + ( + model_key, + json_serialized, + ), + ) + self._update_tags(model_key, metadata.tags) + self._db.conn.commit() + except sqlite3.IntegrityError as excp: # FOREIGN KEY error: the key was not in model_config table + self._db.conn.rollback() + raise UnknownMetadataException from excp + except sqlite3.Error as excp: + self._db.conn.rollback() + raise excp + + def get_metadata(self, model_key: str) -> AnyModelRepoMetadata: + """Retrieve the ModelRepoMetadata corresponding to model key.""" + with self._db.lock: + self._cursor.execute( + """--sql + SELECT metadata FROM model_metadata + WHERE id=?; + """, + (model_key,), + ) + rows = self._cursor.fetchone() + if not rows: + raise UnknownMetadataException("model metadata not found") + return ModelMetadataFetchBase.from_json(rows[0]) + + def list_all_metadata(self) -> List[Tuple[str, AnyModelRepoMetadata]]: # key, metadata + """Dump out all the metadata.""" + with self._db.lock: + self._cursor.execute( + """--sql + SELECT id,metadata FROM model_metadata; + """, + (), + ) + rows = self._cursor.fetchall() + return [(x[0], ModelMetadataFetchBase.from_json(x[1])) for x in rows] + + def update_metadata(self, model_key: str, metadata: AnyModelRepoMetadata) -> AnyModelRepoMetadata: + """ + Update metadata corresponding to the model with the indicated key. + + :param model_key: Existing model key in the `model_config` table + :param metadata: ModelRepoMetadata object to update + """ + json_serialized = metadata.model_dump_json() # turn it into a json string. + with self._db.lock: + try: + self._cursor.execute( + """--sql + UPDATE model_metadata + SET + metadata=? + WHERE id=?; + """, + (json_serialized, model_key), + ) + if self._cursor.rowcount == 0: + raise UnknownMetadataException("model metadata not found") + self._update_tags(model_key, metadata.tags) + self._db.conn.commit() + except sqlite3.Error as e: + self._db.conn.rollback() + raise e + + return self.get_metadata(model_key) + + def list_tags(self) -> Set[str]: + """Return all tags in the tags table.""" + self._cursor.execute( + """--sql + select tag_text from tags; + """ + ) + return {x[0] for x in self._cursor.fetchall()} + + def search_by_tag(self, tags: Set[str]) -> Set[str]: + """Return the keys of models containing all of the listed tags.""" + with self._db.lock: + try: + matches: Optional[Set[str]] = None + for tag in tags: + self._cursor.execute( + """--sql + SELECT a.model_id FROM model_tags AS a, + tags AS b + WHERE a.tag_id=b.tag_id + AND b.tag_text=?; + """, + (tag,), + ) + model_keys = {x[0] for x in self._cursor.fetchall()} + if matches is None: + matches = model_keys + matches = matches.intersection(model_keys) + except sqlite3.Error as e: + raise e + return matches if matches else set() + + def search_by_author(self, author: str) -> Set[str]: + """Return the keys of models authored by the indicated author.""" + self._cursor.execute( + """--sql + SELECT id FROM model_metadata + WHERE author=?; + """, + (author,), + ) + return {x[0] for x in self._cursor.fetchall()} + + def search_by_name(self, name: str) -> Set[str]: + """ + Return the keys of models with the indicated name. + + Note that this is the name of the model given to it by + the remote source. 
The user may have changed the local + name. The local name will be located in the model config + record object. + """ + self._cursor.execute( + """--sql + SELECT id FROM model_metadata + WHERE name=?; + """, + (name,), + ) + return {x[0] for x in self._cursor.fetchall()} + + def _update_tags(self, model_key: str, tags: Set[str]) -> None: + """Update tags for the model referenced by model_key.""" + # remove previous tags from this model + self._cursor.execute( + """--sql + DELETE FROM model_tags + WHERE model_id=?; + """, + (model_key,), + ) + + for tag in tags: + self._cursor.execute( + """--sql + INSERT OR IGNORE INTO tags ( + tag_text + ) + VALUES (?); + """, + (tag,), + ) + self._cursor.execute( + """--sql + SELECT tag_id + FROM tags + WHERE tag_text = ? + LIMIT 1; + """, + (tag,), + ) + tag_id = self._cursor.fetchone()[0] + self._cursor.execute( + """--sql + INSERT OR IGNORE INTO model_tags ( + model_id, + tag_id + ) + VALUES (?,?); + """, + (model_key, tag_id), + ) diff --git a/invokeai/app/services/model_records/__init__.py b/invokeai/app/services/model_records/__init__.py index 1622066715..7f888cf1f3 100644 --- a/invokeai/app/services/model_records/__init__.py +++ b/invokeai/app/services/model_records/__init__.py @@ -1,4 +1,5 @@ """Init file for model record services.""" + from .model_records_base import ( # noqa F401 DuplicateModelException, InvalidModelException, diff --git a/invokeai/app/services/model_records/model_records_base.py b/invokeai/app/services/model_records/model_records_base.py index 57597570cd..d6014db448 100644 --- a/invokeai/app/services/model_records/model_records_base.py +++ b/invokeai/app/services/model_records/model_records_base.py @@ -11,8 +11,15 @@ from typing import Any, Dict, List, Optional, Set, Tuple, Union from pydantic import BaseModel, Field from invokeai.app.services.shared.pagination import PaginatedResults -from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelFormat, ModelType -from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata, ModelMetadataStore +from invokeai.backend.model_manager import ( + AnyModelConfig, + BaseModelType, + ModelFormat, + ModelType, +) +from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata + +from ..model_metadata import ModelMetadataStoreBase class DuplicateModelException(Exception): @@ -104,7 +111,7 @@ class ModelRecordServiceBase(ABC): @property @abstractmethod - def metadata_store(self) -> ModelMetadataStore: + def metadata_store(self) -> ModelMetadataStoreBase: """Return a ModelMetadataStore initialized on the same database.""" pass @@ -146,7 +153,7 @@ class ModelRecordServiceBase(ABC): @abstractmethod def exists(self, key: str) -> bool: """ - Return True if a model with the indicated key exists in the databse. + Return True if a model with the indicated key exists in the database. 
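# A usage sketch for the tag plumbing above: add_metadata() populates the
# `tags` and `model_tags` join tables via _update_tags(), after which tag
# search reduces to intersecting per-tag key sets. The key and tag values are
# hypothetical; `db` and `metadata` come from the surrounding application.

def tag_and_search(db: "SqliteDatabase", metadata: "AnyModelRepoMetadata") -> set[str]:
    store = ModelMetadataStoreSQL(db)
    store.add_metadata("model-key-1", metadata)     # stores metadata and its tags
    anime = store.search_by_tag({"anime"})          # keys carrying the 'anime' tag
    both = store.search_by_tag({"anime", "style"})  # keys carrying *all* listed tags
    assert both <= anime                            # adding tags can only narrow the result
    return both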
        :param key: Unique key for the model to be checked
        """
diff --git a/invokeai/app/services/model_records/model_records_sql.py b/invokeai/app/services/model_records/model_records_sql.py
index 4512da5d41..60f0ad86a8 100644
--- a/invokeai/app/services/model_records/model_records_sql.py
+++ b/invokeai/app/services/model_records/model_records_sql.py
@@ -39,7 +39,6 @@ Typical usage:
    configs = store.search_by_attr(base_model='sd-2', model_type='main')
 """
-
 import json
 import sqlite3
 from math import ceil
@@ -54,8 +53,9 @@ from invokeai.backend.model_manager.config import (
     ModelFormat,
     ModelType,
 )
-from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata, ModelMetadataStore, UnknownMetadataException
+from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata, UnknownMetadataException
 
+from ..model_metadata import ModelMetadataStoreBase, ModelMetadataStoreSQL
 from ..shared.sqlite.sqlite_database import SqliteDatabase
 from .model_records_base import (
     DuplicateModelException,
@@ -69,16 +69,16 @@ from .model_records_base import (
 class ModelRecordServiceSQL(ModelRecordServiceBase):
     """Implementation of the ModelConfigStore ABC using a SQL database."""
 
-    def __init__(self, db: SqliteDatabase):
+    def __init__(self, db: SqliteDatabase, metadata_store: ModelMetadataStoreBase):
         """
         Initialize a new object from preexisting sqlite3 connection and threading lock objects.
 
-        :param conn: sqlite3 connection object
-        :param lock: threading Lock object
+        :param db: Sqlite connection object
         """
         super().__init__()
         self._db = db
-        self._cursor = self._db.conn.cursor()
+        self._cursor = db.conn.cursor()
+        self._metadata_store = metadata_store
 
     @property
     def db(self) -> SqliteDatabase:
@@ -158,7 +158,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
             self._db.conn.rollback()
             raise e
 
-    def update_model(self, key: str, config: Union[dict, AnyModelConfig]) -> AnyModelConfig:
+    def update_model(self, key: str, config: Union[Dict[str, Any], AnyModelConfig]) -> AnyModelConfig:
         """
         Update the model, returning the updated version.
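# With the revised constructor above, the record service no longer builds its
# own metadata store; the caller injects one that shares the same database
# connection and lock. A construction sketch, with the wiring assumed:

def make_record_store(db: "SqliteDatabase") -> "ModelRecordServiceSQL":
    # The metadata store reuses the record store's SqliteDatabase.
    return ModelRecordServiceSQL(db, ModelMetadataStoreSQL(db))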
@@ -199,7 +199,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase): with self._db.lock: self._cursor.execute( """--sql - SELECT config FROM model_config + SELECT config, strftime('%s',updated_at) FROM model_config WHERE id=?; """, (key,), @@ -207,7 +207,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase): rows = self._cursor.fetchone() if not rows: raise UnknownModelException("model not found") - model = ModelConfigFactory.make_config(json.loads(rows[0])) + model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1]) return model def exists(self, key: str) -> bool: @@ -265,12 +265,14 @@ class ModelRecordServiceSQL(ModelRecordServiceBase): with self._db.lock: self._cursor.execute( f"""--sql - select config FROM model_config + select config, strftime('%s',updated_at) FROM model_config {where}; """, tuple(bindings), ) - results = [ModelConfigFactory.make_config(json.loads(x[0])) for x in self._cursor.fetchall()] + results = [ + ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in self._cursor.fetchall() + ] return results def search_by_path(self, path: Union[str, Path]) -> List[AnyModelConfig]: @@ -279,12 +281,14 @@ class ModelRecordServiceSQL(ModelRecordServiceBase): with self._db.lock: self._cursor.execute( """--sql - SELECT config FROM model_config + SELECT config, strftime('%s',updated_at) FROM model_config WHERE path=?; """, (str(path),), ) - results = [ModelConfigFactory.make_config(json.loads(x[0])) for x in self._cursor.fetchall()] + results = [ + ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in self._cursor.fetchall() + ] return results def search_by_hash(self, hash: str) -> List[AnyModelConfig]: @@ -293,18 +297,20 @@ class ModelRecordServiceSQL(ModelRecordServiceBase): with self._db.lock: self._cursor.execute( """--sql - SELECT config FROM model_config + SELECT config, strftime('%s',updated_at) FROM model_config WHERE original_hash=?; """, (hash,), ) - results = [ModelConfigFactory.make_config(json.loads(x[0])) for x in self._cursor.fetchall()] + results = [ + ModelConfigFactory.make_config(json.loads(x[0]), timestamp=x[1]) for x in self._cursor.fetchall() + ] return results @property - def metadata_store(self) -> ModelMetadataStore: + def metadata_store(self) -> ModelMetadataStoreBase: """Return a ModelMetadataStore initialized on the same database.""" - return ModelMetadataStore(self._db) + return self._metadata_store def get_metadata(self, key: str) -> Optional[AnyModelRepoMetadata]: """ @@ -325,18 +331,18 @@ class ModelRecordServiceSQL(ModelRecordServiceBase): :param tags: Set of tags to search for. All tags must be present. 
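# A sketch of record-level tag search built from the pieces above: tag hits
# are resolved in the metadata store, then each key is inflated into a full
# model config via get_model(). The tag values are hypothetical.

def models_with_tags(record_store: "ModelRecordServiceSQL", tags: set[str]) -> list["AnyModelConfig"]:
    keys = record_store.metadata_store.search_by_tag(tags)
    return [record_store.get_model(key) for key in keys]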
""" - store = ModelMetadataStore(self._db) + store = ModelMetadataStoreSQL(self._db) keys = store.search_by_tag(tags) return [self.get_model(x) for x in keys] def list_tags(self) -> Set[str]: """Return a unique set of all the model tags in the metadata database.""" - store = ModelMetadataStore(self._db) + store = ModelMetadataStoreSQL(self._db) return store.list_tags() def list_all_metadata(self) -> List[Tuple[str, AnyModelRepoMetadata]]: """List metadata for all models that have it.""" - store = ModelMetadataStore(self._db) + store = ModelMetadataStoreSQL(self._db) return store.list_all_metadata() def list_models( diff --git a/invokeai/app/services/object_serializer/object_serializer_base.py b/invokeai/app/services/object_serializer/object_serializer_base.py new file mode 100644 index 0000000000..ff19b4a039 --- /dev/null +++ b/invokeai/app/services/object_serializer/object_serializer_base.py @@ -0,0 +1,44 @@ +from abc import ABC, abstractmethod +from typing import Callable, Generic, TypeVar + +T = TypeVar("T") + + +class ObjectSerializerBase(ABC, Generic[T]): + """Saves and loads arbitrary python objects.""" + + def __init__(self) -> None: + self._on_deleted_callbacks: list[Callable[[str], None]] = [] + + @abstractmethod + def load(self, name: str) -> T: + """ + Loads the object. + :param name: The name of the object to load. + :raises ObjectNotFoundError: if the object is not found + """ + pass + + @abstractmethod + def save(self, obj: T) -> str: + """ + Saves the object, returning its name. + :param obj: The object to save. + """ + pass + + @abstractmethod + def delete(self, name: str) -> None: + """ + Deletes the object, if it exists. + :param name: The name of the object to delete. + """ + pass + + def on_deleted(self, on_deleted: Callable[[str], None]) -> None: + """Register a callback for when an object is deleted""" + self._on_deleted_callbacks.append(on_deleted) + + def _on_deleted(self, name: str) -> None: + for callback in self._on_deleted_callbacks: + callback(name) diff --git a/invokeai/app/services/object_serializer/object_serializer_common.py b/invokeai/app/services/object_serializer/object_serializer_common.py new file mode 100644 index 0000000000..7057386541 --- /dev/null +++ b/invokeai/app/services/object_serializer/object_serializer_common.py @@ -0,0 +1,5 @@ +class ObjectNotFoundError(KeyError): + """Raised when an object is not found while loading""" + + def __init__(self, name: str) -> None: + super().__init__(f"Object with name {name} not found") diff --git a/invokeai/app/services/object_serializer/object_serializer_disk.py b/invokeai/app/services/object_serializer/object_serializer_disk.py new file mode 100644 index 0000000000..935fec3060 --- /dev/null +++ b/invokeai/app/services/object_serializer/object_serializer_disk.py @@ -0,0 +1,85 @@ +import tempfile +import typing +from dataclasses import dataclass +from pathlib import Path +from typing import TYPE_CHECKING, Optional, TypeVar + +import torch + +from invokeai.app.services.object_serializer.object_serializer_base import ObjectSerializerBase +from invokeai.app.services.object_serializer.object_serializer_common import ObjectNotFoundError +from invokeai.app.util.misc import uuid_string + +if TYPE_CHECKING: + from invokeai.app.services.invoker import Invoker + + +T = TypeVar("T") + + +@dataclass +class DeleteAllResult: + deleted_count: int + freed_space_bytes: float + + +class ObjectSerializerDisk(ObjectSerializerBase[T]): + """Disk-backed storage for arbitrary python objects. 
Serialization is handled by `torch.save` and `torch.load`. + + :param output_dir: The folder where the serialized objects will be stored + :param ephemeral: If True, objects will be stored in a temporary directory inside the given output_dir and cleaned up on exit + """ + + def __init__(self, output_dir: Path, ephemeral: bool = False): + super().__init__() + self._ephemeral = ephemeral + self._base_output_dir = output_dir + self._base_output_dir.mkdir(parents=True, exist_ok=True) + # Must specify `ignore_cleanup_errors` to avoid fatal errors during cleanup on Windows + self._tempdir = ( + tempfile.TemporaryDirectory(dir=self._base_output_dir, ignore_cleanup_errors=True) if ephemeral else None + ) + self._output_dir = Path(self._tempdir.name) if self._tempdir else self._base_output_dir + self.__obj_class_name: Optional[str] = None + + def load(self, name: str) -> T: + file_path = self._get_path(name) + try: + return torch.load(file_path) # pyright: ignore [reportUnknownMemberType] + except FileNotFoundError as e: + raise ObjectNotFoundError(name) from e + + def save(self, obj: T) -> str: + name = self._new_name() + file_path = self._get_path(name) + torch.save(obj, file_path) # pyright: ignore [reportUnknownMemberType] + return name + + def delete(self, name: str) -> None: + file_path = self._get_path(name) + file_path.unlink() + + @property + def _obj_class_name(self) -> str: + if not self.__obj_class_name: + # `__orig_class__` is not available in the constructor for some technical, undoubtedly very pythonic reason + self.__obj_class_name = typing.get_args(self.__orig_class__)[0].__name__ # pyright: ignore [reportUnknownMemberType, reportAttributeAccessIssue] + return self.__obj_class_name + + def _get_path(self, name: str) -> Path: + return self._output_dir / name + + def _new_name(self) -> str: + return f"{self._obj_class_name}_{uuid_string()}" + + def _tempdir_cleanup(self) -> None: + """Calls `cleanup` on the temporary directory, if it exists.""" + if self._tempdir: + self._tempdir.cleanup() + + def __del__(self) -> None: + # In case the service is not properly stopped, clean up the temporary directory when the class instance is GC'd. + self._tempdir_cleanup() + + def stop(self, invoker: "Invoker") -> None: + self._tempdir_cleanup() diff --git a/invokeai/app/services/object_serializer/object_serializer_forward_cache.py b/invokeai/app/services/object_serializer/object_serializer_forward_cache.py new file mode 100644 index 0000000000..b361259a4b --- /dev/null +++ b/invokeai/app/services/object_serializer/object_serializer_forward_cache.py @@ -0,0 +1,65 @@ +from queue import Queue +from typing import TYPE_CHECKING, Optional, TypeVar + +from invokeai.app.services.object_serializer.object_serializer_base import ObjectSerializerBase + +T = TypeVar("T") + +if TYPE_CHECKING: + from invokeai.app.services.invoker import Invoker + + +class ObjectSerializerForwardCache(ObjectSerializerBase[T]): + """ + Provides a LRU cache for an instance of `ObjectSerializerBase`. + Saving an object to the cache always writes through to the underlying storage. 
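# A composition sketch for the two serializers defined here: the disk
# serializer persists objects via torch.save/torch.load, and the forward cache
# (below) layers an in-memory LRU on top of it. The output directory is an
# assumption; subscripting the generic at instantiation is what lets
# ObjectSerializerDisk derive its file-name prefix from T via __orig_class__.

from pathlib import Path

import torch

storage = ObjectSerializerForwardCache(
    ObjectSerializerDisk[torch.Tensor](Path("/tmp/invokeai-objects"), ephemeral=True),
    max_cache_size=20,
)
name = storage.save(torch.zeros(4, 4))  # writes through to disk and caches in memory
tensor = storage.load(name)             # served from the LRU cache when present
storage.delete(name)                    # removes from disk and cache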
+ """ + + def __init__(self, underlying_storage: ObjectSerializerBase[T], max_cache_size: int = 20): + super().__init__() + self._underlying_storage = underlying_storage + self._cache: dict[str, T] = {} + self._cache_ids = Queue[str]() + self._max_cache_size = max_cache_size + + def start(self, invoker: "Invoker") -> None: + self._invoker = invoker + start_op = getattr(self._underlying_storage, "start", None) + if callable(start_op): + start_op(invoker) + + def stop(self, invoker: "Invoker") -> None: + self._invoker = invoker + stop_op = getattr(self._underlying_storage, "stop", None) + if callable(stop_op): + stop_op(invoker) + + def load(self, name: str) -> T: + cache_item = self._get_cache(name) + if cache_item is not None: + return cache_item + + obj = self._underlying_storage.load(name) + self._set_cache(name, obj) + return obj + + def save(self, obj: T) -> str: + name = self._underlying_storage.save(obj) + self._set_cache(name, obj) + return name + + def delete(self, name: str) -> None: + self._underlying_storage.delete(name) + if name in self._cache: + del self._cache[name] + self._on_deleted(name) + + def _get_cache(self, name: str) -> Optional[T]: + return None if name not in self._cache else self._cache[name] + + def _set_cache(self, name: str, data: T): + if name not in self._cache: + self._cache[name] = data + self._cache_ids.put(name) + if self._cache_ids.qsize() > self._max_cache_size: + self._cache.pop(self._cache_ids.get()) diff --git a/invokeai/app/services/session_processor/session_processor_common.py b/invokeai/app/services/session_processor/session_processor_common.py index 00195a773f..0ca51de517 100644 --- a/invokeai/app/services/session_processor/session_processor_common.py +++ b/invokeai/app/services/session_processor/session_processor_common.py @@ -4,3 +4,17 @@ from pydantic import BaseModel, Field class SessionProcessorStatus(BaseModel): is_started: bool = Field(description="Whether the session processor is started") is_processing: bool = Field(description="Whether a session is being processed") + + +class CanceledException(Exception): + """Execution canceled by user.""" + + pass + + +class ProgressImage(BaseModel): + """The progress image sent intermittently during processing""" + + width: int = Field(description="The effective width of the image in pixels") + height: int = Field(description="The effective height of the image in pixels") + dataURL: str = Field(description="The image data as a b64 data URL") diff --git a/invokeai/app/services/session_processor/session_processor_default.py b/invokeai/app/services/session_processor/session_processor_default.py index 32e94a305d..c0b98220c8 100644 --- a/invokeai/app/services/session_processor/session_processor_default.py +++ b/invokeai/app/services/session_processor/session_processor_default.py @@ -1,4 +1,5 @@ import traceback +from contextlib import suppress from threading import BoundedSemaphore, Thread from threading import Event as ThreadEvent from typing import Optional @@ -6,136 +7,270 @@ from typing import Optional from fastapi_events.handlers.local import local_handler from fastapi_events.typing import Event as FastAPIEvent +from invokeai.app.invocations.baseinvocation import BaseInvocation from invokeai.app.services.events.events_base import EventServiceBase +from invokeai.app.services.invocation_stats.invocation_stats_common import GESStatsNotFoundError +from invokeai.app.services.session_processor.session_processor_common import CanceledException from invokeai.app.services.session_queue.session_queue_common 
import SessionQueueItem +from invokeai.app.services.shared.invocation_context import InvocationContextData, build_invocation_context +from invokeai.app.util.profiler import Profiler from ..invoker import Invoker from .session_processor_base import SessionProcessorBase from .session_processor_common import SessionProcessorStatus -POLLING_INTERVAL = 1 -THREAD_LIMIT = 1 - class DefaultSessionProcessor(SessionProcessorBase): - def start(self, invoker: Invoker) -> None: - self.__invoker: Invoker = invoker - self.__queue_item: Optional[SessionQueueItem] = None + def start(self, invoker: Invoker, thread_limit: int = 1, polling_interval: int = 1) -> None: + self._invoker: Invoker = invoker + self._queue_item: Optional[SessionQueueItem] = None + self._invocation: Optional[BaseInvocation] = None - self.__resume_event = ThreadEvent() - self.__stop_event = ThreadEvent() - self.__poll_now_event = ThreadEvent() + self._resume_event = ThreadEvent() + self._stop_event = ThreadEvent() + self._poll_now_event = ThreadEvent() + self._cancel_event = ThreadEvent() local_handler.register(event_name=EventServiceBase.queue_event, _func=self._on_queue_event) - self.__threadLimit = BoundedSemaphore(THREAD_LIMIT) - self.__thread = Thread( + self._thread_limit = thread_limit + self._thread_semaphore = BoundedSemaphore(thread_limit) + self._polling_interval = polling_interval + + # If profiling is enabled, create a profiler. The same profiler will be used for all sessions. Internally, + # the profiler will create a new profile for each session. + self._profiler = ( + Profiler( + logger=self._invoker.services.logger, + output_dir=self._invoker.services.configuration.profiles_path, + prefix=self._invoker.services.configuration.profile_prefix, + ) + if self._invoker.services.configuration.profile_graphs + else None + ) + + self._thread = Thread( name="session_processor", - target=self.__process, + target=self._process, kwargs={ - "stop_event": self.__stop_event, - "poll_now_event": self.__poll_now_event, - "resume_event": self.__resume_event, + "stop_event": self._stop_event, + "poll_now_event": self._poll_now_event, + "resume_event": self._resume_event, + "cancel_event": self._cancel_event, }, ) - self.__thread.start() + self._thread.start() def stop(self, *args, **kwargs) -> None: - self.__stop_event.set() + self._stop_event.set() def _poll_now(self) -> None: - self.__poll_now_event.set() + self._poll_now_event.set() async def _on_queue_event(self, event: FastAPIEvent) -> None: event_name = event[1]["event"] - # This was a match statement, but match is not supported on python 3.9 - if event_name in [ - "graph_execution_state_complete", - "invocation_error", - "session_retrieval_error", - "invocation_retrieval_error", - ]: - self.__queue_item = None - self._poll_now() - elif ( - event_name == "session_canceled" - and self.__queue_item is not None - and self.__queue_item.session_id == event[1]["data"]["graph_execution_state_id"] - ): - self.__queue_item = None + if event_name == "session_canceled" or event_name == "queue_cleared": + # These both mean we should cancel the current session. 
+ self._cancel_event.set() self._poll_now() elif event_name == "batch_enqueued": self._poll_now() - elif event_name == "queue_cleared": - self.__queue_item = None - self._poll_now() def resume(self) -> SessionProcessorStatus: - if not self.__resume_event.is_set(): - self.__resume_event.set() + if not self._resume_event.is_set(): + self._resume_event.set() return self.get_status() def pause(self) -> SessionProcessorStatus: - if self.__resume_event.is_set(): - self.__resume_event.clear() + if self._resume_event.is_set(): + self._resume_event.clear() return self.get_status() def get_status(self) -> SessionProcessorStatus: return SessionProcessorStatus( - is_started=self.__resume_event.is_set(), - is_processing=self.__queue_item is not None, + is_started=self._resume_event.is_set(), + is_processing=self._queue_item is not None, ) - def __process( + def _process( self, stop_event: ThreadEvent, poll_now_event: ThreadEvent, resume_event: ThreadEvent, + cancel_event: ThreadEvent, ): + # Outermost processor try block; any unhandled exception is a fatal processor error try: + self._thread_semaphore.acquire() stop_event.clear() resume_event.set() - self.__threadLimit.acquire() - queue_item: Optional[SessionQueueItem] = None + cancel_event.clear() + while not stop_event.is_set(): poll_now_event.clear() + # Middle processor try block; any unhandled exception is a non-fatal processor error try: - # do not dequeue if there is already a session running - if self.__queue_item is None and resume_event.is_set(): - queue_item = self.__invoker.services.session_queue.dequeue() + # Get the next session to process + self._queue_item = self._invoker.services.session_queue.dequeue() + if self._queue_item is not None and resume_event.is_set(): + self._invoker.services.logger.debug(f"Executing queue item {self._queue_item.item_id}") + cancel_event.clear() - if queue_item is not None: - self.__invoker.services.logger.debug(f"Executing queue item {queue_item.item_id}") - self.__queue_item = queue_item - self.__invoker.services.graph_execution_manager.set(queue_item.session) - self.__invoker.invoke( - session_queue_batch_id=queue_item.batch_id, - session_queue_id=queue_item.queue_id, - session_queue_item_id=queue_item.item_id, - graph_execution_state=queue_item.session, - workflow=queue_item.workflow, - invoke_all=True, + # If profiling is enabled, start the profiler + if self._profiler is not None: + self._profiler.start(profile_id=self._queue_item.session_id) + + # Prepare invocations and take the first + self._invocation = self._queue_item.session.next() + + # Loop over invocations until the session is complete or canceled + while self._invocation is not None and not cancel_event.is_set(): + # get the source node id to provide to clients (the prepared node id is not as useful) + source_invocation_id = self._queue_item.session.prepared_source_mapping[self._invocation.id] + + # Send starting event + self._invoker.services.events.emit_invocation_started( + queue_batch_id=self._queue_item.batch_id, + queue_item_id=self._queue_item.item_id, + queue_id=self._queue_item.queue_id, + graph_execution_state_id=self._queue_item.session_id, + node=self._invocation.model_dump(), + source_node_id=source_invocation_id, ) - queue_item = None - if queue_item is None: - self.__invoker.services.logger.debug("Waiting for next polling interval or event") - poll_now_event.wait(POLLING_INTERVAL) + # Innermost processor try block; any unhandled exception is an invocation error & will fail the graph + try: + with 
self._invoker.services.performance_statistics.collect_stats(
+                            self._invocation, self._queue_item.session.id
+                        ):
+                            # Build invocation context (the node-facing API)
+                            data = InvocationContextData(
+                                invocation=self._invocation,
+                                source_invocation_id=source_invocation_id,
+                                queue_item=self._queue_item,
+                            )
+                            context = build_invocation_context(
+                                data=data,
+                                services=self._invoker.services,
+                                cancel_event=self._cancel_event,
+                            )
+
+                            # Invoke the node
+                            outputs = self._invocation.invoke_internal(
+                                context=context, services=self._invoker.services
+                            )
+
+                            # Save outputs and history
+                            self._queue_item.session.complete(self._invocation.id, outputs)
+
+                            # Send complete event
+                            self._invoker.services.events.emit_invocation_complete(
+                                queue_batch_id=self._queue_item.batch_id,
+                                queue_item_id=self._queue_item.item_id,
+                                queue_id=self._queue_item.queue_id,
+                                graph_execution_state_id=self._queue_item.session.id,
+                                node=self._invocation.model_dump(),
+                                source_node_id=source_invocation_id,
+                                result=outputs.model_dump(),
+                            )
+
+                        except KeyboardInterrupt:
+                            # TODO(MM2): Create an event for this
+                            pass
+
+                        except CanceledException:
+                            # When the user cancels the graph, we first set the cancel event. The event is checked
+                            # between invocations, in this loop. Some invocations are long-running, and we need to
+                            # be able to cancel them mid-execution.
+                            #
+                            # For example, denoising is a long-running invocation with many steps. A step callback
+                            # is executed after each step. This step callback checks if the canceled event is set,
+                            # then raises a CanceledException to stop execution immediately.
+                            #
+                            # When we get a CanceledException, we don't need to do anything - just pass and let the
+                            # loop go to its next iteration, and the cancel event will be handled correctly.
+                            pass
+
+                        except Exception as e:
+                            error = traceback.format_exc()
+
+                            # Save error
+                            self._queue_item.session.set_node_error(self._invocation.id, error)
+                            self._invoker.services.logger.error(
+                                f"Error while invoking session {self._queue_item.session_id}, invocation {self._invocation.id} ({self._invocation.get_type()}):\n{e}"
+                            )
+
+                            # Send error event
+                            self._invoker.services.events.emit_invocation_error(
+                                queue_batch_id=self._queue_item.batch_id,
+                                queue_item_id=self._queue_item.item_id,
+                                queue_id=self._queue_item.queue_id,
+                                graph_execution_state_id=self._queue_item.session.id,
+                                node=self._invocation.model_dump(),
+                                source_node_id=source_invocation_id,
+                                error_type=e.__class__.__name__,
+                                error=error,
+                            )
+                            pass
+
+                        # The session is complete if all invocations are complete or there was an error
+                        if self._queue_item.session.is_complete() or cancel_event.is_set():
+                            # Send complete event
+                            self._invoker.services.events.emit_graph_execution_complete(
+                                queue_batch_id=self._queue_item.batch_id,
+                                queue_item_id=self._queue_item.item_id,
+                                queue_id=self._queue_item.queue_id,
+                                graph_execution_state_id=self._queue_item.session.id,
+                            )
+                            # If we are profiling, stop the profiler and dump the profile & stats
+                            if self._profiler:
+                                profile_path = self._profiler.stop()
+                                stats_path = profile_path.with_suffix(".json")
+                                self._invoker.services.performance_statistics.dump_stats(
+                                    graph_execution_state_id=self._queue_item.session.id, output_path=stats_path
+                                )
+                            # We'll get a GESStatsNotFoundError if we try to log stats for an untracked graph, but in the processor
+                            # we don't care about that - suppress the error.
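# A distilled sketch of the step-callback pattern described in the comment
# above: a long-running invocation checks the shared cancel event between
# steps and raises CanceledException to abort mid-execution. The callback
# signature here is illustrative, not the project's exact callback type.

import threading


def make_step_callback(cancel_event: threading.Event):
    def on_step(step: int, total_steps: int) -> None:
        # Runs after each denoising step; aborts promptly once the user cancels.
        if cancel_event.is_set():
            raise CanceledException()

    return on_step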
+ with suppress(GESStatsNotFoundError): + self._invoker.services.performance_statistics.log_stats(self._queue_item.session.id) + self._invoker.services.performance_statistics.reset_stats() + + # Set the invocation to None to prepare for the next session + self._invocation = None + else: + # Prepare the next invocation + self._invocation = self._queue_item.session.next() + + # The session is complete, immediately poll for next session + self._queue_item = None + poll_now_event.set() + else: + # The queue was empty, wait for next polling interval or event to try again + self._invoker.services.logger.debug("Waiting for next polling interval or event") + poll_now_event.wait(self._polling_interval) continue - except Exception as e: - self.__invoker.services.logger.error(f"Error in session processor: {e}") - if queue_item is not None: - self.__invoker.services.session_queue.cancel_queue_item( - queue_item.item_id, error=traceback.format_exc() + except Exception: + # Non-fatal error in processor + self._invoker.services.logger.error( + f"Non-fatal error in session processor:\n{traceback.format_exc()}" + ) + # Cancel the queue item + if self._queue_item is not None: + self._invoker.services.session_queue.cancel_queue_item( + self._queue_item.item_id, error=traceback.format_exc() ) - poll_now_event.wait(POLLING_INTERVAL) + # Reset the invocation to None to prepare for the next session + self._invocation = None + # Immediately poll for next queue item + poll_now_event.wait(self._polling_interval) continue - except Exception as e: - self.__invoker.services.logger.error(f"Fatal Error in session processor: {e}") + except Exception: + # Fatal error in processor, log and pass - we're done here + self._invoker.services.logger.error(f"Fatal Error in session processor:\n{traceback.format_exc()}") pass finally: stop_event.clear() poll_now_event.clear() - self.__queue_item = None - self.__threadLimit.release() + self._queue_item = None + self._thread_semaphore.release() diff --git a/invokeai/app/services/session_queue/session_queue_sqlite.py b/invokeai/app/services/session_queue/session_queue_sqlite.py index 64642690e9..7af9f0e08c 100644 --- a/invokeai/app/services/session_queue/session_queue_sqlite.py +++ b/invokeai/app/services/session_queue/session_queue_sqlite.py @@ -60,7 +60,7 @@ class SqliteSessionQueue(SessionQueueBase): # This was a match statement, but match is not supported on python 3.9 if event_name == "graph_execution_state_complete": await self._handle_complete_event(event) - elif event_name in ["invocation_error", "session_retrieval_error", "invocation_retrieval_error"]: + elif event_name == "invocation_error": await self._handle_error_event(event) elif event_name == "session_canceled": await self._handle_cancel_event(event) @@ -429,7 +429,6 @@ class SqliteSessionQueue(SessionQueueBase): if queue_item.status not in ["canceled", "failed", "completed"]: status = "failed" if error is not None else "canceled" queue_item = self._set_queue_item_status(item_id=item_id, status=status, error=error) # type: ignore [arg-type] # mypy seems to not narrow the Literals here - self.__invoker.services.queue.cancel(queue_item.session_id) self.__invoker.services.events.emit_session_canceled( queue_item_id=queue_item.item_id, queue_id=queue_item.queue_id, @@ -471,7 +470,6 @@ class SqliteSessionQueue(SessionQueueBase): ) self.__conn.commit() if current_queue_item is not None and current_queue_item.batch_id in batch_ids: - self.__invoker.services.queue.cancel(current_queue_item.session_id) 
self.__invoker.services.events.emit_session_canceled( queue_item_id=current_queue_item.item_id, queue_id=current_queue_item.queue_id, @@ -523,7 +521,6 @@ class SqliteSessionQueue(SessionQueueBase): ) self.__conn.commit() if current_queue_item is not None and current_queue_item.queue_id == queue_id: - self.__invoker.services.queue.cancel(current_queue_item.session_id) self.__invoker.services.events.emit_session_canceled( queue_item_id=current_queue_item.item_id, queue_id=current_queue_item.queue_id, diff --git a/invokeai/app/services/shared/default_graphs.py b/invokeai/app/services/shared/default_graphs.py deleted file mode 100644 index 7e62c6d0a1..0000000000 --- a/invokeai/app/services/shared/default_graphs.py +++ /dev/null @@ -1,92 +0,0 @@ -from invokeai.app.services.item_storage.item_storage_base import ItemStorageABC - -from ...invocations.compel import CompelInvocation -from ...invocations.image import ImageNSFWBlurInvocation -from ...invocations.latent import DenoiseLatentsInvocation, LatentsToImageInvocation -from ...invocations.noise import NoiseInvocation -from ...invocations.primitives import IntegerInvocation -from .graph import Edge, EdgeConnection, ExposedNodeInput, ExposedNodeOutput, Graph, LibraryGraph - -default_text_to_image_graph_id = "539b2af5-2b4d-4d8c-8071-e54a3255fc74" - - -def create_text_to_image() -> LibraryGraph: - graph = Graph( - nodes={ - "width": IntegerInvocation(id="width", value=512), - "height": IntegerInvocation(id="height", value=512), - "seed": IntegerInvocation(id="seed", value=-1), - "3": NoiseInvocation(id="3"), - "4": CompelInvocation(id="4"), - "5": CompelInvocation(id="5"), - "6": DenoiseLatentsInvocation(id="6"), - "7": LatentsToImageInvocation(id="7"), - "8": ImageNSFWBlurInvocation(id="8"), - }, - edges=[ - Edge( - source=EdgeConnection(node_id="width", field="value"), - destination=EdgeConnection(node_id="3", field="width"), - ), - Edge( - source=EdgeConnection(node_id="height", field="value"), - destination=EdgeConnection(node_id="3", field="height"), - ), - Edge( - source=EdgeConnection(node_id="seed", field="value"), - destination=EdgeConnection(node_id="3", field="seed"), - ), - Edge( - source=EdgeConnection(node_id="3", field="noise"), - destination=EdgeConnection(node_id="6", field="noise"), - ), - Edge( - source=EdgeConnection(node_id="6", field="latents"), - destination=EdgeConnection(node_id="7", field="latents"), - ), - Edge( - source=EdgeConnection(node_id="4", field="conditioning"), - destination=EdgeConnection(node_id="6", field="positive_conditioning"), - ), - Edge( - source=EdgeConnection(node_id="5", field="conditioning"), - destination=EdgeConnection(node_id="6", field="negative_conditioning"), - ), - Edge( - source=EdgeConnection(node_id="7", field="image"), - destination=EdgeConnection(node_id="8", field="image"), - ), - ], - ) - return LibraryGraph( - id=default_text_to_image_graph_id, - name="t2i", - description="Converts text to an image", - graph=graph, - exposed_inputs=[ - ExposedNodeInput(node_path="4", field="prompt", alias="positive_prompt"), - ExposedNodeInput(node_path="5", field="prompt", alias="negative_prompt"), - ExposedNodeInput(node_path="width", field="value", alias="width"), - ExposedNodeInput(node_path="height", field="value", alias="height"), - ExposedNodeInput(node_path="seed", field="value", alias="seed"), - ], - exposed_outputs=[ExposedNodeOutput(node_path="8", field="image", alias="image")], - ) - - -def create_system_graphs(graph_library: ItemStorageABC[LibraryGraph]) -> list[LibraryGraph]: - 
"""Creates the default system graphs, or adds new versions if the old ones don't match""" - - # TODO: Uncomment this when we are ready to fix this up to prevent breaking changes - graphs: list[LibraryGraph] = [] - - text_to_image = graph_library.get(default_text_to_image_graph_id) - - # TODO: Check if the graph is the same as the default one, and if not, update it - # if text_to_image is None: - text_to_image = create_text_to_image() - graph_library.set(text_to_image) - - graphs.append(text_to_image) - - return graphs diff --git a/invokeai/app/services/shared/graph.py b/invokeai/app/services/shared/graph.py index 80f56b49d3..e3941d9ca3 100644 --- a/invokeai/app/services/shared/graph.py +++ b/invokeai/app/services/shared/graph.py @@ -5,22 +5,25 @@ import itertools from typing import Annotated, Any, Optional, TypeVar, Union, get_args, get_origin, get_type_hints import networkx as nx -from pydantic import BaseModel, ConfigDict, field_validator, model_validator +from pydantic import ( + BaseModel, + GetJsonSchemaHandler, + field_validator, +) from pydantic.fields import Field +from pydantic.json_schema import JsonSchemaValue +from pydantic_core import CoreSchema # Importing * is bad karma but needed here for node detection from invokeai.app.invocations import * # noqa: F401 F403 from invokeai.app.invocations.baseinvocation import ( BaseInvocation, BaseInvocationOutput, - Input, - InputField, - InvocationContext, - OutputField, - UIType, invocation, invocation_output, ) +from invokeai.app.invocations.fields import Input, InputField, OutputField, UIType +from invokeai.app.services.shared.invocation_context import InvocationContext from invokeai.app.util.misc import uuid_string # in 3.10 this would be "from types import NoneType" @@ -179,10 +182,6 @@ class NodeIdMismatchError(ValueError): pass -class InvalidSubGraphError(ValueError): - pass - - class CyclicalGraphError(ValueError): pass @@ -191,25 +190,6 @@ class UnknownGraphValidationError(ValueError): pass -# TODO: Create and use an Empty output? -@invocation_output("graph_output") -class GraphInvocationOutput(BaseInvocationOutput): - pass - - -# TODO: Fill this out and move to invocations -@invocation("graph", version="1.0.0") -class GraphInvocation(BaseInvocation): - """Execute a graph""" - - # TODO: figure out how to create a default here - graph: "Graph" = InputField(description="The graph to run", default=None) - - def invoke(self, context: InvocationContext) -> GraphInvocationOutput: - """Invoke with provided services and return outputs.""" - return GraphInvocationOutput() - - @invocation_output("iterate_output") class IterateInvocationOutput(BaseInvocationOutput): """Used to connect iteration outputs. 
Will be expanded to a specific output.""" @@ -263,21 +243,73 @@ class CollectInvocation(BaseInvocation): return CollectInvocationOutput(collection=copy.copy(self.collection)) -InvocationsUnion: Any = BaseInvocation.get_invocations_union() -InvocationOutputsUnion: Any = BaseInvocationOutput.get_outputs_union() - - class Graph(BaseModel): id: str = Field(description="The id of this graph", default_factory=uuid_string) # TODO: use a list (and never use dict in a BaseModel) because pydantic/fastapi hates me - nodes: dict[str, Annotated[InvocationsUnion, Field(discriminator="type")]] = Field( - description="The nodes in this graph", default_factory=dict - ) + nodes: dict[str, BaseInvocation] = Field(description="The nodes in this graph", default_factory=dict) edges: list[Edge] = Field( description="The connections between nodes and their fields in this graph", default_factory=list, ) + @field_validator("nodes", mode="plain") + @classmethod + def validate_nodes(cls, v: dict[str, Any]): + """Validates the nodes in the graph by retrieving a union of all node types and validating each node.""" + + # Invocations register themselves as their python modules are executed. The union of all invocations is + # constructed at runtime. We use pydantic to validate `Graph.nodes` using that union. + # + # It's possible that when `graph.py` is executed, not all invocation-containing modules will have executed. If + # we construct the invocation union as `graph.py` is executed, we may miss some invocations. Those missing + # invocations will cause a graph to fail if they are used. + # + # We can get around this by validating the nodes in the graph using a "plain" validator, which overrides the + # pydantic validation entirely. This allows us to validate the nodes using the union of invocations at runtime. + # + # This same pattern is used in `GraphExecutionState`. + + nodes: dict[str, BaseInvocation] = {} + typeadapter = BaseInvocation.get_typeadapter() + for node_id, node in v.items(): + nodes[node_id] = typeadapter.validate_python(node) + return nodes + + @classmethod + def __get_pydantic_json_schema__(cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue: + # We use a "plain" validator to validate the nodes in the graph. Pydantic is unable to create a JSON Schema for + # fields that use "plain" validators, so we have to hack around this. Also, we need to add all invocations to + # the generated schema as options for the `nodes` field. + # + # The workaround is to create a new BaseModel that has the same fields as `Graph` but without the validator and + # with the invocation union as the type for the `nodes` field. Pydantic then generates the JSON Schema as + # expected. + # + # You might be tempted to do something like this: + # + # ```py + # cloned_model = create_model(cls.__name__, __base__=cls, nodes=...) + # delattr(cloned_model, "validate_nodes") + # cloned_model.model_rebuild(force=True) + # json_schema = handler(cloned_model.__pydantic_core_schema__) + # ``` + # + # Unfortunately, this does not work. Calling `handler` here results in infinite recursion as pydantic attempts + # to build the JSON Schema for the cloned model. Instead, we have to manually clone the model. + # + # This same pattern is used in `GraphExecutionState`. 
+ + class Graph(BaseModel): + id: Optional[str] = Field(default=None, description="The id of this graph") + nodes: dict[ + str, Annotated[Union[tuple(BaseInvocation._invocation_classes)], Field(discriminator="type")] + ] = Field(description="The nodes in this graph") + edges: list[Edge] = Field(description="The connections between nodes and their fields in this graph") + + json_schema = handler(Graph.__pydantic_core_schema__) + json_schema = handler.resolve_ref_schema(json_schema) + return json_schema + def add_node(self, node: BaseInvocation) -> None: """Adds a node to a graph @@ -289,41 +321,21 @@ class Graph(BaseModel): self.nodes[node.id] = node - def _get_graph_and_node(self, node_path: str) -> tuple["Graph", str]: - """Returns the graph and node id for a node path.""" - # Materialized graphs may have nodes at the top level - if node_path in self.nodes: - return (self, node_path) - - node_id = node_path if "." not in node_path else node_path[: node_path.index(".")] - if node_id not in self.nodes: - raise NodeNotFoundError(f"Node {node_path} not found in graph") - - node = self.nodes[node_id] - - if not isinstance(node, GraphInvocation): - # There's more node path left but this isn't a graph - failure - raise NodeNotFoundError("Node path terminated early at a non-graph node") - - return node.graph._get_graph_and_node(node_path[node_path.index(".") + 1 :]) - - def delete_node(self, node_path: str) -> None: + def delete_node(self, node_id: str) -> None: """Deletes a node from a graph""" try: - graph, node_id = self._get_graph_and_node(node_path) - # Delete edges for this node - input_edges = self._get_input_edges_and_graphs(node_path) - output_edges = self._get_output_edges_and_graphs(node_path) + input_edges = self._get_input_edges(node_id) + output_edges = self._get_output_edges(node_id) - for edge_graph, _, edge in input_edges: - edge_graph.delete_edge(edge) + for edge in input_edges: + self.delete_edge(edge) - for edge_graph, _, edge in output_edges: - edge_graph.delete_edge(edge) + for edge in output_edges: + self.delete_edge(edge) - del graph.nodes[node_id] + del self.nodes[node_id] except NodeNotFoundError: pass # Ignore, node doesn't exist (should this throw?)
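The runtime-union pattern above is easier to see in miniature. Below is a minimal, self-contained sketch of the same idea, where the hypothetical `Animal`/`Dog`/`Cat`/`Zoo` classes stand in for `BaseInvocation`, its subclasses, and `Graph`: subclasses register themselves as they are defined, and the discriminated-union `TypeAdapter` is built only when validation actually runs, so classes registered late are still included.

```py
from typing import Annotated, Any, ClassVar, Literal, Union

from pydantic import BaseModel, Field, TypeAdapter, field_validator


class Animal(BaseModel):
    # Stand-in for BaseInvocation._invocation_classes: filled in as subclasses are defined
    registry: ClassVar[list[type["Animal"]]] = []

    def __init_subclass__(cls, **kwargs: Any) -> None:
        super().__init_subclass__(**kwargs)
        Animal.registry.append(cls)

    @classmethod
    def get_typeadapter(cls) -> TypeAdapter[Any]:
        # Built on demand, so it sees every subclass registered so far
        union = Union[tuple(cls.registry)]  # type: ignore[valid-type]
        return TypeAdapter(Annotated[union, Field(discriminator="type")])


class Dog(Animal):
    type: Literal["dog"] = "dog"


class Cat(Animal):
    type: Literal["cat"] = "cat"


class Zoo(BaseModel):
    # The "plain" validator overrides pydantic's own validation of this field,
    # deferring resolution of the union until a Zoo is actually validated.
    animals: dict[str, Animal] = Field(default_factory=dict)

    @field_validator("animals", mode="plain")
    @classmethod
    def validate_animals(cls, v: dict[str, Any]) -> dict[str, Animal]:
        typeadapter = Animal.get_typeadapter()
        return {name: typeadapter.validate_python(data) for name, data in v.items()}


zoo = Zoo.model_validate({"animals": {"a": {"type": "dog"}, "b": {"type": "cat"}}})
assert isinstance(zoo.animals["a"], Dog) and isinstance(zoo.animals["b"], Cat)
```

The cost of the plain validator is exactly the one the comments above describe: pydantic no longer knows the field's real schema, which is why JSON Schema generation needs the manually cloned model in `__get_pydantic_json_schema__`.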
@@ -373,13 +385,6 @@ class Graph(BaseModel): if k != v.id: raise NodeIdMismatchError(f"Node ids must match, got {k} and {v.id}") - # Validate all subgraphs - for gn in (n for n in self.nodes.values() if isinstance(n, GraphInvocation)): - try: - gn.graph.validate_self() - except Exception as e: - raise InvalidSubGraphError(f"Subgraph {gn.id} is invalid") from e - # Validate that all edges match nodes and fields in the graph for edge in self.edges: source_node = self.nodes.get(edge.source.node_id, None) @@ -441,7 +446,6 @@ class Graph(BaseModel): except ( DuplicateNodeIdError, NodeIdMismatchError, - InvalidSubGraphError, NodeNotFoundError, NodeFieldNotFoundError, CyclicalGraphError, @@ -462,7 +466,7 @@ class Graph(BaseModel): def _validate_edge(self, edge: Edge): """Validates that a new edge doesn't create a cycle in the graph""" - # Validate that the nodes exist (edges may contain node paths, so we can't just check for nodes directly) + # Validate that the nodes exist try: from_node = self.get_node(edge.source.node_id) to_node = self.get_node(edge.destination.node_id) @@ -529,171 +533,90 @@ class Graph(BaseModel): f"Collector input type does not match collector output type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}" ) - def has_node(self, node_path: str) -> bool: + def has_node(self, node_id: str) -> bool: """Determines whether or not a node exists in the graph.""" try: - n = self.get_node(node_path) - if n is not None: - return True - else: - return False + _ = self.get_node(node_id) + return True except NodeNotFoundError: return False - def get_node(self, node_path: str) -> InvocationsUnion: - """Gets a node from the graph using a node path.""" - # Materialized graphs may have nodes at the top level - graph, node_id = self._get_graph_and_node(node_path) - return graph.nodes[node_id] + def get_node(self, node_id: str) -> BaseInvocation: + """Gets a node from the graph.""" + try: + return self.nodes[node_id] + except KeyError as e: + raise NodeNotFoundError(f"Node {node_id} not found in graph") from e - def _get_node_path(self, node_id: str, prefix: Optional[str] = None) -> str: - return node_id if prefix is None or prefix == "" else f"{prefix}.{node_id}" - - def update_node(self, node_path: str, new_node: BaseInvocation) -> None: + def update_node(self, node_id: str, new_node: BaseInvocation) -> None: """Updates a node in the graph.""" - graph, node_id = self._get_graph_and_node(node_path) - node = graph.nodes[node_id] + node = self.nodes[node_id] # Ensure the node type matches the new node if type(node) is not type(new_node): - raise TypeError(f"Node {node_path} is type {type(node)} but new node is type {type(new_node)}") + raise TypeError(f"Node {node_id} is type {type(node)} but new node is type {type(new_node)}") # Ensure the new id is either the same or is not in the graph - prefix = None if "." 
not in node_path else node_path[: node_path.rindex(".")] - new_path = self._get_node_path(new_node.id, prefix=prefix) - if new_node.id != node.id and self.has_node(new_path): - raise NodeAlreadyInGraphError("Node with id {new_node.id} already exists in graph") + if new_node.id != node.id and self.has_node(new_node.id): + raise NodeAlreadyInGraphError(f"Node with id {new_node.id} already exists in graph") # Set the new node in the graph - graph.nodes[new_node.id] = new_node + self.nodes[new_node.id] = new_node if new_node.id != node.id: - input_edges = self._get_input_edges_and_graphs(node_path) - output_edges = self._get_output_edges_and_graphs(node_path) + input_edges = self._get_input_edges(node_id) + output_edges = self._get_output_edges(node_id) # Delete node and all edges - graph.delete_node(node_path) + self.delete_node(node_id) # Create new edges for each input and output - for graph, _, edge in input_edges: - # Remove the graph prefix from the node path - new_graph_node_path = ( - new_node.id - if "." not in edge.destination.node_id - else f'{edge.destination.node_id[edge.destination.node_id.rindex("."):]}.{new_node.id}' - ) - graph.add_edge( + for edge in input_edges: + self.add_edge( Edge( source=edge.source, - destination=EdgeConnection(node_id=new_graph_node_path, field=edge.destination.field), + destination=EdgeConnection(node_id=new_node.id, field=edge.destination.field), ) ) - for graph, _, edge in output_edges: - # Remove the graph prefix from the node path - new_graph_node_path = ( - new_node.id - if "." not in edge.source.node_id - else f'{edge.source.node_id[edge.source.node_id.rindex("."):]}.{new_node.id}' - ) - graph.add_edge( + for edge in output_edges: + self.add_edge( Edge( - source=EdgeConnection(node_id=new_graph_node_path, field=edge.source.field), + source=EdgeConnection(node_id=new_node.id, field=edge.source.field), destination=edge.destination, ) ) - def _get_input_edges(self, node_path: str, field: Optional[str] = None) -> list[Edge]: - """Gets all input edges for a node""" - edges = self._get_input_edges_and_graphs(node_path) + def _get_input_edges(self, node_id: str, field: Optional[str] = None) -> list[Edge]: + """Gets all input edges for a node. If field is provided, only edges to that field are returned.""" - # Filter to edges that match the field - filtered_edges = (e for e in edges if field is None or e[2].destination.field == field) + edges = [e for e in self.edges if e.destination.node_id == node_id] - # Create full node paths for each edge - return [ - Edge( - source=EdgeConnection( - node_id=self._get_node_path(e.source.node_id, prefix=prefix), - field=e.source.field, - ), - destination=EdgeConnection( - node_id=self._get_node_path(e.destination.node_id, prefix=prefix), - field=e.destination.field, - ), - ) - for _, prefix, e in filtered_edges - ] + if field is None: + return edges - def _get_input_edges_and_graphs( - self, node_path: str, prefix: Optional[str] = None - ) -> list[tuple["Graph", Union[str, None], Edge]]: - """Gets all input edges for a node along with the graph they are in and the graph's path""" - edges = [] + filtered_edges = [e for e in edges if e.destination.field == field] - # Return any input edges that appear in this graph - edges.extend([(self, prefix, e) for e in self.edges if e.destination.node_id == node_path]) + return filtered_edges - node_id = node_path if "." 
not in node_path else node_path[: node_path.index(".")] - node = self.nodes[node_id] + def _get_output_edges(self, node_id: str, field: Optional[str] = None) -> list[Edge]: + """Gets all output edges for a node. If field is provided, only edges from that field are returned.""" + edges = [e for e in self.edges if e.source.node_id == node_id] - if isinstance(node, GraphInvocation): - graph = node.graph - graph_path = node.id if prefix is None or prefix == "" else self._get_node_path(node.id, prefix=prefix) - graph_edges = graph._get_input_edges_and_graphs(node_path[(len(node_id) + 1) :], prefix=graph_path) - edges.extend(graph_edges) + if field is None: + return edges - return edges + filtered_edges = [e for e in edges if e.source.field == field] - def _get_output_edges(self, node_path: str, field: str) -> list[Edge]: - """Gets all output edges for a node""" - edges = self._get_output_edges_and_graphs(node_path) - - # Filter to edges that match the field - filtered_edges = (e for e in edges if e[2].source.field == field) - - # Create full node paths for each edge - return [ - Edge( - source=EdgeConnection( - node_id=self._get_node_path(e.source.node_id, prefix=prefix), - field=e.source.field, - ), - destination=EdgeConnection( - node_id=self._get_node_path(e.destination.node_id, prefix=prefix), - field=e.destination.field, - ), - ) - for _, prefix, e in filtered_edges - ] - - def _get_output_edges_and_graphs( - self, node_path: str, prefix: Optional[str] = None - ) -> list[tuple["Graph", Union[str, None], Edge]]: - """Gets all output edges for a node along with the graph they are in and the graph's path""" - edges = [] - - # Return any input edges that appear in this graph - edges.extend([(self, prefix, e) for e in self.edges if e.source.node_id == node_path]) - - node_id = node_path if "." 
not in node_path else node_path[: node_path.index(".")] - node = self.nodes[node_id] - - if isinstance(node, GraphInvocation): - graph = node.graph - graph_path = node.id if prefix is None or prefix == "" else self._get_node_path(node.id, prefix=prefix) - graph_edges = graph._get_output_edges_and_graphs(node_path[(len(node_id) + 1) :], prefix=graph_path) - edges.extend(graph_edges) - - return edges + return filtered_edges def _is_iterator_connection_valid( self, - node_path: str, + node_id: str, new_input: Optional[EdgeConnection] = None, new_output: Optional[EdgeConnection] = None, ) -> bool: - inputs = [e.source for e in self._get_input_edges(node_path, "collection")] - outputs = [e.destination for e in self._get_output_edges(node_path, "item")] + inputs = [e.source for e in self._get_input_edges(node_id, "collection")] + outputs = [e.destination for e in self._get_output_edges(node_id, "item")] if new_input is not None: inputs.append(new_input) @@ -721,12 +644,12 @@ class Graph(BaseModel): def _is_collector_connection_valid( self, - node_path: str, + node_id: str, new_input: Optional[EdgeConnection] = None, new_output: Optional[EdgeConnection] = None, ) -> bool: - inputs = [e.source for e in self._get_input_edges(node_path, "item")] - outputs = [e.destination for e in self._get_output_edges(node_path, "collection")] + inputs = [e.source for e in self._get_input_edges(node_id, "item")] + outputs = [e.destination for e in self._get_output_edges(node_id, "collection")] if new_input is not None: inputs.append(new_input) @@ -782,27 +705,17 @@ class Graph(BaseModel): g.add_edges_from({(e.source.node_id, e.destination.node_id) for e in self.edges}) return g - def nx_graph_flat(self, nx_graph: Optional[nx.DiGraph] = None, prefix: Optional[str] = None) -> nx.DiGraph: + def nx_graph_flat(self, nx_graph: Optional[nx.DiGraph] = None) -> nx.DiGraph: """Returns a flattened NetworkX DiGraph, including all subgraphs (but not with iterations expanded)""" g = nx_graph or nx.DiGraph() # Add all nodes from this graph except graph/iteration nodes - g.add_nodes_from( - [ - self._get_node_path(n.id, prefix) - for n in self.nodes.values() - if not isinstance(n, GraphInvocation) and not isinstance(n, IterateInvocation) - ] - ) - - # Expand graph nodes - for sgn in (gn for gn in self.nodes.values() if isinstance(gn, GraphInvocation)): - g = sgn.graph.nx_graph_flat(g, self._get_node_path(sgn.id, prefix)) + g.add_nodes_from([n.id for n in self.nodes.values() if not isinstance(n, IterateInvocation)]) # TODO: figure out if iteration nodes need to be expanded unique_edges = {(e.source.node_id, e.destination.node_id) for e in self.edges} - g.add_edges_from([(self._get_node_path(e[0], prefix), self._get_node_path(e[1], prefix)) for e in unique_edges]) + g.add_edges_from([(e[0], e[1]) for e in unique_edges]) return g @@ -827,9 +740,7 @@ class GraphExecutionState(BaseModel): ) # The results of executed nodes - results: dict[str, Annotated[InvocationOutputsUnion, Field(discriminator="type")]] = Field( - description="The results of node executions", default_factory=dict - ) + results: dict[str, BaseInvocationOutput] = Field(description="The results of node executions", default_factory=dict) # Errors raised when executing nodes errors: dict[str, str] = Field(description="Errors raised when executing nodes", default_factory=dict) @@ -846,27 +757,51 @@ class GraphExecutionState(BaseModel): default_factory=dict, ) + @field_validator("results", mode="plain") + @classmethod + def validate_results(cls, v: dict[str, 
BaseInvocationOutput]): + """Validates the results in the GES by retrieving a union of all output types and validating each result.""" + + # See the comment in `Graph.validate_nodes` for an explanation of this logic. + results: dict[str, BaseInvocationOutput] = {} + typeadapter = BaseInvocationOutput.get_typeadapter() + for result_id, result in v.items(): + results[result_id] = typeadapter.validate_python(result) + return results + @field_validator("graph") def graph_is_valid(cls, v: Graph): """Validates that the graph is valid""" v.validate_self() return v - model_config = ConfigDict( - json_schema_extra={ - "required": [ - "id", - "graph", - "execution_graph", - "executed", - "executed_history", - "results", - "errors", - "prepared_source_mapping", - "source_prepared_mapping", - ] - } - ) + @classmethod + def __get_pydantic_json_schema__(cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue: + # See the comment in `Graph.__get_pydantic_json_schema__` for an explanation of this logic. + class GraphExecutionState(BaseModel): + """Tracks the state of a graph execution""" + + id: str = Field(description="The id of the execution state") + graph: Graph = Field(description="The graph being executed") + execution_graph: Graph = Field(description="The expanded graph of activated and executed nodes") + executed: set[str] = Field(description="The set of node ids that have been executed") + executed_history: list[str] = Field( + description="The list of node ids that have been executed, in order of execution" + ) + results: dict[ + str, Annotated[Union[tuple(BaseInvocationOutput._output_classes)], Field(discriminator="type")] + ] = Field(description="The results of node executions") + errors: dict[str, str] = Field(description="Errors raised when executing nodes") + prepared_source_mapping: dict[str, str] = Field( + description="The map of prepared nodes to original graph nodes" + ) + source_prepared_mapping: dict[str, set[str]] = Field( + description="The map of original graph nodes to prepared nodes" + ) + + json_schema = handler(GraphExecutionState.__pydantic_core_schema__) + json_schema = handler.resolve_ref_schema(json_schema) + return json_schema def next(self) -> Optional[BaseInvocation]: """Gets the next node ready to execute.""" @@ -891,7 +826,7 @@ class GraphExecutionState(BaseModel): # If next is still none, there's no next node, return None return next_node - def complete(self, node_id: str, output: InvocationOutputsUnion): + def complete(self, node_id: str, output: BaseInvocationOutput) -> None: """Marks a node as complete""" if node_id not in self.execution_graph.nodes: @@ -922,17 +857,17 @@ class GraphExecutionState(BaseModel): """Returns true if the graph has any errors""" return len(self.errors) > 0 - def _create_execution_node(self, node_path: str, iteration_node_map: list[tuple[str, str]]) -> list[str]: + def _create_execution_node(self, node_id: str, iteration_node_map: list[tuple[str, str]]) -> list[str]: """Prepares an iteration node and connects all edges, returning the new node id""" - node = self.graph.get_node(node_path) + node = self.graph.get_node(node_id) self_iteration_count = -1 # If this is an iterator node, we must create a copy for each iteration if isinstance(node, IterateInvocation): # Get input collection edge (should error if there are no inputs) - input_collection_edge = next(iter(self.graph._get_input_edges(node_path, "collection"))) + input_collection_edge = next(iter(self.graph._get_input_edges(node_id, "collection"))) 
input_collection_prepared_node_id = next( n[1] for n in iteration_node_map if n[0] == input_collection_edge.source.node_id ) @@ -946,7 +881,7 @@ class GraphExecutionState(BaseModel): return new_nodes # Get all input edges - input_edges = self.graph._get_input_edges(node_path) + input_edges = self.graph._get_input_edges(node_id) # Create new edges for this iteration # For collect nodes, this may contain multiple inputs to the same field @@ -973,10 +908,10 @@ class GraphExecutionState(BaseModel): # Add to execution graph self.execution_graph.add_node(new_node) - self.prepared_source_mapping[new_node.id] = node_path - if node_path not in self.source_prepared_mapping: - self.source_prepared_mapping[node_path] = set() - self.source_prepared_mapping[node_path].add(new_node.id) + self.prepared_source_mapping[new_node.id] = node_id + if node_id not in self.source_prepared_mapping: + self.source_prepared_mapping[node_id] = set() + self.source_prepared_mapping[node_id].add(new_node.id) # Add new edges to execution graph for edge in new_edges: @@ -1080,13 +1015,13 @@ class GraphExecutionState(BaseModel): def _get_iteration_node( self, - source_node_path: str, + source_node_id: str, graph: nx.DiGraph, execution_graph: nx.DiGraph, prepared_iterator_nodes: list[str], ) -> Optional[str]: """Gets the prepared version of the specified source node that matches every iteration specified""" - prepared_nodes = self.source_prepared_mapping[source_node_path] + prepared_nodes = self.source_prepared_mapping[source_node_id] if len(prepared_nodes) == 1: return next(iter(prepared_nodes)) @@ -1097,7 +1032,7 @@ class GraphExecutionState(BaseModel): # Filter to only iterator nodes that are a parent of the specified node, in tuple format (prepared, source) iterator_source_node_mapping = [(n, self.prepared_source_mapping[n]) for n in prepared_iterator_nodes] - parent_iterators = [itn for itn in iterator_source_node_mapping if nx.has_path(graph, itn[1], source_node_path)] + parent_iterators = [itn for itn in iterator_source_node_mapping if nx.has_path(graph, itn[1], source_node_id)] return next( (n for n in prepared_nodes if all(nx.has_path(execution_graph, pit[0], n) for pit in parent_iterators)), @@ -1166,19 +1101,19 @@ class GraphExecutionState(BaseModel): def add_node(self, node: BaseInvocation) -> None: self.graph.add_node(node) - def update_node(self, node_path: str, new_node: BaseInvocation) -> None: - if not self._is_node_updatable(node_path): + def update_node(self, node_id: str, new_node: BaseInvocation) -> None: + if not self._is_node_updatable(node_id): raise NodeAlreadyExecutedError( - f"Node {node_path} has already been prepared or executed and cannot be updated" + f"Node {node_id} has already been prepared or executed and cannot be updated" ) - self.graph.update_node(node_path, new_node) + self.graph.update_node(node_id, new_node) - def delete_node(self, node_path: str) -> None: - if not self._is_node_updatable(node_path): + def delete_node(self, node_id: str) -> None: + if not self._is_node_updatable(node_id): raise NodeAlreadyExecutedError( - f"Node {node_path} has already been prepared or executed and cannot be deleted" + f"Node {node_id} has already been prepared or executed and cannot be deleted" ) - self.graph.delete_node(node_path) + self.graph.delete_node(node_id) def add_edge(self, edge: Edge) -> None: if not self._is_node_updatable(edge.destination.node_id): @@ -1193,63 +1128,3 @@ class GraphExecutionState(BaseModel): f"Destination node {edge.destination.node_id} has already been prepared or 
executed and cannot have a source edge deleted" ) self.graph.delete_edge(edge) - - -class ExposedNodeInput(BaseModel): - node_path: str = Field(description="The node path to the node with the input") - field: str = Field(description="The field name of the input") - alias: str = Field(description="The alias of the input") - - -class ExposedNodeOutput(BaseModel): - node_path: str = Field(description="The node path to the node with the output") - field: str = Field(description="The field name of the output") - alias: str = Field(description="The alias of the output") - - -class LibraryGraph(BaseModel): - id: str = Field(description="The unique identifier for this library graph", default_factory=uuid_string) - graph: Graph = Field(description="The graph") - name: str = Field(description="The name of the graph") - description: str = Field(description="The description of the graph") - exposed_inputs: list[ExposedNodeInput] = Field(description="The inputs exposed by this graph", default_factory=list) - exposed_outputs: list[ExposedNodeOutput] = Field( - description="The outputs exposed by this graph", default_factory=list - ) - - @field_validator("exposed_inputs", "exposed_outputs") - def validate_exposed_aliases(cls, v: list[Union[ExposedNodeInput, ExposedNodeOutput]]): - if len(v) != len({i.alias for i in v}): - raise ValueError("Duplicate exposed alias") - return v - - @model_validator(mode="after") - def validate_exposed_nodes(cls, values): - graph = values.graph - - # Validate exposed inputs - for exposed_input in values.exposed_inputs: - if not graph.has_node(exposed_input.node_path): - raise ValueError(f"Exposed input node {exposed_input.node_path} does not exist") - node = graph.get_node(exposed_input.node_path) - if get_input_field(node, exposed_input.field) is None: - raise ValueError( - f"Exposed input field {exposed_input.field} does not exist on node {exposed_input.node_path}" - ) - - # Validate exposed outputs - for exposed_output in values.exposed_outputs: - if not graph.has_node(exposed_output.node_path): - raise ValueError(f"Exposed output node {exposed_output.node_path} does not exist") - node = graph.get_node(exposed_output.node_path) - if get_output_field(node, exposed_output.field) is None: - raise ValueError( - f"Exposed output field {exposed_output.field} does not exist on node {exposed_output.node_path}" - ) - - return values - - -GraphInvocation.model_rebuild(force=True) -Graph.model_rebuild(force=True) -GraphExecutionState.model_rebuild(force=True) diff --git a/invokeai/app/services/shared/invocation_context.py b/invokeai/app/services/shared/invocation_context.py new file mode 100644 index 0000000000..7d378e22e3 --- /dev/null +++ b/invokeai/app/services/shared/invocation_context.py @@ -0,0 +1,547 @@ +import threading +from dataclasses import dataclass +from pathlib import Path +from typing import TYPE_CHECKING, Optional + +from PIL.Image import Image +from torch import Tensor + +from invokeai.app.invocations.constants import IMAGE_MODES +from invokeai.app.invocations.fields import MetadataField, WithBoard, WithMetadata +from invokeai.app.services.boards.boards_common import BoardDTO +from invokeai.app.services.config.config_default import InvokeAIAppConfig +from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin +from invokeai.app.services.images.images_common import ImageDTO +from invokeai.app.services.invocation_services import InvocationServices +from invokeai.app.util.step_callback import stable_diffusion_step_callback +from 
invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelFormat, ModelType, SubModelType +from invokeai.backend.model_manager.load.load_base import LoadedModel +from invokeai.backend.model_manager.metadata.metadata_base import AnyModelRepoMetadata +from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState +from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData + +if TYPE_CHECKING: + from invokeai.app.invocations.baseinvocation import BaseInvocation + from invokeai.app.services.session_queue.session_queue_common import SessionQueueItem + +""" +The InvocationContext provides access to various services and data about the current invocation. + +We do not provide the invocation services directly, as their methods are both dangerous and +inconvenient to use. + +For example: +- The `images` service allows nodes to delete or unsafely modify existing images. +- The `configuration` service allows nodes to change the app's config at runtime. +- The `events` service allows nodes to emit arbitrary events. + +Wrapping these services provides a simpler and safer interface for nodes to use. + +When a node executes, a fresh `InvocationContext` is built for it, ensuring nodes cannot interfere +with each other. + +Many of the wrappers have the same signature as the methods they wrap. This allows us to write +user-facing docstrings and not need to go and update the internal services to match. + +Note: The docstrings are in weird places, but that's where they must be to get IDEs to see them. +""" + + +@dataclass +class InvocationContextData: + queue_item: "SessionQueueItem" + """The queue item that is being executed.""" + invocation: "BaseInvocation" + """The invocation that is being executed.""" + source_invocation_id: str + """The ID of the invocation from which the currently executing invocation was prepared.""" + + +class InvocationContextInterface: + def __init__(self, services: InvocationServices, data: InvocationContextData) -> None: + self._services = services + self._data = data + + +class BoardsInterface(InvocationContextInterface): + def create(self, board_name: str) -> BoardDTO: + """Creates a board. + + Args: + board_name: The name of the board to create. + + Returns: + The created board DTO. + """ + return self._services.boards.create(board_name) + + def get_dto(self, board_id: str) -> BoardDTO: + """Gets a board DTO. + + Args: + board_id: The ID of the board to get. + + Returns: + The board DTO. + """ + return self._services.boards.get_dto(board_id) + + def get_all(self) -> list[BoardDTO]: + """Gets all boards. + + Returns: + A list of all boards. + """ + return self._services.boards.get_all() + + def add_image_to_board(self, board_id: str, image_name: str) -> None: + """Adds an image to a board. + + Args: + board_id: The ID of the board to add the image to. + image_name: The name of the image to add to the board. + """ + return self._services.board_images.add_image_to_board(board_id, image_name) + + def get_all_image_names_for_board(self, board_id: str) -> list[str]: + """Gets all image names for a board. + + Args: + board_id: The ID of the board to get the image names for. + + Returns: + A list of all image names for the board. + """ + return self._services.board_images.get_all_board_image_names_for_board(board_id) + + +class LoggerInterface(InvocationContextInterface): + def debug(self, message: str) -> None: + """Logs a debug message. + + Args: + message: The message to log. 
+ """ + self._services.logger.debug(message) + + def info(self, message: str) -> None: + """Logs an info message. + + Args: + message: The message to log. + """ + self._services.logger.info(message) + + def warning(self, message: str) -> None: + """Logs a warning message. + + Args: + message: The message to log. + """ + self._services.logger.warning(message) + + def error(self, message: str) -> None: + """Logs an error message. + + Args: + message: The message to log. + """ + self._services.logger.error(message) + + +class ImagesInterface(InvocationContextInterface): + def save( + self, + image: Image, + board_id: Optional[str] = None, + image_category: ImageCategory = ImageCategory.GENERAL, + metadata: Optional[MetadataField] = None, + ) -> ImageDTO: + """Saves an image, returning its DTO. + + If the current queue item has a workflow or metadata, it is automatically saved with the image. + + Args: + image: The image to save, as a PIL image. + board_id: The board ID to add the image to, if it should be added. If the invocation \ + inherits from `WithBoard`, that board will be used automatically. **Use this only if \ + you want to override or provide a board manually!** + image_category: The category of the image. Only the GENERAL category is added \ + to the gallery. + metadata: The metadata to save with the image, if it should have any. If the \ + invocation inherits from `WithMetadata`, that metadata will be used automatically. \ + **Use this only if you want to override or provide metadata manually!** + + Returns: + The saved image DTO. + """ + + # If `metadata` is provided directly, use that. Else, use the metadata provided by `WithMetadata`, falling back to None. + metadata_ = None + if metadata: + metadata_ = metadata + elif isinstance(self._data.invocation, WithMetadata): + metadata_ = self._data.invocation.metadata + + # If `board_id` is provided directly, use that. Else, use the board provided by `WithBoard`, falling back to None. + board_id_ = None + if board_id: + board_id_ = board_id + elif isinstance(self._data.invocation, WithBoard) and self._data.invocation.board: + board_id_ = self._data.invocation.board.board_id + + return self._services.images.create( + image=image, + is_intermediate=self._data.invocation.is_intermediate, + image_category=image_category, + board_id=board_id_, + metadata=metadata_, + image_origin=ResourceOrigin.INTERNAL, + workflow=self._data.queue_item.workflow, + session_id=self._data.queue_item.session_id, + node_id=self._data.invocation.id, + ) + + def get_pil(self, image_name: str, mode: IMAGE_MODES | None = None) -> Image: + """Gets an image as a PIL Image object. + + Args: + image_name: The name of the image to get. + mode: The color mode to convert the image to. If None, the original mode is used. + + Returns: + The image as a PIL Image object. + """ + image = self._services.images.get_pil_image(image_name) + if mode and mode != image.mode: + try: + image = image.convert(mode) + except ValueError: + self._services.logger.warning( + f"Could not convert image from {image.mode} to {mode}. Using original mode instead." + ) + return image + + def get_metadata(self, image_name: str) -> Optional[MetadataField]: + """Gets an image's metadata, if it has any. + + Args: + image_name: The name of the image to get the metadata for. + + Returns: + The image's metadata, if it has any. + """ + return self._services.images.get_metadata(image_name) + + def get_dto(self, image_name: str) -> ImageDTO: + """Gets an image as an ImageDTO object.
+ + Args: + image_name: The name of the image to get. + + Returns: + The image as an ImageDTO object. + """ + return self._services.images.get_dto(image_name) + + +class TensorsInterface(InvocationContextInterface): + def save(self, tensor: Tensor) -> str: + """Saves a tensor, returning its name. + + Args: + tensor: The tensor to save. + + Returns: + The name of the saved tensor. + """ + + name = self._services.tensors.save(obj=tensor) + return name + + def load(self, name: str) -> Tensor: + """Loads a tensor by name. + + Args: + name: The name of the tensor to load. + + Returns: + The loaded tensor. + """ + return self._services.tensors.load(name) + + +class ConditioningInterface(InvocationContextInterface): + def save(self, conditioning_data: ConditioningFieldData) -> str: + """Saves a conditioning data object, returning its name. + + Args: + conditioning_data: The conditioning data to save. + + Returns: + The name of the saved conditioning data. + """ + + name = self._services.conditioning.save(obj=conditioning_data) + return name + + def load(self, name: str) -> ConditioningFieldData: + """Loads conditioning data by name. + + Args: + name: The name of the conditioning data to load. + + Returns: + The loaded conditioning data. + """ + + return self._services.conditioning.load(name) + + +class ModelsInterface(InvocationContextInterface): + def exists(self, key: str) -> bool: + """Checks if a model exists. + + Args: + key: The key of the model. + + Returns: + True if the model exists, False if not. + """ + return self._services.model_manager.store.exists(key) + + def load(self, key: str, submodel_type: Optional[SubModelType] = None) -> LoadedModel: + """Loads a model. + + Args: + key: The key of the model. + submodel_type: The submodel of the model to get. + + Returns: + An object representing the loaded model. + """ + + # The model manager emits events as it loads the model. It needs the context data to build + # the event payloads. + + return self._services.model_manager.load_model_by_key( + key=key, submodel_type=submodel_type, context_data=self._data + ) + + def load_by_attrs( + self, name: str, base: BaseModelType, type: ModelType, submodel_type: Optional[SubModelType] = None + ) -> LoadedModel: + """Loads a model by its attributes. + + Args: + name: Name of the model. + base: The model's base type, e.g. `BaseModelType.StableDiffusion1`, `BaseModelType.StableDiffusionXL`, etc. + type: Type of the model, e.g. `ModelType.Main`, `ModelType.Vae`, etc. + submodel_type: The type of submodel to load, e.g. `SubModelType.UNet`, `SubModelType.TextEncoder`, etc. Only main + models have submodels. + + Returns: + An object representing the loaded model. + """ + return self._services.model_manager.load_model_by_attr( + model_name=name, + base_model=base, + model_type=type, + submodel=submodel_type, + context_data=self._data, + ) + + def get_config(self, key: str) -> AnyModelConfig: + """Gets a model's config. + + Args: + key: The key of the model. + + Returns: + The model's config. + """ + return self._services.model_manager.store.get_model(key=key) + + def get_metadata(self, key: str) -> Optional[AnyModelRepoMetadata]: + """Gets a model's metadata, if it has any. + + Args: + key: The key of the model. + + Returns: + The model's metadata, if it has any. + """ + return self._services.model_manager.store.get_metadata(key=key) + + def search_by_path(self, path: Path) -> list[AnyModelConfig]: + """Searches for models by path. + + Args: + path: The path to search for.
+ + Returns: + A list of models that match the path. + """ + return self._services.model_manager.store.search_by_path(path) + + def search_by_attrs( + self, + name: Optional[str] = None, + base: Optional[BaseModelType] = None, + type: Optional[ModelType] = None, + format: Optional[ModelFormat] = None, + ) -> list[AnyModelConfig]: + """Searches for models by attributes. + + Args: + name: The name to search for (exact match). + base: The base to search for, e.g. `BaseModelType.StableDiffusion1`, `BaseModelType.StableDiffusionXL`, etc. + type: The type of model to search for, e.g. `ModelType.Main`, `ModelType.Vae`, etc. + format: The format of model to search for, e.g. `ModelFormat.Checkpoint`, `ModelFormat.Diffusers`, etc. + + Returns: + A list of models that match the attributes. + """ + + return self._services.model_manager.store.search_by_attr( + model_name=name, + base_model=base, + model_type=type, + model_format=format, + ) + + +class ConfigInterface(InvocationContextInterface): + def get(self) -> InvokeAIAppConfig: + """Gets the app's config. + + Returns: + The app's config. + """ + + return self._services.configuration.get_config() + + +class UtilInterface(InvocationContextInterface): + def __init__( + self, services: InvocationServices, data: InvocationContextData, cancel_event: threading.Event + ) -> None: + super().__init__(services, data) + self._cancel_event = cancel_event + + def is_canceled(self) -> bool: + """Checks if the current session has been canceled. + + Returns: + True if the current session has been canceled, False if not. + """ + return self._cancel_event.is_set() + + def sd_step_callback(self, intermediate_state: PipelineIntermediateState, base_model: BaseModelType) -> None: + """ + The step callback emits a progress event with the current step, the total number of + steps, a preview image, and some other internal metadata. + + This should be called after each denoising step. + + Args: + intermediate_state: The intermediate state of the diffusion pipeline. + base_model: The base model for the current denoising step. + """ + + stable_diffusion_step_callback( + context_data=self._data, + intermediate_state=intermediate_state, + base_model=base_model, + events=self._services.events, + is_canceled=self.is_canceled, + ) + + +class InvocationContext: + """Provides access to various services and data for the current invocation. + + Attributes: + images (ImagesInterface): Methods to save, get and update images and their metadata. + tensors (TensorsInterface): Methods to save and get tensors, including image, noise, masks, and masked images. + conditioning (ConditioningInterface): Methods to save and get conditioning data. + models (ModelsInterface): Methods to check if a model exists, get a model, and get a model's info. + logger (LoggerInterface): The app logger. + config (ConfigInterface): The app config. + util (UtilInterface): Utility methods, including a method to check if an invocation was canceled and step callbacks. + boards (BoardsInterface): Methods to interact with boards.
+ """ + + def __init__( + self, + images: ImagesInterface, + tensors: TensorsInterface, + conditioning: ConditioningInterface, + models: ModelsInterface, + logger: LoggerInterface, + config: ConfigInterface, + util: UtilInterface, + boards: BoardsInterface, + data: InvocationContextData, + services: InvocationServices, + ) -> None: + self.images = images + """Methods to save, get and update images and their metadata.""" + self.tensors = tensors + """Methods to save and get tensors, including image, noise, masks, and masked images.""" + self.conditioning = conditioning + """Methods to save and get conditioning data.""" + self.models = models + """Methods to check if a model exists, get a model, and get a model's info.""" + self.logger = logger + """The app logger.""" + self.config = config + """The app config.""" + self.util = util + """Utility methods, including a method to check if an invocation was canceled and step callbacks.""" + self.boards = boards + """Methods to interact with boards.""" + self._data = data + """An internal API providing access to data about the current queue item and invocation. You probably shouldn't use this. It may change without warning.""" + self._services = services + """An internal API providing access to all application services. You probably shouldn't use this. It may change without warning.""" + + +def build_invocation_context( + services: InvocationServices, + data: InvocationContextData, + cancel_event: threading.Event, +) -> InvocationContext: + """Builds the invocation context for a specific invocation execution. + + Args: + services: The invocation services to wrap. + data: The invocation context data. + + Returns: + The invocation context. + """ + + logger = LoggerInterface(services=services, data=data) + images = ImagesInterface(services=services, data=data) + tensors = TensorsInterface(services=services, data=data) + models = ModelsInterface(services=services, data=data) + config = ConfigInterface(services=services, data=data) + util = UtilInterface(services=services, data=data, cancel_event=cancel_event) + conditioning = ConditioningInterface(services=services, data=data) + boards = BoardsInterface(services=services, data=data) + + ctx = InvocationContext( + images=images, + logger=logger, + config=config, + tensors=tensors, + models=models, + data=data, + util=util, + conditioning=conditioning, + services=services, + boards=boards, + ) + + return ctx diff --git a/invokeai/app/services/shared/sqlite/sqlite_util.py b/invokeai/app/services/shared/sqlite/sqlite_util.py index 6079b3f08d..681886eacd 100644 --- a/invokeai/app/services/shared/sqlite/sqlite_util.py +++ b/invokeai/app/services/shared/sqlite/sqlite_util.py @@ -8,6 +8,7 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_2 import from invokeai.app.services.shared.sqlite_migrator.migrations.migration_3 import build_migration_3 from invokeai.app.services.shared.sqlite_migrator.migrations.migration_4 import build_migration_4 from invokeai.app.services.shared.sqlite_migrator.migrations.migration_5 import build_migration_5 +from invokeai.app.services.shared.sqlite_migrator.migrations.migration_6 import build_migration_6 from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator @@ -33,6 +34,7 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto migrator.register_migration(build_migration_3(app_config=config, logger=logger)) migrator.register_migration(build_migration_4()) 
migrator.register_migration(build_migration_5()) + migrator.register_migration(build_migration_6()) migrator.run_migrations() return db diff --git a/invokeai/app/services/shared/sqlite_migrator/migrations/migration_6.py b/invokeai/app/services/shared/sqlite_migrator/migrations/migration_6.py new file mode 100644 index 0000000000..1f9ac56518 --- /dev/null +++ b/invokeai/app/services/shared/sqlite_migrator/migrations/migration_6.py @@ -0,0 +1,62 @@ +import sqlite3 + +from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration + + +class Migration6Callback: + def __call__(self, cursor: sqlite3.Cursor) -> None: + self._recreate_model_triggers(cursor) + self._delete_ip_adapters(cursor) + + def _recreate_model_triggers(self, cursor: sqlite3.Cursor) -> None: + """ + Adds the timestamp trigger to the model_config table. + + This trigger was inadvertently dropped in earlier migration scripts. + """ + + cursor.execute( + """--sql + CREATE TRIGGER IF NOT EXISTS model_config_updated_at + AFTER UPDATE + ON model_config FOR EACH ROW + BEGIN + UPDATE model_config SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW') + WHERE id = old.id; + END; + """ + ) + + def _delete_ip_adapters(self, cursor: sqlite3.Cursor) -> None: + """ + Delete all the IP adapters. + + The model manager will automatically find and re-add them after the migration + is done. This allows the manager to add the correct image encoder to their + configuration records. + """ + + cursor.execute( + """--sql + DELETE FROM model_config + WHERE type='ip_adapter'; + """ + ) + + +def build_migration_6() -> Migration: + """ + Build the migration from database version 5 to 6. + + This migration does the following: + - Adds the model_config_updated_at trigger if it does not exist + - Delete all ip_adapter models so that the model prober can find and + update with the correct image processor model. + """ + migration_6 = Migration( + from_version=5, + to_version=6, + callback=Migration6Callback(), + ) + + return migration_6 diff --git a/invokeai/app/services/shared/sqlite_migrator/sqlite_migrator_common.py b/invokeai/app/services/shared/sqlite_migrator/sqlite_migrator_common.py index 47ed5da505..9b2444dae4 100644 --- a/invokeai/app/services/shared/sqlite_migrator/sqlite_migrator_common.py +++ b/invokeai/app/services/shared/sqlite_migrator/sqlite_migrator_common.py @@ -17,8 +17,7 @@ class MigrateCallback(Protocol): See :class:`Migration` for an example. """ - def __call__(self, cursor: sqlite3.Cursor) -> None: - ... + def __call__(self, cursor: sqlite3.Cursor) -> None: ... 
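To make the recreated trigger's behavior concrete, here is a minimal, self-contained sketch against an in-memory database. The pared-down `model_config` table is a hypothetical stand-in (the real table has more columns); the trigger SQL is the one from `Migration6Callback` above.

```py
import sqlite3

conn = sqlite3.connect(":memory:")
cursor = conn.cursor()

# Pared-down stand-in for the real model_config table
cursor.execute("CREATE TABLE model_config (id TEXT PRIMARY KEY, type TEXT, updated_at TEXT)")

# The trigger recreated by migration 6: any UPDATE re-stamps the row's updated_at
cursor.execute(
    """
    CREATE TRIGGER IF NOT EXISTS model_config_updated_at
    AFTER UPDATE
    ON model_config FOR EACH ROW
    BEGIN
        UPDATE model_config SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
        WHERE id = old.id;
    END;
    """
)

cursor.execute("INSERT INTO model_config (id, type) VALUES ('abc123', 'main')")
cursor.execute("UPDATE model_config SET type = 'vae' WHERE id = 'abc123'")
(updated_at,) = cursor.execute("SELECT updated_at FROM model_config WHERE id = 'abc123'").fetchone()
assert updated_at is not None  # stamped by the trigger
```

Note that the trigger's body is itself an UPDATE on the same table; this does not re-fire the trigger because SQLite's `recursive_triggers` pragma is off by default.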
class MigrationError(RuntimeError): diff --git a/invokeai/app/shared/fields.py b/invokeai/app/shared/fields.py deleted file mode 100644 index 3e841ffbf2..0000000000 --- a/invokeai/app/shared/fields.py +++ /dev/null @@ -1,67 +0,0 @@ -class FieldDescriptions: - denoising_start = "When to start denoising, expressed a percentage of total steps" - denoising_end = "When to stop denoising, expressed a percentage of total steps" - cfg_scale = "Classifier-Free Guidance scale" - cfg_rescale_multiplier = "Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR" - scheduler = "Scheduler to use during inference" - positive_cond = "Positive conditioning tensor" - negative_cond = "Negative conditioning tensor" - noise = "Noise tensor" - clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count" - unet = "UNet (scheduler, LoRAs)" - vae = "VAE" - cond = "Conditioning tensor" - controlnet_model = "ControlNet model to load" - vae_model = "VAE model to load" - lora_model = "LoRA model to load" - main_model = "Main model (UNet, VAE, CLIP) to load" - sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load" - sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load" - onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load" - lora_weight = "The weight at which the LoRA is applied to each model" - compel_prompt = "Prompt to be parsed by Compel to create a conditioning tensor" - raw_prompt = "Raw prompt text (no parsing)" - sdxl_aesthetic = "The aesthetic score to apply to the conditioning tensor" - skipped_layers = "Number of layers to skip in text encoder" - seed = "Seed for random number generation" - steps = "Number of steps to run" - width = "Width of output (px)" - height = "Height of output (px)" - control = "ControlNet(s) to apply" - ip_adapter = "IP-Adapter to apply" - t2i_adapter = "T2I-Adapter(s) to apply" - denoised_latents = "Denoised latents tensor" - latents = "Latents tensor" - strength = "Strength of denoising (proportional to steps)" - metadata = "Optional metadata to be saved with the image" - metadata_collection = "Collection of Metadata" - metadata_item_polymorphic = "A single metadata item or collection of metadata items" - metadata_item_label = "Label for this metadata item" - metadata_item_value = "The value for this metadata item (may be any type)" - workflow = "Optional workflow to be saved with the image" - interp_mode = "Interpolation mode" - torch_antialias = "Whether or not to apply antialiasing (bilinear or bicubic only)" - fp32 = "Whether or not to use full float32 precision" - precision = "Precision to use" - tiled = "Processing using overlapping tiles (reduce memory consumption)" - detect_res = "Pixel resolution for detection" - image_res = "Pixel resolution for output image" - safe_mode = "Whether or not to use safe mode" - scribble_mode = "Whether or not to use scribble mode" - scale_factor = "The factor by which to scale" - blend_alpha = ( - "Blending factor. 0.0 = use input A only, 1.0 = use input B only, 0.5 = 50% mix of input A and input B." - ) - num_1 = "The first number" - num_2 = "The second number" - mask = "The mask to use for the operation" - board = "The board to save the image to" - image = "The image to process" - tile_size = "Tile size" - inclusive_low = "The inclusive low value" - exclusive_high = "The exclusive high value" - decimal_places = "The number of decimal places to round to" - freeu_s1 = 'Scaling factor for stage 1 to attenuate the contributions of the skip features. 
This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.' - freeu_s2 = 'Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process.' - freeu_b1 = "Scaling factor for stage 1 to amplify the contributions of backbone features." - freeu_b2 = "Scaling factor for stage 2 to amplify the contributions of backbone features." diff --git a/invokeai/app/shared/models.py b/invokeai/app/shared/models.py index ed68cb287e..1a11b480cc 100644 --- a/invokeai/app/shared/models.py +++ b/invokeai/app/shared/models.py @@ -1,6 +1,6 @@ from pydantic import BaseModel, Field -from invokeai.app.shared.fields import FieldDescriptions +from invokeai.app.invocations.fields import FieldDescriptions class FreeUConfig(BaseModel): diff --git a/invokeai/app/util/misc.py b/invokeai/app/util/misc.py index 910b05d8dd..da431929db 100644 --- a/invokeai/app/util/misc.py +++ b/invokeai/app/util/misc.py @@ -5,7 +5,7 @@ import uuid import numpy as np -def get_timestamp(): +def get_timestamp() -> int: return int(datetime.datetime.now(datetime.timezone.utc).timestamp()) @@ -20,16 +20,16 @@ def get_datetime_from_iso_timestamp(iso_timestamp: str) -> datetime.datetime: SEED_MAX = np.iinfo(np.uint32).max -def get_random_seed(): +def get_random_seed() -> int: rng = np.random.default_rng(seed=None) return int(rng.integers(0, SEED_MAX)) -def uuid_string(): +def uuid_string() -> str: res = uuid.uuid4() return str(res) -def is_optional(value: typing.Any): +def is_optional(value: typing.Any) -> bool: """Checks if a value is typed as Optional. Note that Optional is sugar for Union[x, None].""" return typing.get_origin(value) is typing.Union and type(None) in typing.get_args(value) diff --git a/invokeai/app/util/step_callback.py b/invokeai/app/util/step_callback.py index f166206d52..8cb59f5b3a 100644 --- a/invokeai/app/util/step_callback.py +++ b/invokeai/app/util/step_callback.py @@ -1,12 +1,17 @@ +from typing import TYPE_CHECKING, Callable + import torch from PIL import Image -from invokeai.app.services.invocation_processor.invocation_processor_common import CanceledException, ProgressImage +from invokeai.app.services.session_processor.session_processor_common import CanceledException, ProgressImage +from invokeai.backend.model_manager.config import BaseModelType -from ...backend.model_management.models import BaseModelType from ...backend.stable_diffusion import PipelineIntermediateState from ...backend.util.util import image_to_dataURL -from ..invocations.baseinvocation import InvocationContext + +if TYPE_CHECKING: + from invokeai.app.services.events.events_base import EventServiceBase + from invokeai.app.services.shared.invocation_context import InvocationContextData def sample_to_lowres_estimated_image(samples, latent_rgb_factors, smooth_matrix=None): @@ -25,13 +30,13 @@ def sample_to_lowres_estimated_image(samples, latent_rgb_factors, smooth_matrix= def stable_diffusion_step_callback( - context: InvocationContext, + context_data: "InvocationContextData", intermediate_state: PipelineIntermediateState, - node: dict, - source_node_id: str, base_model: BaseModelType, -): - if context.services.queue.is_canceled(context.graph_execution_state_id): + events: "EventServiceBase", + is_canceled: Callable[[], bool], +) -> None: + if is_canceled(): raise CanceledException # Some schedulers report not only the noisy latents at the current timestep, @@ -108,13 +113,13 @@ def stable_diffusion_step_callback( dataURL = 
image_to_dataURL(image, image_format="JPEG") - context.services.events.emit_generator_progress( - queue_id=context.queue_id, - queue_item_id=context.queue_item_id, - queue_batch_id=context.queue_batch_id, - graph_execution_state_id=context.graph_execution_state_id, - node=node, - source_node_id=source_node_id, + events.emit_generator_progress( + queue_id=context_data.queue_item.queue_id, + queue_item_id=context_data.queue_item.item_id, + queue_batch_id=context_data.queue_item.batch_id, + graph_execution_state_id=context_data.queue_item.session_id, + node_id=context_data.invocation.id, + source_node_id=context_data.source_invocation_id, progress_image=ProgressImage(width=width, height=height, dataURL=dataURL), step=intermediate_state.step, order=intermediate_state.order, diff --git a/invokeai/app/util/ti_utils.py b/invokeai/app/util/ti_utils.py index a66a832b42..d204a40183 100644 --- a/invokeai/app/util/ti_utils.py +++ b/invokeai/app/util/ti_utils.py @@ -1,8 +1,47 @@ import re +from typing import List, Tuple + +import invokeai.backend.util.logging as logger +from invokeai.app.services.model_records import UnknownModelException +from invokeai.app.services.shared.invocation_context import InvocationContext +from invokeai.backend.model_manager.config import BaseModelType, ModelType +from invokeai.backend.textual_inversion import TextualInversionModelRaw -def extract_ti_triggers_from_prompt(prompt: str) -> list[str]: - ti_triggers = [] +def extract_ti_triggers_from_prompt(prompt: str) -> List[str]: + ti_triggers: List[str] = [] for trigger in re.findall(r"<[a-zA-Z0-9., _-]+>", prompt): - ti_triggers.append(trigger) + ti_triggers.append(str(trigger)) return ti_triggers + + +def generate_ti_list( + prompt: str, base: BaseModelType, context: InvocationContext +) -> List[Tuple[str, TextualInversionModelRaw]]: + ti_list: List[Tuple[str, TextualInversionModelRaw]] = [] + for trigger in extract_ti_triggers_from_prompt(prompt): + name_or_key = trigger[1:-1] + try: + loaded_model = context.models.load(key=name_or_key) + model = loaded_model.model + assert isinstance(model, TextualInversionModelRaw) + assert loaded_model.config.base == base + ti_list.append((name_or_key, model)) + except UnknownModelException: + try: + loaded_model = context.models.load_by_attrs( + name=name_or_key, base=base, type=ModelType.TextualInversion + ) + model = loaded_model.model + assert isinstance(model, TextualInversionModelRaw) + assert loaded_model.config.base == base + ti_list.append((name_or_key, model)) + except UnknownModelException: + pass + except ValueError: + logger.warning(f'trigger: "{trigger}" matched more than one similarly-named textual inversion model') + except AssertionError: + logger.warning(f'trigger: "{trigger}" is not a valid textual inversion model for this graph') + except Exception: + logger.warning(f'Failed to load TI model for trigger: "{trigger}"') + return ti_list diff --git a/invokeai/backend/__init__.py b/invokeai/backend/__init__.py index ae9a12edbe..9fe97ee525 100644 --- a/invokeai/backend/__init__.py +++ b/invokeai/backend/__init__.py @@ -1,5 +1,3 @@ """ Initialization file for invokeai.backend """ -from .model_management import BaseModelType, ModelCache, ModelInfo, ModelManager, ModelType, SubModelType # noqa: F401 -from .model_management.models import SilenceWarnings # noqa: F401 diff --git a/invokeai/backend/embeddings/__init__.py b/invokeai/backend/embeddings/__init__.py new file mode 100644 index 0000000000..46ead533c4 --- /dev/null +++ b/invokeai/backend/embeddings/__init__.py @@ -0,0 +1,4 @@
+"""Initialization file for invokeai.backend.embeddings modules.""" + +# from .model_patcher import ModelPatcher +# __all__ = ["ModelPatcher"] diff --git a/invokeai/backend/embeddings/embedding_base.py b/invokeai/backend/embeddings/embedding_base.py new file mode 100644 index 0000000000..5e752a29e1 --- /dev/null +++ b/invokeai/backend/embeddings/embedding_base.py @@ -0,0 +1,12 @@ +"""Base class for LoRA and Textual Inversion models. + +The EmbeddingRaw class is the base class of LoRAModelRaw and TextualInversionModelRaw, +and is used for type checking of calls to the model patcher. + +The use of "Raw" here is a historical artifact, and carried forward in +order to avoid confusion. +""" + + +class EmbeddingModelRaw: + """Base class for LoRA and Textual Inversion models.""" diff --git a/invokeai/backend/image_util/__init__.py b/invokeai/backend/image_util/__init__.py index 0be5a78a93..ea35a5f153 100644 --- a/invokeai/backend/image_util/__init__.py +++ b/invokeai/backend/image_util/__init__.py @@ -1,6 +1,7 @@ """ Initialization file for invokeai.backend.image_util methods. """ + from .patchmatch import PatchMatch # noqa: F401 from .pngwriter import PngWriter, PromptFormatter, retrieve_metadata, write_metadata # noqa: F401 from .seamless import configure_model_padding # noqa: F401 diff --git a/invokeai/backend/image_util/invisible_watermark.py b/invokeai/backend/image_util/invisible_watermark.py index 37b3ca918c..b21cc2e124 100644 --- a/invokeai/backend/image_util/invisible_watermark.py +++ b/invokeai/backend/image_util/invisible_watermark.py @@ -3,6 +3,7 @@ This module defines a singleton object, "invisible_watermark" that wraps the invisible watermark model. It respects the global "invisible_watermark" configuration variable, that allows the watermarking to be supressed. """ + import cv2 import numpy as np from imwatermark import WatermarkEncoder diff --git a/invokeai/backend/image_util/patchmatch.py b/invokeai/backend/image_util/patchmatch.py index 5514e3d33a..08f3f4ffff 100644 --- a/invokeai/backend/image_util/patchmatch.py +++ b/invokeai/backend/image_util/patchmatch.py @@ -4,6 +4,7 @@ wraps the actual patchmatch object. It respects the global "try_patchmatch" attribute, so that patchmatch loading can be suppressed or deferred """ + import numpy as np import invokeai.backend.util.logging as logger diff --git a/invokeai/backend/image_util/pngwriter.py b/invokeai/backend/image_util/pngwriter.py index c9c58264c2..f537b4681c 100644 --- a/invokeai/backend/image_util/pngwriter.py +++ b/invokeai/backend/image_util/pngwriter.py @@ -6,6 +6,7 @@ PngWriter -- Converts Images generated by T2I into PNGs, finds Exports function retrieve_metadata(path) """ + import json import os import re diff --git a/invokeai/backend/image_util/safety_checker.py b/invokeai/backend/image_util/safety_checker.py index b9649925e1..b92a73c24f 100644 --- a/invokeai/backend/image_util/safety_checker.py +++ b/invokeai/backend/image_util/safety_checker.py @@ -3,13 +3,14 @@ This module defines a singleton object, "safety_checker" that wraps the safety_checker model. It respects the global "nsfw_checker" configuration variable, that allows the checker to be supressed. 
""" + import numpy as np from PIL import Image import invokeai.backend.util.logging as logger from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.backend import SilenceWarnings from invokeai.backend.util.devices import choose_torch_device +from invokeai.backend.util.silence_warnings import SilenceWarnings config = InvokeAIAppConfig.get_config() diff --git a/invokeai/backend/install/check_root.py b/invokeai/backend/install/check_root.py index ee264016b4..2b2116993b 100644 --- a/invokeai/backend/install/check_root.py +++ b/invokeai/backend/install/check_root.py @@ -1,6 +1,7 @@ """ Check that the invokeai_root is correctly configured and exit if not. """ + import sys from invokeai.app.services.config import InvokeAIAppConfig @@ -8,7 +9,6 @@ from invokeai.app.services.config import InvokeAIAppConfig def check_invokeai_root(config: InvokeAIAppConfig): try: - assert config.model_conf_path.exists(), f"{config.model_conf_path} not found" assert config.db_path.parent.exists(), f"{config.db_path.parent} not found" assert config.models_path.exists(), f"{config.models_path} not found" if not config.ignore_missing_core_models: diff --git a/invokeai/backend/install/install_helper.py b/invokeai/backend/install/install_helper.py index e54be527d9..975088409c 100644 --- a/invokeai/backend/install/install_helper.py +++ b/invokeai/backend/install/install_helper.py @@ -1,14 +1,12 @@ """Utility (backend) functions used by model_install.py""" -import re + from logging import Logger from pathlib import Path from typing import Any, Dict, List, Optional import omegaconf -from huggingface_hub import HfFolder from pydantic import BaseModel, Field from pydantic.dataclasses import dataclass -from pydantic.networks import AnyHttpUrl from requests import HTTPError from tqdm import tqdm @@ -18,13 +16,10 @@ from invokeai.app.services.download import DownloadQueueService from invokeai.app.services.events.events_base import EventServiceBase from invokeai.app.services.image_files.image_files_disk import DiskImageFileStorage from invokeai.app.services.model_install import ( - HFModelSource, - LocalModelSource, ModelInstallService, ModelInstallServiceBase, - ModelSource, - URLModelSource, ) +from invokeai.app.services.model_metadata import ModelMetadataStoreSQL from invokeai.app.services.model_records import ModelRecordServiceBase, ModelRecordServiceSQL from invokeai.app.services.shared.sqlite.sqlite_util import init_db from invokeai.backend.model_manager import ( @@ -36,7 +31,7 @@ from invokeai.backend.model_manager.metadata import UnknownMetadataException from invokeai.backend.util.logging import InvokeAILogger # name of the starter models file -INITIAL_MODELS = "INITIAL_MODELS2.yaml" +INITIAL_MODELS = "INITIAL_MODELS.yaml" def initialize_record_store(app_config: InvokeAIAppConfig) -> ModelRecordServiceBase: @@ -44,7 +39,7 @@ def initialize_record_store(app_config: InvokeAIAppConfig) -> ModelRecordService logger = InvokeAILogger.get_logger(config=app_config) image_files = DiskImageFileStorage(f"{app_config.output_path}/images") db = init_db(config=app_config, logger=logger, image_files=image_files) - obj: ModelRecordServiceBase = ModelRecordServiceSQL(db) + obj: ModelRecordServiceBase = ModelRecordServiceSQL(db, ModelMetadataStoreSQL(db)) return obj @@ -53,12 +48,10 @@ def initialize_installer( ) -> ModelInstallServiceBase: """Return an initialized ModelInstallService object.""" record_store = initialize_record_store(app_config) - metadata_store = record_store.metadata_store download_queue = 
DownloadQueueService() installer = ModelInstallService( app_config=app_config, record_store=record_store, - metadata_store=metadata_store, download_queue=download_queue, event_bus=event_bus, ) @@ -98,11 +91,13 @@ class TqdmEventService(EventServiceBase): super().__init__() self._bars: Dict[str, tqdm] = {} self._last: Dict[str, int] = {} + self._logger = InvokeAILogger.get_logger(__name__) def dispatch(self, event_name: str, payload: Any) -> None: """Dispatch an event by appending it to self.events.""" + data = payload["data"] + source = data["source"] if payload["event"] == "model_install_downloading": - data = payload["data"] dest = data["local_path"] total_bytes = data["total_bytes"] bytes = data["bytes"] @@ -111,6 +106,12 @@ class TqdmEventService(EventServiceBase): self._last[dest] = 0 self._bars[dest].update(bytes - self._last[dest]) self._last[dest] = bytes + elif payload["event"] == "model_install_completed": + self._logger.info(f"{source}: installed successfully.") + elif payload["event"] == "model_install_error": + self._logger.warning(f"{source}: installation failed with error {data['error']}") + elif payload["event"] == "model_install_cancelled": + self._logger.warning(f"{source}: installation cancelled") class InstallHelper(object): @@ -218,29 +219,13 @@ class InstallHelper(object): additional_models.append(reverse_source[requirement]) model_list.extend(additional_models) - def _make_install_source(self, model_info: UnifiedModelInfo) -> ModelSource: - assert model_info.source - model_path_id_or_url = model_info.source.strip("\"' ") - model_path = Path(model_path_id_or_url) - - if model_path.exists(): # local file on disk - return LocalModelSource(path=model_path.absolute(), inplace=True) - if re.match(r"^[^/]+/[^/]+$", model_path_id_or_url): # hugging face repo_id - return HFModelSource( - repo_id=model_path_id_or_url, - access_token=HfFolder.get_token(), - subfolder=model_info.subfolder, - ) - if re.match(r"^(http|https):", model_path_id_or_url): - return URLModelSource(url=AnyHttpUrl(model_path_id_or_url)) - raise ValueError(f"Unsupported model source: {model_path_id_or_url}") - def add_or_delete(self, selections: InstallSelections) -> None: """Add or delete selected models.""" installer = self._installer self._add_required_models(selections.install_models) for model in selections.install_models: - source = self._make_install_source(model) + assert model.source + model_path_id_or_url = model.source.strip("\"' ") config = ( { "description": model.description, @@ -251,12 +236,12 @@ class InstallHelper(object): ) try: - installer.import_model( - source=source, + installer.heuristic_import( + source=model_path_id_or_url, config=config, ) except (UnknownMetadataException, InvalidModelConfigException, HTTPError, OSError) as e: - self._logger.warning(f"{source}: {e}") + self._logger.warning(f"{model.source}: {e}") for model_to_remove in selections.remove_models: parts = model_to_remove.split("/") @@ -270,12 +255,14 @@ class InstallHelper(object): model_name=model_name, ) if len(matches) > 1: - print(f"{model} is ambiguous. Please use model_type:model_name (e.g. main:my_model) to disambiguate.") + self._logger.error( + f"{model_to_remove} is ambiguous. Please use model_base/model_type/model_name (e.g. 
sd-1/main/my_model) to disambiguate." + ) elif not matches: - print(f"{model}: unknown model") + self._logger.error(f"{model_to_remove}: unknown model") else: for m in matches: - print(f"Deleting {m.type}:{m.name}") + self._logger.info(f"Deleting {m.type}:{m.name}") installer.delete(m.key) installer.wait_for_installs() diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index 3cb7db6c82..53cca64a1a 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -18,31 +18,30 @@ from argparse import Namespace from enum import Enum from pathlib import Path from shutil import get_terminal_size -from typing import Any, get_args, get_type_hints +from typing import Any, Optional, Set, Tuple, Type, get_args, get_type_hints from urllib import request import npyscreen -import omegaconf import psutil import torch import transformers -import yaml -from diffusers import AutoencoderKL +from diffusers import AutoencoderKL, ModelMixin from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from huggingface_hub import HfFolder from huggingface_hub import login as hf_hub_login -from omegaconf import OmegaConf -from pydantic import ValidationError +from omegaconf import DictConfig, OmegaConf +from pydantic.error_wrappers import ValidationError from tqdm import tqdm from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextConfig, CLIPTextModel, CLIPTokenizer import invokeai.configs as configs from invokeai.app.services.config import InvokeAIAppConfig +from invokeai.backend.install.install_helper import InstallHelper, InstallSelections from invokeai.backend.install.legacy_arg_parsing import legacy_parser -from invokeai.backend.install.model_install_backend import InstallSelections, ModelInstall, hf_download_from_pretrained -from invokeai.backend.model_management.model_probe import BaseModelType, ModelType +from invokeai.backend.model_manager import BaseModelType, ModelType +from invokeai.backend.util import choose_precision, choose_torch_device from invokeai.backend.util.logging import InvokeAILogger -from invokeai.frontend.install.model_install import addModelsForm, process_and_execute +from invokeai.frontend.install.model_install import addModelsForm # TO DO - Move all the frontend code into invokeai.frontend.install from invokeai.frontend.install.widgets import ( @@ -61,7 +60,7 @@ warnings.filterwarnings("ignore") transformers.logging.set_verbosity_error() -def get_literal_fields(field) -> list[Any]: +def get_literal_fields(field: str) -> Tuple[Any, ...]: return get_args(get_type_hints(InvokeAIAppConfig).get(field)) @@ -80,12 +79,13 @@ ATTENTION_SLICE_CHOICES = get_literal_fields("attention_slice_size") GENERATION_OPT_CHOICES = ["sequential_guidance", "force_tiled_decode", "lazy_offload"] GB = 1073741824 # GB in bytes HAS_CUDA = torch.cuda.is_available() -_, MAX_VRAM = torch.cuda.mem_get_info() if HAS_CUDA else (0, 0) - +_, MAX_VRAM = torch.cuda.mem_get_info() if HAS_CUDA else (0.0, 0.0) MAX_VRAM /= GB MAX_RAM = psutil.virtual_memory().total / GB +FORCE_FULL_PRECISION = False + INIT_FILE_PREAMBLE = """# InvokeAI initialization file # This is the InvokeAI initialization file, which contains command-line default values. # Feel free to edit. 
If anything goes wrong, you can re-initialize this file by deleting @@ -96,13 +96,15 @@ logger = InvokeAILogger.get_logger() class DummyWidgetValue(Enum): + """Dummy widget values.""" + zero = 0 true = True false = False # -------------------------------------------- -def postscript(errors: None): +def postscript(errors: Set[str]) -> None: if not any(errors): message = f""" ** INVOKEAI INSTALLATION SUCCESSFUL ** @@ -112,9 +114,6 @@ then run one of the following commands to start InvokeAI. Web UI: invokeai-web -Command-line client: - invokeai - If you installed using an installation script, run: {config.root_path}/invoke.{"bat" if sys.platform == "win32" else "sh"} @@ -143,7 +142,7 @@ def yes_or_no(prompt: str, default_yes=True): # --------------------------------------------- -def HfLogin(access_token) -> str: +def HfLogin(access_token) -> None: """ Helper for logging in to Huggingface The stdout capture is needed to hide the irrelevant "git credential helper" warning @@ -162,7 +161,7 @@ def HfLogin(access_token) -> str: # ------------------------------------- class ProgressBar: - def __init__(self, model_name="file"): + def __init__(self, model_name: str = "file"): self.pbar = None self.name = model_name @@ -179,6 +178,22 @@ class ProgressBar: self.pbar.update(block_size) +# --------------------------------------------- +def hf_download_from_pretrained(model_class: Type[ModelMixin], model_name: str, destination: Path, **kwargs: Any): + filter = lambda x: "fp16 is not a valid" not in x.getMessage() # noqa E731 + logger.addFilter(filter) + try: + model = model_class.from_pretrained( + model_name, + resume_download=True, + **kwargs, + ) + model.save_pretrained(destination, safe_serialization=True) + finally: + logger.removeFilter(filter) + return destination + + # --------------------------------------------- def download_with_progress_bar(model_url: str, model_dest: str, label: str = "the"): try: @@ -249,6 +264,7 @@ def download_conversion_models(): # --------------------------------------------- +# TO DO: use the download queue here. def download_realesrgan(): logger.info("Installing ESRGAN Upscaling models...") URLs = [ @@ -288,18 +304,19 @@ def download_lama(): # --------------------------------------------- -def download_support_models(): +def download_support_models() -> None: download_realesrgan() download_lama() download_conversion_models() # ------------------------------------- -def get_root(root: str = None) -> str: +def get_root(root: Optional[str] = None) -> str: if root: return root - elif os.environ.get("INVOKEAI_ROOT"): - return os.environ.get("INVOKEAI_ROOT") + elif root := os.environ.get("INVOKEAI_ROOT"): + assert root is not None + return root else: return str(config.root_path) @@ -390,7 +407,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle. begin_entry_at=3, max_height=2, relx=30, - max_width=56, + max_width=80, scroll_exit=True, ) self.add_widget_intelligent( @@ -455,6 +472,25 @@ Use cursor arrows to make a checkbox selection, and space to toggle. max_width=110, scroll_exit=True, ) + self.add_widget_intelligent( + npyscreen.TitleFixedText, + name="Model disk conversion cache size (GB). 
This is used to cache safetensors files that need to be converted to diffusers.", + begin_entry_at=0, + editable=False, + color="CONTROL", + scroll_exit=True, + ) + self.nextrely -= 1 + self.disk = self.add_widget_intelligent( + npyscreen.Slider, + value=clip(old_opts.convert_cache, range=(0, 100), step=0.5), + out_of=100, + lowest=0.0, + step=0.5, + relx=8, + scroll_exit=True, + ) + self.nextrely += 1 self.add_widget_intelligent( npyscreen.TitleFixedText, name="Model RAM cache size (GB). Make this at least large enough to hold a single full model (2GB for SD-1, 6GB for SDXL).", @@ -495,6 +531,14 @@ Use cursor arrows to make a checkbox selection, and space to toggle. ) else: self.vram = DummyWidgetValue.zero + + self.nextrely += 1 + self.add_widget_intelligent( + npyscreen.FixedText, + value="Location of the database used to store model path and configuration information:", + editable=False, + color="CONTROL", + ) self.nextrely += 1 self.outdir = self.add_widget_intelligent( FileBox, @@ -506,19 +550,21 @@ Use cursor arrows to make a checkbox selection, and space to toggle. labelColor="GOOD", begin_entry_at=40, max_height=3, + max_width=127, scroll_exit=True, ) self.autoimport_dirs = {} self.autoimport_dirs["autoimport_dir"] = self.add_widget_intelligent( FileBox, - name="Folder to recursively scan for new checkpoints, ControlNets, LoRAs and TI models", - value=str(config.root_path / config.autoimport_dir), + name="Optional folder to scan for new checkpoints, ControlNets, LoRAs and TI models", + value=str(config.root_path / config.autoimport_dir) if config.autoimport_dir else "", select_dir=True, must_exist=False, use_two_lines=False, labelColor="GOOD", begin_entry_at=32, max_height=3, + max_width=127, scroll_exit=True, ) self.nextrely += 1 @@ -555,6 +601,10 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENS self.attention_slice_label.hidden = not show self.attention_slice_size.hidden = not show + def show_hide_model_conf_override(self, value): + self.model_conf_override.hidden = value + self.model_conf_override.display() + def on_ok(self): options = self.marshall_arguments() if self.validate_field_values(options): @@ -584,18 +634,21 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENS else: return True - def marshall_arguments(self): + def marshall_arguments(self) -> Namespace: new_opts = Namespace() for attr in [ "ram", "vram", + "convert_cache", "outdir", ]: if hasattr(self, attr): setattr(new_opts, attr, getattr(self, attr).value) for attr in self.autoimport_dirs: + if not self.autoimport_dirs[attr].value: + continue directory = Path(self.autoimport_dirs[attr].value) if directory.is_relative_to(config.root_path): directory = directory.relative_to(config.root_path) @@ -610,18 +663,18 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENS generation_options = [GENERATION_OPT_CHOICES[x] for x in self.generation_options.value] for v in GENERATION_OPT_CHOICES: setattr(new_opts, v, v in generation_options) - return new_opts class EditOptApplication(npyscreen.NPSAppManaged): - def __init__(self, program_opts: Namespace, invokeai_opts: Namespace): + def __init__(self, program_opts: Namespace, invokeai_opts: InvokeAIAppConfig, install_helper: InstallHelper): super().__init__() self.program_opts = program_opts self.invokeai_opts = invokeai_opts self.user_cancelled = False self.autoload_pending = True - self.install_selections = default_user_selections(program_opts) + self.install_helper = install_helper 
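# A minimal, self-contained sketch of the logging-filter pattern used by
# hf_download_from_pretrained above: a callable filter drops one known-noisy
# message for the duration of a call, and the finally block guarantees the
# filter is removed again. Logger name and messages here are illustrative only.
import logging

log = logging.getLogger("demo")
noise_filter = lambda record: "fp16 is not a valid" not in record.getMessage()  # noqa: E731

log.addFilter(noise_filter)
try:
    log.warning("fp16 is not a valid torch_dtype")  # suppressed by the filter
    log.warning("unrelated warnings still get through")
finally:
    log.removeFilter(noise_filter)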
+ self.install_selections = default_user_selections(program_opts, install_helper) def onStart(self): npyscreen.setTheme(npyscreen.Themes.DefaultTheme) @@ -640,15 +693,6 @@ class EditOptApplication(npyscreen.NPSAppManaged): cycle_widgets=False, ) - def new_opts(self): - return self.options.marshall_arguments() - - -def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Namespace: - editApp = EditOptApplication(program_opts, invokeai_opts) - editApp.run() - return editApp.new_opts() - def default_ramcache() -> float: """Run a heuristic for the default RAM cache based on installed RAM.""" @@ -660,27 +704,19 @@ def default_ramcache() -> float: ) # 2.1 is just large enough for sd 1.5 ;-) -def default_startup_options(init_file: Path) -> Namespace: +def default_startup_options(init_file: Path) -> InvokeAIAppConfig: opts = InvokeAIAppConfig.get_config() - opts.ram = opts.ram or default_ramcache() + opts.ram = default_ramcache() + opts.precision = "float32" if FORCE_FULL_PRECISION else choose_precision(torch.device(choose_torch_device())) return opts -def default_user_selections(program_opts: Namespace) -> InstallSelections: - try: - installer = ModelInstall(config) - except omegaconf.errors.ConfigKeyError: - logger.warning("Your models.yaml file is corrupt or out of date. Reinitializing") - initialize_rootdir(config.root_path, True) - installer = ModelInstall(config) - - models = installer.all_models() +def default_user_selections(program_opts: Namespace, install_helper: InstallHelper) -> InstallSelections: + default_model = install_helper.default_model() + assert default_model is not None + default_models = [default_model] if program_opts.default_only else install_helper.recommended_models() return InstallSelections( - install_models=[models[installer.default_model()].path or models[installer.default_model()].repo_id] - if program_opts.default_only - else [models[x].path or models[x].repo_id for x in installer.recommended_models()] - if program_opts.yes_to_all - else [], + install_models=default_models if program_opts.yes_to_all else [], ) @@ -716,22 +752,12 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False): path.mkdir(parents=True, exist_ok=True) -def maybe_create_models_yaml(root: Path): - models_yaml = root / "configs" / "models.yaml" - if models_yaml.exists(): - if OmegaConf.load(models_yaml).get("__metadata__"): # up to date - return - else: - logger.info("Creating new models.yaml, original saved as models.yaml.orig") - models_yaml.rename(models_yaml.parent / "models.yaml.orig") - - with open(models_yaml, "w") as yaml_file: - yaml_file.write(yaml.dump({"__metadata__": {"version": "3.0.0"}})) - - # ------------------------------------- -def run_console_ui(program_opts: Namespace, initfile: Path = None) -> (Namespace, Namespace): - invokeai_opts = default_startup_options(initfile) +def run_console_ui( + program_opts: Namespace, initfile: Path, install_helper: InstallHelper +) -> Tuple[Optional[Namespace], Optional[InstallSelections]]: + first_time = not (config.root_path / "invokeai.yaml").exists() + invokeai_opts = default_startup_options(initfile) if first_time else config invokeai_opts.root = program_opts.root if not set_min_terminal_size(MIN_COLS, MIN_LINES): @@ -739,13 +765,7 @@ def run_console_ui(program_opts: Namespace, initfile: Path = None) -> (Namespace "Could not increase terminal size. Try running again with a larger window or smaller font size." 
) - # the install-models application spawns a subprocess to install - # models, and will crash unless this is set before running. - import torch - - torch.multiprocessing.set_start_method("spawn") - - editApp = EditOptApplication(program_opts, invokeai_opts) + editApp = EditOptApplication(program_opts, invokeai_opts, install_helper) editApp.run() if editApp.user_cancelled: return (None, None) @@ -754,7 +774,7 @@ def run_console_ui(program_opts: Namespace, initfile: Path = None) -> (Namespace # ------------------------------------- -def write_opts(opts: Namespace, init_file: Path): +def write_opts(opts: InvokeAIAppConfig, init_file: Path) -> None: """ Update the invokeai.yaml file with values from current settings. """ @@ -762,7 +782,7 @@ def write_opts(opts: Namespace, init_file: Path): new_config = InvokeAIAppConfig.get_config() new_config.root = config.root - for key, value in opts.__dict__.items(): + for key, value in vars(opts).items(): if hasattr(new_config, key): setattr(new_config, key, value) @@ -779,7 +799,7 @@ def default_output_dir() -> Path: # ------------------------------------- -def write_default_options(program_opts: Namespace, initfile: Path): +def write_default_options(program_opts: Namespace, initfile: Path) -> None: opt = default_startup_options(initfile) write_opts(opt, initfile) @@ -789,16 +809,11 @@ def write_default_options(program_opts: Namespace, initfile: Path): # the legacy Args object in order to parse # the old init file and write out the new # yaml format. -def migrate_init_file(legacy_format: Path): +def migrate_init_file(legacy_format: Path) -> None: old = legacy_parser.parse_args([f"@{str(legacy_format)}"]) new = InvokeAIAppConfig.get_config() - fields = [ - x - for x, y in InvokeAIAppConfig.model_fields.items() - if (y.json_schema_extra.get("category", None) if y.json_schema_extra else None) != "DEPRECATED" - ] - for attr in fields: + for attr in InvokeAIAppConfig.model_fields.keys(): if hasattr(old, attr): try: setattr(new, attr, getattr(old, attr)) @@ -819,7 +834,7 @@ def migrate_init_file(legacy_format: Path): # ------------------------------------- -def migrate_models(root: Path): +def migrate_models(root: Path) -> None: from invokeai.backend.install.migrate_to_3 import do_migrate do_migrate(root, root) @@ -838,7 +853,9 @@ def migrate_if_needed(opt: Namespace, root: Path) -> bool: ): logger.info("** Migrating invokeai.init to invokeai.yaml") migrate_init_file(old_init_file) - config.parse_args(argv=[], conf=OmegaConf.load(new_init_file)) + omegaconf = OmegaConf.load(new_init_file) + assert isinstance(omegaconf, DictConfig) + config.parse_args(argv=[], conf=omegaconf) if old_hub.exists(): migrate_models(config.root_path) @@ -850,6 +867,7 @@ def migrate_if_needed(opt: Namespace, root: Path) -> bool: # ------------------------------------- def main() -> None: + global FORCE_FULL_PRECISION # FIXME parser = argparse.ArgumentParser(description="InvokeAI model downloader") parser.add_argument( "--skip-sd-weights", @@ -901,7 +919,6 @@ def main() -> None: help="path to root of install directory", ) opt = parser.parse_args() - invoke_args = [] if opt.root: invoke_args.extend(["--root", opt.root]) @@ -911,6 +928,7 @@ def main() -> None: logger = InvokeAILogger().get_logger(config=config) errors = set() + FORCE_FULL_PRECISION = opt.full_precision # FIXME global try: # if we do a root migration/upgrade, then we are keeping previous @@ -921,14 +939,18 @@ def main() -> None: # run this unconditionally in case new directories need to be added 
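# migrate_init_file() above reads the legacy invokeai.init by handing argparse an
# "@<path>" argument via fromfile_prefix_chars. A self-contained sketch of that
# stdlib mechanism with a throwaway parser and temp file (the real code uses
# legacy_parser and the actual init file):
import argparse
import tempfile

parser = argparse.ArgumentParser(fromfile_prefix_chars="@")
parser.add_argument("--outdir")
with tempfile.NamedTemporaryFile("w", suffix=".init", delete=False) as f:
    f.write("--outdir\noutputs\n")  # by default, argparse reads one token per line
opts = parser.parse_args([f"@{f.name}"])
assert opts.outdir == "outputs"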
initialize_rootdir(config.root_path, opt.yes_to_all) - models_to_download = default_user_selections(opt) + # this will initialize and populate the models tables if not present + install_helper = InstallHelper(config, logger) + + models_to_download = default_user_selections(opt, install_helper) new_init_file = config.root_path / "invokeai.yaml" if opt.yes_to_all: write_default_options(opt, new_init_file) init_options = Namespace(precision="float32" if opt.full_precision else "float16") + else: - init_options, models_to_download = run_console_ui(opt, new_init_file) + init_options, models_to_download = run_console_ui(opt, new_init_file, install_helper) if init_options: write_opts(init_options, new_init_file) else: @@ -943,10 +965,12 @@ def main() -> None: if opt.skip_sd_weights: logger.warning("Skipping diffusion weights download per user request") + elif models_to_download: - process_and_execute(opt, models_to_download) + install_helper.add_or_delete(models_to_download) postscript(errors=errors) + if not opt.yes_to_all: input("Press any key to continue...") except WindowTooSmallException as e: diff --git a/invokeai/backend/install/migrate_to_3.py b/invokeai/backend/install/migrate_to_3.py deleted file mode 100644 index e15eb23f5b..0000000000 --- a/invokeai/backend/install/migrate_to_3.py +++ /dev/null @@ -1,591 +0,0 @@ -""" -Migrate the models directory and models.yaml file from an existing -InvokeAI 2.3 installation to 3.0.0. -""" - -import argparse -import os -import shutil -import warnings -from dataclasses import dataclass -from pathlib import Path -from typing import Union - -import diffusers -import transformers -import yaml -from diffusers import AutoencoderKL, StableDiffusionPipeline -from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker -from omegaconf import DictConfig, OmegaConf -from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextModel, CLIPTokenizer - -import invokeai.backend.util.logging as logger -from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.backend.model_management import ModelManager -from invokeai.backend.model_management.model_probe import BaseModelType, ModelProbe, ModelProbeInfo, ModelType - -warnings.filterwarnings("ignore") -transformers.logging.set_verbosity_error() -diffusers.logging.set_verbosity_error() - - -# holder for paths that we will migrate -@dataclass -class ModelPaths: - models: Path - embeddings: Path - loras: Path - controlnets: Path - - -class MigrateTo3(object): - def __init__( - self, - from_root: Path, - to_models: Path, - model_manager: ModelManager, - src_paths: ModelPaths, - ): - self.root_directory = from_root - self.dest_models = to_models - self.mgr = model_manager - self.src_paths = src_paths - - @classmethod - def initialize_yaml(cls, yaml_file: Path): - with open(yaml_file, "w") as file: - file.write(yaml.dump({"__metadata__": {"version": "3.0.0"}})) - - def create_directory_structure(self): - """ - Create the basic directory structure for the models folder. 
- """ - for model_base in [BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]: - for model_type in [ - ModelType.Main, - ModelType.Vae, - ModelType.Lora, - ModelType.ControlNet, - ModelType.TextualInversion, - ]: - path = self.dest_models / model_base.value / model_type.value - path.mkdir(parents=True, exist_ok=True) - path = self.dest_models / "core" - path.mkdir(parents=True, exist_ok=True) - - @staticmethod - def copy_file(src: Path, dest: Path): - """ - copy a single file with logging - """ - if dest.exists(): - logger.info(f"Skipping existing {str(dest)}") - return - logger.info(f"Copying {str(src)} to {str(dest)}") - try: - shutil.copy(src, dest) - except Exception as e: - logger.error(f"COPY FAILED: {str(e)}") - - @staticmethod - def copy_dir(src: Path, dest: Path): - """ - Recursively copy a directory with logging - """ - if dest.exists(): - logger.info(f"Skipping existing {str(dest)}") - return - - logger.info(f"Copying {str(src)} to {str(dest)}") - try: - shutil.copytree(src, dest) - except Exception as e: - logger.error(f"COPY FAILED: {str(e)}") - - def migrate_models(self, src_dir: Path): - """ - Recursively walk through src directory, probe anything - that looks like a model, and copy the model into the - appropriate location within the destination models directory. - """ - directories_scanned = set() - for root, dirs, files in os.walk(src_dir, followlinks=True): - for d in dirs: - try: - model = Path(root, d) - info = ModelProbe().heuristic_probe(model) - if not info: - continue - dest = self._model_probe_to_path(info) / model.name - self.copy_dir(model, dest) - directories_scanned.add(model) - except Exception as e: - logger.error(str(e)) - except KeyboardInterrupt: - raise - for f in files: - # don't copy raw learned_embeds.bin or pytorch_lora_weights.bin - # let them be copied as part of a tree copy operation - try: - if f in {"learned_embeds.bin", "pytorch_lora_weights.bin"}: - continue - model = Path(root, f) - if model.parent in directories_scanned: - continue - info = ModelProbe().heuristic_probe(model) - if not info: - continue - dest = self._model_probe_to_path(info) / f - self.copy_file(model, dest) - except Exception as e: - logger.error(str(e)) - except KeyboardInterrupt: - raise - - def migrate_support_models(self): - """ - Copy the clipseg, upscaler, and restoration models to their new - locations. - """ - dest_directory = self.dest_models - if (self.root_directory / "models/clipseg").exists(): - self.copy_dir(self.root_directory / "models/clipseg", dest_directory / "core/misc/clipseg") - if (self.root_directory / "models/realesrgan").exists(): - self.copy_dir(self.root_directory / "models/realesrgan", dest_directory / "core/upscaling/realesrgan") - for d in ["codeformer", "gfpgan"]: - path = self.root_directory / "models" / d - if path.exists(): - self.copy_dir(path, dest_directory / f"core/face_restoration/{d}") - - def migrate_tuning_models(self): - """ - Migrate the embeddings, loras and controlnets directories to their new homes. - """ - for src in [self.src_paths.embeddings, self.src_paths.loras, self.src_paths.controlnets]: - if not src: - continue - if src.is_dir(): - logger.info(f"Scanning {src}") - self.migrate_models(src) - else: - logger.info(f"{src} directory not found; skipping") - continue - - def migrate_conversion_models(self): - """ - Migrate all the models that are needed by the ckpt_to_diffusers conversion - script. 
- """ - - dest_directory = self.dest_models - kwargs = { - "cache_dir": self.root_directory / "models/hub", - # local_files_only = True - } - try: - logger.info("Migrating core tokenizers and text encoders") - target_dir = dest_directory / "core" / "convert" - - self._migrate_pretrained( - BertTokenizerFast, repo_id="bert-base-uncased", dest=target_dir / "bert-base-uncased", **kwargs - ) - - # sd-1 - repo_id = "openai/clip-vit-large-patch14" - self._migrate_pretrained( - CLIPTokenizer, repo_id=repo_id, dest=target_dir / "clip-vit-large-patch14", **kwargs - ) - self._migrate_pretrained( - CLIPTextModel, repo_id=repo_id, dest=target_dir / "clip-vit-large-patch14", force=True, **kwargs - ) - - # sd-2 - repo_id = "stabilityai/stable-diffusion-2" - self._migrate_pretrained( - CLIPTokenizer, - repo_id=repo_id, - dest=target_dir / "stable-diffusion-2-clip" / "tokenizer", - **{"subfolder": "tokenizer", **kwargs}, - ) - self._migrate_pretrained( - CLIPTextModel, - repo_id=repo_id, - dest=target_dir / "stable-diffusion-2-clip" / "text_encoder", - **{"subfolder": "text_encoder", **kwargs}, - ) - - # VAE - logger.info("Migrating stable diffusion VAE") - self._migrate_pretrained( - AutoencoderKL, repo_id="stabilityai/sd-vae-ft-mse", dest=target_dir / "sd-vae-ft-mse", **kwargs - ) - - # safety checking - logger.info("Migrating safety checker") - repo_id = "CompVis/stable-diffusion-safety-checker" - self._migrate_pretrained( - AutoFeatureExtractor, repo_id=repo_id, dest=target_dir / "stable-diffusion-safety-checker", **kwargs - ) - self._migrate_pretrained( - StableDiffusionSafetyChecker, - repo_id=repo_id, - dest=target_dir / "stable-diffusion-safety-checker", - **kwargs, - ) - except KeyboardInterrupt: - raise - except Exception as e: - logger.error(str(e)) - - def _model_probe_to_path(self, info: ModelProbeInfo) -> Path: - return Path(self.dest_models, info.base_type.value, info.model_type.value) - - def _migrate_pretrained(self, model_class, repo_id: str, dest: Path, force: bool = False, **kwargs): - if dest.exists() and not force: - logger.info(f"Skipping existing {dest}") - return - model = model_class.from_pretrained(repo_id, **kwargs) - self._save_pretrained(model, dest, overwrite=force) - - def _save_pretrained(self, model, dest: Path, overwrite: bool = False): - model_name = dest.name - if overwrite: - model.save_pretrained(dest, safe_serialization=True) - else: - download_path = dest.with_name(f"{model_name}.downloading") - model.save_pretrained(download_path, safe_serialization=True) - download_path.replace(dest) - - def _download_vae(self, repo_id: str, subfolder: str = None) -> Path: - vae = AutoencoderKL.from_pretrained(repo_id, cache_dir=self.root_directory / "models/hub", subfolder=subfolder) - info = ModelProbe().heuristic_probe(vae) - _, model_name = repo_id.split("/") - dest = self._model_probe_to_path(info) / self.unique_name(model_name, info) - vae.save_pretrained(dest, safe_serialization=True) - return dest - - def _vae_path(self, vae: Union[str, dict]) -> Path: - """ - Convert 2.3 VAE stanza to a straight path. 
- """ - vae_path = None - - # First get a path - if isinstance(vae, str): - vae_path = vae - - elif isinstance(vae, DictConfig): - if p := vae.get("path"): - vae_path = p - elif repo_id := vae.get("repo_id"): - if repo_id == "stabilityai/sd-vae-ft-mse": # this guy is already downloaded - vae_path = "models/core/convert/sd-vae-ft-mse" - return vae_path - else: - vae_path = self._download_vae(repo_id, vae.get("subfolder")) - - assert vae_path is not None, "Couldn't find VAE for this model" - - # if the VAE is in the old models directory, then we must move it into the new - # one. VAEs outside of this directory can stay where they are. - vae_path = Path(vae_path) - if vae_path.is_relative_to(self.src_paths.models): - info = ModelProbe().heuristic_probe(vae_path) - dest = self._model_probe_to_path(info) / vae_path.name - if not dest.exists(): - if vae_path.is_dir(): - self.copy_dir(vae_path, dest) - else: - self.copy_file(vae_path, dest) - vae_path = dest - - if vae_path.is_relative_to(self.dest_models): - rel_path = vae_path.relative_to(self.dest_models) - return Path("models", rel_path) - else: - return vae_path - - def migrate_repo_id(self, repo_id: str, model_name: str = None, **extra_config): - """ - Migrate a locally-cached diffusers pipeline identified with a repo_id - """ - dest_dir = self.dest_models - - cache = self.root_directory / "models/hub" - kwargs = { - "cache_dir": cache, - "safety_checker": None, - # local_files_only = True, - } - - owner, repo_name = repo_id.split("/") - model_name = model_name or repo_name - model = cache / "--".join(["models", owner, repo_name]) - - if len(list(model.glob("snapshots/**/model_index.json"))) == 0: - return - revisions = [x.name for x in model.glob("refs/*")] - - # if an fp16 is available we use that - revision = "fp16" if len(revisions) > 1 and "fp16" in revisions else revisions[0] - pipeline = StableDiffusionPipeline.from_pretrained(repo_id, revision=revision, **kwargs) - - info = ModelProbe().heuristic_probe(pipeline) - if not info: - return - - if self.mgr.model_exists(model_name, info.base_type, info.model_type): - logger.warning(f"A model named {model_name} already exists at the destination. Skipping migration.") - return - - dest = self._model_probe_to_path(info) / model_name - self._save_pretrained(pipeline, dest) - - rel_path = Path("models", dest.relative_to(dest_dir)) - self._add_model(model_name, info, rel_path, **extra_config) - - def migrate_path(self, location: Path, model_name: str = None, **extra_config): - """ - Migrate a model referred to using 'weights' or 'path' - """ - - # handle relative paths - dest_dir = self.dest_models - location = self.root_directory / location - model_name = model_name or location.stem - - info = ModelProbe().heuristic_probe(location) - if not info: - return - - if self.mgr.model_exists(model_name, info.base_type, info.model_type): - logger.warning(f"A model named {model_name} already exists at the destination. 
Skipping migration.") - return - - # uh oh, weights is in the old models directory - move it into the new one - if Path(location).is_relative_to(self.src_paths.models): - dest = Path(dest_dir, info.base_type.value, info.model_type.value, location.name) - if location.is_dir(): - self.copy_dir(location, dest) - else: - self.copy_file(location, dest) - location = Path("models", info.base_type.value, info.model_type.value, location.name) - - self._add_model(model_name, info, location, **extra_config) - - def _add_model(self, model_name: str, info: ModelProbeInfo, location: Path, **extra_config): - if info.model_type != ModelType.Main: - return - - self.mgr.add_model( - model_name=model_name, - base_model=info.base_type, - model_type=info.model_type, - clobber=True, - model_attributes={ - "path": str(location), - "description": f"A {info.base_type.value} {info.model_type.value} model", - "model_format": info.format, - "variant": info.variant_type.value, - **extra_config, - }, - ) - - def migrate_defined_models(self): - """ - Migrate models defined in models.yaml - """ - # find any models referred to in old models.yaml - conf = OmegaConf.load(self.root_directory / "configs/models.yaml") - - for model_name, stanza in conf.items(): - try: - passthru_args = {} - - if vae := stanza.get("vae"): - try: - passthru_args["vae"] = str(self._vae_path(vae)) - except Exception as e: - logger.warning(f'Could not find a VAE matching "{vae}" for model "{model_name}"') - logger.warning(str(e)) - - if config := stanza.get("config"): - passthru_args["config"] = config - - if description := stanza.get("description"): - passthru_args["description"] = description - - if repo_id := stanza.get("repo_id"): - logger.info(f"Migrating diffusers model {model_name}") - self.migrate_repo_id(repo_id, model_name, **passthru_args) - - elif location := stanza.get("weights"): - logger.info(f"Migrating checkpoint model {model_name}") - self.migrate_path(Path(location), model_name, **passthru_args) - - elif location := stanza.get("path"): - logger.info(f"Migrating diffusers model {model_name}") - self.migrate_path(Path(location), model_name, **passthru_args) - - except KeyboardInterrupt: - raise - except Exception as e: - logger.error(str(e)) - - def migrate(self): - self.create_directory_structure() - # the configure script is doing this - self.migrate_support_models() - self.migrate_conversion_models() - self.migrate_tuning_models() - self.migrate_defined_models() - - -def _parse_legacy_initfile(root: Path, initfile: Path) -> ModelPaths: - """ - Returns tuple of (embedding_path, lora_path, controlnet_path) - """ - parser = argparse.ArgumentParser(fromfile_prefix_chars="@") - parser.add_argument( - "--embedding_directory", - "--embedding_path", - type=Path, - dest="embedding_path", - default=Path("embeddings"), - ) - parser.add_argument( - "--lora_directory", - dest="lora_path", - type=Path, - default=Path("loras"), - ) - opt, _ = parser.parse_known_args([f"@{str(initfile)}"]) - return ModelPaths( - models=root / "models", - embeddings=root / str(opt.embedding_path).strip('"'), - loras=root / str(opt.lora_path).strip('"'), - controlnets=root / "controlnets", - ) - - -def _parse_legacy_yamlfile(root: Path, initfile: Path) -> ModelPaths: - """ - Returns tuple of (embedding_path, lora_path, controlnet_path) - """ - # Don't use the config object because it is unforgiving of version updates - # Just use omegaconf directly - opt = OmegaConf.load(initfile) - paths = opt.InvokeAI.Paths - models = paths.get("models_dir", "models") - 
embeddings = paths.get("embedding_dir", "embeddings") - loras = paths.get("lora_dir", "loras") - controlnets = paths.get("controlnet_dir", "controlnets") - return ModelPaths( - models=root / models if models else None, - embeddings=root / embeddings if embeddings else None, - loras=root / loras if loras else None, - controlnets=root / controlnets if controlnets else None, - ) - - -def get_legacy_embeddings(root: Path) -> ModelPaths: - path = root / "invokeai.init" - if path.exists(): - return _parse_legacy_initfile(root, path) - path = root / "invokeai.yaml" - if path.exists(): - return _parse_legacy_yamlfile(root, path) - - -def do_migrate(src_directory: Path, dest_directory: Path): - """ - Migrate models from src to dest InvokeAI root directories - """ - config_file = dest_directory / "configs" / "models.yaml.3" - dest_models = dest_directory / "models.3" - - version_3 = (dest_directory / "models" / "core").exists() - - # Here we create the destination models.yaml file. - # If we are writing into a version 3 directory and the - # file already exists, then we write into a copy of it to - # avoid deleting its previous customizations. Otherwise we - # create a new empty one. - if version_3: # write into the dest directory - try: - shutil.copy(dest_directory / "configs" / "models.yaml", config_file) - except Exception: - MigrateTo3.initialize_yaml(config_file) - mgr = ModelManager(config_file) # important to initialize BEFORE moving the models directory - (dest_directory / "models").replace(dest_models) - else: - MigrateTo3.initialize_yaml(config_file) - mgr = ModelManager(config_file) - - paths = get_legacy_embeddings(src_directory) - migrator = MigrateTo3(from_root=src_directory, to_models=dest_models, model_manager=mgr, src_paths=paths) - migrator.migrate() - print("Migration successful.") - - if not version_3: - (dest_directory / "models").replace(src_directory / "models.orig") - print(f"Original models directory moved to {dest_directory}/models.orig") - - (dest_directory / "configs" / "models.yaml").replace(src_directory / "configs" / "models.yaml.orig") - print(f"Original models.yaml file moved to {dest_directory}/configs/models.yaml.orig") - - config_file.replace(config_file.with_suffix("")) - dest_models.replace(dest_models.with_suffix("")) - - -def main(): - parser = argparse.ArgumentParser( - prog="invokeai-migrate3", - description=""" -This will copy and convert the models directory and the configs/models.yaml from the InvokeAI 2.3 format -'--from-directory' root to the InvokeAI 3.0 '--to-directory' root. These may be abbreviated '--from' and '--to'.a - -The old models directory and config file will be renamed 'models.orig' and 'models.yaml.orig' respectively. 
-It is safe to provide the same directory for both arguments, but it is better to use the invokeai_configure -script, which will perform a full upgrade in place.""", - ) - parser.add_argument( - "--from-directory", - dest="src_root", - type=Path, - required=True, - help='Source InvokeAI 2.3 root directory (containing "invokeai.init" or "invokeai.yaml")', - ) - parser.add_argument( - "--to-directory", - dest="dest_root", - type=Path, - required=True, - help='Destination InvokeAI 3.0 directory (containing "invokeai.yaml")', - ) - args = parser.parse_args() - src_root = args.src_root - assert src_root.is_dir(), f"{src_root} is not a valid directory" - assert (src_root / "models").is_dir(), f"{src_root} does not contain a 'models' subdirectory" - assert (src_root / "models" / "hub").exists(), f"{src_root} does not contain a version 2.3 models directory" - assert (src_root / "invokeai.init").exists() or ( - src_root / "invokeai.yaml" - ).exists(), f"{src_root} does not contain an InvokeAI init file." - - dest_root = args.dest_root - assert dest_root.is_dir(), f"{dest_root} is not a valid directory" - config = InvokeAIAppConfig.get_config() - config.parse_args(["--root", str(dest_root)]) - - # TODO: revisit - don't rely on invokeai.yaml to exist yet! - dest_is_setup = (dest_root / "models/core").exists() and (dest_root / "databases").exists() - if not dest_is_setup: - from invokeai.backend.install.invokeai_configure import initialize_rootdir - - initialize_rootdir(dest_root, True) - - do_migrate(src_root, dest_root) - - -if __name__ == "__main__": - main() diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py deleted file mode 100644 index fdbe714f62..0000000000 --- a/invokeai/backend/install/model_install_backend.py +++ /dev/null @@ -1,637 +0,0 @@ -""" -Utility (backend) functions used by model_install.py -""" -import os -import re -import shutil -import warnings -from dataclasses import dataclass, field -from pathlib import Path -from tempfile import TemporaryDirectory -from typing import Callable, Dict, List, Optional, Set, Union - -import requests -import torch -from diffusers import DiffusionPipeline -from diffusers import logging as dlogging -from huggingface_hub import HfApi, HfFolder, hf_hub_url -from omegaconf import OmegaConf -from tqdm import tqdm - -import invokeai.configs as configs -from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.backend.model_management import AddModelResult, BaseModelType, ModelManager, ModelType, ModelVariantType -from invokeai.backend.model_management.model_probe import ModelProbe, ModelProbeInfo, SchedulerPredictionType -from invokeai.backend.util import download_with_resume -from invokeai.backend.util.devices import choose_torch_device, torch_dtype - -from ..util.logging import InvokeAILogger - -warnings.filterwarnings("ignore") - -# --------------------------globals----------------------- -config = InvokeAIAppConfig.get_config() -logger = InvokeAILogger.get_logger(name="InvokeAI") - -# the initial "configs" dir is now bundled in the `invokeai.configs` package -Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml" - -Config_preamble = """ -# This file describes the alternative machine learning models -# available to InvokeAI script. -# -# To add a new model, follow the examples below. Each -# model requires a model config file, a weights file, -# and the width and height of the images it -# was trained on. 
-""" - -LEGACY_CONFIGS = { - BaseModelType.StableDiffusion1: { - ModelVariantType.Normal: { - SchedulerPredictionType.Epsilon: "v1-inference.yaml", - SchedulerPredictionType.VPrediction: "v1-inference-v.yaml", - }, - ModelVariantType.Inpaint: { - SchedulerPredictionType.Epsilon: "v1-inpainting-inference.yaml", - SchedulerPredictionType.VPrediction: "v1-inpainting-inference-v.yaml", - }, - }, - BaseModelType.StableDiffusion2: { - ModelVariantType.Normal: { - SchedulerPredictionType.Epsilon: "v2-inference.yaml", - SchedulerPredictionType.VPrediction: "v2-inference-v.yaml", - }, - ModelVariantType.Inpaint: { - SchedulerPredictionType.Epsilon: "v2-inpainting-inference.yaml", - SchedulerPredictionType.VPrediction: "v2-inpainting-inference-v.yaml", - }, - }, - BaseModelType.StableDiffusionXL: { - ModelVariantType.Normal: "sd_xl_base.yaml", - }, - BaseModelType.StableDiffusionXLRefiner: { - ModelVariantType.Normal: "sd_xl_refiner.yaml", - }, -} - - -@dataclass -class InstallSelections: - install_models: List[str] = field(default_factory=list) - remove_models: List[str] = field(default_factory=list) - - -@dataclass -class ModelLoadInfo: - name: str - model_type: ModelType - base_type: BaseModelType - path: Optional[Path] = None - repo_id: Optional[str] = None - subfolder: Optional[str] = None - description: str = "" - installed: bool = False - recommended: bool = False - default: bool = False - requires: Optional[List[str]] = field(default_factory=list) - - -class ModelInstall(object): - def __init__( - self, - config: InvokeAIAppConfig, - prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None, - model_manager: Optional[ModelManager] = None, - access_token: Optional[str] = None, - civitai_api_key: Optional[str] = None, - ): - self.config = config - self.mgr = model_manager or ModelManager(config.model_conf_path) - self.datasets = OmegaConf.load(Dataset_path) - self.prediction_helper = prediction_type_helper - self.access_token = access_token or HfFolder.get_token() - self.civitai_api_key = civitai_api_key or config.civitai_api_key - self.reverse_paths = self._reverse_paths(self.datasets) - - def all_models(self) -> Dict[str, ModelLoadInfo]: - """ - Return dict of model_key=>ModelLoadInfo objects. - This method consolidates and simplifies the entries in both - models.yaml and INITIAL_MODELS.yaml so that they can - be treated uniformly. It also sorts the models alphabetically - by their name, to improve the display somewhat. 
- """ - model_dict = {} - - # first populate with the entries in INITIAL_MODELS.yaml - for key, value in self.datasets.items(): - name, base, model_type = ModelManager.parse_key(key) - value["name"] = name - value["base_type"] = base - value["model_type"] = model_type - model_info = ModelLoadInfo(**value) - if model_info.subfolder and model_info.repo_id: - model_info.repo_id += f":{model_info.subfolder}" - model_dict[key] = model_info - - # supplement with entries in models.yaml - installed_models = list(self.mgr.list_models()) - - for md in installed_models: - base = md["base_model"] - model_type = md["model_type"] - name = md["model_name"] - key = ModelManager.create_key(name, base, model_type) - if key in model_dict: - model_dict[key].installed = True - else: - model_dict[key] = ModelLoadInfo( - name=name, - base_type=base, - model_type=model_type, - path=value.get("path"), - installed=True, - ) - return {x: model_dict[x] for x in sorted(model_dict.keys(), key=lambda y: model_dict[y].name.lower())} - - def _is_autoloaded(self, model_info: dict) -> bool: - path = model_info.get("path") - if not path: - return False - for autodir in ["autoimport_dir", "lora_dir", "embedding_dir", "controlnet_dir"]: - if autodir_path := getattr(self.config, autodir): - autodir_path = self.config.root_path / autodir_path - if Path(path).is_relative_to(autodir_path): - return True - return False - - def list_models(self, model_type): - installed = self.mgr.list_models(model_type=model_type) - print() - print(f"Installed models of type `{model_type}`:") - print(f"{'Model Key':50} Model Path") - for i in installed: - print(f"{'/'.join([i['base_model'],i['model_type'],i['model_name']]):50} {i['path']}") - print() - - # logic here a little reversed to maintain backward compatibility - def starter_models(self, all_models: bool = False) -> Set[str]: - models = set() - for key, _value in self.datasets.items(): - name, base, model_type = ModelManager.parse_key(key) - if all_models or model_type in [ModelType.Main, ModelType.Vae]: - models.add(key) - return models - - def recommended_models(self) -> Set[str]: - starters = self.starter_models(all_models=True) - return {x for x in starters if self.datasets[x].get("recommended", False)} - - def default_model(self) -> str: - starters = self.starter_models() - defaults = [x for x in starters if self.datasets[x].get("default", False)] - return defaults[0] - - def install(self, selections: InstallSelections): - verbosity = dlogging.get_verbosity() # quench NSFW nags - dlogging.set_verbosity_error() - - job = 1 - jobs = len(selections.remove_models) + len(selections.install_models) - - # remove requested models - for key in selections.remove_models: - name, base, mtype = self.mgr.parse_key(key) - logger.info(f"Deleting {mtype} model {name} [{job}/{jobs}]") - try: - self.mgr.del_model(name, base, mtype) - except FileNotFoundError as e: - logger.warning(e) - job += 1 - - # add requested models - self._remove_installed(selections.install_models) - self._add_required_models(selections.install_models) - for path in selections.install_models: - logger.info(f"Installing {path} [{job}/{jobs}]") - try: - self.heuristic_import(path) - except (ValueError, KeyError) as e: - logger.error(str(e)) - job += 1 - - dlogging.set_verbosity(verbosity) - self.mgr.commit() - - def heuristic_import( - self, - model_path_id_or_url: Union[str, Path], - models_installed: Set[Path] = None, - ) -> Dict[str, AddModelResult]: - """ - :param model_path_id_or_url: A Path to a local model to import, or a 
string representing its repo_id or URL - :param models_installed: Set of installed models, used for recursive invocation - Returns a set of dict objects corresponding to newly-created stanzas in models.yaml. - """ - - if not models_installed: - models_installed = {} - - model_path_id_or_url = str(model_path_id_or_url).strip("\"' ") - - # A little hack to allow nested routines to retrieve info on the requested ID - self.current_id = model_path_id_or_url - path = Path(model_path_id_or_url) - - # fix relative paths - if path.exists() and not path.is_absolute(): - path = path.absolute() # make relative to current WD - - # checkpoint file, or similar - if path.is_file(): - models_installed.update({str(path): self._install_path(path)}) - - # folders style or similar - elif path.is_dir() and any( - (path / x).exists() - for x in { - "config.json", - "model_index.json", - "learned_embeds.bin", - "pytorch_lora_weights.bin", - "pytorch_lora_weights.safetensors", - } - ): - models_installed.update({str(model_path_id_or_url): self._install_path(path)}) - - # recursive scan - elif path.is_dir(): - for child in path.iterdir(): - self.heuristic_import(child, models_installed=models_installed) - - # huggingface repo - elif len(str(model_path_id_or_url).split("/")) == 2: - models_installed.update({str(model_path_id_or_url): self._install_repo(str(model_path_id_or_url))}) - - # a URL - elif str(model_path_id_or_url).startswith(("http:", "https:", "ftp:")): - models_installed.update({str(model_path_id_or_url): self._install_url(model_path_id_or_url)}) - - else: - raise KeyError(f"{str(model_path_id_or_url)} is not recognized as a local path, repo ID or URL. Skipping") - - return models_installed - - def _remove_installed(self, model_list: List[str]): - all_models = self.all_models() - models_to_remove = [] - - for path in model_list: - key = self.reverse_paths.get(path) - if key and all_models[key].installed: - models_to_remove.append(path) - - for path in models_to_remove: - logger.warning(f"{path} already installed. Skipping") - model_list.remove(path) - - def _add_required_models(self, model_list: List[str]): - additional_models = [] - all_models = self.all_models() - for path in model_list: - if not (key := self.reverse_paths.get(path)): - continue - for requirement in all_models[key].requires: - requirement_key = self.reverse_paths.get(requirement) - if not all_models[requirement_key].installed: - additional_models.append(requirement) - model_list.extend(additional_models) - - # install a model from a local path. The optional info parameter is there to prevent - # the model from being probed twice in the event that it has already been probed. 
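# The dispatch in heuristic_import() above boils down to a small classifier over
# the source string. A condensed sketch of just that ordering, with the marker
# file set copied from the method (example sources are hypothetical):
from pathlib import Path

MARKER_FILES = {
    "config.json",
    "model_index.json",
    "learned_embeds.bin",
    "pytorch_lora_weights.bin",
    "pytorch_lora_weights.safetensors",
}

def classify_source(source: str) -> str:
    path = Path(source)
    if path.is_file():
        return "single model file"
    if path.is_dir() and any((path / m).exists() for m in MARKER_FILES):
        return "model folder"
    if path.is_dir():
        return "directory to scan recursively"
    if len(source.split("/")) == 2:
        return "huggingface repo_id"
    if source.startswith(("http:", "https:", "ftp:")):
        return "url"
    raise KeyError(f"{source} is not recognized as a local path, repo ID or URL")

assert classify_source("stabilityai/stable-diffusion-2") == "huggingface repo_id"
assert classify_source("https://example.com/model.safetensors") == "url"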
- def _install_path(self, path: Path, info: ModelProbeInfo = None) -> AddModelResult: - info = info or ModelProbe().heuristic_probe(path, self.prediction_helper) - if not info: - logger.warning(f"Unable to parse format of {path}") - return None - model_name = path.stem if path.is_file() else path.name - if self.mgr.model_exists(model_name, info.base_type, info.model_type): - raise ValueError(f'A model named "{model_name}" is already installed.') - attributes = self._make_attributes(path, info) - return self.mgr.add_model( - model_name=model_name, - base_model=info.base_type, - model_type=info.model_type, - model_attributes=attributes, - ) - - def _install_url(self, url: str) -> AddModelResult: - with TemporaryDirectory(dir=self.config.models_path) as staging: - CIVITAI_RE = r".*civitai.com.*" - civit_url = re.match(CIVITAI_RE, url, re.IGNORECASE) - location = download_with_resume( - url, Path(staging), access_token=self.civitai_api_key if civit_url else None - ) - if not location: - logger.error(f"Unable to download {url}. Skipping.") - info = ModelProbe().heuristic_probe(location, self.prediction_helper) - dest = self.config.models_path / info.base_type.value / info.model_type.value / location.name - dest.parent.mkdir(parents=True, exist_ok=True) - models_path = shutil.move(location, dest) - - # staged version will be garbage-collected at this time - return self._install_path(Path(models_path), info) - - def _install_repo(self, repo_id: str) -> AddModelResult: - # hack to recover models stored in subfolders -- - # Required to get the "v2" model of monster-labs/control_v1p_sd15_qrcode_monster - subfolder = None - if match := re.match(r"^([^/]+/[^/]+):(\w+)$", repo_id): - repo_id = match.group(1) - subfolder = match.group(2) - - hinfo = HfApi().model_info(repo_id) - - # we try to figure out how to download this most economically - # list all the files in the repo - files = [x.rfilename for x in hinfo.siblings] - if subfolder: - files = [x for x in files if x.startswith(f"{subfolder}/")] - prefix = f"{subfolder}/" if subfolder else "" - - location = None - - with TemporaryDirectory(dir=self.config.models_path) as staging: - staging = Path(staging) - if f"{prefix}model_index.json" in files: - location = self._download_hf_pipeline(repo_id, staging, subfolder=subfolder) # pipeline - elif f"{prefix}unet/model.onnx" in files: - location = self._download_hf_model(repo_id, files, staging) - else: - for suffix in ["safetensors", "bin"]: - if f"{prefix}pytorch_lora_weights.{suffix}" in files: - location = self._download_hf_model( - repo_id, [f"pytorch_lora_weights.{suffix}"], staging, subfolder=subfolder - ) # LoRA - break - elif ( - self.config.precision == "float16" and f"{prefix}diffusion_pytorch_model.fp16.{suffix}" in files - ): # vae, controlnet or some other standalone - files = ["config.json", f"diffusion_pytorch_model.fp16.{suffix}"] - location = self._download_hf_model(repo_id, files, staging, subfolder=subfolder) - break - elif f"{prefix}diffusion_pytorch_model.{suffix}" in files: - files = ["config.json", f"diffusion_pytorch_model.{suffix}"] - location = self._download_hf_model(repo_id, files, staging, subfolder=subfolder) - break - elif f"{prefix}learned_embeds.{suffix}" in files: - location = self._download_hf_model( - repo_id, [f"learned_embeds.{suffix}"], staging, subfolder=subfolder - ) - break - elif ( - f"{prefix}image_encoder.txt" in files and f"{prefix}ip_adapter.{suffix}" in files - ): # IP-Adapter - files = ["image_encoder.txt", f"ip_adapter.{suffix}"] - location = 
self._download_hf_model(repo_id, files, staging, subfolder=subfolder) - break - elif f"{prefix}model.{suffix}" in files and f"{prefix}config.json" in files: - # This elif-condition is pretty fragile, but it is intended to handle CLIP Vision models hosted - # by InvokeAI for use with IP-Adapters. - files = ["config.json", f"model.{suffix}"] - location = self._download_hf_model(repo_id, files, staging, subfolder=subfolder) - break - if not location: - logger.warning(f"Could not determine type of repo {repo_id}. Skipping install.") - return {} - - info = ModelProbe().heuristic_probe(location, self.prediction_helper) - if not info: - logger.warning(f"Could not probe {location}. Skipping install.") - return {} - dest = ( - self.config.models_path - / info.base_type.value - / info.model_type.value - / self._get_model_name(repo_id, location) - ) - if dest.exists(): - shutil.rmtree(dest) - shutil.copytree(location, dest) - return self._install_path(dest, info) - - def _get_model_name(self, path_name: str, location: Path) -> str: - """ - Calculate a name for the model - primitive implementation. - """ - if key := self.reverse_paths.get(path_name): - (name, base, mtype) = ModelManager.parse_key(key) - return name - elif location.is_dir(): - return location.name - else: - return location.stem - - def _make_attributes(self, path: Path, info: ModelProbeInfo) -> dict: - model_name = path.name if path.is_dir() else path.stem - description = f"{info.base_type.value} {info.model_type.value} model {model_name}" - if key := self.reverse_paths.get(self.current_id): - if key in self.datasets: - description = self.datasets[key].get("description") or description - - rel_path = self.relative_to_root(path, self.config.models_path) - - attributes = { - "path": str(rel_path), - "description": str(description), - "model_format": info.format, - } - legacy_conf = None - if info.model_type == ModelType.Main or info.model_type == ModelType.ONNX: - attributes.update( - { - "variant": info.variant_type, - } - ) - if info.format == "checkpoint": - try: - possible_conf = path.with_suffix(".yaml") - if possible_conf.exists(): - legacy_conf = str(self.relative_to_root(possible_conf)) - elif info.base_type in [BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]: - legacy_conf = Path( - self.config.legacy_conf_dir, - LEGACY_CONFIGS[info.base_type][info.variant_type][info.prediction_type], - ) - else: - legacy_conf = Path( - self.config.legacy_conf_dir, LEGACY_CONFIGS[info.base_type][info.variant_type] - ) - except KeyError: - legacy_conf = Path(self.config.legacy_conf_dir, "v1-inference.yaml") # best guess - - if info.model_type == ModelType.ControlNet and info.format == "checkpoint": - possible_conf = path.with_suffix(".yaml") - if possible_conf.exists(): - legacy_conf = str(self.relative_to_root(possible_conf)) - else: - legacy_conf = Path( - self.config.root_path, - "configs/controlnet", - ("cldm_v15.yaml" if info.base_type == BaseModelType("sd-1") else "cldm_v21.yaml"), - ) - - if legacy_conf: - attributes.update({"config": str(legacy_conf)}) - return attributes - - def relative_to_root(self, path: Path, root: Optional[Path] = None) -> Path: - root = root or self.config.root_path - if path.is_relative_to(root): - return path.relative_to(root) - else: - return path - - def _download_hf_pipeline(self, repo_id: str, staging: Path, subfolder: str = None) -> Path: - """ - Retrieve a StableDiffusion model from cache or remote and then - does a save_pretrained() to the indicated staging area. 
- """ - _, name = repo_id.split("/") - precision = torch_dtype(choose_torch_device()) - variants = ["fp16", None] if precision == torch.float16 else [None, "fp16"] - - model = None - for variant in variants: - try: - model = DiffusionPipeline.from_pretrained( - repo_id, - variant=variant, - torch_dtype=precision, - safety_checker=None, - subfolder=subfolder, - ) - except Exception as e: # most errors are due to fp16 not being present. Fix this to catch other errors - if "fp16" not in str(e): - print(e) - - if model: - break - - if not model: - logger.error(f"Diffusers model {repo_id} could not be downloaded. Skipping.") - return None - model.save_pretrained(staging / name, safe_serialization=True) - return staging / name - - def _download_hf_model(self, repo_id: str, files: List[str], staging: Path, subfolder: None) -> Path: - _, name = repo_id.split("/") - location = staging / name - paths = [] - for filename in files: - filePath = Path(filename) - p = hf_download_with_resume( - repo_id, - model_dir=location / filePath.parent, - model_name=filePath.name, - access_token=self.access_token, - subfolder=filePath.parent / subfolder if subfolder else filePath.parent, - ) - if p: - paths.append(p) - else: - logger.warning(f"Could not download {filename} from {repo_id}.") - - return location if len(paths) > 0 else None - - @classmethod - def _reverse_paths(cls, datasets) -> dict: - """ - Reverse mapping from repo_id/path to destination name. - """ - return {v.get("path") or v.get("repo_id"): k for k, v in datasets.items()} - - -# ------------------------------------- -def yes_or_no(prompt: str, default_yes=True): - default = "y" if default_yes else "n" - response = input(f"{prompt} [{default}] ") or default - if default_yes: - return response[0] not in ("n", "N") - else: - return response[0] in ("y", "Y") - - -# --------------------------------------------- -def hf_download_from_pretrained(model_class: object, model_name: str, destination: Path, **kwargs): - logger = InvokeAILogger.get_logger("InvokeAI") - logger.addFilter(lambda x: "fp16 is not a valid" not in x.getMessage()) - - model = model_class.from_pretrained( - model_name, - resume_download=True, - **kwargs, - ) - model.save_pretrained(destination, safe_serialization=True) - return destination - - -# --------------------------------------------- -def hf_download_with_resume( - repo_id: str, - model_dir: str, - model_name: str, - model_dest: Path = None, - access_token: str = None, - subfolder: str = None, -) -> Path: - model_dest = model_dest or Path(os.path.join(model_dir, model_name)) - os.makedirs(model_dir, exist_ok=True) - - url = hf_hub_url(repo_id, model_name, subfolder=subfolder) - - header = {"Authorization": f"Bearer {access_token}"} if access_token else {} - open_mode = "wb" - exist_size = 0 - - if os.path.exists(model_dest): - exist_size = os.path.getsize(model_dest) - header["Range"] = f"bytes={exist_size}-" - open_mode = "ab" - - resp = requests.get(url, headers=header, stream=True) - total = int(resp.headers.get("content-length", 0)) - - if resp.status_code == 416: # "range not satisfiable", which means nothing to return - logger.info(f"{model_name}: complete file found. Skipping.") - return model_dest - elif resp.status_code == 404: - logger.warning("File not found") - return None - elif resp.status_code != 200: - logger.warning(f"{model_name}: {resp.reason}") - elif exist_size > 0: - logger.info(f"{model_name}: partial file found. 
Resuming...") - else: - logger.info(f"{model_name}: Downloading...") - - try: - with ( - open(model_dest, open_mode) as file, - tqdm( - desc=model_name, - initial=exist_size, - total=total + exist_size, - unit="iB", - unit_scale=True, - unit_divisor=1000, - ) as bar, - ): - for data in resp.iter_content(chunk_size=1024): - size = file.write(data) - bar.update(size) - except Exception as e: - logger.error(f"An error occurred while downloading {model_name}: {str(e)}") - return None - return model_dest diff --git a/invokeai/backend/ip_adapter/ip_adapter.py b/invokeai/backend/ip_adapter/ip_adapter.py index 9176bf1f49..e51966c779 100644 --- a/invokeai/backend/ip_adapter/ip_adapter.py +++ b/invokeai/backend/ip_adapter/ip_adapter.py @@ -8,8 +8,8 @@ from PIL import Image from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from invokeai.backend.ip_adapter.ip_attention_weights import IPAttentionWeights -from invokeai.backend.model_management.models.base import calc_model_size_by_data +from ..raw_model import RawModel from .resampler import Resampler @@ -92,7 +92,7 @@ class MLPProjModel(torch.nn.Module): return clip_extra_context_tokens -class IPAdapter: +class IPAdapter(RawModel): """IP-Adapter: https://arxiv.org/pdf/2308.06721.pdf""" def __init__( @@ -124,6 +124,9 @@ class IPAdapter: self.attn_weights.to(device=self.device, dtype=self.dtype) def calc_size(self): + # workaround for circular import + from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data + return calc_model_size_by_data(self._image_proj_model) + calc_model_size_by_data(self.attn_weights) def _init_image_proj_model(self, state_dict): diff --git a/invokeai/backend/model_management/models/lora.py b/invokeai/backend/lora.py similarity index 81% rename from invokeai/backend/model_management/models/lora.py rename to invokeai/backend/lora.py index b110d75d22..0b7128034a 100644 --- a/invokeai/backend/model_management/models/lora.py +++ b/invokeai/backend/lora.py @@ -1,98 +1,17 @@ +# Copyright (c) 2024 The InvokeAI Development team +"""LoRA model support.""" + import bisect -import os -from enum import Enum from pathlib import Path -from typing import Dict, Optional, Union +from typing import Dict, List, Optional, Tuple, Union import torch from safetensors.torch import load_file +from typing_extensions import Self -from .base import ( - BaseModelType, - InvalidModelException, - ModelBase, - ModelConfigBase, - ModelNotFoundException, - ModelType, - SubModelType, - classproperty, -) +from invokeai.backend.model_manager import BaseModelType - -class LoRAModelFormat(str, Enum): - LyCORIS = "lycoris" - Diffusers = "diffusers" - - -class LoRAModel(ModelBase): - # model_size: int - - class Config(ModelConfigBase): - model_format: LoRAModelFormat # TODO: - - def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): - assert model_type == ModelType.Lora - super().__init__(model_path, base_model, model_type) - - self.model_size = os.path.getsize(self.model_path) - - def get_size(self, child_type: Optional[SubModelType] = None): - if child_type is not None: - raise Exception("There is no child models in lora") - return self.model_size - - def get_model( - self, - torch_dtype: Optional[torch.dtype], - child_type: Optional[SubModelType] = None, - ): - if child_type is not None: - raise Exception("There is no child models in lora") - - model = LoRAModelRaw.from_checkpoint( - file_path=self.model_path, - dtype=torch_dtype, - base_model=self.base_model, - ) - - self.model_size = 
model.calc_size() - return model - - @classproperty - def save_to_config(cls) -> bool: - return True - - @classmethod - def detect_format(cls, path: str): - if not os.path.exists(path): - raise ModelNotFoundException() - - if os.path.isdir(path): - for ext in ["safetensors", "bin"]: - if os.path.exists(os.path.join(path, f"pytorch_lora_weights.{ext}")): - return LoRAModelFormat.Diffusers - - if os.path.isfile(path): - if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]): - return LoRAModelFormat.LyCORIS - - raise InvalidModelException(f"Not a valid model: {path}") - - @classmethod - def convert_if_required( - cls, - model_path: str, - output_path: str, - config: ModelConfigBase, - base_model: BaseModelType, - ) -> str: - if cls.detect_format(model_path) == LoRAModelFormat.Diffusers: - for ext in ["safetensors", "bin"]: # return path to the safetensors file inside the folder - path = Path(model_path, f"pytorch_lora_weights.{ext}") - if path.exists(): - return path - else: - return model_path +from .raw_model import RawModel class LoRALayerBase: @@ -108,7 +27,7 @@ class LoRALayerBase: def __init__( self, layer_key: str, - values: dict, + values: Dict[str, torch.Tensor], ): if "alpha" in values: self.alpha = values["alpha"].item() @@ -116,7 +35,7 @@ class LoRALayerBase: self.alpha = None if "bias_indices" in values and "bias_values" in values and "bias_size" in values: - self.bias = torch.sparse_coo_tensor( + self.bias: Optional[torch.Tensor] = torch.sparse_coo_tensor( values["bias_indices"], values["bias_values"], tuple(values["bias_size"]), @@ -128,7 +47,7 @@ class LoRALayerBase: self.rank = None # set in layer implementation self.layer_key = layer_key - def get_weight(self, orig_weight: torch.Tensor): + def get_weight(self, orig_weight: Optional[torch.Tensor]) -> torch.Tensor: raise NotImplementedError() def calc_size(self) -> int: @@ -142,7 +61,7 @@ class LoRALayerBase: self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, - ): + ) -> None: if self.bias is not None: self.bias = self.bias.to(device=device, dtype=dtype) @@ -156,20 +75,20 @@ class LoRALayer(LoRALayerBase): def __init__( self, layer_key: str, - values: dict, + values: Dict[str, torch.Tensor], ): super().__init__(layer_key, values) self.up = values["lora_up.weight"] self.down = values["lora_down.weight"] if "lora_mid.weight" in values: - self.mid = values["lora_mid.weight"] + self.mid: Optional[torch.Tensor] = values["lora_mid.weight"] else: self.mid = None self.rank = self.down.shape[0] - def get_weight(self, orig_weight: torch.Tensor): + def get_weight(self, orig_weight: Optional[torch.Tensor]) -> torch.Tensor: if self.mid is not None: up = self.up.reshape(self.up.shape[0], self.up.shape[1]) down = self.down.reshape(self.down.shape[0], self.down.shape[1]) @@ -190,7 +109,7 @@ class LoRALayer(LoRALayerBase): self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, - ): + ) -> None: super().to(device=device, dtype=dtype) self.up = self.up.to(device=device, dtype=dtype) @@ -208,11 +127,7 @@ class LoHALayer(LoRALayerBase): # t1: Optional[torch.Tensor] = None # t2: Optional[torch.Tensor] = None - def __init__( - self, - layer_key: str, - values: dict, - ): + def __init__(self, layer_key: str, values: Dict[str, torch.Tensor]): super().__init__(layer_key, values) self.w1_a = values["hada_w1_a"] @@ -221,20 +136,20 @@ class LoHALayer(LoRALayerBase): self.w2_b = values["hada_w2_b"] if "hada_t1" in values: - self.t1 = values["hada_t1"] + self.t1: 
Optional[torch.Tensor] = values["hada_t1"] else: self.t1 = None if "hada_t2" in values: - self.t2 = values["hada_t2"] + self.t2: Optional[torch.Tensor] = values["hada_t2"] else: self.t2 = None self.rank = self.w1_b.shape[0] - def get_weight(self, orig_weight: torch.Tensor): + def get_weight(self, orig_weight: Optional[torch.Tensor]) -> torch.Tensor: if self.t1 is None: - weight = (self.w1_a @ self.w1_b) * (self.w2_a @ self.w2_b) + weight: torch.Tensor = (self.w1_a @ self.w1_b) * (self.w2_a @ self.w2_b) else: rebuild1 = torch.einsum("i j k l, j r, i p -> p r k l", self.t1, self.w1_b, self.w1_a) @@ -254,7 +169,7 @@ class LoHALayer(LoRALayerBase): self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, - ): + ) -> None: super().to(device=device, dtype=dtype) self.w1_a = self.w1_a.to(device=device, dtype=dtype) @@ -280,12 +195,12 @@ class LoKRLayer(LoRALayerBase): def __init__( self, layer_key: str, - values: dict, + values: Dict[str, torch.Tensor], ): super().__init__(layer_key, values) if "lokr_w1" in values: - self.w1 = values["lokr_w1"] + self.w1: Optional[torch.Tensor] = values["lokr_w1"] self.w1_a = None self.w1_b = None else: @@ -294,7 +209,7 @@ class LoKRLayer(LoRALayerBase): self.w1_b = values["lokr_w1_b"] if "lokr_w2" in values: - self.w2 = values["lokr_w2"] + self.w2: Optional[torch.Tensor] = values["lokr_w2"] self.w2_a = None self.w2_b = None else: @@ -303,7 +218,7 @@ class LoKRLayer(LoRALayerBase): self.w2_b = values["lokr_w2_b"] if "lokr_t2" in values: - self.t2 = values["lokr_t2"] + self.t2: Optional[torch.Tensor] = values["lokr_t2"] else: self.t2 = None @@ -314,14 +229,18 @@ class LoKRLayer(LoRALayerBase): else: self.rank = None # unscaled - def get_weight(self, orig_weight: torch.Tensor): - w1 = self.w1 + def get_weight(self, orig_weight: Optional[torch.Tensor]) -> torch.Tensor: + w1: Optional[torch.Tensor] = self.w1 if w1 is None: + assert self.w1_a is not None + assert self.w1_b is not None w1 = self.w1_a @ self.w1_b w2 = self.w2 if w2 is None: if self.t2 is None: + assert self.w2_a is not None + assert self.w2_b is not None w2 = self.w2_a @ self.w2_b else: w2 = torch.einsum("i j k l, i p, j r -> p r k l", self.t2, self.w2_a, self.w2_b) @@ -329,6 +248,8 @@ class LoKRLayer(LoRALayerBase): if len(w2.shape) == 4: w1 = w1.unsqueeze(2).unsqueeze(2) w2 = w2.contiguous() + assert w1 is not None + assert w2 is not None weight = torch.kron(w1, w2) return weight @@ -344,18 +265,22 @@ class LoKRLayer(LoRALayerBase): self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, - ): + ) -> None: super().to(device=device, dtype=dtype) if self.w1 is not None: self.w1 = self.w1.to(device=device, dtype=dtype) else: + assert self.w1_a is not None + assert self.w1_b is not None self.w1_a = self.w1_a.to(device=device, dtype=dtype) self.w1_b = self.w1_b.to(device=device, dtype=dtype) if self.w2 is not None: self.w2 = self.w2.to(device=device, dtype=dtype) else: + assert self.w2_a is not None + assert self.w2_b is not None self.w2_a = self.w2_a.to(device=device, dtype=dtype) self.w2_b = self.w2_b.to(device=device, dtype=dtype) @@ -369,7 +294,7 @@ class FullLayer(LoRALayerBase): def __init__( self, layer_key: str, - values: dict, + values: Dict[str, torch.Tensor], ): super().__init__(layer_key, values) @@ -382,7 +307,7 @@ class FullLayer(LoRALayerBase): self.rank = None # unscaled - def get_weight(self, orig_weight: torch.Tensor): + def get_weight(self, orig_weight: Optional[torch.Tensor]) -> torch.Tensor: return self.weight def calc_size(self) -> 
int: @@ -394,7 +319,7 @@ class FullLayer(LoRALayerBase): self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, - ): + ) -> None: super().to(device=device, dtype=dtype) self.weight = self.weight.to(device=device, dtype=dtype) @@ -407,7 +332,7 @@ class IA3Layer(LoRALayerBase): def __init__( self, layer_key: str, - values: dict, + values: Dict[str, torch.Tensor], ): super().__init__(layer_key, values) @@ -416,10 +341,11 @@ class IA3Layer(LoRALayerBase): self.rank = None # unscaled - def get_weight(self, orig_weight: torch.Tensor): + def get_weight(self, orig_weight: Optional[torch.Tensor]) -> torch.Tensor: weight = self.weight if not self.on_input: weight = weight.reshape(-1, 1) + assert orig_weight is not None return orig_weight * weight def calc_size(self) -> int: @@ -439,28 +365,30 @@ class IA3Layer(LoRALayerBase): self.on_input = self.on_input.to(device=device, dtype=dtype) -# TODO: rename all methods used in model logic with Info postfix and remove here Raw postfix -class LoRAModelRaw: # (torch.nn.Module): +AnyLoRALayer = Union[LoRALayer, LoHALayer, LoKRLayer, FullLayer, IA3Layer] + + +class LoRAModelRaw(RawModel): # (torch.nn.Module): _name: str - layers: Dict[str, LoRALayer] + layers: Dict[str, AnyLoRALayer] def __init__( self, name: str, - layers: Dict[str, LoRALayer], + layers: Dict[str, AnyLoRALayer], ): self._name = name self.layers = layers @property - def name(self): + def name(self) -> str: return self._name def to( self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, - ): + ) -> None: # TODO: try revert if exception? for _key, layer in self.layers.items(): layer.to(device=device, dtype=dtype) @@ -472,7 +400,7 @@ class LoRAModelRaw: # (torch.nn.Module): return model_size @classmethod - def _convert_sdxl_keys_to_diffusers_format(cls, state_dict): + def _convert_sdxl_keys_to_diffusers_format(cls, state_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: """Convert the keys of an SDXL LoRA state_dict to diffusers format. The input state_dict can be in either Stability AI format or diffusers format. 
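The reconstruction performed by the `get_weight()` implementations above is ordinary low-rank algebra; a minimal illustration for the plain `LoRALayer` case (shapes arbitrary, scaling by the conventional `alpha / rank` factor):

```python
import torch

# Weight delta contributed by a plain LoRA layer: up @ down, scaled by alpha / rank.
# Shapes are arbitrary; a real layer takes them from the state_dict.
rank, n_in, n_out = 4, 768, 768
down = torch.randn(rank, n_in)   # "lora_down.weight"
up = torch.randn(n_out, rank)    # "lora_up.weight"
alpha = 4.0

delta = (alpha / rank) * (up @ down)
assert delta.shape == (n_out, n_in)
```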
If the state_dict is already in @@ -536,7 +464,7 @@ class LoRAModelRaw: # (torch.nn.Module): device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, base_model: Optional[BaseModelType] = None, - ): + ) -> Self: device = device or torch.device("cpu") dtype = dtype or torch.float32 @@ -544,16 +472,16 @@ class LoRAModelRaw: # (torch.nn.Module): file_path = Path(file_path) model = cls( - name=file_path.stem, # TODO: + name=file_path.stem, layers={}, ) if file_path.suffix == ".safetensors": - state_dict = load_file(file_path.absolute().as_posix(), device="cpu") + sd = load_file(file_path.absolute().as_posix(), device="cpu") else: - state_dict = torch.load(file_path, map_location="cpu") + sd = torch.load(file_path, map_location="cpu") - state_dict = cls._group_state(state_dict) + state_dict = cls._group_state(sd) if base_model == BaseModelType.StableDiffusionXL: state_dict = cls._convert_sdxl_keys_to_diffusers_format(state_dict) @@ -561,7 +489,7 @@ class LoRAModelRaw: # (torch.nn.Module): for layer_key, values in state_dict.items(): # lora and locon if "lora_down.weight" in values: - layer = LoRALayer(layer_key, values) + layer: AnyLoRALayer = LoRALayer(layer_key, values) # loha elif "hada_w1_b" in values: @@ -592,8 +520,8 @@ class LoRAModelRaw: # (torch.nn.Module): return model @staticmethod - def _group_state(state_dict: dict): - state_dict_groupped = {} + def _group_state(state_dict: Dict[str, torch.Tensor]) -> Dict[str, Dict[str, torch.Tensor]]: + state_dict_groupped: Dict[str, Dict[str, torch.Tensor]] = {} for key, value in state_dict.items(): stem, leaf = key.split(".", 1) @@ -606,7 +534,7 @@ class LoRAModelRaw: # (torch.nn.Module): # code from # https://github.com/bmaltais/kohya_ss/blob/2accb1305979ba62f5077a23aabac23b4c37e935/networks/lora_diffusers.py#L15C1-L97C32 -def make_sdxl_unet_conversion_map(): +def make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]: """Create a dict mapping state_dict keys from Stability AI SDXL format to diffusers SDXL format.""" unet_conversion_map_layer = [] diff --git a/invokeai/backend/model_management/README.md b/invokeai/backend/model_management/README.md deleted file mode 100644 index 0d94f39642..0000000000 --- a/invokeai/backend/model_management/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# Model Cache - -## `glibc` Memory Allocator Fragmentation - -Python (and PyTorch) relies on the memory allocator from the C Standard Library (`libc`). On linux, with the GNU C Standard Library implementation (`glibc`), our memory access patterns have been observed to cause severe memory fragmentation. This fragmentation results in large amounts of memory that has been freed but can't be released back to the OS. Loading models from disk and moving them between CPU/CUDA seem to be the operations that contribute most to the fragmentation. This memory fragmentation issue can result in OOM crashes during frequent model switching, even if `max_cache_size` is set to a reasonable value (e.g. a OOM crash with `max_cache_size=16` on a system with 32GB of RAM). - -This problem may also exist on other OSes, and other `libc` implementations. But, at the time of writing, it has only been investigated on linux with `glibc`. - -To better understand how the `glibc` memory allocator works, see these references: -- Basics: https://www.gnu.org/software/libc/manual/html_node/The-GNU-Allocator.html -- Details: https://sourceware.org/glibc/wiki/MallocInternals - -Note the differences between memory allocated as chunks in an arena vs. memory allocated with `mmap`. 
Under `glibc`'s default configuration, most model tensors get allocated as chunks in an arena, making them vulnerable to the problem of fragmentation. - -We can work around this memory fragmentation issue by setting the following env var: - -```bash -# Force blocks >1MB to be allocated with `mmap` so that they are released to the system immediately when they are freed. -MALLOC_MMAP_THRESHOLD_=1048576 -``` - -See the following references for more information about the `malloc` tunable parameters: -- https://www.gnu.org/software/libc/manual/html_node/Malloc-Tunable-Parameters.html -- https://www.gnu.org/software/libc/manual/html_node/Memory-Allocation-Tunables.html -- https://man7.org/linux/man-pages/man3/mallopt.3.html - -The model cache emits debug logs that provide visibility into the state of the `libc` memory allocator. See the `LibcUtil` class for more info on how these `libc` malloc stats are collected. diff --git a/invokeai/backend/model_management/__init__.py b/invokeai/backend/model_management/__init__.py deleted file mode 100644 index 03abf58eb4..0000000000 --- a/invokeai/backend/model_management/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# ruff: noqa: I001, F401 -""" -Initialization file for invokeai.backend.model_management -""" -# This import must be first -from .model_manager import AddModelResult, ModelInfo, ModelManager, SchedulerPredictionType -from .lora import ModelPatcher, ONNXModelPatcher -from .model_cache import ModelCache - -from .models import ( - BaseModelType, - DuplicateModelException, - ModelNotFoundException, - ModelType, - ModelVariantType, - SubModelType, -) - -# This import must be last -from .model_merge import MergeInterpolationMethod, ModelMerger diff --git a/invokeai/backend/model_management/detect_baked_in_vae.py b/invokeai/backend/model_management/detect_baked_in_vae.py deleted file mode 100644 index 9118438548..0000000000 --- a/invokeai/backend/model_management/detect_baked_in_vae.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) 2024 Lincoln Stein and the InvokeAI Development Team -""" -This module exports the function has_baked_in_sdxl_vae(). -It returns True if an SDXL checkpoint model has a VAE other than the original -SDXL 1.0 VAE, which doesn't work properly in fp16 mode. -""" - -import hashlib -from pathlib import Path - -from safetensors.torch import load_file - -SDXL_1_0_VAE_HASH = "bc40b16c3a0fa4625abdfc01c04ffc21bf3cefa6af6c7768ec61eb1f1ac0da51" - - -def has_baked_in_sdxl_vae(checkpoint_path: Path) -> bool: - """Return True if the checkpoint contains a custom (non SDXL-1.0) VAE.""" - hash = _vae_hash(checkpoint_path) - return hash != SDXL_1_0_VAE_HASH - - -def _vae_hash(checkpoint_path: Path) -> str: - checkpoint = load_file(checkpoint_path, device="cpu") - vae_keys = [x for x in checkpoint.keys() if x.startswith("first_stage_model.")] - hash = hashlib.new("sha256") - for key in vae_keys: - value = checkpoint[key] - hash.update(bytes(key, "UTF-8")) - hash.update(bytes(str(value), "UTF-8")) - - return hash.hexdigest() diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py deleted file mode 100644 index 2a7f4b5a95..0000000000 --- a/invokeai/backend/model_management/model_cache.py +++ /dev/null @@ -1,553 +0,0 @@ -""" -Manage a RAM cache of diffusion/transformer models for fast switching. -They are moved between GPU VRAM and CPU RAM as necessary. If the cache -grows larger than a preset maximum, then the least recently used -model will be cleared and (re)loaded from disk when next needed.
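The "least recently used" bookkeeping described here amounts to keeping an ordered list of cache keys, which is essentially what the implementation's cache stack does; a toy sketch of the idea (key names are invented for illustration):

```python
# Toy LRU ordering: the most recently used key lives at the end of the list,
# so eviction scans from the front. Key names are invented for illustration.
cache_stack: list[str] = []

def touch(key: str) -> None:
    if key in cache_stack:
        cache_stack.remove(key)
    cache_stack.append(key)

touch("sd-1/main/stable-diffusion-1.5")
touch("sd-2/main/stable-diffusion-2.1")
touch("sd-1/main/stable-diffusion-1.5")  # reuse moves the key to the back
assert cache_stack[0] == "sd-2/main/stable-diffusion-2.1"  # first eviction candidate
```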
- -The cache returns context manager generators designed to load the -model into the GPU within the context, and unload outside the -context. Use like this: - - cache = ModelCache(max_cache_size=7.5) - with cache.get_model('runwayml/stable-diffusion-1-5') as SD1, - cache.get_model('stabilityai/stable-diffusion-2') as SD2: - do_something_in_GPU(SD1,SD2) - - -""" - -import gc -import hashlib -import math -import os -import sys -import time -from contextlib import suppress -from dataclasses import dataclass, field -from pathlib import Path -from typing import Any, Dict, Optional, Type, Union, types - -import torch - -import invokeai.backend.util.logging as logger -from invokeai.backend.model_management.memory_snapshot import MemorySnapshot, get_pretty_snapshot_diff -from invokeai.backend.model_management.model_load_optimizations import skip_torch_weight_init - -from ..util.devices import choose_torch_device -from .models import BaseModelType, ModelBase, ModelType, SubModelType - -if choose_torch_device() == torch.device("mps"): - from torch import mps - -# Maximum size of the cache, in gigs -# Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously -DEFAULT_MAX_CACHE_SIZE = 6.0 - -# amount of GPU memory to hold in reserve for use by generations (GB) -DEFAULT_MAX_VRAM_CACHE_SIZE = 2.75 - -# actual size of a gig -GIG = 1073741824 -# Size of a MB in bytes. -MB = 2**20 - - -@dataclass -class CacheStats(object): - hits: int = 0 # cache hits - misses: int = 0 # cache misses - high_watermark: int = 0 # amount of cache used - in_cache: int = 0 # number of models in cache - cleared: int = 0 # number of models cleared to make space - cache_size: int = 0 # total size of cache - # {submodel_key => size} - loaded_model_sizes: Dict[str, int] = field(default_factory=dict) - - -class ModelLocker(object): - "Forward declaration" - - pass - - -class ModelCache(object): - "Forward declaration" - - pass - - -class _CacheRecord: - size: int - model: Any - cache: ModelCache - _locks: int - - def __init__(self, cache, model: Any, size: int): - self.size = size - self.model = model - self.cache = cache - self._locks = 0 - - def lock(self): - self._locks += 1 - - def unlock(self): - self._locks -= 1 - assert self._locks >= 0 - - @property - def locked(self): - return self._locks > 0 - - @property - def loaded(self): - if self.model is not None and hasattr(self.model, "device"): - return self.model.device != self.cache.storage_device - else: - return False - - -class ModelCache(object): - def __init__( - self, - max_cache_size: float = DEFAULT_MAX_CACHE_SIZE, - max_vram_cache_size: float = DEFAULT_MAX_VRAM_CACHE_SIZE, - execution_device: torch.device = torch.device("cuda"), - storage_device: torch.device = torch.device("cpu"), - precision: torch.dtype = torch.float16, - sequential_offload: bool = False, - lazy_offloading: bool = True, - sha_chunksize: int = 16777216, - logger: types.ModuleType = logger, - log_memory_usage: bool = False, - ): - """ - :param max_cache_size: Maximum size of the RAM cache [6.0 GB] - :param execution_device: Torch device to load active model into [torch.device('cuda')] - :param storage_device: Torch device to save inactive model in [torch.device('cpu')] - :param precision: Precision for loaded models [torch.float16] - :param lazy_offloading: Keep model in VRAM until another model needs to be loaded - :param sequential_offload: Conserve VRAM by loading and unloading each stage of the pipeline sequentially - :param sha_chunksize: Chunksize to use when calculating 
sha256 model hash - :param log_memory_usage: If True, a memory snapshot will be captured before and after every model cache - operation, and the result will be logged (at debug level). There is a time cost to capturing the memory - snapshots, so it is recommended to disable this feature unless you are actively inspecting the model cache's - behaviour. - """ - self.model_infos: Dict[str, ModelBase] = {} - # allow lazy offloading only when vram cache enabled - self.lazy_offloading = lazy_offloading and max_vram_cache_size > 0 - self.precision: torch.dtype = precision - self.max_cache_size: float = max_cache_size - self.max_vram_cache_size: float = max_vram_cache_size - self.execution_device: torch.device = execution_device - self.storage_device: torch.device = storage_device - self.sha_chunksize = sha_chunksize - self.logger = logger - self._log_memory_usage = log_memory_usage - - # used for stats collection - self.stats = None - - self._cached_models = {} - self._cache_stack = [] - - def _capture_memory_snapshot(self) -> Optional[MemorySnapshot]: - if self._log_memory_usage: - return MemorySnapshot.capture() - return None - - def get_key( - self, - model_path: str, - base_model: BaseModelType, - model_type: ModelType, - submodel_type: Optional[SubModelType] = None, - ): - key = f"{model_path}:{base_model}:{model_type}" - if submodel_type: - key += f":{submodel_type}" - return key - - def _get_model_info( - self, - model_path: str, - model_class: Type[ModelBase], - base_model: BaseModelType, - model_type: ModelType, - ): - model_info_key = self.get_key( - model_path=model_path, - base_model=base_model, - model_type=model_type, - submodel_type=None, - ) - - if model_info_key not in self.model_infos: - self.model_infos[model_info_key] = model_class( - model_path, - base_model, - model_type, - ) - - return self.model_infos[model_info_key] - - # TODO: args - def get_model( - self, - model_path: Union[str, Path], - model_class: Type[ModelBase], - base_model: BaseModelType, - model_type: ModelType, - submodel: Optional[SubModelType] = None, - gpu_load: bool = True, - ) -> Any: - if not isinstance(model_path, Path): - model_path = Path(model_path) - - if not os.path.exists(model_path): - raise Exception(f"Model not found: {model_path}") - - model_info = self._get_model_info( - model_path=model_path, - model_class=model_class, - base_model=base_model, - model_type=model_type, - ) - key = self.get_key( - model_path=model_path, - base_model=base_model, - model_type=model_type, - submodel_type=submodel, - ) - # TODO: lock for no copies on simultaneous calls? - cache_entry = self._cached_models.get(key, None) - if cache_entry is None: - self.logger.info( - f"Loading model {model_path}, type" - f" {base_model.value}:{model_type.value}{':'+submodel.value if submodel else ''}" - ) - if self.stats: - self.stats.misses += 1 - - self_reported_model_size_before_load = model_info.get_size(submodel) - # Remove old models from the cache to make room for the new model. - self._make_cache_room(self_reported_model_size_before_load) - - # Load the model from disk and capture a memory snapshot before/after. 
- start_load_time = time.time() - snapshot_before = self._capture_memory_snapshot() - with skip_torch_weight_init(): - model = model_info.get_model(child_type=submodel, torch_dtype=self.precision) - snapshot_after = self._capture_memory_snapshot() - end_load_time = time.time() - - self_reported_model_size_after_load = model_info.get_size(submodel) - - self.logger.debug( - f"Moved model '{key}' from disk to cpu in {(end_load_time-start_load_time):.2f}s.\n" - f"Self-reported size before/after load: {(self_reported_model_size_before_load/GIG):.3f}GB /" - f" {(self_reported_model_size_after_load/GIG):.3f}GB.\n" - f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}" - ) - - if abs(self_reported_model_size_after_load - self_reported_model_size_before_load) > 10 * MB: - self.logger.debug( - f"Model '{key}' mis-reported its size before load. Self-reported size before/after load:" - f" {(self_reported_model_size_before_load/GIG):.2f}GB /" - f" {(self_reported_model_size_after_load/GIG):.2f}GB." - ) - - cache_entry = _CacheRecord(self, model, self_reported_model_size_after_load) - self._cached_models[key] = cache_entry - else: - if self.stats: - self.stats.hits += 1 - - if self.stats: - self.stats.cache_size = self.max_cache_size * GIG - self.stats.high_watermark = max(self.stats.high_watermark, self._cache_size()) - self.stats.in_cache = len(self._cached_models) - self.stats.loaded_model_sizes[key] = max( - self.stats.loaded_model_sizes.get(key, 0), model_info.get_size(submodel) - ) - - with suppress(Exception): - self._cache_stack.remove(key) - self._cache_stack.append(key) - - return self.ModelLocker(self, key, cache_entry.model, gpu_load, cache_entry.size) - - def _move_model_to_device(self, key: str, target_device: torch.device): - cache_entry = self._cached_models[key] - - source_device = cache_entry.model.device - # Note: We compare device types only so that 'cuda' == 'cuda:0'. This would need to be revised to support - # multi-GPU. - if torch.device(source_device).type == torch.device(target_device).type: - return - - start_model_to_time = time.time() - snapshot_before = self._capture_memory_snapshot() - cache_entry.model.to(target_device) - snapshot_after = self._capture_memory_snapshot() - end_model_to_time = time.time() - self.logger.debug( - f"Moved model '{key}' from {source_device} to" - f" {target_device} in {(end_model_to_time-start_model_to_time):.2f}s.\n" - f"Estimated model size: {(cache_entry.size/GIG):.3f} GB.\n" - f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}" - ) - - if ( - snapshot_before is not None - and snapshot_after is not None - and snapshot_before.vram is not None - and snapshot_after.vram is not None - ): - vram_change = abs(snapshot_before.vram - snapshot_after.vram) - - # If the estimated model size does not match the change in VRAM, log a warning. - if not math.isclose( - vram_change, - cache_entry.size, - rel_tol=0.1, - abs_tol=10 * MB, - ): - self.logger.debug( - f"Moving model '{key}' from {source_device} to" - f" {target_device} caused an unexpected change in VRAM usage. The model's" - " estimated size may be incorrect. 
Estimated model size:" - f" {(cache_entry.size/GIG):.3f} GB.\n" - f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}" - ) - - class ModelLocker(object): - def __init__(self, cache, key, model, gpu_load, size_needed): - """ - :param cache: The model_cache object - :param key: The key of the model to lock in GPU - :param model: The model to lock - :param gpu_load: True if load into gpu - :param size_needed: Size of the model to load - """ - self.gpu_load = gpu_load - self.cache = cache - self.key = key - self.model = model - self.size_needed = size_needed - self.cache_entry = self.cache._cached_models[self.key] - - def __enter__(self) -> Any: - if not hasattr(self.model, "to"): - return self.model - - # NOTE that the model has to have the to() method in order for this - # code to move it into GPU! - if self.gpu_load: - self.cache_entry.lock() - - try: - if self.cache.lazy_offloading: - self.cache._offload_unlocked_models(self.size_needed) - - self.cache._move_model_to_device(self.key, self.cache.execution_device) - - self.cache.logger.debug(f"Locking {self.key} in {self.cache.execution_device}") - self.cache._print_cuda_stats() - - except Exception: - self.cache_entry.unlock() - raise - - # TODO: not fully understand - # in the event that the caller wants the model in RAM, we - # move it into CPU if it is in GPU and not locked - elif self.cache_entry.loaded and not self.cache_entry.locked: - self.cache._move_model_to_device(self.key, self.cache.storage_device) - - return self.model - - def __exit__(self, type, value, traceback): - if not hasattr(self.model, "to"): - return - - self.cache_entry.unlock() - if not self.cache.lazy_offloading: - self.cache._offload_unlocked_models() - self.cache._print_cuda_stats() - - # TODO: should it be called untrack_model? - def uncache_model(self, cache_id: str): - with suppress(ValueError): - self._cache_stack.remove(cache_id) - self._cached_models.pop(cache_id, None) - - def model_hash( - self, - model_path: Union[str, Path], - ) -> str: - """ - Given the HF repo id or path to a model on disk, returns a unique - hash. Works for legacy checkpoint files, HF models on disk, and HF repo IDs - - :param model_path: Path to model file/directory on disk. 
- """ - return self._local_model_hash(model_path) - - def cache_size(self) -> float: - """Return the current size of the cache, in GB.""" - return self._cache_size() / GIG - - def _has_cuda(self) -> bool: - return self.execution_device.type == "cuda" - - def _print_cuda_stats(self): - vram = "%4.2fG" % (torch.cuda.memory_allocated() / GIG) - ram = "%4.2fG" % self.cache_size() - - cached_models = 0 - loaded_models = 0 - locked_models = 0 - for model_info in self._cached_models.values(): - cached_models += 1 - if model_info.loaded: - loaded_models += 1 - if model_info.locked: - locked_models += 1 - - self.logger.debug( - f"Current VRAM/RAM usage: {vram}/{ram}; cached_models/loaded_models/locked_models/ =" - f" {cached_models}/{loaded_models}/{locked_models}" - ) - - def _cache_size(self) -> int: - return sum([m.size for m in self._cached_models.values()]) - - def _make_cache_room(self, model_size): - # calculate how much memory this model will require - # multiplier = 2 if self.precision==torch.float32 else 1 - bytes_needed = model_size - maximum_size = self.max_cache_size * GIG # stored in GB, convert to bytes - current_size = self._cache_size() - - if current_size + bytes_needed > maximum_size: - self.logger.debug( - f"Max cache size exceeded: {(current_size/GIG):.2f}/{self.max_cache_size:.2f} GB, need an additional" - f" {(bytes_needed/GIG):.2f} GB" - ) - - self.logger.debug(f"Before unloading: cached_models={len(self._cached_models)}") - - pos = 0 - models_cleared = 0 - while current_size + bytes_needed > maximum_size and pos < len(self._cache_stack): - model_key = self._cache_stack[pos] - cache_entry = self._cached_models[model_key] - - refs = sys.getrefcount(cache_entry.model) - - # HACK: This is a workaround for a memory-management issue that we haven't tracked down yet. We are directly - # going against the advice in the Python docs by using `gc.get_referrers(...)` in this way: - # https://docs.python.org/3/library/gc.html#gc.get_referrers - - # manualy clear local variable references of just finished function calls - # for some reason python don't want to collect it even by gc.collect() immidiately - if refs > 2: - while True: - cleared = False - for referrer in gc.get_referrers(cache_entry.model): - if type(referrer).__name__ == "frame": - # RuntimeError: cannot clear an executing frame - with suppress(RuntimeError): - referrer.clear() - cleared = True - # break - - # repeat if referrers changes(due to frame clear), else exit loop - if cleared: - gc.collect() - else: - break - - device = cache_entry.model.device if hasattr(cache_entry.model, "device") else None - self.logger.debug( - f"Model: {model_key}, locks: {cache_entry._locks}, device: {device}, loaded: {cache_entry.loaded}," - f" refs: {refs}" - ) - - # Expected refs: - # 1 from cache_entry - # 1 from getrefcount function - # 1 from onnx runtime object - if not cache_entry.locked and refs <= (3 if "onnx" in model_key else 2): - self.logger.debug( - f"Unloading model {model_key} to free {(model_size/GIG):.2f} GB (-{(cache_entry.size/GIG):.2f} GB)" - ) - current_size -= cache_entry.size - models_cleared += 1 - if self.stats: - self.stats.cleared += 1 - del self._cache_stack[pos] - del self._cached_models[model_key] - del cache_entry - - else: - pos += 1 - - if models_cleared > 0: - # There would likely be some 'garbage' to be collected regardless of whether a model was cleared or not, but - # there is a significant time cost to calling `gc.collect()`, so we want to use it sparingly. 
(The time cost - # is high even if no garbage gets collected.) - # - # Calling gc.collect(...) when a model is cleared seems like a good middle-ground: - # - If models had to be cleared, it's a signal that we are close to our memory limit. - # - If models were cleared, there's a good chance that there's a significant amount of garbage to be - # collected. - # - # Keep in mind that gc is only responsible for handling reference cycles. Most objects should be cleaned up - # immediately when their reference count hits 0. - gc.collect() - - torch.cuda.empty_cache() - if choose_torch_device() == torch.device("mps"): - mps.empty_cache() - - self.logger.debug(f"After unloading: cached_models={len(self._cached_models)}") - - def _offload_unlocked_models(self, size_needed: int = 0): - reserved = self.max_vram_cache_size * GIG - vram_in_use = torch.cuda.memory_allocated() - self.logger.debug(f"{(vram_in_use/GIG):.2f}GB VRAM used for models; max allowed={(reserved/GIG):.2f}GB") - for model_key, cache_entry in sorted(self._cached_models.items(), key=lambda x: x[1].size): - if vram_in_use <= reserved: - break - if not cache_entry.locked and cache_entry.loaded: - self._move_model_to_device(model_key, self.storage_device) - - vram_in_use = torch.cuda.memory_allocated() - self.logger.debug(f"{(vram_in_use/GIG):.2f}GB VRAM used for models; max allowed={(reserved/GIG):.2f}GB") - - torch.cuda.empty_cache() - if choose_torch_device() == torch.device("mps"): - mps.empty_cache() - - def _local_model_hash(self, model_path: Union[str, Path]) -> str: - sha = hashlib.sha256() - path = Path(model_path) - - hashpath = path / "checksum.sha256" - if hashpath.exists() and path.stat().st_mtime <= hashpath.stat().st_mtime: - with open(hashpath) as f: - hash = f.read() - return hash - - self.logger.debug(f"computing hash of model {path.name}") - for file in list(path.rglob("*.ckpt")) + list(path.rglob("*.safetensors")) + list(path.rglob("*.pth")): - with open(file, "rb") as f: - while chunk := f.read(self.sha_chunksize): - sha.update(chunk) - hash = sha.hexdigest() - with open(hashpath, "w") as f: - f.write(hash) - return hash diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py deleted file mode 100644 index 362d8d3ff5..0000000000 --- a/invokeai/backend/model_management/model_manager.py +++ /dev/null @@ -1,1121 +0,0 @@ -"""This module manages the InvokeAI `models.yaml` file, mapping -symbolic diffusers model names to the paths and repo_ids used by the -underlying `from_pretrained()` call. - -SYNOPSIS: - - mgr = ModelManager('/home/phi/invokeai/configs/models.yaml') - sd1_5 = mgr.get_model('stable-diffusion-v1-5', - model_type=ModelType.Main, - base_model=BaseModelType.StableDiffusion1, - submodel_type=SubModelType.Unet) - with sd1_5 as unet: - run_some_inference(unet) - -FETCHING MODELS: - -Models are described using four attributes: - - 1) model_name -- the symbolic name for the model - - 2) ModelType -- an enum describing the type of the model. 
Currently - defined types are: - ModelType.Main -- a full model capable of generating images - ModelType.Vae -- a VAE model - ModelType.Lora -- a LoRA or LyCORIS fine-tune - ModelType.TextualInversion -- a textual inversion embedding - ModelType.ControlNet -- a ControlNet model - ModelType.IPAdapter -- an IPAdapter model - - 3) BaseModelType -- an enum indicating the stable diffusion base model, one of: - BaseModelType.StableDiffusion1 - BaseModelType.StableDiffusion2 - - 4) SubModelType (optional) -- an enum that refers to one of the submodels contained - within the main model. Values are: - - SubModelType.UNet - SubModelType.TextEncoder - SubModelType.Tokenizer - SubModelType.Scheduler - SubModelType.SafetyChecker - -To fetch a model, use `manager.get_model()`. This takes the symbolic -name of the model, the ModelType, the BaseModelType and the -SubModelType. The latter is required for ModelType.Main. - -get_model() will return a ModelInfo object that can then be used in -context to retrieve the model and move it into GPU VRAM (on GPU -systems). - -A typical example is: - - sd1_5 = mgr.get_model('stable-diffusion-v1-5', - model_type=ModelType.Main, - base_model=BaseModelType.StableDiffusion1, - submodel_type=SubModelType.UNet) - with sd1_5 as unet: - run_some_inference(unet) - -The ModelInfo object provides a number of useful fields describing the -model, including: - - name -- symbolic name of the model - base_model -- base model (BaseModelType) - type -- model type (ModelType) - location -- path to the model file - precision -- torch precision of the model - hash -- unique sha256 checksum for this model - -SUBMODELS: - -When fetching a main model, you must specify the submodel. Retrieval -of full pipelines is not supported. - - vae_info = mgr.get_model('stable-diffusion-1.5', - model_type = ModelType.Main, - base_model = BaseModelType.StableDiffusion1, - submodel_type = SubModelType.Vae - ) - with vae_info as vae: - do_something(vae) - -This rule does not apply to controlnets, embeddings, loras and standalone -VAEs, which do not have submodels. - -LISTING MODELS - -The model_names() method will return a list of tuples describing each -model it knows about: - - >> mgr.model_names() - [ - ('stable-diffusion-1.5', <BaseModelType.StableDiffusion1>, <ModelType.Main>), - ('stable-diffusion-2.1', <BaseModelType.StableDiffusion2>, <ModelType.Main>), - ('inpaint', <BaseModelType>, <ModelType>), - ('Ink scenery', <BaseModelType>, <ModelType>), - ... - ] - -The tuple is in the correct order to pass to get_model(): - - for m in mgr.model_names(): - info = get_model(*m) - -In contrast, the list_models() method returns a list of dicts, each -providing information about a model defined in models.yaml. For example: - - >>> models = mgr.list_models() - >>> json.dumps(models[0]) - {"path": "/home/lstein/invokeai-main/models/sd-1/controlnet/canny", - "model_format": "diffusers", - "name": "canny", - "base_model": "sd-1", - "type": "controlnet" - } - -You can filter by model type and base model as shown here: - - - controlnets = mgr.list_models(model_type=ModelType.ControlNet, - base_model=BaseModelType.StableDiffusion1) - for c in controlnets: - name = c['name'] - format = c['model_format'] - path = c['path'] - type = c['type'] - # etc - -ADDING AND REMOVING MODELS - -At startup time, the `models` directory will be scanned for -checkpoints, diffusers pipelines, controlnets, LoRAs and TI -embeddings. New entries will be added to the model manager and defunct -ones removed. Anything that is a main model (ModelType.Main) will be -added to models.yaml. For scanning to succeed, files need to be in -their proper places.
For example, a controlnet folder built on the -stable diffusion 2 base will need to be placed in -`models/sd-2/controlnet`. - -Layout of the `models` directory: - - models - ├── sd-1 - │ ├── controlnet - │ ├── lora - │ ├── main - │ └── embedding - ├── sd-2 - │ ├── controlnet - │ ├── lora - │ ├── main - │ └── embedding - └── core - ├── face_reconstruction - │ ├── codeformer - │ └── gfpgan - ├── sd-conversion - │ ├── clip-vit-large-patch14 - tokenizer, text_encoder subdirs - │ ├── stable-diffusion-2 - tokenizer, text_encoder subdirs - │ └── stable-diffusion-safety-checker - └── upscaling - └── esrgan - - - -Loras, textual_inversion and controlnet models are not listed -explicitly in models.yaml, but are added to the in-memory data -structure at initialization time by scanning the models directory. The -in-memory data structure can be resynchronized by calling -`manager.scan_models_directory()`. - -Files and folders placed inside the `autoimport` paths (paths -defined in `invokeai.yaml`) will also be scanned for new models at -initialization time and added to `models.yaml`. Files will not be -moved from this location but preserved in-place. These directories -are: - - configuration default description - ------------- ------- ----------- - autoimport_dir autoimport/main main models - lora_dir autoimport/lora LoRA/LyCORIS models - embedding_dir autoimport/embedding TI embeddings - controlnet_dir autoimport/controlnet ControlNet models - -In actuality, models located in any of these directories are scanned -to determine their type, so it isn't strictly necessary to organize -the different types in this way. This entry in `invokeai.yaml` will -recursively scan all subdirectories within `autoimport`, probe the model -files it finds, and import them if recognized. - - Paths: - autoimport_dir: autoimport - -A model can be manually added using `add_model()`, providing the model's -name, base model, type and a dict of model attributes. See -`invokeai/backend/model_management/models` for the attributes required -by each model type. - -A model can be deleted using `del_model()`, providing the same -identifying information as `get_model()`. - -The `heuristic_import()` method will take a set of strings -corresponding to local paths, remote URLs, and repo_ids, probe each -object to determine what type of model it is (if any), and import new -models into the manager. If passed a directory, it will recursively -scan it for models to import. The return value is a set of the models -successfully added. - -MODELS.YAML - -The general format of a models.yaml section is: - - base-model/type-of-model/name-of-model: - path: /path/to/local/file/or/directory - description: a description - format: diffusers|checkpoint - variant: normal|inpaint|depth - -The base model and the type of model are given in the stanza key; the -type is one of -{main, vae, lora, controlnet, textual} - -The format indicates whether the model is organized as a diffusers -folder with model subdirectories, or is contained in a single -checkpoint or safetensors file. - -The path points to a file or directory on disk. If a relative path, -the root is the InvokeAI ROOTDIR.
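Putting the pieces together, a single stanza might look like this (the name and description are invented for illustration; the key follows the base-model/type/name pattern produced by `create_key()`):

    sd-1/main/stable-diffusion-1.5:
      path: sd-1/main/stable-diffusion-1.5
      description: Stable Diffusion version 1.5
      format: diffusers
      variant: normal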
- -""" -from __future__ import annotations - -import hashlib -import os -import textwrap -import types -from dataclasses import dataclass -from pathlib import Path -from shutil import move, rmtree -from typing import Callable, Dict, List, Literal, Optional, Set, Tuple, Union, cast - -import torch -import yaml -from omegaconf import OmegaConf -from omegaconf.dictconfig import DictConfig -from pydantic import BaseModel, ConfigDict, Field - -import invokeai.backend.util.logging as logger -from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.backend.util import CUDA_DEVICE, Chdir - -from .model_cache import ModelCache, ModelLocker -from .model_search import ModelSearch -from .models import ( - MODEL_CLASSES, - BaseModelType, - DuplicateModelException, - InvalidModelException, - ModelBase, - ModelConfigBase, - ModelError, - ModelNotFoundException, - ModelType, - SchedulerPredictionType, - SubModelType, -) - -# We are only starting to number the config file with release 3. -# The config file version doesn't have to start at release version, but it will help -# reduce confusion. -CONFIG_FILE_VERSION = "3.0.0" - - -@dataclass -class ModelInfo: - context: ModelLocker - name: str - base_model: BaseModelType - type: ModelType - hash: str - location: Union[Path, str] - precision: torch.dtype - _cache: Optional[ModelCache] = None - - def __enter__(self): - return self.context.__enter__() - - def __exit__(self, *args, **kwargs): - self.context.__exit__(*args, **kwargs) - - -class AddModelResult(BaseModel): - name: str = Field(description="The name of the model after installation") - model_type: ModelType = Field(description="The type of model") - base_model: BaseModelType = Field(description="The base model") - config: ModelConfigBase = Field(description="The configuration of the model") - - model_config = ConfigDict(protected_namespaces=()) - - -MAX_CACHE_SIZE = 6.0 # GB - - -class ConfigMeta(BaseModel): - version: str - - -class ModelManager(object): - """ - High-level interface to model management. - """ - - logger: types.ModuleType = logger - - def __init__( - self, - config: Union[Path, DictConfig, str], - device_type: torch.device = CUDA_DEVICE, - precision: torch.dtype = torch.float16, - max_cache_size=MAX_CACHE_SIZE, - sequential_offload=False, - logger: types.ModuleType = logger, - ): - """ - Initialize with the path to the models.yaml config file. - Optional parameters are the torch device type, precision, max_models, - and sequential_offload boolean. Note that the default device - type and precision are set up for a CUDA system running at half precision. - """ - self.config_path = None - if isinstance(config, (str, Path)): - self.config_path = Path(config) - if not self.config_path.exists(): - logger.warning(f"The file {self.config_path} was not found. 
Initializing a new file") - self.initialize_model_config(self.config_path) - config = OmegaConf.load(self.config_path) - - elif not isinstance(config, DictConfig): - raise ValueError("config argument must be an OmegaConf object, a Path or a string") - - self.config_meta = ConfigMeta(**config.pop("__metadata__")) - # TODO: metadata not found - # TODO: version check - - self.app_config = InvokeAIAppConfig.get_config() - self.logger = logger - self.cache = ModelCache( - max_cache_size=max_cache_size, - max_vram_cache_size=self.app_config.vram_cache_size, - lazy_offloading=self.app_config.lazy_offload, - execution_device=device_type, - precision=precision, - sequential_offload=sequential_offload, - logger=logger, - log_memory_usage=self.app_config.log_memory_usage, - ) - - self._read_models(config) - - def _read_models(self, config: Optional[DictConfig] = None): - if not config: - if self.config_path: - config = OmegaConf.load(self.config_path) - else: - return - - self.models = {} - for model_key, model_config in config.items(): - if model_key.startswith("_"): - continue - model_name, base_model, model_type = self.parse_key(model_key) - model_class = self._get_implementation(base_model, model_type) - # alias for config file - model_config["model_format"] = model_config.pop("format") - self.models[model_key] = model_class.create_config(**model_config) - - # check config version number and update on disk/RAM if necessary - self.cache_keys = {} - - # add controlnet, lora and textual_inversion models from disk - self.scan_models_directory() - - def sync_to_config(self): - """ - Call this when `models.yaml` has been changed externally. - This will reinitialize internal data structures - """ - # Reread models directory; note that this will reinitialize the cache, - # causing otherwise unreferenced models to be removed from memory - self._read_models() - - def model_exists(self, model_name: str, base_model: BaseModelType, model_type: ModelType, *, rescan=False) -> bool: - """ - Given a model name, returns True if it is a valid identifier. - - :param model_name: symbolic name of the model in models.yaml - :param model_type: ModelType enum indicating the type of model to return - :param base_model: BaseModelType enum indicating the base model used by this model - :param rescan: if True, scan_models_directory - """ - model_key = self.create_key(model_name, base_model, model_type) - exists = model_key in self.models - - # if model not found try to find it (maybe file just pasted) - if rescan and not exists: - self.scan_models_directory(base_model=base_model, model_type=model_type) - exists = self.model_exists(model_name, base_model, model_type, rescan=False) - - return exists - - @classmethod - def create_key( - cls, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - ) -> str: - # In 3.11, the behavior of (str,enum) when interpolated into a - # string has changed. The next two lines are defensive. 
- base_model = BaseModelType(base_model) - model_type = ModelType(model_type) - return f"{base_model.value}/{model_type.value}/{model_name}" - - @classmethod - def parse_key(cls, model_key: str) -> Tuple[str, BaseModelType, ModelType]: - base_model_str, model_type_str, model_name = model_key.split("/", 2) - try: - model_type = ModelType(model_type_str) - except Exception: - raise Exception(f"Unknown model type: {model_type_str}") - - try: - base_model = BaseModelType(base_model_str) - except Exception: - raise Exception(f"Unknown base model: {base_model_str}") - - return (model_name, base_model, model_type) - - def _get_model_cache_path(self, model_path): - return self.resolve_model_path(Path(".cache") / hashlib.md5(str(model_path).encode()).hexdigest()) - - @classmethod - def initialize_model_config(cls, config_path: Path): - """Create empty config file""" - with open(config_path, "w") as yaml_file: - yaml_file.write(yaml.dump({"__metadata__": {"version": "3.0.0"}})) - - def get_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - submodel_type: Optional[SubModelType] = None, - ) -> ModelInfo: - """Given a model named identified in models.yaml, return - an ModelInfo object describing it. - :param model_name: symbolic name of the model in models.yaml - :param model_type: ModelType enum indicating the type of model to return - :param base_model: BaseModelType enum indicating the base model used by this model - :param submodel_type: an ModelType enum indicating the portion of - the model to retrieve (e.g. ModelType.Vae) - """ - model_key = self.create_key(model_name, base_model, model_type) - - if not self.model_exists(model_name, base_model, model_type, rescan=True): - raise ModelNotFoundException(f"Model not found - {model_key}") - - model_config = self._get_model_config(base_model, model_name, model_type) - - model_path, is_submodel_override = self._get_model_path(model_config, submodel_type) - - if is_submodel_override: - model_type = submodel_type - submodel_type = None - - model_class = self._get_implementation(base_model, model_type) - - if not model_path.exists(): - if model_class.save_to_config: - self.models[model_key].error = ModelError.NotFound - raise Exception(f'Files for model "{model_key}" not found at {model_path}') - - else: - self.models.pop(model_key, None) - raise ModelNotFoundException(f'Files for model "{model_key}" not found at {model_path}') - - # TODO: path - # TODO: is it accurate to use path as id - dst_convert_path = self._get_model_cache_path(model_path) - - model_path = model_class.convert_if_required( - base_model=base_model, - model_path=str(model_path), # TODO: refactor str/Path types logic - output_path=dst_convert_path, - config=model_config, - ) - - model_context = self.cache.get_model( - model_path=model_path, - model_class=model_class, - base_model=base_model, - model_type=model_type, - submodel=submodel_type, - ) - - if model_key not in self.cache_keys: - self.cache_keys[model_key] = set() - self.cache_keys[model_key].add(model_context.key) - - model_hash = "" # TODO: - - return ModelInfo( - context=model_context, - name=model_name, - base_model=base_model, - type=submodel_type or model_type, - hash=model_hash, - location=model_path, # TODO: - precision=self.cache.precision, - _cache=self.cache, - ) - - def _get_model_path( - self, model_config: ModelConfigBase, submodel_type: Optional[SubModelType] = None - ) -> (Path, bool): - """Extract a model's filesystem path from its config. 
- - :return: The fully qualified Path of the module (or submodule). - """ - model_path = model_config.path - is_submodel_override = False - - # Does the config explicitly override the submodel? - if submodel_type is not None and hasattr(model_config, submodel_type): - submodel_path = getattr(model_config, submodel_type) - if submodel_path is not None and len(submodel_path) > 0: - model_path = getattr(model_config, submodel_type) - is_submodel_override = True - - model_path = self.resolve_model_path(model_path) - return model_path, is_submodel_override - - def _get_model_config(self, base_model: BaseModelType, model_name: str, model_type: ModelType) -> ModelConfigBase: - """Get a model's config object.""" - model_key = self.create_key(model_name, base_model, model_type) - try: - model_config = self.models[model_key] - except KeyError: - raise ModelNotFoundException(f"Model not found - {model_key}") - return model_config - - def _get_implementation(self, base_model: BaseModelType, model_type: ModelType) -> type[ModelBase]: - """Get the concrete implementation class for a specific model type.""" - model_class = MODEL_CLASSES[base_model][model_type] - return model_class - - def _instantiate( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - submodel_type: Optional[SubModelType] = None, - ) -> ModelBase: - """Make a new instance of this model, without loading it.""" - model_config = self._get_model_config(base_model, model_name, model_type) - model_path, is_submodel_override = self._get_model_path(model_config, submodel_type) - # FIXME: do non-overriden submodels get the right class? - constructor = self._get_implementation(base_model, model_type) - instance = constructor(model_path, base_model, model_type) - return instance - - def model_info( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - ) -> Union[dict, None]: - """ - Given a model name returns the OmegaConf (dict-like) object describing it. - """ - model_key = self.create_key(model_name, base_model, model_type) - if model_key in self.models: - return self.models[model_key].model_dump(exclude_defaults=True) - else: - return None # TODO: None or empty dict on not found - - def model_names(self) -> List[Tuple[str, BaseModelType, ModelType]]: - """ - Return a list of (str, BaseModelType, ModelType) corresponding to all models - known to the configuration. - """ - return [(self.parse_key(x)) for x in self.models.keys()] - - def list_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - ) -> Union[dict, None]: - """ - Returns a dict describing one installed model, using - the combined format of the list_models() method. - """ - models = self.list_models(base_model, model_type, model_name) - if len(models) >= 1: - return models[0] - else: - return None - - def list_models( - self, - base_model: Optional[BaseModelType] = None, - model_type: Optional[ModelType] = None, - model_name: Optional[str] = None, - ) -> list[dict]: - """ - Return a list of models. 
- """ - - model_keys = ( - [self.create_key(model_name, base_model, model_type)] - if model_name and base_model and model_type - else sorted(self.models, key=str.casefold) - ) - models = [] - for model_key in model_keys: - model_config = self.models.get(model_key) - if not model_config: - self.logger.error(f"Unknown model {model_name}") - raise ModelNotFoundException(f"Unknown model {model_name}") - - cur_model_name, cur_base_model, cur_model_type = self.parse_key(model_key) - if base_model is not None and cur_base_model != base_model: - continue - if model_type is not None and cur_model_type != model_type: - continue - - model_dict = dict( - **model_config.model_dump(exclude_defaults=True), - # OpenAPIModelInfoBase - model_name=cur_model_name, - base_model=cur_base_model, - model_type=cur_model_type, - ) - - # expose paths as absolute to help web UI - if path := model_dict.get("path"): - model_dict["path"] = str(self.resolve_model_path(path)) - models.append(model_dict) - - return models - - def print_models(self) -> None: - """ - Print a table of models and their descriptions. This needs to be redone - """ - # TODO: redo - for model_dict in self.list_models(): - for _model_name, model_info in model_dict.items(): - line = f'{model_info["name"]:25s} {model_info["type"]:10s} {model_info["description"]}' - print(line) - - # Tested - LS - def del_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - ): - """ - Delete the named model. - """ - model_key = self.create_key(model_name, base_model, model_type) - model_cfg = self.models.pop(model_key, None) - - if model_cfg is None: - raise ModelNotFoundException(f"Unknown model {model_key}") - - # note: it not garantie to release memory(model can has other references) - cache_ids = self.cache_keys.pop(model_key, []) - for cache_id in cache_ids: - self.cache.uncache_model(cache_id) - - # if model inside invoke models folder - delete files - model_path = self.resolve_model_path(model_cfg.path) - cache_path = self._get_model_cache_path(model_path) - if cache_path.exists(): - rmtree(str(cache_path)) - - if model_path.is_relative_to(self.app_config.models_path): - if model_path.is_dir(): - rmtree(str(model_path)) - else: - model_path.unlink() - self.commit() - - # LS: tested - def add_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - model_attributes: dict, - clobber: bool = False, - ) -> AddModelResult: - """ - Update the named model with a dictionary of attributes. Will fail with an - assertion error if the name already exists. Pass clobber=True to overwrite. - On a successful update, the config will be changed in memory and the - method will return True. Will fail with an assertion error if provided - attributes are incorrect or the model name is missing. - - The returned dict has the same format as the dict returned by - model_info(). 
- """ - # relativize paths as they go in - this makes it easier to move the models directory around - if path := model_attributes.get("path"): - model_attributes["path"] = str(self.relative_model_path(Path(path))) - - model_class = self._get_implementation(base_model, model_type) - model_config = model_class.create_config(**model_attributes) - model_key = self.create_key(model_name, base_model, model_type) - - if model_key in self.models and not clobber: - raise Exception(f'Attempt to overwrite existing model definition "{model_key}"') - - old_model = self.models.pop(model_key, None) - if old_model is not None: - # TODO: if path changed and old_model.path inside models folder should we delete this too? - - # remove conversion cache as config changed - old_model_path = self.resolve_model_path(old_model.path) - old_model_cache = self._get_model_cache_path(old_model_path) - if old_model_cache.exists(): - if old_model_cache.is_dir(): - rmtree(str(old_model_cache)) - else: - old_model_cache.unlink() - - # remove in-memory cache - # note: it not guaranteed to release memory(model can has other references) - cache_ids = self.cache_keys.pop(model_key, []) - for cache_id in cache_ids: - self.cache.uncache_model(cache_id) - - self.models[model_key] = model_config - self.commit() - - return AddModelResult( - name=model_name, - model_type=model_type, - base_model=base_model, - config=model_config, - ) - - def rename_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: ModelType, - new_name: Optional[str] = None, - new_base: Optional[BaseModelType] = None, - ) -> None: - """ - Rename or rebase a model. - """ - if new_name is None and new_base is None: - self.logger.error("rename_model() called with neither a new_name nor a new_base. {model_name} unchanged.") - return - - model_key = self.create_key(model_name, base_model, model_type) - model_cfg = self.models.get(model_key, None) - if not model_cfg: - raise ModelNotFoundException(f"Unknown model: {model_key}") - - old_path = self.resolve_model_path(model_cfg.path) - new_name = new_name or model_name - new_base = new_base or base_model - new_key = self.create_key(new_name, new_base, model_type) - if new_key in self.models: - raise ValueError(f'Attempt to overwrite existing model definition "{new_key}"') - - # if this is a model file/directory that we manage ourselves, we need to move it - if old_path.is_relative_to(self.app_config.models_path): - # keep the suffix! 
- if old_path.is_file(): - new_name = Path(new_name).with_suffix(old_path.suffix).as_posix() - new_path = self.resolve_model_path( - Path( - BaseModelType(new_base).value, - ModelType(model_type).value, - new_name, - ) - ) - move(old_path, new_path) - model_cfg.path = str(new_path.relative_to(self.app_config.models_path)) - - # clean up caches - old_model_cache = self._get_model_cache_path(old_path) - if old_model_cache.exists(): - if old_model_cache.is_dir(): - rmtree(str(old_model_cache)) - else: - old_model_cache.unlink() - - cache_ids = self.cache_keys.pop(model_key, []) - for cache_id in cache_ids: - self.cache.uncache_model(cache_id) - - self.models.pop(model_key, None) # delete - self.models[new_key] = model_cfg - self.commit() - - def convert_model( - self, - model_name: str, - base_model: BaseModelType, - model_type: Literal[ModelType.Main, ModelType.Vae], - dest_directory: Optional[Path] = None, - ) -> AddModelResult: - """ - Convert a checkpoint file into a diffusers folder, deleting the cached - version and deleting the original checkpoint file if it is in the models - directory. - :param model_name: Name of the model to convert - :param base_model: Base model type - :param model_type: Type of model ['vae' or 'main'] - - This will raise a ValueError unless the model is a checkpoint. - """ - info = self.model_info(model_name, base_model, model_type) - - if info is None: - raise FileNotFoundError(f"model not found: {model_name}") - - if info["model_format"] != "checkpoint": - raise ValueError(f"not a checkpoint format model: {model_name}") - - # We are taking advantage of a side effect of get_model() that converts check points - # into cached diffusers directories stored at `location`. It doesn't matter - # what submodeltype we request here, so we get the smallest. - submodel = {"submodel_type": SubModelType.Scheduler} if model_type == ModelType.Main else {} - model = self.get_model( - model_name, - base_model, - model_type, - **submodel, - ) - checkpoint_path = self.resolve_model_path(info["path"]) - old_diffusers_path = self.resolve_model_path(model.location) - new_diffusers_path = ( - dest_directory or self.app_config.models_path / base_model.value / model_type.value - ) / model_name - if new_diffusers_path.exists(): - raise ValueError(f"A diffusers model already exists at {new_diffusers_path}") - - try: - move(old_diffusers_path, new_diffusers_path) - info["model_format"] = "diffusers" - info["path"] = ( - str(new_diffusers_path) - if dest_directory - else str(new_diffusers_path.relative_to(self.app_config.models_path)) - ) - info.pop("config") - - result = self.add_model(model_name, base_model, model_type, model_attributes=info, clobber=True) - except Exception: - # something went wrong, so don't leave dangling diffusers model in directory or it will cause a duplicate model error! 
- rmtree(new_diffusers_path) - raise - - if checkpoint_path.exists() and checkpoint_path.is_relative_to(self.app_config.models_path): - checkpoint_path.unlink() - - return result - - def resolve_model_path(self, path: Union[Path, str]) -> Path: - """return relative paths based on configured models_path""" - return self.app_config.models_path / path - - def relative_model_path(self, model_path: Path) -> Path: - if model_path.is_relative_to(self.app_config.models_path): - model_path = model_path.relative_to(self.app_config.models_path) - return model_path - - def search_models(self, search_folder): - self.logger.info(f"Finding Models In: {search_folder}") - models_folder_ckpt = Path(search_folder).glob("**/*.ckpt") - models_folder_safetensors = Path(search_folder).glob("**/*.safetensors") - - ckpt_files = [x for x in models_folder_ckpt if x.is_file()] - safetensor_files = [x for x in models_folder_safetensors if x.is_file()] - - files = ckpt_files + safetensor_files - - found_models = [] - for file in files: - location = str(file.resolve()).replace("\\", "/") - if "model.safetensors" not in location and "diffusion_pytorch_model.safetensors" not in location: - found_models.append({"name": file.stem, "location": location}) - - return search_folder, found_models - - def commit(self, conf_file: Optional[Path] = None) -> None: - """ - Write current configuration out to the indicated file. - """ - data_to_save = {} - data_to_save["__metadata__"] = self.config_meta.model_dump() - - for model_key, model_config in self.models.items(): - model_name, base_model, model_type = self.parse_key(model_key) - model_class = self._get_implementation(base_model, model_type) - if model_class.save_to_config: - # TODO: or exclude_unset better fits here? - data_to_save[model_key] = cast(BaseModel, model_config).model_dump( - exclude_defaults=True, exclude={"error"}, mode="json" - ) - # alias for config file - data_to_save[model_key]["format"] = data_to_save[model_key].pop("model_format") - - yaml_str = OmegaConf.to_yaml(data_to_save) - config_file_path = conf_file or self.config_path - assert config_file_path is not None, "no config file path to write to" - config_file_path = self.app_config.root_path / config_file_path - tmpfile = os.path.join(os.path.dirname(config_file_path), "new_config.tmp") - try: - with open(tmpfile, "w", encoding="utf-8") as outfile: - outfile.write(self.preamble()) - outfile.write(yaml_str) - os.replace(tmpfile, config_file_path) - except OSError as err: - self.logger.warning(f"Could not modify the config file at {config_file_path}") - self.logger.warning(err) - - def preamble(self) -> str: - """ - Returns the preamble for the config file. - """ - return textwrap.dedent( - """ - # This file describes the alternative machine learning models - # available to InvokeAI script. - # - # To add a new model, follow the examples below. Each - # model requires a model config file, a weights file, - # and the width and height of the images it - # was trained on. 
- """ - ) - - def scan_models_directory( - self, - base_model: Optional[BaseModelType] = None, - model_type: Optional[ModelType] = None, - ): - loaded_files = set() - new_models_found = False - - self.logger.info(f"Scanning {self.app_config.models_path} for new models") - with Chdir(self.app_config.models_path): - for model_key, model_config in list(self.models.items()): - model_name, cur_base_model, cur_model_type = self.parse_key(model_key) - - # Patch for relative path bug in older models.yaml - paths should not - # be starting with a hard-coded 'models'. This will also fix up - # models.yaml when committed. - if model_config.path.startswith("models"): - model_config.path = str(Path(*Path(model_config.path).parts[1:])) - - model_path = self.resolve_model_path(model_config.path).absolute() - if not model_path.exists(): - model_class = self._get_implementation(cur_base_model, cur_model_type) - if model_class.save_to_config: - model_config.error = ModelError.NotFound - self.models.pop(model_key, None) - else: - self.models.pop(model_key, None) - else: - loaded_files.add(model_path) - - for cur_base_model in BaseModelType: - if base_model is not None and cur_base_model != base_model: - continue - - for cur_model_type in ModelType: - if model_type is not None and cur_model_type != model_type: - continue - model_class = self._get_implementation(cur_base_model, cur_model_type) - models_dir = self.resolve_model_path(Path(cur_base_model.value, cur_model_type.value)) - - if not models_dir.exists(): - continue # TODO: or create all folders? - - for model_path in models_dir.iterdir(): - if model_path not in loaded_files: # TODO: check - if model_path.name.startswith("."): - continue - model_name = model_path.name if model_path.is_dir() else model_path.stem - model_key = self.create_key(model_name, cur_base_model, cur_model_type) - - try: - if model_key in self.models: - raise DuplicateModelException(f"Model with key {model_key} added twice") - - model_path = self.relative_model_path(model_path) - model_config: ModelConfigBase = model_class.probe_config( - str(model_path), model_base=cur_base_model - ) - self.models[model_key] = model_config - new_models_found = True - except DuplicateModelException as e: - self.logger.warning(e) - except InvalidModelException as e: - self.logger.warning(f"Not a valid model: {model_path}. {e}") - except NotImplementedError as e: - self.logger.warning(e) - except Exception as e: - self.logger.warning(f"Error loading model {model_path}. {e}") - - imported_models = self.scan_autoimport_directory() - if (new_models_found or imported_models) and self.config_path: - self.commit() - - def scan_autoimport_directory(self) -> Dict[str, AddModelResult]: - """ - Scan the autoimport directory (if defined) and import new models, delete defunct models. 
- """ - # avoid circular import - from invokeai.backend.install.model_install_backend import ModelInstall - from invokeai.frontend.install.model_install import ask_user_for_prediction_type - - class ScanAndImport(ModelSearch): - def __init__(self, directories, logger, ignore: Set[Path], installer: ModelInstall): - super().__init__(directories, logger) - self.installer = installer - self.ignore = ignore - - def on_search_started(self): - self.new_models_found = {} - - def on_model_found(self, model: Path): - if model not in self.ignore: - self.new_models_found.update(self.installer.heuristic_import(model)) - - def on_search_completed(self): - self.logger.info( - f"Scanned {self._items_scanned} files and directories, imported {len(self.new_models_found)} models" - ) - - def models_found(self): - return self.new_models_found - - config = self.app_config - - # LS: hacky - # Patch in the SD VAE from core so that it is available for use by the UI - try: - self.heuristic_import({str(self.resolve_model_path("core/convert/sd-vae-ft-mse"))}) - except Exception: - pass - - installer = ModelInstall( - config=self.app_config, - model_manager=self, - prediction_type_helper=ask_user_for_prediction_type, - ) - known_paths = {self.resolve_model_path(x["path"]) for x in self.list_models()} - directories = { - config.root_path / x - for x in [ - config.autoimport_dir, - config.lora_dir, - config.embedding_dir, - config.controlnet_dir, - ] - if x - } - scanner = ScanAndImport(directories, self.logger, ignore=known_paths, installer=installer) - scanner.search() - - return scanner.models_found() - - def heuristic_import( - self, - items_to_import: Set[str], - prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None, - ) -> Dict[str, AddModelResult]: - """Import a list of paths, repo_ids or URLs. Returns the set of - successfully imported items. - :param items_to_import: Set of strings corresponding to models to be imported. - :param prediction_type_helper: A callback that receives the Path of a Stable Diffusion 2 checkpoint model and returns a SchedulerPredictionType. - - The prediction type helper is necessary to distinguish between - models based on Stable Diffusion 2 Base (requiring - SchedulerPredictionType.Epsilson) and Stable Diffusion 768 - (requiring SchedulerPredictionType.VPrediction). It is - generally impossible to do this programmatically, so the - prediction_type_helper usually asks the user to choose. - - The result is a set of successfully installed models. Each element - of the set is a dict corresponding to the newly-created OmegaConf stanza for - that model. 
- - May return the following exceptions: - - ModelNotFoundException - one or more of the items to import is not a valid path, repo_id or URL - - ValueError - a corresponding model already exists - """ - # avoid circular import here - from invokeai.backend.install.model_install_backend import ModelInstall - - successfully_installed = {} - - installer = ModelInstall( - config=self.app_config, prediction_type_helper=prediction_type_helper, model_manager=self - ) - for thing in items_to_import: - installed = installer.heuristic_import(thing) - successfully_installed.update(installed) - self.commit() - return successfully_installed diff --git a/invokeai/backend/model_management/model_merge.py b/invokeai/backend/model_management/model_merge.py deleted file mode 100644 index a9f0a23618..0000000000 --- a/invokeai/backend/model_management/model_merge.py +++ /dev/null @@ -1,140 +0,0 @@ -""" -invokeai.backend.model_management.model_merge exports: -merge_diffusion_models() -- combine multiple models by location and return a pipeline object -merge_diffusion_models_and_commit() -- combine multiple models by ModelManager ID and write to models.yaml - -Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team -""" - -import warnings -from enum import Enum -from pathlib import Path -from typing import List, Optional, Union - -from diffusers import DiffusionPipeline -from diffusers import logging as dlogging - -import invokeai.backend.util.logging as logger - -from ...backend.model_management import AddModelResult, BaseModelType, ModelManager, ModelType, ModelVariantType - - -class MergeInterpolationMethod(str, Enum): - WeightedSum = "weighted_sum" - Sigmoid = "sigmoid" - InvSigmoid = "inv_sigmoid" - AddDifference = "add_difference" - - -class ModelMerger(object): - def __init__(self, manager: ModelManager): - self.manager = manager - - def merge_diffusion_models( - self, - model_paths: List[Path], - alpha: float = 0.5, - interp: Optional[MergeInterpolationMethod] = None, - force: bool = False, - **kwargs, - ) -> DiffusionPipeline: - """ - :param model_paths: up to three models, designated by their local paths or HuggingFace repo_ids - :param alpha: The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha - would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2 - :param interp: The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_difference" and None. - Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_difference" is supported. - :param force: Whether to ignore mismatch in model_config.json for the current models. Defaults to False. 
- - **kwargs - the default DiffusionPipeline.get_config_dict kwargs: - cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map - """ - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - verbosity = dlogging.get_verbosity() - dlogging.set_verbosity_error() - - pipe = DiffusionPipeline.from_pretrained( - model_paths[0], - custom_pipeline="checkpoint_merger", - ) - merged_pipe = pipe.merge( - pretrained_model_name_or_path_list=model_paths, - alpha=alpha, - interp=interp.value if interp else None, # diffusers API treats None as "weighted sum" - force=force, - **kwargs, - ) - dlogging.set_verbosity(verbosity) - return merged_pipe - - def merge_diffusion_models_and_save( - self, - model_names: List[str], - base_model: Union[BaseModelType, str], - merged_model_name: str, - alpha: float = 0.5, - interp: Optional[MergeInterpolationMethod] = None, - force: bool = False, - merge_dest_directory: Optional[Path] = None, - **kwargs, - ) -> AddModelResult: - """ - :param models: up to three models, designated by their InvokeAI models.yaml model name - :param base_model: base model (must be the same for all merged models!) - :param merged_model_name: name for new model - :param alpha: The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha - would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2 - :param interp: The interpolation method to use for the merging. Supports "weighted_average", "sigmoid", "inv_sigmoid", "add_difference" and None. - Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_difference" is supported. Add_difference is A+(B-C). - :param force: Whether to ignore mismatch in model_config.json for the current models. Defaults to False. - :param merge_dest_directory: Save the merged model to the designated directory (with 'merged_model_name' appended) - **kwargs - the default DiffusionPipeline.get_config_dict kwargs: - cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map - """ - model_paths = [] - config = self.manager.app_config - base_model = BaseModelType(base_model) - vae = None - - for mod in model_names: - info = self.manager.list_model(mod, base_model=base_model, model_type=ModelType.Main) - assert info, f"model {mod}, base_model {base_model}, is unknown" - assert ( - info["model_format"] == "diffusers" - ), f"{mod} is not a diffusers model. 
It must be optimized before merging" - assert info["variant"] == "normal", f"{mod} is a {info['variant']} model, which cannot currently be merged" - assert ( - len(model_names) <= 2 or interp == MergeInterpolationMethod.AddDifference - ), "When merging three models, only the 'add_difference' merge method is supported" - # pick up the first model's vae - if mod == model_names[0]: - vae = info.get("vae") - model_paths.extend([(config.root_path / info["path"]).as_posix()]) - - merge_method = None if interp == "weighted_sum" else MergeInterpolationMethod(interp) - logger.debug(f"interp = {interp}, merge_method={merge_method}") - merged_pipe = self.merge_diffusion_models(model_paths, alpha, merge_method, force, **kwargs) - dump_path = ( - Path(merge_dest_directory) - if merge_dest_directory - else config.models_path / base_model.value / ModelType.Main.value - ) - dump_path.mkdir(parents=True, exist_ok=True) - dump_path = (dump_path / merged_model_name).as_posix() - - merged_pipe.save_pretrained(dump_path, safe_serialization=True) - attributes = { - "path": dump_path, - "description": f"Merge of models {', '.join(model_names)}", - "model_format": "diffusers", - "variant": ModelVariantType.Normal.value, - "vae": vae, - } - return self.manager.add_model( - merged_model_name, - base_model=base_model, - model_type=ModelType.Main, - model_attributes=attributes, - clobber=True, - ) diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py deleted file mode 100644 index 74b1b72d31..0000000000 --- a/invokeai/backend/model_management/model_probe.py +++ /dev/null @@ -1,664 +0,0 @@ -import json -import re -from dataclasses import dataclass -from pathlib import Path -from typing import Callable, Dict, Literal, Optional, Union - -import safetensors.torch -import torch -from diffusers import ConfigMixin, ModelMixin -from picklescan.scanner import scan_file_path - -from invokeai.backend.model_management.models.ip_adapter import IPAdapterModelFormat - -from .models import ( - BaseModelType, - InvalidModelException, - ModelType, - ModelVariantType, - SchedulerPredictionType, - SilenceWarnings, -) -from .models.base import read_checkpoint_meta -from .util import lora_token_vector_length - - -@dataclass -class ModelProbeInfo(object): - model_type: ModelType - base_type: BaseModelType - variant_type: ModelVariantType - prediction_type: SchedulerPredictionType - upcast_attention: bool - format: Literal["diffusers", "checkpoint", "lycoris", "olive", "onnx"] - image_size: int - name: Optional[str] = None - description: Optional[str] = None - - -class ProbeBase(object): - """forward declaration""" - - pass - - -class ModelProbe(object): - PROBES = { - "diffusers": {}, - "checkpoint": {}, - "onnx": {}, - } - - CLASS2TYPE = { - "StableDiffusionPipeline": ModelType.Main, - "StableDiffusionInpaintPipeline": ModelType.Main, - "StableDiffusionXLPipeline": ModelType.Main, - "StableDiffusionXLImg2ImgPipeline": ModelType.Main, - "StableDiffusionXLInpaintPipeline": ModelType.Main, - "LatentConsistencyModelPipeline": ModelType.Main, - "AutoencoderKL": ModelType.Vae, - "AutoencoderTiny": ModelType.Vae, - "ControlNetModel": ModelType.ControlNet, - "CLIPVisionModelWithProjection": ModelType.CLIPVision, - "T2IAdapter": ModelType.T2IAdapter, - } - - @classmethod - def register_probe( - cls, format: Literal["diffusers", "checkpoint", "onnx"], model_type: ModelType, probe_class: ProbeBase - ): - cls.PROBES[format][model_type] = probe_class - - @classmethod - def heuristic_probe( - 
cls, - model: Union[Dict, ModelMixin, Path], - prediction_type_helper: Callable[[Path], SchedulerPredictionType] = None, - ) -> ModelProbeInfo: - if isinstance(model, Path): - return cls.probe(model_path=model, prediction_type_helper=prediction_type_helper) - elif isinstance(model, (dict, ModelMixin, ConfigMixin)): - return cls.probe(model_path=None, model=model, prediction_type_helper=prediction_type_helper) - else: - raise InvalidModelException("model parameter {model} is neither a Path, nor a model") - - @classmethod - def probe( - cls, - model_path: Path, - model: Optional[Union[Dict, ModelMixin]] = None, - prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None, - ) -> ModelProbeInfo: - """ - Probe the model at model_path and return sufficient information about it - to place it somewhere in the models directory hierarchy. If the model is - already loaded into memory, you may provide it as model in order to avoid - opening it a second time. The prediction_type_helper callable is a function that receives - the path to the model and returns the SchedulerPredictionType. - """ - if model_path: - format_type = "diffusers" if model_path.is_dir() else "checkpoint" - else: - format_type = "diffusers" if isinstance(model, (ConfigMixin, ModelMixin)) else "checkpoint" - model_info = None - try: - model_type = ( - cls.get_model_type_from_folder(model_path, model) - if format_type == "diffusers" - else cls.get_model_type_from_checkpoint(model_path, model) - ) - format_type = "onnx" if model_type == ModelType.ONNX else format_type - probe_class = cls.PROBES[format_type].get(model_type) - if not probe_class: - return None - probe = probe_class(model_path, model, prediction_type_helper) - base_type = probe.get_base_type() - variant_type = probe.get_variant_type() - prediction_type = probe.get_scheduler_prediction_type() - name = cls.get_model_name(model_path) - description = f"{base_type.value} {model_type.value} model {name}" - format = probe.get_format() - model_info = ModelProbeInfo( - model_type=model_type, - base_type=base_type, - variant_type=variant_type, - prediction_type=prediction_type, - name=name, - description=description, - upcast_attention=( - base_type == BaseModelType.StableDiffusion2 - and prediction_type == SchedulerPredictionType.VPrediction - ), - format=format, - image_size=( - 1024 - if (base_type in {BaseModelType.StableDiffusionXL, BaseModelType.StableDiffusionXLRefiner}) - else ( - 768 - if ( - base_type == BaseModelType.StableDiffusion2 - and prediction_type == SchedulerPredictionType.VPrediction - ) - else 512 - ) - ), - ) - except Exception: - raise - - return model_info - - @classmethod - def get_model_name(cls, model_path: Path) -> str: - if model_path.suffix in {".safetensors", ".bin", ".pt", ".ckpt"}: - return model_path.stem - else: - return model_path.name - - @classmethod - def get_model_type_from_checkpoint(cls, model_path: Path, checkpoint: dict) -> ModelType: - if model_path.suffix not in (".bin", ".pt", ".ckpt", ".safetensors", ".pth"): - return None - - if model_path.name == "learned_embeds.bin": - return ModelType.TextualInversion - - ckpt = checkpoint if checkpoint else read_checkpoint_meta(model_path, scan=True) - ckpt = ckpt.get("state_dict", ckpt) - - for key in ckpt.keys(): - if any(key.startswith(v) for v in {"cond_stage_model.", "first_stage_model.", "model.diffusion_model."}): - return ModelType.Main - elif any(key.startswith(v) for v in {"encoder.conv_in", "decoder.conv_in"}): - return ModelType.Vae - elif 
any(key.startswith(v) for v in {"lora_te_", "lora_unet_"}): - return ModelType.Lora - elif any(key.endswith(v) for v in {"to_k_lora.up.weight", "to_q_lora.down.weight"}): - return ModelType.Lora - elif any(key.startswith(v) for v in {"control_model", "input_blocks"}): - return ModelType.ControlNet - elif key in {"emb_params", "string_to_param"}: - return ModelType.TextualInversion - - else: - # diffusers-ti - if len(ckpt) < 10 and all(isinstance(v, torch.Tensor) for v in ckpt.values()): - return ModelType.TextualInversion - - raise InvalidModelException(f"Unable to determine model type for {model_path}") - - @classmethod - def get_model_type_from_folder(cls, folder_path: Path, model: ModelMixin) -> ModelType: - """ - Get the model type of a hugging-face style folder. - """ - class_name = None - error_hint = None - if model: - class_name = model.__class__.__name__ - else: - for suffix in ["bin", "safetensors"]: - if (folder_path / f"learned_embeds.{suffix}").exists(): - return ModelType.TextualInversion - if (folder_path / f"pytorch_lora_weights.{suffix}").exists(): - return ModelType.Lora - if (folder_path / "unet/model.onnx").exists(): - return ModelType.ONNX - if (folder_path / "image_encoder.txt").exists(): - return ModelType.IPAdapter - - i = folder_path / "model_index.json" - c = folder_path / "config.json" - config_path = i if i.exists() else c if c.exists() else None - - if config_path: - with open(config_path, "r") as file: - conf = json.load(file) - if "_class_name" in conf: - class_name = conf["_class_name"] - elif "architectures" in conf: - class_name = conf["architectures"][0] - else: - class_name = None - else: - error_hint = f"No model_index.json or config.json found in {folder_path}." - - if class_name and (type := cls.CLASS2TYPE.get(class_name)): - return type - else: - error_hint = f"class {class_name} is not one of the supported classes [{', '.join(cls.CLASS2TYPE.keys())}]" - - # give up - raise InvalidModelException( - f"Unable to determine model type for {folder_path}" + (f"; {error_hint}" if error_hint else "") - ) - - @classmethod - def _scan_and_load_checkpoint(cls, model_path: Path) -> dict: - with SilenceWarnings(): - if model_path.suffix.endswith((".ckpt", ".pt", ".bin")): - cls._scan_model(model_path, model_path) - return torch.load(model_path, map_location="cpu") - else: - return safetensors.torch.load_file(model_path) - - @classmethod - def _scan_model(cls, model_name, checkpoint): - """ - Apply picklescanner to the indicated checkpoint and issue a warning - and option to exit if an infected file is identified. - """ - # scan model - scan_result = scan_file_path(checkpoint) - if scan_result.infected_files != 0: - raise Exception("The model {model_name} is potentially infected by malware. 
Aborting import.") - - -# ##################################################3 -# Checkpoint probing -# ##################################################3 -class ProbeBase(object): - def get_base_type(self) -> BaseModelType: - pass - - def get_variant_type(self) -> ModelVariantType: - pass - - def get_scheduler_prediction_type(self) -> SchedulerPredictionType: - pass - - def get_format(self) -> str: - pass - - -class CheckpointProbeBase(ProbeBase): - def __init__( - self, checkpoint_path: Path, checkpoint: dict, helper: Callable[[Path], SchedulerPredictionType] = None - ) -> BaseModelType: - self.checkpoint = checkpoint or ModelProbe._scan_and_load_checkpoint(checkpoint_path) - self.checkpoint_path = checkpoint_path - self.helper = helper - - def get_base_type(self) -> BaseModelType: - pass - - def get_format(self) -> str: - return "checkpoint" - - def get_variant_type(self) -> ModelVariantType: - model_type = ModelProbe.get_model_type_from_checkpoint(self.checkpoint_path, self.checkpoint) - if model_type != ModelType.Main: - return ModelVariantType.Normal - state_dict = self.checkpoint.get("state_dict") or self.checkpoint - in_channels = state_dict["model.diffusion_model.input_blocks.0.0.weight"].shape[1] - if in_channels == 9: - return ModelVariantType.Inpaint - elif in_channels == 5: - return ModelVariantType.Depth - elif in_channels == 4: - return ModelVariantType.Normal - else: - raise InvalidModelException( - f"Cannot determine variant type (in_channels={in_channels}) at {self.checkpoint_path}" - ) - - -class PipelineCheckpointProbe(CheckpointProbeBase): - def get_base_type(self) -> BaseModelType: - checkpoint = self.checkpoint - state_dict = self.checkpoint.get("state_dict") or checkpoint - key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" - if key_name in state_dict and state_dict[key_name].shape[-1] == 768: - return BaseModelType.StableDiffusion1 - if key_name in state_dict and state_dict[key_name].shape[-1] == 1024: - return BaseModelType.StableDiffusion2 - key_name = "model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_k.weight" - if key_name in state_dict and state_dict[key_name].shape[-1] == 2048: - return BaseModelType.StableDiffusionXL - elif key_name in state_dict and state_dict[key_name].shape[-1] == 1280: - return BaseModelType.StableDiffusionXLRefiner - else: - raise InvalidModelException("Cannot determine base type") - - def get_scheduler_prediction_type(self) -> Optional[SchedulerPredictionType]: - """Return model prediction type.""" - # if there is a .yaml associated with this checkpoint, then we do not need - # to probe for the prediction type as it will be ignored. 
- if self.checkpoint_path and self.checkpoint_path.with_suffix(".yaml").exists(): - return None - - type = self.get_base_type() - if type == BaseModelType.StableDiffusion2: - checkpoint = self.checkpoint - state_dict = self.checkpoint.get("state_dict") or checkpoint - key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" - if key_name in state_dict and state_dict[key_name].shape[-1] == 1024: - if "global_step" in checkpoint: - if checkpoint["global_step"] == 220000: - return SchedulerPredictionType.Epsilon - elif checkpoint["global_step"] == 110000: - return SchedulerPredictionType.VPrediction - if self.helper and self.checkpoint_path: - if helper_guess := self.helper(self.checkpoint_path): - return helper_guess - return SchedulerPredictionType.VPrediction # a guess for sd2 ckpts - - elif type == BaseModelType.StableDiffusion1: - if self.helper and self.checkpoint_path: - if helper_guess := self.helper(self.checkpoint_path): - return helper_guess - return SchedulerPredictionType.Epsilon # a reasonable guess for sd1 ckpts - else: - return None - - -class VaeCheckpointProbe(CheckpointProbeBase): - def get_base_type(self) -> BaseModelType: - # I can't find any standalone 2.X VAEs to test with! - return BaseModelType.StableDiffusion1 - - -class LoRACheckpointProbe(CheckpointProbeBase): - def get_format(self) -> str: - return "lycoris" - - def get_base_type(self) -> BaseModelType: - checkpoint = self.checkpoint - token_vector_length = lora_token_vector_length(checkpoint) - - if token_vector_length == 768: - return BaseModelType.StableDiffusion1 - elif token_vector_length == 1024: - return BaseModelType.StableDiffusion2 - elif token_vector_length == 1280: - return BaseModelType.StableDiffusionXL # recognizes format at https://civitai.com/models/224641 - elif token_vector_length == 2048: - return BaseModelType.StableDiffusionXL - else: - raise InvalidModelException(f"Unknown LoRA type: {self.checkpoint_path}") - - -class TextualInversionCheckpointProbe(CheckpointProbeBase): - def get_format(self) -> str: - return None - - def get_base_type(self) -> BaseModelType: - checkpoint = self.checkpoint - if "string_to_token" in checkpoint: - token_dim = list(checkpoint["string_to_param"].values())[0].shape[-1] - elif "emb_params" in checkpoint: - token_dim = checkpoint["emb_params"].shape[-1] - elif "clip_g" in checkpoint: - token_dim = checkpoint["clip_g"].shape[-1] - else: - token_dim = list(checkpoint.values())[0].shape[-1] - if token_dim == 768: - return BaseModelType.StableDiffusion1 - elif token_dim == 1024: - return BaseModelType.StableDiffusion2 - elif token_dim == 1280: - return BaseModelType.StableDiffusionXL - else: - return None - - -class ControlNetCheckpointProbe(CheckpointProbeBase): - def get_base_type(self) -> BaseModelType: - checkpoint = self.checkpoint - for key_name in ( - "control_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight", - "input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight", - ): - if key_name not in checkpoint: - continue - if checkpoint[key_name].shape[-1] == 768: - return BaseModelType.StableDiffusion1 - elif checkpoint[key_name].shape[-1] == 1024: - return BaseModelType.StableDiffusion2 - elif self.checkpoint_path and self.helper: - return self.helper(self.checkpoint_path) - raise InvalidModelException("Unable to determine base type for {self.checkpoint_path}") - - -class IPAdapterCheckpointProbe(CheckpointProbeBase): - def get_base_type(self) -> BaseModelType: - raise NotImplementedError() - - -class 
CLIPVisionCheckpointProbe(CheckpointProbeBase): - def get_base_type(self) -> BaseModelType: - raise NotImplementedError() - - -class T2IAdapterCheckpointProbe(CheckpointProbeBase): - def get_base_type(self) -> BaseModelType: - raise NotImplementedError() - - -######################################################## -# classes for probing folders -####################################################### -class FolderProbeBase(ProbeBase): - def __init__(self, folder_path: Path, model: ModelMixin = None, helper: Callable = None): # not used - self.model = model - self.folder_path = folder_path - - def get_variant_type(self) -> ModelVariantType: - return ModelVariantType.Normal - - def get_format(self) -> str: - return "diffusers" - - -class PipelineFolderProbe(FolderProbeBase): - def get_base_type(self) -> BaseModelType: - if self.model: - unet_conf = self.model.unet.config - else: - with open(self.folder_path / "unet" / "config.json", "r") as file: - unet_conf = json.load(file) - if unet_conf["cross_attention_dim"] == 768: - return BaseModelType.StableDiffusion1 - elif unet_conf["cross_attention_dim"] == 1024: - return BaseModelType.StableDiffusion2 - elif unet_conf["cross_attention_dim"] == 1280: - return BaseModelType.StableDiffusionXLRefiner - elif unet_conf["cross_attention_dim"] == 2048: - return BaseModelType.StableDiffusionXL - else: - raise InvalidModelException(f"Unknown base model for {self.folder_path}") - - def get_scheduler_prediction_type(self) -> SchedulerPredictionType: - if self.model: - scheduler_conf = self.model.scheduler.config - else: - with open(self.folder_path / "scheduler" / "scheduler_config.json", "r") as file: - scheduler_conf = json.load(file) - if scheduler_conf["prediction_type"] == "v_prediction": - return SchedulerPredictionType.VPrediction - elif scheduler_conf["prediction_type"] == "epsilon": - return SchedulerPredictionType.Epsilon - else: - return None - - def get_variant_type(self) -> ModelVariantType: - # This only works for pipelines! Any kind of - # exception results in our returning the - # "normal" variant type - try: - if self.model: - conf = self.model.unet.config - else: - config_file = self.folder_path / "unet" / "config.json" - with open(config_file, "r") as file: - conf = json.load(file) - - in_channels = conf["in_channels"] - if in_channels == 9: - return ModelVariantType.Inpaint - elif in_channels == 5: - return ModelVariantType.Depth - elif in_channels == 4: - return ModelVariantType.Normal - except Exception: - pass - return ModelVariantType.Normal - - -class VaeFolderProbe(FolderProbeBase): - def get_base_type(self) -> BaseModelType: - if self._config_looks_like_sdxl(): - return BaseModelType.StableDiffusionXL - elif self._name_looks_like_sdxl(): - # but SD and SDXL VAE are the same shape (3-channel RGB to 4-channel float scaled down - # by a factor of 8), we can't necessarily tell them apart by config hyperparameters. - return BaseModelType.StableDiffusionXL - else: - return BaseModelType.StableDiffusion1 - - def _config_looks_like_sdxl(self) -> bool: - # config values that distinguish Stability's SD 1.x VAE from their SDXL VAE. 
- config_file = self.folder_path / "config.json" - if not config_file.exists(): - raise InvalidModelException(f"Cannot determine base type for {self.folder_path}") - with open(config_file, "r") as file: - config = json.load(file) - return config.get("scaling_factor", 0) == 0.13025 and config.get("sample_size") in [512, 1024] - - def _name_looks_like_sdxl(self) -> bool: - return bool(re.search(r"xl\b", self._guess_name(), re.IGNORECASE)) - - def _guess_name(self) -> str: - name = self.folder_path.name - if name == "vae": - name = self.folder_path.parent.name - return name - - -class TextualInversionFolderProbe(FolderProbeBase): - def get_format(self) -> str: - return None - - def get_base_type(self) -> BaseModelType: - path = self.folder_path / "learned_embeds.bin" - if not path.exists(): - return None - checkpoint = ModelProbe._scan_and_load_checkpoint(path) - return TextualInversionCheckpointProbe(None, checkpoint=checkpoint).get_base_type() - - -class ONNXFolderProbe(FolderProbeBase): - def get_format(self) -> str: - return "onnx" - - def get_base_type(self) -> BaseModelType: - return BaseModelType.StableDiffusion1 - - def get_variant_type(self) -> ModelVariantType: - return ModelVariantType.Normal - - -class ControlNetFolderProbe(FolderProbeBase): - def get_base_type(self) -> BaseModelType: - config_file = self.folder_path / "config.json" - if not config_file.exists(): - raise InvalidModelException(f"Cannot determine base type for {self.folder_path}") - with open(config_file, "r") as file: - config = json.load(file) - # no obvious way to distinguish between sd2-base and sd2-768 - dimension = config["cross_attention_dim"] - base_model = ( - BaseModelType.StableDiffusion1 - if dimension == 768 - else ( - BaseModelType.StableDiffusion2 - if dimension == 1024 - else BaseModelType.StableDiffusionXL - if dimension == 2048 - else None - ) - ) - if not base_model: - raise InvalidModelException(f"Unable to determine model base for {self.folder_path}") - return base_model - - -class LoRAFolderProbe(FolderProbeBase): - def get_base_type(self) -> BaseModelType: - model_file = None - for suffix in ["safetensors", "bin"]: - base_file = self.folder_path / f"pytorch_lora_weights.{suffix}" - if base_file.exists(): - model_file = base_file - break - if not model_file: - raise InvalidModelException("Unknown LoRA format encountered") - return LoRACheckpointProbe(model_file, None).get_base_type() - - -class IPAdapterFolderProbe(FolderProbeBase): - def get_format(self) -> str: - return IPAdapterModelFormat.InvokeAI.value - - def get_base_type(self) -> BaseModelType: - model_file = self.folder_path / "ip_adapter.bin" - if not model_file.exists(): - raise InvalidModelException("Unknown IP-Adapter model format.") - - state_dict = torch.load(model_file, map_location="cpu") - cross_attention_dim = state_dict["ip_adapter"]["1.to_k_ip.weight"].shape[-1] - if cross_attention_dim == 768: - return BaseModelType.StableDiffusion1 - elif cross_attention_dim == 1024: - return BaseModelType.StableDiffusion2 - elif cross_attention_dim == 2048: - return BaseModelType.StableDiffusionXL - else: - raise InvalidModelException(f"IP-Adapter had unexpected cross-attention dimension: {cross_attention_dim}.") - - -class CLIPVisionFolderProbe(FolderProbeBase): - def get_base_type(self) -> BaseModelType: - return BaseModelType.Any - - -class T2IAdapterFolderProbe(FolderProbeBase): - def get_base_type(self) -> BaseModelType: - config_file = self.folder_path / "config.json" - if not config_file.exists(): - raise 
InvalidModelException(f"Cannot determine base type for {self.folder_path}") - with open(config_file, "r") as file: - config = json.load(file) - - adapter_type = config.get("adapter_type", None) - if adapter_type == "full_adapter_xl": - return BaseModelType.StableDiffusionXL - elif adapter_type == "full_adapter" or "light_adapter": - # I haven't seen any T2I adapter models for SD2, so assume that this is an SD1 adapter. - return BaseModelType.StableDiffusion1 - else: - raise InvalidModelException( - f"Unable to determine base model for '{self.folder_path}' (adapter_type = {adapter_type})." - ) - - -############## register probe classes ###### -ModelProbe.register_probe("diffusers", ModelType.Main, PipelineFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.Vae, VaeFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.Lora, LoRAFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.TextualInversion, TextualInversionFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.ControlNet, ControlNetFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.IPAdapter, IPAdapterFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.CLIPVision, CLIPVisionFolderProbe) -ModelProbe.register_probe("diffusers", ModelType.T2IAdapter, T2IAdapterFolderProbe) - -ModelProbe.register_probe("checkpoint", ModelType.Main, PipelineCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.Vae, VaeCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.Lora, LoRACheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.TextualInversion, TextualInversionCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.ControlNet, ControlNetCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.IPAdapter, IPAdapterCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.CLIPVision, CLIPVisionCheckpointProbe) -ModelProbe.register_probe("checkpoint", ModelType.T2IAdapter, T2IAdapterCheckpointProbe) - -ModelProbe.register_probe("onnx", ModelType.ONNX, ONNXFolderProbe) diff --git a/invokeai/backend/model_management/model_search.py b/invokeai/backend/model_management/model_search.py deleted file mode 100644 index e125c3ced7..0000000000 --- a/invokeai/backend/model_management/model_search.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2023, Lincoln D. Stein and the InvokeAI Team -""" -Abstract base class for recursive directory search for models. -""" - -import os -from abc import ABC, abstractmethod -from pathlib import Path -from typing import List, Set, types - -import invokeai.backend.util.logging as logger - - -class ModelSearch(ABC): - def __init__(self, directories: List[Path], logger: types.ModuleType = logger): - """ - Initialize a recursive model directory search. - :param directories: List of directory Paths to recurse through - :param logger: Logger to use - """ - self.directories = directories - self.logger = logger - self._items_scanned = 0 - self._models_found = 0 - self._scanned_dirs = set() - self._scanned_paths = set() - self._pruned_paths = set() - - @abstractmethod - def on_search_started(self): - """ - Called before the scan starts. - """ - pass - - @abstractmethod - def on_model_found(self, model: Path): - """ - Process a found model. Raise an exception if something goes wrong. - :param model: Model to process - could be a directory or checkpoint. - """ - pass - - @abstractmethod - def on_search_completed(self): - """ - Perform some activity when the scan is completed. 
May use instance - variables, items_scanned and models_found - """ - pass - - def search(self): - self.on_search_started() - for dir in self.directories: - self.walk_directory(dir) - self.on_search_completed() - - def walk_directory(self, path: Path): - for root, dirs, files in os.walk(path, followlinks=True): - if str(Path(root).name).startswith("."): - self._pruned_paths.add(root) - if any(Path(root).is_relative_to(x) for x in self._pruned_paths): - continue - - self._items_scanned += len(dirs) + len(files) - for d in dirs: - path = Path(root) / d - if path in self._scanned_paths or path.parent in self._scanned_dirs: - self._scanned_dirs.add(path) - continue - if any( - (path / x).exists() - for x in { - "config.json", - "model_index.json", - "learned_embeds.bin", - "pytorch_lora_weights.bin", - "image_encoder.txt", - } - ): - try: - self.on_model_found(path) - self._models_found += 1 - self._scanned_dirs.add(path) - except Exception as e: - self.logger.warning(f"Failed to process '{path}': {e}") - - for f in files: - path = Path(root) / f - if path.parent in self._scanned_dirs: - continue - if path.suffix in {".ckpt", ".bin", ".pth", ".safetensors", ".pt"}: - try: - self.on_model_found(path) - self._models_found += 1 - except Exception as e: - self.logger.warning(f"Failed to process '{path}': {e}") - - -class FindModels(ModelSearch): - def on_search_started(self): - self.models_found: Set[Path] = set() - - def on_model_found(self, model: Path): - self.models_found.add(model) - - def on_search_completed(self): - pass - - def list_models(self) -> List[Path]: - self.search() - return list(self.models_found) diff --git a/invokeai/backend/model_management/models/__init__.py b/invokeai/backend/model_management/models/__init__.py deleted file mode 100644 index 5f9b13b96f..0000000000 --- a/invokeai/backend/model_management/models/__init__.py +++ /dev/null @@ -1,167 +0,0 @@ -import inspect -from enum import Enum -from typing import Literal, get_origin - -from pydantic import BaseModel, ConfigDict, create_model - -from .base import ( # noqa: F401 - BaseModelType, - DuplicateModelException, - InvalidModelException, - ModelBase, - ModelConfigBase, - ModelError, - ModelNotFoundException, - ModelType, - ModelVariantType, - SchedulerPredictionType, - SilenceWarnings, - SubModelType, -) -from .clip_vision import CLIPVisionModel -from .controlnet import ControlNetModel # TODO: -from .ip_adapter import IPAdapterModel -from .lora import LoRAModel -from .sdxl import StableDiffusionXLModel -from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model -from .stable_diffusion_onnx import ONNXStableDiffusion1Model, ONNXStableDiffusion2Model -from .t2i_adapter import T2IAdapterModel -from .textual_inversion import TextualInversionModel -from .vae import VaeModel - -MODEL_CLASSES = { - BaseModelType.StableDiffusion1: { - ModelType.ONNX: ONNXStableDiffusion1Model, - ModelType.Main: StableDiffusion1Model, - ModelType.Vae: VaeModel, - ModelType.Lora: LoRAModel, - ModelType.ControlNet: ControlNetModel, - ModelType.TextualInversion: TextualInversionModel, - ModelType.IPAdapter: IPAdapterModel, - ModelType.CLIPVision: CLIPVisionModel, - ModelType.T2IAdapter: T2IAdapterModel, - }, - BaseModelType.StableDiffusion2: { - ModelType.ONNX: ONNXStableDiffusion2Model, - ModelType.Main: StableDiffusion2Model, - ModelType.Vae: VaeModel, - ModelType.Lora: LoRAModel, - ModelType.ControlNet: ControlNetModel, - ModelType.TextualInversion: TextualInversionModel, - ModelType.IPAdapter: IPAdapterModel, - 
ModelType.CLIPVision: CLIPVisionModel, - ModelType.T2IAdapter: T2IAdapterModel, - }, - BaseModelType.StableDiffusionXL: { - ModelType.Main: StableDiffusionXLModel, - ModelType.Vae: VaeModel, - # will not work until support written - ModelType.Lora: LoRAModel, - ModelType.ControlNet: ControlNetModel, - ModelType.TextualInversion: TextualInversionModel, - ModelType.ONNX: ONNXStableDiffusion2Model, - ModelType.IPAdapter: IPAdapterModel, - ModelType.CLIPVision: CLIPVisionModel, - ModelType.T2IAdapter: T2IAdapterModel, - }, - BaseModelType.StableDiffusionXLRefiner: { - ModelType.Main: StableDiffusionXLModel, - ModelType.Vae: VaeModel, - # will not work until support written - ModelType.Lora: LoRAModel, - ModelType.ControlNet: ControlNetModel, - ModelType.TextualInversion: TextualInversionModel, - ModelType.ONNX: ONNXStableDiffusion2Model, - ModelType.IPAdapter: IPAdapterModel, - ModelType.CLIPVision: CLIPVisionModel, - ModelType.T2IAdapter: T2IAdapterModel, - }, - BaseModelType.Any: { - ModelType.CLIPVision: CLIPVisionModel, - # The following model types are not expected to be used with BaseModelType.Any. - ModelType.ONNX: ONNXStableDiffusion2Model, - ModelType.Main: StableDiffusion2Model, - ModelType.Vae: VaeModel, - ModelType.Lora: LoRAModel, - ModelType.ControlNet: ControlNetModel, - ModelType.TextualInversion: TextualInversionModel, - ModelType.IPAdapter: IPAdapterModel, - ModelType.T2IAdapter: T2IAdapterModel, - }, - # BaseModelType.Kandinsky2_1: { - # ModelType.Main: Kandinsky2_1Model, - # ModelType.MoVQ: MoVQModel, - # ModelType.Lora: LoRAModel, - # ModelType.ControlNet: ControlNetModel, - # ModelType.TextualInversion: TextualInversionModel, - # }, -} - -MODEL_CONFIGS = [] -OPENAPI_MODEL_CONFIGS = [] - - -class OpenAPIModelInfoBase(BaseModel): - model_name: str - base_model: BaseModelType - model_type: ModelType - - model_config = ConfigDict(protected_namespaces=()) - - -for _base_model, models in MODEL_CLASSES.items(): - for model_type, model_class in models.items(): - model_configs = set(model_class._get_configs().values()) - model_configs.discard(None) - MODEL_CONFIGS.extend(model_configs) - - # LS: sort to get the checkpoint configs first, which makes - # for a better template in the Swagger docs - for cfg in sorted(model_configs, key=lambda x: str(x)): - model_name, cfg_name = cfg.__qualname__.split(".")[-2:] - openapi_cfg_name = model_name + cfg_name - if openapi_cfg_name in vars(): - continue - - api_wrapper = create_model( - openapi_cfg_name, - __base__=(cfg, OpenAPIModelInfoBase), - model_type=(Literal[model_type], model_type), # type: ignore - ) - vars()[openapi_cfg_name] = api_wrapper - OPENAPI_MODEL_CONFIGS.append(api_wrapper) - - -def get_model_config_enums(): - enums = [] - - for model_config in MODEL_CONFIGS: - if hasattr(inspect, "get_annotations"): - fields = inspect.get_annotations(model_config) - else: - fields = model_config.__annotations__ - try: - field = fields["model_format"] - except Exception: - raise Exception("format field not found") - - # model_format: None - # model_format: SomeModelFormat - # model_format: Literal[SomeModelFormat.Diffusers] - # model_format: Literal[SomeModelFormat.Diffusers, SomeModelFormat.Checkpoint] - - if isinstance(field, type) and issubclass(field, str) and issubclass(field, Enum): - enums.append(field) - - elif get_origin(field) is Literal and all( - isinstance(arg, str) and isinstance(arg, Enum) for arg in field.__args__ - ): - enums.append(type(field.__args__[0])) - - elif field is None: - pass - - else: - raise 
Exception(f"Unsupported format definition in {model_configs.__qualname__}") - - return enums diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py deleted file mode 100644 index 7807cb9a54..0000000000 --- a/invokeai/backend/model_management/models/base.py +++ /dev/null @@ -1,681 +0,0 @@ -import inspect -import json -import os -import sys -import typing -import warnings -from abc import ABCMeta, abstractmethod -from contextlib import suppress -from enum import Enum -from pathlib import Path -from typing import Any, Callable, Dict, Generic, List, Literal, Optional, Type, TypeVar, Union - -import numpy as np -import onnx -import safetensors.torch -import torch -from diffusers import ConfigMixin, DiffusionPipeline -from diffusers import logging as diffusers_logging -from onnx import numpy_helper -from onnxruntime import InferenceSession, SessionOptions, get_available_providers -from picklescan.scanner import scan_file_path -from pydantic import BaseModel, ConfigDict, Field -from transformers import logging as transformers_logging - - -class DuplicateModelException(Exception): - pass - - -class InvalidModelException(Exception): - pass - - -class ModelNotFoundException(Exception): - pass - - -class BaseModelType(str, Enum): - Any = "any" # For models that are not associated with any particular base model. - StableDiffusion1 = "sd-1" - StableDiffusion2 = "sd-2" - StableDiffusionXL = "sdxl" - StableDiffusionXLRefiner = "sdxl-refiner" - # Kandinsky2_1 = "kandinsky-2.1" - - -class ModelType(str, Enum): - ONNX = "onnx" - Main = "main" - Vae = "vae" - Lora = "lora" - ControlNet = "controlnet" # used by model_probe - TextualInversion = "embedding" - IPAdapter = "ip_adapter" - CLIPVision = "clip_vision" - T2IAdapter = "t2i_adapter" - - -class SubModelType(str, Enum): - UNet = "unet" - TextEncoder = "text_encoder" - TextEncoder2 = "text_encoder_2" - Tokenizer = "tokenizer" - Tokenizer2 = "tokenizer_2" - Vae = "vae" - VaeDecoder = "vae_decoder" - VaeEncoder = "vae_encoder" - Scheduler = "scheduler" - SafetyChecker = "safety_checker" - # MoVQ = "movq" - - -class ModelVariantType(str, Enum): - Normal = "normal" - Inpaint = "inpaint" - Depth = "depth" - - -class SchedulerPredictionType(str, Enum): - Epsilon = "epsilon" - VPrediction = "v_prediction" - Sample = "sample" - - -class ModelError(str, Enum): - NotFound = "not_found" - - -def model_config_json_schema_extra(schema: dict[str, Any]) -> None: - if "required" not in schema: - schema["required"] = [] - schema["required"].append("model_type") - - -class ModelConfigBase(BaseModel): - path: str # or Path - description: Optional[str] = Field(None) - model_format: Optional[str] = Field(None) - error: Optional[ModelError] = Field(None) - - model_config = ConfigDict( - use_enum_values=True, protected_namespaces=(), json_schema_extra=model_config_json_schema_extra - ) - - -class EmptyConfigLoader(ConfigMixin): - @classmethod - def load_config(cls, *args, **kwargs): - cls.config_name = kwargs.pop("config_name") - return super().load_config(*args, **kwargs) - - -T_co = TypeVar("T_co", covariant=True) - - -class classproperty(Generic[T_co]): - def __init__(self, fget: Callable[[Any], T_co]) -> None: - self.fget = fget - - def __get__(self, instance: Optional[Any], owner: Type[Any]) -> T_co: - return self.fget(owner) - - def __set__(self, instance: Optional[Any], value: Any) -> None: - raise AttributeError("cannot set attribute") - - -class ModelBase(metaclass=ABCMeta): - # model_path: str - # base_model: 
BaseModelType - # model_type: ModelType - - def __init__( - self, - model_path: str, - base_model: BaseModelType, - model_type: ModelType, - ): - self.model_path = model_path - self.base_model = base_model - self.model_type = model_type - - def _hf_definition_to_type(self, subtypes: List[str]) -> Type: - if len(subtypes) < 2: - raise Exception("Invalid subfolder definition!") - if all(t is None for t in subtypes): - return None - elif any(t is None for t in subtypes): - raise Exception(f"Unsupported definition: {subtypes}") - - if subtypes[0] in ["diffusers", "transformers"]: - res_type = sys.modules[subtypes[0]] - subtypes = subtypes[1:] - - else: - res_type = sys.modules["diffusers"] - res_type = res_type.pipelines - - for subtype in subtypes: - res_type = getattr(res_type, subtype) - return res_type - - @classmethod - def _get_configs(cls): - with suppress(Exception): - return cls.__configs - - configs = {} - for name in dir(cls): - if name.startswith("__"): - continue - - value = getattr(cls, name) - if not isinstance(value, type) or not issubclass(value, ModelConfigBase): - continue - - if hasattr(inspect, "get_annotations"): - fields = inspect.get_annotations(value) - else: - fields = value.__annotations__ - try: - field = fields["model_format"] - except Exception: - raise Exception(f"Invalid config definition - format field not found({cls.__qualname__})") - - if isinstance(field, type) and issubclass(field, str) and issubclass(field, Enum): - for model_format in field: - configs[model_format.value] = value - - elif typing.get_origin(field) is Literal and all( - isinstance(arg, str) and isinstance(arg, Enum) for arg in field.__args__ - ): - for model_format in field.__args__: - configs[model_format.value] = value - - elif field is None: - configs[None] = value - - else: - raise Exception(f"Unsupported format definition in {cls.__qualname__}") - - cls.__configs = configs - return cls.__configs - - @classmethod - def create_config(cls, **kwargs) -> ModelConfigBase: - if "model_format" not in kwargs: - raise Exception("Field 'model_format' not found in model config") - - configs = cls._get_configs() - return configs[kwargs["model_format"]](**kwargs) - - @classmethod - def probe_config(cls, path: str, **kwargs) -> ModelConfigBase: - return cls.create_config( - path=path, - model_format=cls.detect_format(path), - ) - - @classmethod - @abstractmethod - def detect_format(cls, path: str) -> str: - raise NotImplementedError() - - @classproperty - @abstractmethod - def save_to_config(cls) -> bool: - raise NotImplementedError() - - @abstractmethod - def get_size(self, child_type: Optional[SubModelType] = None) -> int: - raise NotImplementedError() - - @abstractmethod - def get_model( - self, - torch_dtype: Optional[torch.dtype], - child_type: Optional[SubModelType] = None, - ) -> Any: - raise NotImplementedError() - - -class DiffusersModel(ModelBase): - # child_types: Dict[str, Type] - # child_sizes: Dict[str, int] - - def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): - super().__init__(model_path, base_model, model_type) - - self.child_types: Dict[str, Type] = {} - self.child_sizes: Dict[str, int] = {} - - try: - config_data = DiffusionPipeline.load_config(self.model_path) - # config_data = json.loads(os.path.join(self.model_path, "model_index.json")) - except Exception: - raise Exception("Invalid diffusers model! 
(model_index.json not found or invalid)") - - config_data.pop("_ignore_files", None) - - # retrieve all folder_names that contain relevant files - child_components = [k for k, v in config_data.items() if isinstance(v, list)] - - for child_name in child_components: - child_type = self._hf_definition_to_type(config_data[child_name]) - self.child_types[child_name] = child_type - self.child_sizes[child_name] = calc_model_size_by_fs(self.model_path, subfolder=child_name) - - def get_size(self, child_type: Optional[SubModelType] = None): - if child_type is None: - return sum(self.child_sizes.values()) - else: - return self.child_sizes[child_type] - - def get_model( - self, - torch_dtype: Optional[torch.dtype], - child_type: Optional[SubModelType] = None, - ): - # return pipeline in different function to pass more arguments - if child_type is None: - raise Exception("Child model type can't be null on diffusers model") - if child_type not in self.child_types: - return None # TODO: or raise - - if torch_dtype == torch.float16: - variants = ["fp16", None] - else: - variants = [None, "fp16"] - - # TODO: better error handling(differentiate not found from others) - for variant in variants: - try: - # TODO: set cache_dir to /dev/null to be sure that cache not used? - model = self.child_types[child_type].from_pretrained( - self.model_path, - subfolder=child_type.value, - torch_dtype=torch_dtype, - variant=variant, - local_files_only=True, - ) - break - except Exception as e: - if not str(e).startswith("Error no file"): - print("====ERR LOAD====") - print(f"{variant}: {e}") - pass - else: - raise Exception(f"Failed to load {self.base_model}:{self.model_type}:{child_type} model") - - # calc more accurate size - self.child_sizes[child_type] = calc_model_size_by_data(model) - return model - - # def convert_if_required(model_path: str, cache_path: str, config: Optional[dict]) -> str: - - -def calc_model_size_by_fs(model_path: str, subfolder: Optional[str] = None, variant: Optional[str] = None): - if subfolder is not None: - model_path = os.path.join(model_path, subfolder) - - # this can happen when, for example, the safety checker - # is not downloaded. - if not os.path.exists(model_path): - return 0 - - all_files = os.listdir(model_path) - all_files = [f for f in all_files if os.path.isfile(os.path.join(model_path, f))] - - fp16_files = {f for f in all_files if ".fp16." in f or ".fp16-" in f} - bit8_files = {f for f in all_files if ".8bit." 
in f or ".8bit-" in f} - other_files = set(all_files) - fp16_files - bit8_files - - if variant is None: - files = other_files - elif variant == "fp16": - files = fp16_files - elif variant == "8bit": - files = bit8_files - else: - raise NotImplementedError(f"Unknown variant: {variant}") - - # try read from index if exists - index_postfix = ".index.json" - if variant is not None: - index_postfix = f".index.{variant}.json" - - for file in files: - if not file.endswith(index_postfix): - continue - try: - with open(os.path.join(model_path, file), "r") as f: - index_data = json.loads(f.read()) - return int(index_data["metadata"]["total_size"]) - except Exception: - pass - - # calculate files size if there is no index file - formats = [ - (".safetensors",), # safetensors - (".bin",), # torch - (".onnx", ".pb"), # onnx - (".msgpack",), # flax - (".ckpt",), # tf - (".h5",), # tf2 - ] - - for file_format in formats: - model_files = [f for f in files if f.endswith(file_format)] - if len(model_files) == 0: - continue - - model_size = 0 - for model_file in model_files: - file_stats = os.stat(os.path.join(model_path, model_file)) - model_size += file_stats.st_size - return model_size - - # raise NotImplementedError(f"Unknown model structure! Files: {all_files}") - return 0 # scheduler/feature_extractor/tokenizer - models without loading to gpu - - -def calc_model_size_by_data(model) -> int: - if isinstance(model, DiffusionPipeline): - return _calc_pipeline_by_data(model) - elif isinstance(model, torch.nn.Module): - return _calc_model_by_data(model) - elif isinstance(model, IAIOnnxRuntimeModel): - return _calc_onnx_model_by_data(model) - else: - return 0 - - -def _calc_pipeline_by_data(pipeline) -> int: - res = 0 - for submodel_key in pipeline.components.keys(): - submodel = getattr(pipeline, submodel_key) - if submodel is not None and isinstance(submodel, torch.nn.Module): - res += _calc_model_by_data(submodel) - return res - - -def _calc_model_by_data(model) -> int: - mem_params = sum([param.nelement() * param.element_size() for param in model.parameters()]) - mem_bufs = sum([buf.nelement() * buf.element_size() for buf in model.buffers()]) - mem = mem_params + mem_bufs # in bytes - return mem - - -def _calc_onnx_model_by_data(model) -> int: - tensor_size = model.tensors.size() * 2 # The session doubles this - mem = tensor_size # in bytes - return mem - - -def _fast_safetensors_reader(path: str): - checkpoint = {} - device = torch.device("meta") - with open(path, "rb") as f: - definition_len = int.from_bytes(f.read(8), "little") - definition_json = f.read(definition_len) - definition = json.loads(definition_json) - - if "__metadata__" in definition and definition["__metadata__"].get("format", "pt") not in { - "pt", - "torch", - "pytorch", - }: - raise Exception("Supported only pytorch safetensors files") - definition.pop("__metadata__", None) - - for key, info in definition.items(): - dtype = { - "I8": torch.int8, - "I16": torch.int16, - "I32": torch.int32, - "I64": torch.int64, - "F16": torch.float16, - "F32": torch.float32, - "F64": torch.float64, - }[info["dtype"]] - - checkpoint[key] = torch.empty(info["shape"], dtype=dtype, device=device) - - return checkpoint - - -def read_checkpoint_meta(path: Union[str, Path], scan: bool = False): - if str(path).endswith(".safetensors"): - try: - checkpoint = _fast_safetensors_reader(path) - except Exception: - # TODO: create issue for support "meta"? 
- checkpoint = safetensors.torch.load_file(path, device="cpu") - else: - if scan: - scan_result = scan_file_path(path) - if scan_result.infected_files != 0: - raise Exception(f'The model file "{path}" is potentially infected by malware. Aborting import.') - checkpoint = torch.load(path, map_location=torch.device("meta")) - return checkpoint - - -class SilenceWarnings(object): - def __init__(self): - self.transformers_verbosity = transformers_logging.get_verbosity() - self.diffusers_verbosity = diffusers_logging.get_verbosity() - - def __enter__(self): - transformers_logging.set_verbosity_error() - diffusers_logging.set_verbosity_error() - warnings.simplefilter("ignore") - - def __exit__(self, type, value, traceback): - transformers_logging.set_verbosity(self.transformers_verbosity) - diffusers_logging.set_verbosity(self.diffusers_verbosity) - warnings.simplefilter("default") - - -ONNX_WEIGHTS_NAME = "model.onnx" - - -class IAIOnnxRuntimeModel: - class _tensor_access: - def __init__(self, model): - self.model = model - self.indexes = {} - for idx, obj in enumerate(self.model.proto.graph.initializer): - self.indexes[obj.name] = idx - - def __getitem__(self, key: str): - value = self.model.proto.graph.initializer[self.indexes[key]] - return numpy_helper.to_array(value) - - def __setitem__(self, key: str, value: np.ndarray): - new_node = numpy_helper.from_array(value) - # set_external_data(new_node, location="in-memory-location") - new_node.name = key - # new_node.ClearField("raw_data") - del self.model.proto.graph.initializer[self.indexes[key]] - self.model.proto.graph.initializer.insert(self.indexes[key], new_node) - # self.model.data[key] = OrtValue.ortvalue_from_numpy(value) - - # __delitem__ - - def __contains__(self, key: str): - return self.indexes[key] in self.model.proto.graph.initializer - - def items(self): - raise NotImplementedError("tensor.items") - # return [(obj.name, obj) for obj in self.raw_proto] - - def keys(self): - return self.indexes.keys() - - def values(self): - raise NotImplementedError("tensor.values") - # return [obj for obj in self.raw_proto] - - def size(self): - bytesSum = 0 - for node in self.model.proto.graph.initializer: - bytesSum += sys.getsizeof(node.raw_data) - return bytesSum - - class _access_helper: - def __init__(self, raw_proto): - self.indexes = {} - self.raw_proto = raw_proto - for idx, obj in enumerate(raw_proto): - self.indexes[obj.name] = idx - - def __getitem__(self, key: str): - return self.raw_proto[self.indexes[key]] - - def __setitem__(self, key: str, value): - index = self.indexes[key] - del self.raw_proto[index] - self.raw_proto.insert(index, value) - - # __delitem__ - - def __contains__(self, key: str): - return key in self.indexes - - def items(self): - return [(obj.name, obj) for obj in self.raw_proto] - - def keys(self): - return self.indexes.keys() - - def values(self): - return list(self.raw_proto) - - def __init__(self, model_path: str, provider: Optional[str]): - self.path = model_path - self.session = None - self.provider = provider - """ - self.data_path = self.path + "_data" - if not os.path.exists(self.data_path): - print(f"Moving model tensors to separate file: {self.data_path}") - tmp_proto = onnx.load(model_path, load_external_data=True) - onnx.save_model(tmp_proto, self.path, save_as_external_data=True, all_tensors_to_one_file=True, location=os.path.basename(self.data_path), size_threshold=1024, convert_attribute=False) - del tmp_proto - gc.collect() - - self.proto = onnx.load(model_path, load_external_data=False) - """ - - 
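The SilenceWarnings class above is a plain context-manager class; the same behavior can be written as a @contextmanager function. A sketch assuming transformers and diffusers are installed (both expose a logging module, as the imports above show):

import warnings
from contextlib import contextmanager

from diffusers import logging as diffusers_logging
from transformers import logging as transformers_logging

@contextmanager
def quiet_hf_libraries():
    # Save verbosity, silence both libraries, and restore on exit,
    # matching the SilenceWarnings semantics above.
    t_level = transformers_logging.get_verbosity()
    d_level = diffusers_logging.get_verbosity()
    transformers_logging.set_verbosity_error()
    diffusers_logging.set_verbosity_error()
    warnings.simplefilter("ignore")
    try:
        yield
    finally:
        transformers_logging.set_verbosity(t_level)
        diffusers_logging.set_verbosity(d_level)
        warnings.simplefilter("default")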
self.proto = onnx.load(model_path, load_external_data=True) - # self.data = dict() - # for tensor in self.proto.graph.initializer: - # name = tensor.name - - # if tensor.HasField("raw_data"): - # npt = numpy_helper.to_array(tensor) - # orv = OrtValue.ortvalue_from_numpy(npt) - # # self.data[name] = orv - # # set_external_data(tensor, location="in-memory-location") - # tensor.name = name - # # tensor.ClearField("raw_data") - - self.nodes = self._access_helper(self.proto.graph.node) - # self.initializers = self._access_helper(self.proto.graph.initializer) - # print(self.proto.graph.input) - # print(self.proto.graph.initializer) - - self.tensors = self._tensor_access(self) - - # TODO: integrate with model manager/cache - def create_session(self, height=None, width=None): - if self.session is None or self.session_width != width or self.session_height != height: - # onnx.save(self.proto, "tmp.onnx") - # onnx.save_model(self.proto, "tmp.onnx", save_as_external_data=True, all_tensors_to_one_file=True, location="tmp.onnx_data", size_threshold=1024, convert_attribute=False) - # TODO: something to be able to get weight when they already moved outside of model proto - # (trimmed_model, external_data) = buffer_external_data_tensors(self.proto) - sess = SessionOptions() - # self._external_data.update(**external_data) - # sess.add_external_initializers(list(self.data.keys()), list(self.data.values())) - # sess.enable_profiling = True - - # sess.intra_op_num_threads = 1 - # sess.inter_op_num_threads = 1 - # sess.execution_mode = ExecutionMode.ORT_SEQUENTIAL - # sess.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL - # sess.enable_cpu_mem_arena = True - # sess.enable_mem_pattern = True - # sess.add_session_config_entry("session.intra_op.use_xnnpack_threadpool", "1") ########### It's the key code - self.session_height = height - self.session_width = width - if height and width: - sess.add_free_dimension_override_by_name("unet_sample_batch", 2) - sess.add_free_dimension_override_by_name("unet_sample_channels", 4) - sess.add_free_dimension_override_by_name("unet_hidden_batch", 2) - sess.add_free_dimension_override_by_name("unet_hidden_sequence", 77) - sess.add_free_dimension_override_by_name("unet_sample_height", self.session_height) - sess.add_free_dimension_override_by_name("unet_sample_width", self.session_width) - sess.add_free_dimension_override_by_name("unet_time_batch", 1) - providers = [] - if self.provider: - providers.append(self.provider) - else: - providers = get_available_providers() - if "TensorrtExecutionProvider" in providers: - providers.remove("TensorrtExecutionProvider") - try: - self.session = InferenceSession(self.proto.SerializeToString(), providers=providers, sess_options=sess) - except Exception as e: - raise e - # self.session = InferenceSession("tmp.onnx", providers=[self.provider], sess_options=self.sess_options) - # self.io_binding = self.session.io_binding() - - def release_session(self): - self.session = None - import gc - - gc.collect() - return - - def __call__(self, **kwargs): - if self.session is None: - raise Exception("You should call create_session before running model") - - inputs = {k: np.array(v) for k, v in kwargs.items()} - # output_names = self.session.get_outputs() - # for k in inputs: - # self.io_binding.bind_cpu_input(k, inputs[k]) - # for name in output_names: - # self.io_binding.bind_output(name.name) - # self.session.run_with_iobinding(self.io_binding, None) - # return self.io_binding.copy_outputs_to_cpu() - return self.session.run(None, 
inputs) - - # compatability with diffusers load code - @classmethod - def from_pretrained( - cls, - model_id: Union[str, Path], - subfolder: Union[str, Path] = None, - file_name: Optional[str] = None, - provider: Optional[str] = None, - sess_options: Optional["SessionOptions"] = None, - **kwargs, - ): - file_name = file_name or ONNX_WEIGHTS_NAME - - if os.path.isdir(model_id): - model_path = model_id - if subfolder is not None: - model_path = os.path.join(model_path, subfolder) - model_path = os.path.join(model_path, file_name) - - else: - model_path = model_id - - # load model from local directory - if not os.path.isfile(model_path): - raise Exception(f"Model not found: {model_path}") - - # TODO: session options - return cls(model_path, provider=provider) diff --git a/invokeai/backend/model_management/models/clip_vision.py b/invokeai/backend/model_management/models/clip_vision.py deleted file mode 100644 index 2276c6beed..0000000000 --- a/invokeai/backend/model_management/models/clip_vision.py +++ /dev/null @@ -1,82 +0,0 @@ -import os -from enum import Enum -from typing import Literal, Optional - -import torch -from transformers import CLIPVisionModelWithProjection - -from invokeai.backend.model_management.models.base import ( - BaseModelType, - InvalidModelException, - ModelBase, - ModelConfigBase, - ModelType, - SubModelType, - calc_model_size_by_data, - calc_model_size_by_fs, - classproperty, -) - - -class CLIPVisionModelFormat(str, Enum): - Diffusers = "diffusers" - - -class CLIPVisionModel(ModelBase): - class DiffusersConfig(ModelConfigBase): - model_format: Literal[CLIPVisionModelFormat.Diffusers] - - def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): - assert model_type == ModelType.CLIPVision - super().__init__(model_path, base_model, model_type) - - self.model_size = calc_model_size_by_fs(self.model_path) - - @classmethod - def detect_format(cls, path: str) -> str: - if not os.path.exists(path): - raise ModuleNotFoundError(f"No CLIP Vision model at path '{path}'.") - - if os.path.isdir(path) and os.path.exists(os.path.join(path, "config.json")): - return CLIPVisionModelFormat.Diffusers - - raise InvalidModelException(f"Unexpected CLIP Vision model format: {path}") - - @classproperty - def save_to_config(cls) -> bool: - return True - - def get_size(self, child_type: Optional[SubModelType] = None) -> int: - if child_type is not None: - raise ValueError("There are no child models in a CLIP Vision model.") - - return self.model_size - - def get_model( - self, - torch_dtype: Optional[torch.dtype], - child_type: Optional[SubModelType] = None, - ) -> CLIPVisionModelWithProjection: - if child_type is not None: - raise ValueError("There are no child models in a CLIP Vision model.") - - model = CLIPVisionModelWithProjection.from_pretrained(self.model_path, torch_dtype=torch_dtype) - - # Calculate a more accurate model size. 
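The size bookkeeping here mirrors the removed calc_model_size_by_data helpers: a torch module's footprint is the sum of its parameter and buffer storage. A self-contained sketch (module_nbytes is an illustrative name):

import torch

def module_nbytes(module: torch.nn.Module) -> int:
    # Parameters plus buffers, in bytes, as in _calc_model_by_data above.
    params = sum(p.nelement() * p.element_size() for p in module.parameters())
    buffers = sum(b.nelement() * b.element_size() for b in module.buffers())
    return params + buffers

For example, module_nbytes(torch.nn.Linear(4, 4)) returns 80: twenty float32 elements (a 4x4 weight plus a 4-element bias) at four bytes each.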
- self.model_size = calc_model_size_by_data(model) - - return model - - @classmethod - def convert_if_required( - cls, - model_path: str, - output_path: str, - config: ModelConfigBase, - base_model: BaseModelType, - ) -> str: - format = cls.detect_format(model_path) - if format == CLIPVisionModelFormat.Diffusers: - return model_path - else: - raise ValueError(f"Unsupported format: '{format}'.") diff --git a/invokeai/backend/model_management/models/controlnet.py b/invokeai/backend/model_management/models/controlnet.py deleted file mode 100644 index da269eba4b..0000000000 --- a/invokeai/backend/model_management/models/controlnet.py +++ /dev/null @@ -1,163 +0,0 @@ -import os -from enum import Enum -from pathlib import Path -from typing import Literal, Optional - -import torch - -import invokeai.backend.util.logging as logger -from invokeai.app.services.config import InvokeAIAppConfig - -from .base import ( - BaseModelType, - EmptyConfigLoader, - InvalidModelException, - ModelBase, - ModelConfigBase, - ModelNotFoundException, - ModelType, - SubModelType, - calc_model_size_by_data, - calc_model_size_by_fs, - classproperty, -) - - -class ControlNetModelFormat(str, Enum): - Checkpoint = "checkpoint" - Diffusers = "diffusers" - - -class ControlNetModel(ModelBase): - # model_class: Type - # model_size: int - - class DiffusersConfig(ModelConfigBase): - model_format: Literal[ControlNetModelFormat.Diffusers] - - class CheckpointConfig(ModelConfigBase): - model_format: Literal[ControlNetModelFormat.Checkpoint] - config: str - - def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): - assert model_type == ModelType.ControlNet - super().__init__(model_path, base_model, model_type) - - try: - config = EmptyConfigLoader.load_config(self.model_path, config_name="config.json") - # config = json.loads(os.path.join(self.model_path, "config.json")) - except Exception: - raise Exception("Invalid controlnet model! (config.json not found or invalid)") - - model_class_name = config.get("_class_name", None) - if model_class_name not in {"ControlNetModel"}: - raise Exception(f"Invalid ControlNet model! 
Unknown _class_name: {model_class_name}") - - try: - self.model_class = self._hf_definition_to_type(["diffusers", model_class_name]) - self.model_size = calc_model_size_by_fs(self.model_path) - except Exception: - raise Exception("Invalid ControlNet model!") - - def get_size(self, child_type: Optional[SubModelType] = None): - if child_type is not None: - raise Exception("There is no child models in controlnet model") - return self.model_size - - def get_model( - self, - torch_dtype: Optional[torch.dtype], - child_type: Optional[SubModelType] = None, - ): - if child_type is not None: - raise Exception("There are no child models in controlnet model") - - model = None - for variant in ["fp16", None]: - try: - model = self.model_class.from_pretrained( - self.model_path, - torch_dtype=torch_dtype, - variant=variant, - ) - break - except Exception: - pass - if not model: - raise ModelNotFoundException() - - # calc more accurate size - self.model_size = calc_model_size_by_data(model) - return model - - @classproperty - def save_to_config(cls) -> bool: - return False - - @classmethod - def detect_format(cls, path: str): - if not os.path.exists(path): - raise ModelNotFoundException() - - if os.path.isdir(path): - if os.path.exists(os.path.join(path, "config.json")): - return ControlNetModelFormat.Diffusers - - if os.path.isfile(path): - if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "pth"]): - return ControlNetModelFormat.Checkpoint - - raise InvalidModelException(f"Not a valid model: {path}") - - @classmethod - def convert_if_required( - cls, - model_path: str, - output_path: str, - config: ModelConfigBase, - base_model: BaseModelType, - ) -> str: - if cls.detect_format(model_path) == ControlNetModelFormat.Checkpoint: - return _convert_controlnet_ckpt_and_cache( - model_path=model_path, - model_config=config.config, - output_path=output_path, - base_model=base_model, - ) - else: - return model_path - - -def _convert_controlnet_ckpt_and_cache( - model_path: str, - output_path: str, - base_model: BaseModelType, - model_config: str, -) -> str: - """ - Convert the controlnet from checkpoint format to diffusers format, - cache it to disk, and return Path to converted - file. If already on disk then just returns Path. 
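The get_model loop above tries the fp16 weight variant first and falls back to the default files. The same pattern expressed against the public diffusers API; load_controlnet is an illustrative name, and a RuntimeError stands in for the removed ModelNotFoundException:

import torch
from diffusers import ControlNetModel

def load_controlnet(model_path: str, torch_dtype: torch.dtype) -> ControlNetModel:
    last_error = None
    for variant in ("fp16", None):
        try:
            return ControlNetModel.from_pretrained(model_path, torch_dtype=torch_dtype, variant=variant)
        except Exception as e:  # broad on purpose, mirroring the loop above
            last_error = e
    raise RuntimeError(f"Could not load a ControlNet from '{model_path}'") from last_error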
- """ - print(f"DEBUG: controlnet config = {model_config}") - app_config = InvokeAIAppConfig.get_config() - weights = app_config.root_path / model_path - output_path = Path(output_path) - - logger.info(f"Converting {weights} to diffusers format") - # return cached version if it exists - if output_path.exists(): - return output_path - - # to avoid circular import errors - from ..convert_ckpt_to_diffusers import convert_controlnet_to_diffusers - - convert_controlnet_to_diffusers( - weights, - output_path, - original_config_file=app_config.root_path / model_config, - image_size=512, - scan_needed=True, - from_safetensors=weights.suffix == ".safetensors", - ) - return output_path diff --git a/invokeai/backend/model_management/models/ip_adapter.py b/invokeai/backend/model_management/models/ip_adapter.py deleted file mode 100644 index c60edd0abe..0000000000 --- a/invokeai/backend/model_management/models/ip_adapter.py +++ /dev/null @@ -1,98 +0,0 @@ -import os -import typing -from enum import Enum -from typing import Literal, Optional - -import torch - -from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus, build_ip_adapter -from invokeai.backend.model_management.models.base import ( - BaseModelType, - InvalidModelException, - ModelBase, - ModelConfigBase, - ModelType, - SubModelType, - calc_model_size_by_fs, - classproperty, -) - - -class IPAdapterModelFormat(str, Enum): - # The custom IP-Adapter model format defined by InvokeAI. - InvokeAI = "invokeai" - - -class IPAdapterModel(ModelBase): - class InvokeAIConfig(ModelConfigBase): - model_format: Literal[IPAdapterModelFormat.InvokeAI] - - def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): - assert model_type == ModelType.IPAdapter - super().__init__(model_path, base_model, model_type) - - self.model_size = calc_model_size_by_fs(self.model_path) - - @classmethod - def detect_format(cls, path: str) -> str: - if not os.path.exists(path): - raise ModuleNotFoundError(f"No IP-Adapter model at path '{path}'.") - - if os.path.isdir(path): - model_file = os.path.join(path, "ip_adapter.bin") - image_encoder_config_file = os.path.join(path, "image_encoder.txt") - if os.path.exists(model_file) and os.path.exists(image_encoder_config_file): - return IPAdapterModelFormat.InvokeAI - - raise InvalidModelException(f"Unexpected IP-Adapter model format: {path}") - - @classproperty - def save_to_config(cls) -> bool: - return True - - def get_size(self, child_type: Optional[SubModelType] = None) -> int: - if child_type is not None: - raise ValueError("There are no child models in an IP-Adapter model.") - - return self.model_size - - def get_model( - self, - torch_dtype: torch.dtype, - child_type: Optional[SubModelType] = None, - ) -> typing.Union[IPAdapter, IPAdapterPlus]: - if child_type is not None: - raise ValueError("There are no child models in an IP-Adapter model.") - - model = build_ip_adapter( - ip_adapter_ckpt_path=os.path.join(self.model_path, "ip_adapter.bin"), - device=torch.device("cpu"), - dtype=torch_dtype, - ) - - self.model_size = model.calc_size() - return model - - @classmethod - def convert_if_required( - cls, - model_path: str, - output_path: str, - config: ModelConfigBase, - base_model: BaseModelType, - ) -> str: - format = cls.detect_format(model_path) - if format == IPAdapterModelFormat.InvokeAI: - return model_path - else: - raise ValueError(f"Unsupported format: '{format}'.") - - -def get_ip_adapter_image_encoder_model_id(model_path: str): - """Read the ID of the image encoder 
associated with the IP-Adapter at `model_path`.""" - image_encoder_config_file = os.path.join(model_path, "image_encoder.txt") - - with open(image_encoder_config_file, "r") as f: - image_encoder_model = f.readline().strip() - - return image_encoder_model diff --git a/invokeai/backend/model_management/models/sdxl.py b/invokeai/backend/model_management/models/sdxl.py deleted file mode 100644 index 01e9420fed..0000000000 --- a/invokeai/backend/model_management/models/sdxl.py +++ /dev/null @@ -1,148 +0,0 @@ -import json -import os -from enum import Enum -from pathlib import Path -from typing import Literal, Optional - -from omegaconf import OmegaConf -from pydantic import Field - -from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.backend.model_management.detect_baked_in_vae import has_baked_in_sdxl_vae -from invokeai.backend.util.logging import InvokeAILogger - -from .base import ( - BaseModelType, - DiffusersModel, - InvalidModelException, - ModelConfigBase, - ModelType, - ModelVariantType, - classproperty, - read_checkpoint_meta, -) - - -class StableDiffusionXLModelFormat(str, Enum): - Checkpoint = "checkpoint" - Diffusers = "diffusers" - - -class StableDiffusionXLModel(DiffusersModel): - # TODO: check that configs overwriten properly - class DiffusersConfig(ModelConfigBase): - model_format: Literal[StableDiffusionXLModelFormat.Diffusers] - vae: Optional[str] = Field(None) - variant: ModelVariantType - - class CheckpointConfig(ModelConfigBase): - model_format: Literal[StableDiffusionXLModelFormat.Checkpoint] - vae: Optional[str] = Field(None) - config: str - variant: ModelVariantType - - def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): - assert base_model in {BaseModelType.StableDiffusionXL, BaseModelType.StableDiffusionXLRefiner} - assert model_type == ModelType.Main - super().__init__( - model_path=model_path, - base_model=BaseModelType.StableDiffusionXL, - model_type=ModelType.Main, - ) - - @classmethod - def probe_config(cls, path: str, **kwargs): - model_format = cls.detect_format(path) - ckpt_config_path = kwargs.get("config", None) - if model_format == StableDiffusionXLModelFormat.Checkpoint: - if ckpt_config_path: - ckpt_config = OmegaConf.load(ckpt_config_path) - in_channels = ckpt_config["model"]["params"]["unet_config"]["params"]["in_channels"] - - else: - checkpoint = read_checkpoint_meta(path) - checkpoint = checkpoint.get("state_dict", checkpoint) - in_channels = checkpoint["model.diffusion_model.input_blocks.0.0.weight"].shape[1] - - elif model_format == StableDiffusionXLModelFormat.Diffusers: - unet_config_path = os.path.join(path, "unet", "config.json") - if os.path.exists(unet_config_path): - with open(unet_config_path, "r") as f: - unet_config = json.loads(f.read()) - in_channels = unet_config["in_channels"] - - else: - raise InvalidModelException(f"{path} is not a recognized Stable Diffusion diffusers model") - - else: - raise NotImplementedError(f"Unknown stable diffusion 2.* format: {model_format}") - - if in_channels == 9: - variant = ModelVariantType.Inpaint - elif in_channels == 5: - variant = ModelVariantType.Depth - elif in_channels == 4: - variant = ModelVariantType.Normal - else: - raise Exception("Unkown stable diffusion 2.* model format") - - if ckpt_config_path is None: - # avoid circular import - from .stable_diffusion import _select_ckpt_config - - ckpt_config_path = _select_ckpt_config(kwargs.get("model_base", BaseModelType.StableDiffusionXL), variant) - - return cls.create_config( - path=path, - 
model_format=model_format, - config=ckpt_config_path, - variant=variant, - ) - - @classproperty - def save_to_config(cls) -> bool: - return True - - @classmethod - def detect_format(cls, model_path: str): - if os.path.isdir(model_path): - return StableDiffusionXLModelFormat.Diffusers - else: - return StableDiffusionXLModelFormat.Checkpoint - - @classmethod - def convert_if_required( - cls, - model_path: str, - output_path: str, - config: ModelConfigBase, - base_model: BaseModelType, - ) -> str: - # The convert script adapted from the diffusers package uses - # strings for the base model type. To avoid making too many - # source code changes, we simply translate here - if Path(output_path).exists(): - return output_path - - if isinstance(config, cls.CheckpointConfig): - from invokeai.backend.model_management.models.stable_diffusion import _convert_ckpt_and_cache - - # Hack in VAE-fp16 fix - If model sdxl-vae-fp16-fix is installed, - # then we bake it into the converted model unless there is already - # a nonstandard VAE installed. - kwargs = {} - app_config = InvokeAIAppConfig.get_config() - vae_path = app_config.models_path / "sdxl/vae/sdxl-vae-fp16-fix" - if vae_path.exists() and not has_baked_in_sdxl_vae(Path(model_path)): - InvokeAILogger.get_logger().warning("No baked-in VAE detected. Inserting sdxl-vae-fp16-fix.") - kwargs["vae_path"] = vae_path - - return _convert_ckpt_and_cache( - version=base_model, - model_config=config, - output_path=output_path, - use_safetensors=True, - **kwargs, - ) - else: - return model_path diff --git a/invokeai/backend/model_management/models/stable_diffusion.py b/invokeai/backend/model_management/models/stable_diffusion.py deleted file mode 100644 index a38a44fccf..0000000000 --- a/invokeai/backend/model_management/models/stable_diffusion.py +++ /dev/null @@ -1,337 +0,0 @@ -import json -import os -from enum import Enum -from pathlib import Path -from typing import Literal, Optional, Union - -from diffusers import StableDiffusionInpaintPipeline, StableDiffusionPipeline -from omegaconf import OmegaConf -from pydantic import Field - -import invokeai.backend.util.logging as logger -from invokeai.app.services.config import InvokeAIAppConfig - -from .base import ( - BaseModelType, - DiffusersModel, - InvalidModelException, - ModelConfigBase, - ModelNotFoundException, - ModelType, - ModelVariantType, - SilenceWarnings, - classproperty, - read_checkpoint_meta, -) -from .sdxl import StableDiffusionXLModel - - -class StableDiffusion1ModelFormat(str, Enum): - Checkpoint = "checkpoint" - Diffusers = "diffusers" - - -class StableDiffusion1Model(DiffusersModel): - class DiffusersConfig(ModelConfigBase): - model_format: Literal[StableDiffusion1ModelFormat.Diffusers] - vae: Optional[str] = Field(None) - variant: ModelVariantType - - class CheckpointConfig(ModelConfigBase): - model_format: Literal[StableDiffusion1ModelFormat.Checkpoint] - vae: Optional[str] = Field(None) - config: str - variant: ModelVariantType - - def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): - assert base_model == BaseModelType.StableDiffusion1 - assert model_type == ModelType.Main - super().__init__( - model_path=model_path, - base_model=BaseModelType.StableDiffusion1, - model_type=ModelType.Main, - ) - - @classmethod - def probe_config(cls, path: str, **kwargs): - model_format = cls.detect_format(path) - ckpt_config_path = kwargs.get("config", None) - if model_format == StableDiffusion1ModelFormat.Checkpoint: - if ckpt_config_path: - ckpt_config = 
OmegaConf.load(ckpt_config_path) - ckpt_config["model"]["params"]["unet_config"]["params"]["in_channels"] - - else: - checkpoint = read_checkpoint_meta(path) - checkpoint = checkpoint.get("state_dict", checkpoint) - in_channels = checkpoint["model.diffusion_model.input_blocks.0.0.weight"].shape[1] - - elif model_format == StableDiffusion1ModelFormat.Diffusers: - unet_config_path = os.path.join(path, "unet", "config.json") - if os.path.exists(unet_config_path): - with open(unet_config_path, "r") as f: - unet_config = json.loads(f.read()) - in_channels = unet_config["in_channels"] - - else: - raise NotImplementedError(f"{path} is not a supported stable diffusion diffusers format") - - else: - raise NotImplementedError(f"Unknown stable diffusion 1.* format: {model_format}") - - if in_channels == 9: - variant = ModelVariantType.Inpaint - elif in_channels == 4: - variant = ModelVariantType.Normal - else: - raise Exception("Unkown stable diffusion 1.* model format") - - if ckpt_config_path is None: - ckpt_config_path = _select_ckpt_config(BaseModelType.StableDiffusion1, variant) - - return cls.create_config( - path=path, - model_format=model_format, - config=ckpt_config_path, - variant=variant, - ) - - @classproperty - def save_to_config(cls) -> bool: - return True - - @classmethod - def detect_format(cls, model_path: str): - if not os.path.exists(model_path): - raise ModelNotFoundException() - - if os.path.isdir(model_path): - if os.path.exists(os.path.join(model_path, "model_index.json")): - return StableDiffusion1ModelFormat.Diffusers - - if os.path.isfile(model_path): - if any(model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]): - return StableDiffusion1ModelFormat.Checkpoint - - raise InvalidModelException(f"Not a valid model: {model_path}") - - @classmethod - def convert_if_required( - cls, - model_path: str, - output_path: str, - config: ModelConfigBase, - base_model: BaseModelType, - ) -> str: - if isinstance(config, cls.CheckpointConfig): - return _convert_ckpt_and_cache( - version=BaseModelType.StableDiffusion1, - model_config=config, - load_safety_checker=False, - output_path=output_path, - ) - else: - return model_path - - -class StableDiffusion2ModelFormat(str, Enum): - Checkpoint = "checkpoint" - Diffusers = "diffusers" - - -class StableDiffusion2Model(DiffusersModel): - # TODO: check that configs overwriten properly - class DiffusersConfig(ModelConfigBase): - model_format: Literal[StableDiffusion2ModelFormat.Diffusers] - vae: Optional[str] = Field(None) - variant: ModelVariantType - - class CheckpointConfig(ModelConfigBase): - model_format: Literal[StableDiffusion2ModelFormat.Checkpoint] - vae: Optional[str] = Field(None) - config: str - variant: ModelVariantType - - def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): - assert base_model == BaseModelType.StableDiffusion2 - assert model_type == ModelType.Main - super().__init__( - model_path=model_path, - base_model=BaseModelType.StableDiffusion2, - model_type=ModelType.Main, - ) - - @classmethod - def probe_config(cls, path: str, **kwargs): - model_format = cls.detect_format(path) - ckpt_config_path = kwargs.get("config", None) - if model_format == StableDiffusion2ModelFormat.Checkpoint: - if ckpt_config_path: - ckpt_config = OmegaConf.load(ckpt_config_path) - ckpt_config["model"]["params"]["unet_config"]["params"]["in_channels"] - - else: - checkpoint = read_checkpoint_meta(path) - checkpoint = checkpoint.get("state_dict", checkpoint) - in_channels = 
checkpoint["model.diffusion_model.input_blocks.0.0.weight"].shape[1] - - elif model_format == StableDiffusion2ModelFormat.Diffusers: - unet_config_path = os.path.join(path, "unet", "config.json") - if os.path.exists(unet_config_path): - with open(unet_config_path, "r") as f: - unet_config = json.loads(f.read()) - in_channels = unet_config["in_channels"] - - else: - raise Exception("Not supported stable diffusion diffusers format(possibly onnx?)") - - else: - raise NotImplementedError(f"Unknown stable diffusion 2.* format: {model_format}") - - if in_channels == 9: - variant = ModelVariantType.Inpaint - elif in_channels == 5: - variant = ModelVariantType.Depth - elif in_channels == 4: - variant = ModelVariantType.Normal - else: - raise Exception("Unkown stable diffusion 2.* model format") - - if ckpt_config_path is None: - ckpt_config_path = _select_ckpt_config(BaseModelType.StableDiffusion2, variant) - - return cls.create_config( - path=path, - model_format=model_format, - config=ckpt_config_path, - variant=variant, - ) - - @classproperty - def save_to_config(cls) -> bool: - return True - - @classmethod - def detect_format(cls, model_path: str): - if not os.path.exists(model_path): - raise ModelNotFoundException() - - if os.path.isdir(model_path): - if os.path.exists(os.path.join(model_path, "model_index.json")): - return StableDiffusion2ModelFormat.Diffusers - - if os.path.isfile(model_path): - if any(model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]): - return StableDiffusion2ModelFormat.Checkpoint - - raise InvalidModelException(f"Not a valid model: {model_path}") - - @classmethod - def convert_if_required( - cls, - model_path: str, - output_path: str, - config: ModelConfigBase, - base_model: BaseModelType, - ) -> str: - if isinstance(config, cls.CheckpointConfig): - return _convert_ckpt_and_cache( - version=BaseModelType.StableDiffusion2, - model_config=config, - output_path=output_path, - ) - else: - return model_path - - -# TODO: rework -# pass precision - currently defaulting to fp16 -def _convert_ckpt_and_cache( - version: BaseModelType, - model_config: Union[ - StableDiffusion1Model.CheckpointConfig, - StableDiffusion2Model.CheckpointConfig, - StableDiffusionXLModel.CheckpointConfig, - ], - output_path: str, - use_save_model: bool = False, - **kwargs, -) -> str: - """ - Convert the checkpoint model indicated in mconfig into a - diffusers, cache it to disk, and return Path to converted - file. If already on disk then just returns Path. 
- """ - app_config = InvokeAIAppConfig.get_config() - - weights = app_config.models_path / model_config.path - config_file = app_config.root_path / model_config.config - output_path = Path(output_path) - variant = model_config.variant - pipeline_class = StableDiffusionInpaintPipeline if variant == "inpaint" else StableDiffusionPipeline - - # return cached version if it exists - if output_path.exists(): - return output_path - - # to avoid circular import errors - from ...util.devices import choose_torch_device, torch_dtype - from ..convert_ckpt_to_diffusers import convert_ckpt_to_diffusers - - model_base_to_model_type = { - BaseModelType.StableDiffusion1: "FrozenCLIPEmbedder", - BaseModelType.StableDiffusion2: "FrozenOpenCLIPEmbedder", - BaseModelType.StableDiffusionXL: "SDXL", - BaseModelType.StableDiffusionXLRefiner: "SDXL-Refiner", - } - logger.info(f"Converting {weights} to diffusers format") - with SilenceWarnings(): - convert_ckpt_to_diffusers( - weights, - output_path, - model_type=model_base_to_model_type[version], - model_version=version, - model_variant=model_config.variant, - original_config_file=config_file, - extract_ema=True, - scan_needed=True, - pipeline_class=pipeline_class, - from_safetensors=weights.suffix == ".safetensors", - precision=torch_dtype(choose_torch_device()), - **kwargs, - ) - return output_path - - -def _select_ckpt_config(version: BaseModelType, variant: ModelVariantType): - ckpt_configs = { - BaseModelType.StableDiffusion1: { - ModelVariantType.Normal: "v1-inference.yaml", - ModelVariantType.Inpaint: "v1-inpainting-inference.yaml", - }, - BaseModelType.StableDiffusion2: { - ModelVariantType.Normal: "v2-inference-v.yaml", # best guess, as we can't differentiate with base(512) - ModelVariantType.Inpaint: "v2-inpainting-inference.yaml", - ModelVariantType.Depth: "v2-midas-inference.yaml", - }, - BaseModelType.StableDiffusionXL: { - ModelVariantType.Normal: "sd_xl_base.yaml", - ModelVariantType.Inpaint: None, - ModelVariantType.Depth: None, - }, - BaseModelType.StableDiffusionXLRefiner: { - ModelVariantType.Normal: "sd_xl_refiner.yaml", - ModelVariantType.Inpaint: None, - ModelVariantType.Depth: None, - }, - } - - app_config = InvokeAIAppConfig.get_config() - try: - config_path = app_config.legacy_conf_path / ckpt_configs[version][variant] - if config_path.is_relative_to(app_config.root_path): - config_path = config_path.relative_to(app_config.root_path) - return str(config_path) - - except Exception: - return None diff --git a/invokeai/backend/model_management/models/stable_diffusion_onnx.py b/invokeai/backend/model_management/models/stable_diffusion_onnx.py deleted file mode 100644 index 2d0dd22c43..0000000000 --- a/invokeai/backend/model_management/models/stable_diffusion_onnx.py +++ /dev/null @@ -1,150 +0,0 @@ -from enum import Enum -from typing import Literal - -from diffusers import OnnxRuntimeModel - -from .base import ( - BaseModelType, - DiffusersModel, - IAIOnnxRuntimeModel, - ModelConfigBase, - ModelType, - ModelVariantType, - SchedulerPredictionType, - classproperty, -) - - -class StableDiffusionOnnxModelFormat(str, Enum): - Olive = "olive" - Onnx = "onnx" - - -class ONNXStableDiffusion1Model(DiffusersModel): - class Config(ModelConfigBase): - model_format: Literal[StableDiffusionOnnxModelFormat.Onnx] - variant: ModelVariantType - - def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): - assert base_model == BaseModelType.StableDiffusion1 - assert model_type == ModelType.ONNX - super().__init__( - 
model_path=model_path, - base_model=BaseModelType.StableDiffusion1, - model_type=ModelType.ONNX, - ) - - for child_name, child_type in self.child_types.items(): - if child_type is OnnxRuntimeModel: - self.child_types[child_name] = IAIOnnxRuntimeModel - - # TODO: check that no optimum models provided - - @classmethod - def probe_config(cls, path: str, **kwargs): - model_format = cls.detect_format(path) - in_channels = 4 # TODO: - - if in_channels == 9: - variant = ModelVariantType.Inpaint - elif in_channels == 4: - variant = ModelVariantType.Normal - else: - raise Exception("Unkown stable diffusion 1.* model format") - - return cls.create_config( - path=path, - model_format=model_format, - variant=variant, - ) - - @classproperty - def save_to_config(cls) -> bool: - return True - - @classmethod - def detect_format(cls, model_path: str): - # TODO: Detect onnx vs olive - return StableDiffusionOnnxModelFormat.Onnx - - @classmethod - def convert_if_required( - cls, - model_path: str, - output_path: str, - config: ModelConfigBase, - base_model: BaseModelType, - ) -> str: - return model_path - - -class ONNXStableDiffusion2Model(DiffusersModel): - # TODO: check that configs overwriten properly - class Config(ModelConfigBase): - model_format: Literal[StableDiffusionOnnxModelFormat.Onnx] - variant: ModelVariantType - prediction_type: SchedulerPredictionType - upcast_attention: bool - - def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): - assert base_model == BaseModelType.StableDiffusion2 - assert model_type == ModelType.ONNX - super().__init__( - model_path=model_path, - base_model=BaseModelType.StableDiffusion2, - model_type=ModelType.ONNX, - ) - - for child_name, child_type in self.child_types.items(): - if child_type is OnnxRuntimeModel: - self.child_types[child_name] = IAIOnnxRuntimeModel - # TODO: check that no optimum models provided - - @classmethod - def probe_config(cls, path: str, **kwargs): - model_format = cls.detect_format(path) - in_channels = 4 # TODO: - - if in_channels == 9: - variant = ModelVariantType.Inpaint - elif in_channels == 5: - variant = ModelVariantType.Depth - elif in_channels == 4: - variant = ModelVariantType.Normal - else: - raise Exception("Unkown stable diffusion 2.* model format") - - if variant == ModelVariantType.Normal: - prediction_type = SchedulerPredictionType.VPrediction - upcast_attention = True - - else: - prediction_type = SchedulerPredictionType.Epsilon - upcast_attention = False - - return cls.create_config( - path=path, - model_format=model_format, - variant=variant, - prediction_type=prediction_type, - upcast_attention=upcast_attention, - ) - - @classproperty - def save_to_config(cls) -> bool: - return True - - @classmethod - def detect_format(cls, model_path: str): - # TODO: Detect onnx vs olive - return StableDiffusionOnnxModelFormat.Onnx - - @classmethod - def convert_if_required( - cls, - model_path: str, - output_path: str, - config: ModelConfigBase, - base_model: BaseModelType, - ) -> str: - return model_path diff --git a/invokeai/backend/model_management/models/t2i_adapter.py b/invokeai/backend/model_management/models/t2i_adapter.py deleted file mode 100644 index 4adb9901f9..0000000000 --- a/invokeai/backend/model_management/models/t2i_adapter.py +++ /dev/null @@ -1,102 +0,0 @@ -import os -from enum import Enum -from typing import Literal, Optional - -import torch -from diffusers import T2IAdapter - -from invokeai.backend.model_management.models.base import ( - BaseModelType, - EmptyConfigLoader, - 
InvalidModelException, - ModelBase, - ModelConfigBase, - ModelNotFoundException, - ModelType, - SubModelType, - calc_model_size_by_data, - calc_model_size_by_fs, - classproperty, -) - - -class T2IAdapterModelFormat(str, Enum): - Diffusers = "diffusers" - - -class T2IAdapterModel(ModelBase): - class DiffusersConfig(ModelConfigBase): - model_format: Literal[T2IAdapterModelFormat.Diffusers] - - def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): - assert model_type == ModelType.T2IAdapter - super().__init__(model_path, base_model, model_type) - - config = EmptyConfigLoader.load_config(self.model_path, config_name="config.json") - - model_class_name = config.get("_class_name", None) - if model_class_name not in {"T2IAdapter"}: - raise InvalidModelException(f"Invalid T2I-Adapter model. Unknown _class_name: '{model_class_name}'.") - - self.model_class = self._hf_definition_to_type(["diffusers", model_class_name]) - self.model_size = calc_model_size_by_fs(self.model_path) - - def get_size(self, child_type: Optional[SubModelType] = None): - if child_type is not None: - raise ValueError(f"T2I-Adapters do not have child models. Invalid child type: '{child_type}'.") - return self.model_size - - def get_model( - self, - torch_dtype: Optional[torch.dtype], - child_type: Optional[SubModelType] = None, - ) -> T2IAdapter: - if child_type is not None: - raise ValueError(f"T2I-Adapters do not have child models. Invalid child type: '{child_type}'.") - - model = None - for variant in ["fp16", None]: - try: - model = self.model_class.from_pretrained( - self.model_path, - torch_dtype=torch_dtype, - variant=variant, - ) - break - except Exception: - pass - if not model: - raise ModelNotFoundException() - - # Calculate a more accurate size after loading the model into memory. 
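Like the ControlNet class earlier in this diff, T2IAdapterModel validates the _class_name recorded in config.json before resolving the diffusers class. A sketch of that first step (read_diffusers_class_name is an illustrative name):

import json
from pathlib import Path

def read_diffusers_class_name(model_dir: str) -> str:
    # Return the _class_name field from a diffusers model's config.json.
    config = json.loads((Path(model_dir) / "config.json").read_text())
    class_name = config.get("_class_name")
    if class_name is None:
        raise ValueError(f"No _class_name in {model_dir}/config.json")
    return class_name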
- self.model_size = calc_model_size_by_data(model) - return model - - @classproperty - def save_to_config(cls) -> bool: - return False - - @classmethod - def detect_format(cls, path: str): - if not os.path.exists(path): - raise ModelNotFoundException(f"Model not found at '{path}'.") - - if os.path.isdir(path): - if os.path.exists(os.path.join(path, "config.json")): - return T2IAdapterModelFormat.Diffusers - - raise InvalidModelException(f"Unsupported T2I-Adapter format: '{path}'.") - - @classmethod - def convert_if_required( - cls, - model_path: str, - output_path: str, - config: ModelConfigBase, - base_model: BaseModelType, - ) -> str: - format = cls.detect_format(model_path) - if format == T2IAdapterModelFormat.Diffusers: - return model_path - else: - raise ValueError(f"Unsupported format: '{format}'.") diff --git a/invokeai/backend/model_management/models/textual_inversion.py b/invokeai/backend/model_management/models/textual_inversion.py deleted file mode 100644 index 99358704b8..0000000000 --- a/invokeai/backend/model_management/models/textual_inversion.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -from typing import Optional - -import torch - -# TODO: naming -from ..lora import TextualInversionModel as TextualInversionModelRaw -from .base import ( - BaseModelType, - InvalidModelException, - ModelBase, - ModelConfigBase, - ModelNotFoundException, - ModelType, - SubModelType, - classproperty, -) - - -class TextualInversionModel(ModelBase): - # model_size: int - - class Config(ModelConfigBase): - model_format: None - - def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): - assert model_type == ModelType.TextualInversion - super().__init__(model_path, base_model, model_type) - - self.model_size = os.path.getsize(self.model_path) - - def get_size(self, child_type: Optional[SubModelType] = None): - if child_type is not None: - raise Exception("There is no child models in textual inversion") - return self.model_size - - def get_model( - self, - torch_dtype: Optional[torch.dtype], - child_type: Optional[SubModelType] = None, - ): - if child_type is not None: - raise Exception("There is no child models in textual inversion") - - checkpoint_path = self.model_path - if os.path.isdir(checkpoint_path): - checkpoint_path = os.path.join(checkpoint_path, "learned_embeds.bin") - - if not os.path.exists(checkpoint_path): - raise ModelNotFoundException() - - model = TextualInversionModelRaw.from_checkpoint( - file_path=checkpoint_path, - dtype=torch_dtype, - ) - - self.model_size = model.embedding.nelement() * model.embedding.element_size() - return model - - @classproperty - def save_to_config(cls) -> bool: - return False - - @classmethod - def detect_format(cls, path: str): - if not os.path.exists(path): - raise ModelNotFoundException() - - if os.path.isdir(path): - if os.path.exists(os.path.join(path, "learned_embeds.bin")): - return None # diffusers-ti - - if os.path.isfile(path): - if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "bin"]): - return None - - raise InvalidModelException(f"Not a valid model: {path}") - - @classmethod - def convert_if_required( - cls, - model_path: str, - output_path: str, - config: ModelConfigBase, - base_model: BaseModelType, - ) -> str: - return model_path diff --git a/invokeai/backend/model_management/models/vae.py b/invokeai/backend/model_management/models/vae.py deleted file mode 100644 index 8cc37e67a7..0000000000 --- a/invokeai/backend/model_management/models/vae.py +++ /dev/null @@ -1,179 +0,0 @@ 
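The detect_format classmethods in this diff share one convention, which vae.py below also follows: a directory containing config.json is a diffusers model, while a file with a known weights suffix is a checkpoint. Condensed into a standalone sketch (this detect_format is an illustrative free function, not the removed classmethod):

import os

def detect_format(path: str) -> str:
    # Directory with a config.json: diffusers layout.
    if os.path.isdir(path) and os.path.exists(os.path.join(path, "config.json")):
        return "diffusers"
    # Single file with a known weights suffix: checkpoint.
    if os.path.isfile(path) and path.endswith((".safetensors", ".ckpt", ".pt")):
        return "checkpoint"
    raise ValueError(f"Not a valid model: {path}")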
-import os -from enum import Enum -from pathlib import Path -from typing import Optional - -import safetensors -import torch -from omegaconf import OmegaConf - -from invokeai.app.services.config import InvokeAIAppConfig - -from .base import ( - BaseModelType, - EmptyConfigLoader, - InvalidModelException, - ModelBase, - ModelConfigBase, - ModelNotFoundException, - ModelType, - ModelVariantType, - SubModelType, - calc_model_size_by_data, - calc_model_size_by_fs, - classproperty, -) - - -class VaeModelFormat(str, Enum): - Checkpoint = "checkpoint" - Diffusers = "diffusers" - - -class VaeModel(ModelBase): - # vae_class: Type - # model_size: int - - class Config(ModelConfigBase): - model_format: VaeModelFormat - - def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): - assert model_type == ModelType.Vae - super().__init__(model_path, base_model, model_type) - - try: - config = EmptyConfigLoader.load_config(self.model_path, config_name="config.json") - # config = json.loads(os.path.join(self.model_path, "config.json")) - except Exception: - raise Exception("Invalid vae model! (config.json not found or invalid)") - - try: - vae_class_name = config.get("_class_name", "AutoencoderKL") - self.vae_class = self._hf_definition_to_type(["diffusers", vae_class_name]) - self.model_size = calc_model_size_by_fs(self.model_path) - except Exception: - raise Exception("Invalid vae model! (Unkown vae type)") - - def get_size(self, child_type: Optional[SubModelType] = None): - if child_type is not None: - raise Exception("There is no child models in vae model") - return self.model_size - - def get_model( - self, - torch_dtype: Optional[torch.dtype], - child_type: Optional[SubModelType] = None, - ): - if child_type is not None: - raise Exception("There is no child models in vae model") - - model = self.vae_class.from_pretrained( - self.model_path, - torch_dtype=torch_dtype, - ) - # calc more accurate size - self.model_size = calc_model_size_by_data(model) - return model - - @classproperty - def save_to_config(cls) -> bool: - return False - - @classmethod - def detect_format(cls, path: str): - if not os.path.exists(path): - raise ModelNotFoundException(f"Does not exist as local file: {path}") - - if os.path.isdir(path): - if os.path.exists(os.path.join(path, "config.json")): - return VaeModelFormat.Diffusers - - if os.path.isfile(path): - if any(path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]): - return VaeModelFormat.Checkpoint - - raise InvalidModelException(f"Not a valid model: {path}") - - @classmethod - def convert_if_required( - cls, - model_path: str, - output_path: str, - config: ModelConfigBase, # empty config or config of parent model - base_model: BaseModelType, - ) -> str: - if cls.detect_format(model_path) == VaeModelFormat.Checkpoint: - return _convert_vae_ckpt_and_cache( - weights_path=model_path, - output_path=output_path, - base_model=base_model, - model_config=config, - ) - else: - return model_path - - -# TODO: rework -def _convert_vae_ckpt_and_cache( - weights_path: str, - output_path: str, - base_model: BaseModelType, - model_config: ModelConfigBase, -) -> str: - """ - Convert the VAE indicated in mconfig into a diffusers AutoencoderKL - object, cache it to disk, and return Path to converted - file. If already on disk then just returns Path. 
- """ - app_config = InvokeAIAppConfig.get_config() - weights_path = app_config.root_dir / weights_path - output_path = Path(output_path) - - """ - this size used only in when tiling enabled to separate input in tiles - sizes in configs from stable diffusion githubs(1 and 2) set to 256 - on huggingface it: - 1.5 - 512 - 1.5-inpainting - 256 - 2-inpainting - 512 - 2-depth - 256 - 2-base - 512 - 2 - 768 - 2.1-base - 768 - 2.1 - 768 - """ - image_size = 512 - - # return cached version if it exists - if output_path.exists(): - return output_path - - if base_model in {BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2}: - from .stable_diffusion import _select_ckpt_config - - # all sd models use same vae settings - config_file = _select_ckpt_config(base_model, ModelVariantType.Normal) - else: - raise Exception(f"Vae conversion not supported for model type: {base_model}") - - # this avoids circular import error - from ..convert_ckpt_to_diffusers import convert_ldm_vae_to_diffusers - - if weights_path.suffix == ".safetensors": - checkpoint = safetensors.torch.load_file(weights_path, device="cpu") - else: - checkpoint = torch.load(weights_path, map_location="cpu") - - # sometimes weights are hidden under "state_dict", and sometimes not - if "state_dict" in checkpoint: - checkpoint = checkpoint["state_dict"] - - config = OmegaConf.load(app_config.root_path / config_file) - - vae_model = convert_ldm_vae_to_diffusers( - checkpoint=checkpoint, - vae_config=config, - image_size=image_size, - ) - vae_model.save_pretrained(output_path, safe_serialization=True) - return output_path diff --git a/invokeai/backend/model_management/seamless.py b/invokeai/backend/model_management/seamless.py deleted file mode 100644 index bfdf9e0c53..0000000000 --- a/invokeai/backend/model_management/seamless.py +++ /dev/null @@ -1,102 +0,0 @@ -from __future__ import annotations - -from contextlib import contextmanager -from typing import List, Union - -import torch.nn as nn -from diffusers.models import AutoencoderKL, UNet2DConditionModel - - -def _conv_forward_asymmetric(self, input, weight, bias): - """ - Patch for Conv2d._conv_forward that supports asymmetric padding - """ - working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"]) - working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"]) - return nn.functional.conv2d( - working, - weight, - bias, - self.stride, - nn.modules.utils._pair(0), - self.dilation, - self.groups, - ) - - -@contextmanager -def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL], seamless_axes: List[str]): - try: - to_restore = [] - - for m_name, m in model.named_modules(): - if isinstance(model, UNet2DConditionModel): - if ".attentions." in m_name: - continue - - if ".resnets." in m_name: - if ".conv2" in m_name: - continue - if ".conv_shortcut" in m_name: - continue - - """ - if isinstance(model, UNet2DConditionModel): - if False and ".upsamplers." in m_name: - continue - - if False and ".downsamplers." in m_name: - continue - - if True and ".resnets." in m_name: - if True and ".conv1" in m_name: - if False and "down_blocks" in m_name: - continue - if False and "mid_block" in m_name: - continue - if False and "up_blocks" in m_name: - continue - - if True and ".conv2" in m_name: - continue - - if True and ".conv_shortcut" in m_name: - continue - - if True and ".attentions." 
in m_name: - continue - - if False and m_name in ["conv_in", "conv_out"]: - continue - """ - - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - m.asymmetric_padding_mode = {} - m.asymmetric_padding = {} - m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant" - m.asymmetric_padding["x"] = ( - m._reversed_padding_repeated_twice[0], - m._reversed_padding_repeated_twice[1], - 0, - 0, - ) - m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant" - m.asymmetric_padding["y"] = ( - 0, - 0, - m._reversed_padding_repeated_twice[2], - m._reversed_padding_repeated_twice[3], - ) - - to_restore.append((m, m._conv_forward)) - m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d) - - yield - - finally: - for module, orig_conv_forward in to_restore: - module._conv_forward = orig_conv_forward - if hasattr(module, "asymmetric_padding_mode"): - del module.asymmetric_padding_mode - if hasattr(module, "asymmetric_padding"): - del module.asymmetric_padding diff --git a/invokeai/backend/model_manager/__init__.py b/invokeai/backend/model_manager/__init__.py index 0f16852c93..966e6ae21d 100644 --- a/invokeai/backend/model_manager/__init__.py +++ b/invokeai/backend/model_manager/__init__.py @@ -1,6 +1,7 @@ """Re-export frequently-used symbols from the Model Manager backend.""" from .config import ( + AnyModel, AnyModelConfig, BaseModelType, InvalidModelConfigException, @@ -12,14 +13,17 @@ from .config import ( SchedulerPredictionType, SubModelType, ) +from .load import LoadedModel from .probe import ModelProbe from .search import ModelSearch __all__ = [ + "AnyModel", "AnyModelConfig", "BaseModelType", "ModelRepoVariant", "InvalidModelConfigException", + "LoadedModel", "ModelConfigFactory", "ModelFormat", "ModelProbe", @@ -29,3 +33,42 @@ __all__ = [ "SchedulerPredictionType", "SubModelType", ] + +########## to help populate the openapi_schema with format enums for each config ########### +# This code is no longer necessary? +# leave it here just in case +# +# import inspect +# from enum import Enum +# from typing import Any, Iterable, Dict, get_args, Set +# def _expand(something: Any) -> Iterable[type]: +# if isinstance(something, type): +# yield something +# else: +# for x in get_args(something): +# for y in _expand(x): +# yield y + +# def _find_format(cls: type) -> Iterable[Enum]: +# if hasattr(inspect, "get_annotations"): +# fields = inspect.get_annotations(cls) +# else: +# fields = cls.__annotations__ +# if "format" in fields: +# for x in get_args(fields["format"]): +# yield x +# for parent_class in cls.__bases__: +# for x in _find_format(parent_class): +# yield x +# return None + +# def get_model_config_formats() -> Dict[str, Set[Enum]]: +# result: Dict[str, Set[Enum]] = {} +# for model_config in _expand(AnyModelConfig): +# for field in _find_format(model_config): +# if field is None: +# continue +# if not result.get(model_config.__qualname__): +# result[model_config.__qualname__] = set() +# result[model_config.__qualname__].add(field) +# return result diff --git a/invokeai/backend/model_manager/config.py b/invokeai/backend/model_manager/config.py index 964cc19f19..e22f74c767 100644 --- a/invokeai/backend/model_manager/config.py +++ b/invokeai/backend/model_manager/config.py @@ -19,12 +19,22 @@ Typical usage: Validation errors will raise an InvalidModelConfigException error. 
""" + +import time from enum import Enum from typing import Literal, Optional, Type, Union +import torch +from diffusers import ModelMixin from pydantic import BaseModel, ConfigDict, Field, TypeAdapter from typing_extensions import Annotated, Any, Dict +from ..raw_model import RawModel + +# ModelMixin is the base class for all diffusers and transformers models +# RawModel is the InvokeAI wrapper class for ip_adapters, loras, textual_inversion and onnx runtime +AnyModel = Union[ModelMixin, RawModel, torch.nn.Module] + class InvalidModelConfigException(Exception): """Exception for when config parser doesn't recognized this combination of model type and format.""" @@ -102,7 +112,7 @@ class SchedulerPredictionType(str, Enum): class ModelRepoVariant(str, Enum): """Various hugging face variants on the diffusers format.""" - DEFAULT = "default" # model files without "fp16" or other qualifier + DEFAULT = "" # model files without "fp16" or other qualifier - empty str FP16 = "fp16" FP32 = "fp32" ONNX = "onnx" @@ -113,11 +123,11 @@ class ModelRepoVariant(str, Enum): class ModelConfigBase(BaseModel): """Base class for model configuration information.""" - path: str - name: str - base: BaseModelType - type: ModelType - format: ModelFormat + path: str = Field(description="filesystem path to the model file or directory") + name: str = Field(description="model name") + base: BaseModelType = Field(description="base model") + type: ModelType = Field(description="type of the model") + format: ModelFormat = Field(description="model format") key: str = Field(description="unique key for model", default="") original_hash: Optional[str] = Field( description="original fasthash of model contents", default=None @@ -125,12 +135,20 @@ class ModelConfigBase(BaseModel): current_hash: Optional[str] = Field( description="current fasthash of model contents", default=None ) # if model is converted or otherwise modified, this will hold updated hash - description: Optional[str] = Field(default=None) - source: Optional[str] = Field(description="Model download source (URL or repo_id)", default=None) + description: Optional[str] = Field(description="human readable description of the model", default=None) + source: Optional[str] = Field(description="model original source (path, URL or repo_id)", default=None) + last_modified: Optional[float] = Field(description="timestamp for modification time", default_factory=time.time) + + @staticmethod + def json_schema_extra(schema: dict[str, Any], model_class: Type[BaseModel]) -> None: + schema["required"].extend( + ["key", "base", "type", "format", "original_hash", "current_hash", "source", "last_modified"] + ) model_config = ConfigDict( use_enum_values=False, validate_assignment=True, + json_schema_extra=json_schema_extra, ) def update(self, attributes: Dict[str, Any]) -> None: @@ -150,6 +168,7 @@ class _DiffusersConfig(ModelConfigBase): """Model config for diffusers-style models.""" format: Literal[ModelFormat.Diffusers] = ModelFormat.Diffusers + repo_variant: Optional[ModelRepoVariant] = ModelRepoVariant.DEFAULT class LoRAConfig(ModelConfigBase): @@ -199,6 +218,8 @@ class _MainConfig(ModelConfigBase): vae: Optional[str] = Field(default=None) variant: ModelVariantType = ModelVariantType.Normal + prediction_type: SchedulerPredictionType = SchedulerPredictionType.Epsilon + upcast_attention: bool = False ztsnr_training: bool = False @@ -212,35 +233,13 @@ class MainDiffusersConfig(_DiffusersConfig, _MainConfig): """Model config for main diffusers models.""" type: Literal[ModelType.Main] = 
ModelType.Main - prediction_type: SchedulerPredictionType = SchedulerPredictionType.Epsilon - upcast_attention: bool = False - - -class ONNXSD1Config(_MainConfig): - """Model config for ONNX format models based on sd-1.""" - - type: Literal[ModelType.ONNX] = ModelType.ONNX - format: Literal[ModelFormat.Onnx, ModelFormat.Olive] - base: Literal[BaseModelType.StableDiffusion1] = BaseModelType.StableDiffusion1 - prediction_type: SchedulerPredictionType = SchedulerPredictionType.Epsilon - upcast_attention: bool = False - - -class ONNXSD2Config(_MainConfig): - """Model config for ONNX format models based on sd-2.""" - - type: Literal[ModelType.ONNX] = ModelType.ONNX - format: Literal[ModelFormat.Onnx, ModelFormat.Olive] - # No yaml config file for ONNX, so these are part of config - base: Literal[BaseModelType.StableDiffusion2] = BaseModelType.StableDiffusion2 - prediction_type: SchedulerPredictionType = SchedulerPredictionType.VPrediction - upcast_attention: bool = True class IPAdapterConfig(ModelConfigBase): """Model config for IP Adaptor format models.""" type: Literal[ModelType.IPAdapter] = ModelType.IPAdapter + image_encoder_model_id: str format: Literal[ModelFormat.InvokeAI] @@ -258,7 +257,6 @@ class T2IConfig(ModelConfigBase): format: Literal[ModelFormat.Diffusers] -_ONNXConfig = Annotated[Union[ONNXSD1Config, ONNXSD2Config], Field(discriminator="base")] _ControlNetConfig = Annotated[ Union[ControlNetDiffusersConfig, ControlNetCheckpointConfig], Field(discriminator="format"), @@ -268,9 +266,9 @@ _MainModelConfig = Annotated[Union[MainDiffusersConfig, MainCheckpointConfig], F AnyModelConfig = Union[ _MainModelConfig, - _ONNXConfig, _VaeConfig, _ControlNetConfig, + # ModelConfigBase, LoRAConfig, TextualInversionConfig, IPAdapterConfig, @@ -280,6 +278,7 @@ AnyModelConfig = Union[ AnyModelConfigValidator = TypeAdapter(AnyModelConfig) + # IMPLEMENTATION NOTE: # The preferred alternative to the above is a discriminated Union as shown # below. However, it breaks FastAPI when used as the input Body parameter in a route. @@ -308,9 +307,10 @@ class ModelConfigFactory(object): @classmethod def make_config( cls, - model_data: Union[dict, AnyModelConfig], + model_data: Union[Dict[str, Any], AnyModelConfig], key: Optional[str] = None, - dest_class: Optional[Type] = None, + dest_class: Optional[Type[ModelConfigBase]] = None, + timestamp: Optional[float] = None, ) -> AnyModelConfig: """ Return the appropriate config object from raw dict values. @@ -321,12 +321,17 @@ class ModelConfigFactory(object): :param dest_class: The config class to be returned. If not provided, will be selected automatically. """ + model: Optional[ModelConfigBase] = None if isinstance(model_data, ModelConfigBase): model = model_data elif dest_class: - model = dest_class.validate_python(model_data) + model = dest_class.model_validate(model_data) else: - model = AnyModelConfigValidator.validate_python(model_data) + # mypy doesn't typecheck TypeAdapters well? 
+ model = AnyModelConfigValidator.validate_python(model_data) # type: ignore + assert model is not None if key: model.key = key - return model + if timestamp: + model.last_modified = timestamp + return model # type: ignore diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_manager/convert_ckpt_to_diffusers.py similarity index 99% rename from invokeai/backend/model_management/convert_ckpt_to_diffusers.py rename to invokeai/backend/model_manager/convert_ckpt_to_diffusers.py index 6878218f67..6b3e953b38 100644 --- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py +++ b/invokeai/backend/model_manager/convert_ckpt_to_diffusers.py @@ -15,7 +15,7 @@ # # Adapted for use in InvokeAI by Lincoln Stein, July 2023 # -""" Conversion script for the Stable Diffusion checkpoints.""" +"""Conversion script for the Stable Diffusion checkpoints.""" import re from contextlib import nullcontext @@ -57,10 +57,9 @@ from transformers import ( ) from invokeai.app.services.config import InvokeAIAppConfig +from invokeai.backend.model_manager import BaseModelType, ModelVariantType from invokeai.backend.util.logging import InvokeAILogger -from .models import BaseModelType, ModelVariantType - try: from omegaconf import OmegaConf from omegaconf.dictconfig import DictConfig @@ -1643,6 +1642,8 @@ def download_controlnet_from_original_ckpt( cross_attention_dim: Optional[bool] = None, scan_needed: bool = False, ) -> DiffusionPipeline: + from omegaconf import OmegaConf + if from_safetensors: from safetensors import safe_open @@ -1718,6 +1719,7 @@ def convert_ckpt_to_diffusers( """ pipe = download_from_original_stable_diffusion_ckpt(checkpoint_path, **kwargs) + # TO DO: save correct repo variant pipe.save_pretrained( dump_path, safe_serialization=use_safetensors, @@ -1736,4 +1738,5 @@ def convert_controlnet_to_diffusers( """ pipe = download_controlnet_from_original_ckpt(checkpoint_path, **kwargs) + # TO DO: save correct repo variant pipe.save_pretrained(dump_path, safe_serialization=True) diff --git a/invokeai/backend/model_management/libc_util.py b/invokeai/backend/model_manager/libc_util.py similarity index 100% rename from invokeai/backend/model_management/libc_util.py rename to invokeai/backend/model_manager/libc_util.py diff --git a/invokeai/backend/model_manager/load/__init__.py b/invokeai/backend/model_manager/load/__init__.py new file mode 100644 index 0000000000..f47a2c4368 --- /dev/null +++ b/invokeai/backend/model_manager/load/__init__.py @@ -0,0 +1,28 @@ +# Copyright (c) 2024 Lincoln D. Stein and the InvokeAI Development Team +""" +Init file for the model loader. 
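The `make_config()` hunk above routes raw dicts through `AnyModelConfigValidator`, a pydantic `TypeAdapter` over the `AnyModelConfig` union. A minimal, self-contained sketch of that mechanism follows; the two `Demo*` classes are illustrative stand-ins, not the real config classes:

```python
# Hedged sketch: union validation with pydantic v2's TypeAdapter, the same
# mechanism AnyModelConfigValidator.validate_python() relies on above.
from typing import Literal, Union

from pydantic import BaseModel, TypeAdapter


class DemoDiffusersConfig(BaseModel):
    format: Literal["diffusers"]
    path: str


class DemoCheckpointConfig(BaseModel):
    format: Literal["checkpoint"]
    path: str


validator = TypeAdapter(Union[DemoDiffusersConfig, DemoCheckpointConfig])

# pydantic picks the matching union member from the raw dict
config = validator.validate_python({"format": "checkpoint", "path": "sd15.safetensors"})
assert isinstance(config, DemoCheckpointConfig)
```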
+""" + +from importlib import import_module +from pathlib import Path + +from .convert_cache.convert_cache_default import ModelConvertCache +from .load_base import LoadedModel, ModelLoaderBase +from .load_default import ModelLoader +from .model_cache.model_cache_default import ModelCache +from .model_loader_registry import ModelLoaderRegistry, ModelLoaderRegistryBase + +# This registers the subclasses that implement loaders of specific model types +loaders = [x.stem for x in Path(Path(__file__).parent, "model_loaders").glob("*.py") if x.stem != "__init__"] +for module in loaders: + import_module(f"{__package__}.model_loaders.{module}") + +__all__ = [ + "LoadedModel", + "ModelCache", + "ModelConvertCache", + "ModelLoaderBase", + "ModelLoader", + "ModelLoaderRegistryBase", + "ModelLoaderRegistry", +] diff --git a/invokeai/backend/model_manager/load/convert_cache/__init__.py b/invokeai/backend/model_manager/load/convert_cache/__init__.py new file mode 100644 index 0000000000..5be56d2d58 --- /dev/null +++ b/invokeai/backend/model_manager/load/convert_cache/__init__.py @@ -0,0 +1,4 @@ +from .convert_cache_base import ModelConvertCacheBase +from .convert_cache_default import ModelConvertCache + +__all__ = ["ModelConvertCacheBase", "ModelConvertCache"] diff --git a/invokeai/backend/model_manager/load/convert_cache/convert_cache_base.py b/invokeai/backend/model_manager/load/convert_cache/convert_cache_base.py new file mode 100644 index 0000000000..ef363cc7f4 --- /dev/null +++ b/invokeai/backend/model_manager/load/convert_cache/convert_cache_base.py @@ -0,0 +1,28 @@ +""" +Disk-based converted model cache. +""" + +from abc import ABC, abstractmethod +from pathlib import Path + + +class ModelConvertCacheBase(ABC): + @property + @abstractmethod + def max_size(self) -> float: + """Return the maximum size of this cache directory.""" + pass + + @abstractmethod + def make_room(self, size: float) -> None: + """ + Make sufficient room in the cache directory for a model of max_size. + + :param size: Size required (GB) + """ + pass + + @abstractmethod + def cache_path(self, key: str) -> Path: + """Return the path for a model with the indicated key.""" + pass diff --git a/invokeai/backend/model_manager/load/convert_cache/convert_cache_default.py b/invokeai/backend/model_manager/load/convert_cache/convert_cache_default.py new file mode 100644 index 0000000000..84f4f76299 --- /dev/null +++ b/invokeai/backend/model_manager/load/convert_cache/convert_cache_default.py @@ -0,0 +1,72 @@ +""" +Placeholder for convert cache implementation. +""" + +import shutil +from pathlib import Path + +from invokeai.backend.util import GIG, directory_size +from invokeai.backend.util.logging import InvokeAILogger + +from .convert_cache_base import ModelConvertCacheBase + + +class ModelConvertCache(ModelConvertCacheBase): + def __init__(self, cache_path: Path, max_size: float = 10.0): + """Initialize the convert cache with the base directory and a limit on its maximum size (in GBs).""" + if not cache_path.exists(): + cache_path.mkdir(parents=True) + self._cache_path = cache_path + self._max_size = max_size + + @property + def max_size(self) -> float: + """Return the maximum size of this cache directory (GB).""" + return self._max_size + + def cache_path(self, key: str) -> Path: + """Return the path for a model with the indicated key.""" + return self._cache_path / key + + def make_room(self, size: float) -> None: + """ + Make sufficient room in the cache directory for a model of max_size. 
+ + :param size: Size required (GB) + """ + size_needed = directory_size(self._cache_path) + size + max_size = int(self.max_size) * GIG + logger = InvokeAILogger.get_logger() + + if size_needed <= max_size: + return + + logger.debug( + f"Convert cache has gotten too large: {(size_needed / GIG):4.2f}GB > {(max_size / GIG):4.2f}GB. Trimming." + ) + + # For this to work, we make the assumption that the directory contains + # a 'model_index.json', 'unet/config.json' file, or a 'config.json' file at top level. + # This should be true for any diffusers model. + def by_atime(path: Path) -> float: + for config in ["model_index.json", "unet/config.json", "config.json"]: + sentinel = path / config + if sentinel.exists(): + return sentinel.stat().st_atime + + # no sentinel file found! - pick the most recent file in the directory + try: + atimes = sorted([x.stat().st_atime for x in path.iterdir() if x.is_file()], reverse=True) + return atimes[0] + except IndexError: + return 0.0 + + # sort by last access time - least accessed files will be at the end + lru_models = sorted(self._cache_path.iterdir(), key=by_atime, reverse=True) + logger.debug(f"cached models in descending atime order: {lru_models}") + while size_needed > max_size and len(lru_models) > 0: + next_victim = lru_models.pop() + victim_size = directory_size(next_victim) + logger.debug(f"Removing cached converted model {next_victim} to free {victim_size / GIG} GB") + shutil.rmtree(next_victim) + size_needed -= victim_size diff --git a/invokeai/backend/model_manager/load/load_base.py b/invokeai/backend/model_manager/load/load_base.py new file mode 100644 index 0000000000..b8ce56eb16 --- /dev/null +++ b/invokeai/backend/model_manager/load/load_base.py @@ -0,0 +1,85 @@ +# Copyright (c) 2024, Lincoln D. Stein and the InvokeAI Development Team +""" +Base class for model loading in InvokeAI. +""" + +from abc import ABC, abstractmethod +from dataclasses import dataclass +from logging import Logger +from pathlib import Path +from typing import Any, Optional + +from invokeai.app.services.config import InvokeAIAppConfig +from invokeai.backend.model_manager.config import ( + AnyModel, + AnyModelConfig, + SubModelType, +) +from invokeai.backend.model_manager.load.convert_cache.convert_cache_base import ModelConvertCacheBase +from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase, ModelLockerBase + + +@dataclass +class LoadedModel: + """Context manager object that mediates transfer from RAM<->VRAM.""" + + config: AnyModelConfig + _locker: ModelLockerBase + + def __enter__(self) -> AnyModel: + """Context entry.""" + self._locker.lock() + return self.model + + def __exit__(self, *args: Any, **kwargs: Any) -> None: + """Context exit.""" + self._locker.unlock() + + @property + def model(self) -> AnyModel: + """Return the model without locking it.""" + return self._locker.model + + +# TODO(MM2): +# Some "intermediary" subclasses in the ModelLoaderBase class hierarchy define methods that their subclasses don't +# know about. I think the problem may be related to this class being an ABC. +# +# For example, GenericDiffusersLoader defines `get_hf_load_class()`, and StableDiffusionDiffusersModel attempts to +# call it. However, the method is not defined in the ABC, so it is not guaranteed to be implemented.
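To make the RAM<->VRAM handoff concrete, here is a self-contained sketch of the locker contract that `LoadedModel` drives through the context-manager protocol; the `_Demo*` classes are illustrative stand-ins, not the InvokeAI implementations:

```python
# Hedged sketch of the LoadedModel/ModelLockerBase interaction defined above.
# _DemoLocker stands in for a real locker that would move weights to the GPU.
from dataclasses import dataclass
from typing import Any


class _DemoLocker:
    def __init__(self, model: Any) -> None:
        self.model = model
        self.locked = False

    def lock(self) -> Any:
        self.locked = True  # a real locker moves the model to the execution device here
        return self.model

    def unlock(self) -> None:
        self.locked = False  # a real locker may offload the model from VRAM here


@dataclass
class _DemoLoadedModel:
    _locker: _DemoLocker

    def __enter__(self) -> Any:
        self._locker.lock()
        return self._locker.model

    def __exit__(self, *args: Any) -> None:
        self._locker.unlock()


locker = _DemoLocker(model="weights")
with _DemoLoadedModel(_locker=locker) as model:
    assert locker.locked and model == "weights"  # model is usable while locked
assert not locker.locked  # unlocked on context exit
```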
+ + +class ModelLoaderBase(ABC): + """Abstract base class for loading models into RAM/VRAM.""" + + @abstractmethod + def __init__( + self, + app_config: InvokeAIAppConfig, + logger: Logger, + ram_cache: ModelCacheBase[AnyModel], + convert_cache: ModelConvertCacheBase, + ): + """Initialize the loader.""" + pass + + @abstractmethod + def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> LoadedModel: + """ + Return a model given its configuration. + + Given a model identified in the model configuration backend, + return a ModelInfo object that can be used to retrieve the model. + + :param model_config: Model configuration, as returned by ModelConfigRecordStore + :param submodel_type: a ModelType enum indicating the portion of + the model to retrieve (e.g. ModelType.Vae) + """ + pass + + @abstractmethod + def get_size_fs( + self, config: AnyModelConfig, model_path: Path, submodel_type: Optional[SubModelType] = None + ) -> int: + """Return size in bytes of the model, calculated before loading.""" + pass diff --git a/invokeai/backend/model_manager/load/load_default.py b/invokeai/backend/model_manager/load/load_default.py new file mode 100644 index 0000000000..642cffaf4b --- /dev/null +++ b/invokeai/backend/model_manager/load/load_default.py @@ -0,0 +1,136 @@ +# Copyright (c) 2024, Lincoln D. Stein and the InvokeAI Development Team +"""Default implementation of model loading in InvokeAI.""" + +from logging import Logger +from pathlib import Path +from typing import Optional, Tuple + +from invokeai.app.services.config import InvokeAIAppConfig +from invokeai.backend.model_manager import ( + AnyModel, + AnyModelConfig, + InvalidModelConfigException, + ModelRepoVariant, + SubModelType, +) +from invokeai.backend.model_manager.load.convert_cache import ModelConvertCacheBase +from invokeai.backend.model_manager.load.load_base import LoadedModel, ModelLoaderBase +from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase, ModelLockerBase +from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data, calc_model_size_by_fs +from invokeai.backend.model_manager.load.optimizations import skip_torch_weight_init +from invokeai.backend.util.devices import choose_torch_device, torch_dtype + + +# TO DO: The loader is not thread safe! +class ModelLoader(ModelLoaderBase): + """Default implementation of ModelLoaderBase.""" + + def __init__( + self, + app_config: InvokeAIAppConfig, + logger: Logger, + ram_cache: ModelCacheBase[AnyModel], + convert_cache: ModelConvertCacheBase, + ): + """Initialize the loader.""" + self._app_config = app_config + self._logger = logger + self._ram_cache = ram_cache + self._convert_cache = convert_cache + self._torch_dtype = torch_dtype(choose_torch_device(), app_config) + + def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> LoadedModel: + """ + Return a model given its configuration. + + Given a model's configuration as returned by the ModelRecordConfigStore service, + return a LoadedModel object that can be used for inference. + + :param model_config: Configuration record for this model + :param submodel_type: a ModelType enum indicating the portion of + the model to retrieve (e.g.
ModelType.Vae) + """ + if model_config.type == "main" and not submodel_type: + raise InvalidModelConfigException("submodel_type is required when loading a main model") + + model_path, model_config, submodel_type = self._get_model_path(model_config, submodel_type) + + if not model_path.exists(): + raise InvalidModelConfigException(f"Files for model '{model_config.name}' not found at {model_path}") + + model_path = self._convert_if_needed(model_config, model_path, submodel_type) + locker = self._load_if_needed(model_config, model_path, submodel_type) + return LoadedModel(config=model_config, _locker=locker) + + def _get_model_path( + self, config: AnyModelConfig, submodel_type: Optional[SubModelType] = None + ) -> Tuple[Path, AnyModelConfig, Optional[SubModelType]]: + model_base = self._app_config.models_path + result = (model_base / config.path).resolve(), config, submodel_type + return result + + def _convert_if_needed( + self, config: AnyModelConfig, model_path: Path, submodel_type: Optional[SubModelType] = None + ) -> Path: + cache_path: Path = self._convert_cache.cache_path(config.key) + + if not self._needs_conversion(config, model_path, cache_path): + return cache_path if cache_path.exists() else model_path + + self._convert_cache.make_room(self.get_size_fs(config, model_path, submodel_type)) + return self._convert_model(config, model_path, cache_path) + + def _needs_conversion(self, config: AnyModelConfig, model_path: Path, cache_path: Path) -> bool: + return False + + def _load_if_needed( + self, config: AnyModelConfig, model_path: Path, submodel_type: Optional[SubModelType] = None + ) -> ModelLockerBase: + # TO DO: This is not thread safe! + try: + return self._ram_cache.get(config.key, submodel_type) + except IndexError: + pass + + model_variant = getattr(config, "repo_variant", None) + self._ram_cache.make_room(self.get_size_fs(config, model_path, submodel_type)) + + # This is where the model is actually loaded! 
+ with skip_torch_weight_init(): + loaded_model = self._load_model(model_path, model_variant=model_variant, submodel_type=submodel_type) + + self._ram_cache.put( + config.key, + submodel_type=submodel_type, + model=loaded_model, + size=calc_model_size_by_data(loaded_model), + ) + + return self._ram_cache.get( + key=config.key, + submodel_type=submodel_type, + stats_name=":".join([config.base, config.type, config.name, (submodel_type or "")]), + ) + + def get_size_fs( + self, config: AnyModelConfig, model_path: Path, submodel_type: Optional[SubModelType] = None + ) -> int: + """Get the size of the model on disk.""" + return calc_model_size_by_fs( + model_path=model_path, + subfolder=submodel_type.value if submodel_type else None, + variant=config.repo_variant if hasattr(config, "repo_variant") else None, + ) + + # This needs to be implemented in subclasses that handle checkpoints + def _convert_model(self, config: AnyModelConfig, model_path: Path, output_path: Path) -> Path: + raise NotImplementedError + + # This needs to be implemented in the subclass + def _load_model( + self, + model_path: Path, + model_variant: Optional[ModelRepoVariant] = None, + submodel_type: Optional[SubModelType] = None, + ) -> AnyModel: + raise NotImplementedError diff --git a/invokeai/backend/model_management/memory_snapshot.py b/invokeai/backend/model_manager/load/memory_snapshot.py similarity index 94% rename from invokeai/backend/model_management/memory_snapshot.py rename to invokeai/backend/model_manager/load/memory_snapshot.py index fe54af191c..195e39361b 100644 --- a/invokeai/backend/model_management/memory_snapshot.py +++ b/invokeai/backend/model_manager/load/memory_snapshot.py @@ -3,8 +3,9 @@ from typing import Optional import psutil import torch +from typing_extensions import Self -from invokeai.backend.model_management.libc_util import LibcUtil, Struct_mallinfo2 +from ..util.libc_util import LibcUtil, Struct_mallinfo2 GB = 2**30 # 1 GB @@ -27,7 +28,7 @@ class MemorySnapshot: self.malloc_info = malloc_info @classmethod - def capture(cls, run_garbage_collector: bool = True): + def capture(cls, run_garbage_collector: bool = True) -> Self: """Capture and return a MemorySnapshot. Note: This function has significant overhead, particularly if `run_garbage_collector == True`. 
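The control flow in `_load_if_needed()` reduces to a classic "consult the cache, otherwise load and register" pattern. A minimal sketch under that reading (a plain dict stands in for the RAM cache, and note the real cache signals a miss with `IndexError` rather than `KeyError`):

```python
# Hedged sketch of the cache-first loading pattern used by _load_if_needed().
from typing import Any, Dict

_ram_cache: Dict[str, Any] = {}  # stand-in for ModelCacheBase


def load_if_needed(key: str) -> Any:
    try:
        return _ram_cache[key]  # cache hit: reuse the already-loaded model
    except KeyError:
        pass
    model = f"loaded:{key}"  # stand-in for the expensive torch load from disk
    _ram_cache[key] = model  # register so subsequent calls are hits
    return _ram_cache[key]


assert load_if_needed("sd15:vae") == "loaded:sd15:vae"  # miss, then load
assert load_if_needed("sd15:vae") == "loaded:sd15:vae"  # hit
```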
@@ -67,7 +68,7 @@ class MemorySnapshot: def get_pretty_snapshot_diff(snapshot_1: Optional[MemorySnapshot], snapshot_2: Optional[MemorySnapshot]) -> str: """Get a pretty string describing the difference between two `MemorySnapshot`s.""" - def get_msg_line(prefix: str, val1: int, val2: int): + def get_msg_line(prefix: str, val1: int, val2: int) -> str: diff = val2 - val1 return f"{prefix: <30} ({(diff/GB):+5.3f}): {(val1/GB):5.3f}GB -> {(val2/GB):5.3f}GB\n" diff --git a/invokeai/backend/model_manager/load/model_cache/__init__.py b/invokeai/backend/model_manager/load/model_cache/__init__.py new file mode 100644 index 0000000000..32c682d042 --- /dev/null +++ b/invokeai/backend/model_manager/load/model_cache/__init__.py @@ -0,0 +1,6 @@ +"""Init file for ModelCache.""" + +from .model_cache_base import ModelCacheBase, CacheStats # noqa: F401 +from .model_cache_default import ModelCache # noqa: F401 + +__all__ = ["ModelCacheBase", "ModelCache", "CacheStats"] diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_base.py b/invokeai/backend/model_manager/load/model_cache/model_cache_base.py new file mode 100644 index 0000000000..eb82f87cb2 --- /dev/null +++ b/invokeai/backend/model_manager/load/model_cache/model_cache_base.py @@ -0,0 +1,188 @@ +# Copyright (c) 2024 Lincoln D. Stein and the InvokeAI Development team +# TODO: Add Stalker's proper name to copyright +""" +Manage a RAM cache of diffusion/transformer models for fast switching. +They are moved between GPU VRAM and CPU RAM as necessary. If the cache +grows larger than a preset maximum, then the least recently used +model will be cleared and (re)loaded from disk when next needed. +""" + +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from logging import Logger +from typing import Dict, Generic, Optional, TypeVar + +import torch + +from invokeai.backend.model_manager.config import AnyModel, SubModelType + + +class ModelLockerBase(ABC): + """Base class for the model locker used by the loader.""" + + @abstractmethod + def lock(self) -> AnyModel: + """Lock the contained model and move it into VRAM.""" + pass + + @abstractmethod + def unlock(self) -> None: + """Unlock the contained model, and remove it from VRAM.""" + pass + + @property + @abstractmethod + def model(self) -> AnyModel: + """Return the model.""" + pass + + +T = TypeVar("T") + + +@dataclass +class CacheRecord(Generic[T]): + """Elements of the cache.""" + + key: str + model: T + size: int + loaded: bool = False + _locks: int = 0 + + def lock(self) -> None: + """Lock this record.""" + self._locks += 1 + + def unlock(self) -> None: + """Unlock this record.""" + self._locks -= 1 + assert self._locks >= 0 + + @property + def locked(self) -> bool: + """Return true if record is locked.""" + return self._locks > 0 + + +@dataclass +class CacheStats(object): + """Collect statistics on cache performance.""" + + hits: int = 0 # cache hits + misses: int = 0 # cache misses + high_watermark: int = 0 # amount of cache used + in_cache: int = 0 # number of models in cache + cleared: int = 0 # number of models cleared to make space + cache_size: int = 0 # total size of cache + loaded_model_sizes: Dict[str, int] = field(default_factory=dict) + + +class ModelCacheBase(ABC, Generic[T]): + """Virtual base class for RAM model cache.""" + + @property + @abstractmethod + def storage_device(self) -> torch.device: + """Return the storage device (e.g.
"CPU" for RAM).""" + pass + + @property + @abstractmethod + def execution_device(self) -> torch.device: + """Return the exection device (e.g. "cuda" for VRAM).""" + pass + + @property + @abstractmethod + def lazy_offloading(self) -> bool: + """Return true if the cache is configured to lazily offload models in VRAM.""" + pass + + @property + @abstractmethod + def max_cache_size(self) -> float: + """Return true if the cache is configured to lazily offload models in VRAM.""" + pass + + @abstractmethod + def offload_unlocked_models(self, size_required: int) -> None: + """Offload from VRAM any models not actively in use.""" + pass + + @abstractmethod + def move_model_to_device(self, cache_entry: CacheRecord[AnyModel], target_device: torch.device) -> None: + """Move model into the indicated device.""" + pass + + @property + @abstractmethod + def stats(self) -> CacheStats: + """Return collected CacheStats object.""" + pass + + @stats.setter + @abstractmethod + def stats(self, stats: CacheStats) -> None: + """Set the CacheStats object for collectin cache statistics.""" + pass + + @property + @abstractmethod + def logger(self) -> Logger: + """Return the logger used by the cache.""" + pass + + @abstractmethod + def make_room(self, size: int) -> None: + """Make enough room in the cache to accommodate a new model of indicated size.""" + pass + + @abstractmethod + def put( + self, + key: str, + model: T, + size: int, + submodel_type: Optional[SubModelType] = None, + ) -> None: + """Store model under key and optional submodel_type.""" + pass + + @abstractmethod + def get( + self, + key: str, + submodel_type: Optional[SubModelType] = None, + stats_name: Optional[str] = None, + ) -> ModelLockerBase: + """ + Retrieve model using key and optional submodel_type. + + :param key: Opaque model key + :param submodel_type: Type of the submodel to fetch + :param stats_name: A human-readable id for the model for the purposes of + stats reporting. + + This may raise an IndexError if the model is not in the cache. + """ + pass + + @abstractmethod + def exists( + self, + key: str, + submodel_type: Optional[SubModelType] = None, + ) -> bool: + """Return true if the model identified by key and submodel_type is in the cache.""" + pass + + @abstractmethod + def cache_size(self) -> int: + """Get the total size of the models currently cached.""" + pass + + @abstractmethod + def print_cuda_stats(self) -> None: + """Log debugging information on CUDA usage.""" + pass diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py new file mode 100644 index 0000000000..2933b169f6 --- /dev/null +++ b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py @@ -0,0 +1,426 @@ +# Copyright (c) 2024 Lincoln D. Stein and the InvokeAI Development team +# TODO: Add Stalker's proper name to copyright +""" +Manage a RAM cache of diffusion/transformer models for fast switching. +They are moved between GPU VRAM and CPU RAM as necessary. If the cache +grows larger than a preset maximum, then the least recently used +model will be cleared and (re)loaded from disk when next needed. + +The cache returns context manager generators designed to load the +model into the GPU within the context, and unload outside the +context. 
Use like this: + + cache = ModelCache(max_cache_size=7.5) + with cache.get_model('runwayml/stable-diffusion-1-5') as SD1, + cache.get_model('stabilityai/stable-diffusion-2') as SD2: + do_something_in_GPU(SD1,SD2) + + +""" + +import gc +import logging +import math +import sys +import time +from contextlib import suppress +from logging import Logger +from typing import Dict, List, Optional + +import torch + +from invokeai.backend.model_manager import AnyModel, SubModelType +from invokeai.backend.model_manager.load.memory_snapshot import MemorySnapshot, get_pretty_snapshot_diff +from invokeai.backend.util.devices import choose_torch_device +from invokeai.backend.util.logging import InvokeAILogger + +from .model_cache_base import CacheRecord, CacheStats, ModelCacheBase, ModelLockerBase +from .model_locker import ModelLocker + +if choose_torch_device() == torch.device("mps"): + from torch import mps + +# Maximum size of the cache, in gigs +# Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously +DEFAULT_MAX_CACHE_SIZE = 6.0 + +# amount of GPU memory to hold in reserve for use by generations (GB) +DEFAULT_MAX_VRAM_CACHE_SIZE = 2.75 + +# actual size of a gig +GIG = 1073741824 + +# Size of a MB in bytes. +MB = 2**20 + + +class ModelCache(ModelCacheBase[AnyModel]): + """Implementation of ModelCacheBase.""" + + def __init__( + self, + max_cache_size: float = DEFAULT_MAX_CACHE_SIZE, + max_vram_cache_size: float = DEFAULT_MAX_VRAM_CACHE_SIZE, + execution_device: torch.device = torch.device("cuda"), + storage_device: torch.device = torch.device("cpu"), + precision: torch.dtype = torch.float16, + sequential_offload: bool = False, + lazy_offloading: bool = True, + sha_chunksize: int = 16777216, + log_memory_usage: bool = False, + logger: Optional[Logger] = None, + ): + """ + Initialize the model RAM cache. + + :param max_cache_size: Maximum size of the RAM cache [6.0 GB] + :param execution_device: Torch device to load active model into [torch.device('cuda')] + :param storage_device: Torch device to save inactive model in [torch.device('cpu')] + :param precision: Precision for loaded models [torch.float16] + :param lazy_offloading: Keep model in VRAM until another model needs to be loaded + :param sequential_offload: Conserve VRAM by loading and unloading each stage of the pipeline sequentially + :param log_memory_usage: If True, a memory snapshot will be captured before and after every model cache + operation, and the result will be logged (at debug level). There is a time cost to capturing the memory + snapshots, so it is recommended to disable this feature unless you are actively inspecting the model cache's + behaviour. 
+ """ + # allow lazy offloading only when vram cache enabled + self._lazy_offloading = lazy_offloading and max_vram_cache_size > 0 + self._precision: torch.dtype = precision + self._max_cache_size: float = max_cache_size + self._max_vram_cache_size: float = max_vram_cache_size + self._execution_device: torch.device = execution_device + self._storage_device: torch.device = storage_device + self._logger = logger or InvokeAILogger.get_logger(self.__class__.__name__) + self._log_memory_usage = log_memory_usage or self._logger.level == logging.DEBUG + # used for stats collection + self._stats: Optional[CacheStats] = None + + self._cached_models: Dict[str, CacheRecord[AnyModel]] = {} + self._cache_stack: List[str] = [] + + @property + def logger(self) -> Logger: + """Return the logger used by the cache.""" + return self._logger + + @property + def lazy_offloading(self) -> bool: + """Return true if the cache is configured to lazily offload models in VRAM.""" + return self._lazy_offloading + + @property + def storage_device(self) -> torch.device: + """Return the storage device (e.g. "CPU" for RAM).""" + return self._storage_device + + @property + def execution_device(self) -> torch.device: + """Return the exection device (e.g. "cuda" for VRAM).""" + return self._execution_device + + @property + def max_cache_size(self) -> float: + """Return the cap on cache size.""" + return self._max_cache_size + + @property + def stats(self) -> Optional[CacheStats]: + """Return collected CacheStats object.""" + return self._stats + + @stats.setter + def stats(self, stats: CacheStats) -> None: + """Set the CacheStats object for collectin cache statistics.""" + self._stats = stats + + def cache_size(self) -> int: + """Get the total size of the models currently cached.""" + total = 0 + for cache_record in self._cached_models.values(): + total += cache_record.size + return total + + def exists( + self, + key: str, + submodel_type: Optional[SubModelType] = None, + ) -> bool: + """Return true if the model identified by key and submodel_type is in the cache.""" + key = self._make_cache_key(key, submodel_type) + return key in self._cached_models + + def put( + self, + key: str, + model: AnyModel, + size: int, + submodel_type: Optional[SubModelType] = None, + ) -> None: + """Store model under key and optional submodel_type.""" + key = self._make_cache_key(key, submodel_type) + assert key not in self._cached_models + + cache_record = CacheRecord(key, model, size) + self._cached_models[key] = cache_record + self._cache_stack.append(key) + + def get( + self, + key: str, + submodel_type: Optional[SubModelType] = None, + stats_name: Optional[str] = None, + ) -> ModelLockerBase: + """ + Retrieve model using key and optional submodel_type. + + :param key: Opaque model key + :param submodel_type: Type of the submodel to fetch + :param stats_name: A human-readable id for the model for the purposes of + stats reporting. + + This may raise an IndexError if the model is not in the cache. 
+ """ + key = self._make_cache_key(key, submodel_type) + if key in self._cached_models: + if self.stats: + self.stats.hits += 1 + else: + if self.stats: + self.stats.misses += 1 + raise IndexError(f"The model with key {key} is not in the cache.") + + cache_entry = self._cached_models[key] + + # more stats + if self.stats: + stats_name = stats_name or key + self.stats.cache_size = int(self._max_cache_size * GIG) + self.stats.high_watermark = max(self.stats.high_watermark, self.cache_size()) + self.stats.in_cache = len(self._cached_models) + self.stats.loaded_model_sizes[stats_name] = max( + self.stats.loaded_model_sizes.get(stats_name, 0), cache_entry.size + ) + + # this moves the entry to the top (right end) of the stack + with suppress(Exception): + self._cache_stack.remove(key) + self._cache_stack.append(key) + return ModelLocker( + cache=self, + cache_entry=cache_entry, + ) + + def _capture_memory_snapshot(self) -> Optional[MemorySnapshot]: + if self._log_memory_usage: + return MemorySnapshot.capture() + return None + + def _make_cache_key(self, model_key: str, submodel_type: Optional[SubModelType] = None) -> str: + if submodel_type: + return f"{model_key}:{submodel_type.value}" + else: + return model_key + + def offload_unlocked_models(self, size_required: int) -> None: + """Move any unused models from VRAM.""" + reserved = self._max_vram_cache_size * GIG + vram_in_use = torch.cuda.memory_allocated() + size_required + self.logger.debug(f"{(vram_in_use/GIG):.2f}GB VRAM needed for models; max allowed={(reserved/GIG):.2f}GB") + for _, cache_entry in sorted(self._cached_models.items(), key=lambda x: x[1].size): + if vram_in_use <= reserved: + break + if not cache_entry.loaded: + continue + if not cache_entry.locked: + self.move_model_to_device(cache_entry, self.storage_device) + cache_entry.loaded = False + vram_in_use = torch.cuda.memory_allocated() + size_required + self.logger.debug( + f"Removing {cache_entry.key} from VRAM to free {(cache_entry.size/GIG):.2f}GB; vram free = {(torch.cuda.memory_allocated()/GIG):.2f}GB" + ) + + torch.cuda.empty_cache() + if choose_torch_device() == torch.device("mps"): + mps.empty_cache() + + def move_model_to_device(self, cache_entry: CacheRecord[AnyModel], target_device: torch.device) -> None: + """Move model into the indicated device. + + :param cache_entry: The CacheRecord for the model + :param target_device: The torch.device to move the model into + + May raise a torch.cuda.OutOfMemoryError + """ + # These attributes are not in the base ModelMixin class but in various derived classes. + # Some models don't have these attributes, in which case they run in RAM/CPU. + self.logger.debug(f"Called to move {cache_entry.key} to {target_device}") + if not (hasattr(cache_entry.model, "device") and hasattr(cache_entry.model, "to")): + return + + source_device = cache_entry.model.device + + # Note: We compare device types only so that 'cuda' == 'cuda:0'. + # This would need to be revised to support multi-GPU. 
+ if torch.device(source_device).type == torch.device(target_device).type: + return + + # may raise an exception here if insufficient GPU VRAM + self._check_free_vram(target_device, cache_entry.size) + + start_model_to_time = time.time() + snapshot_before = self._capture_memory_snapshot() + cache_entry.model.to(target_device) + snapshot_after = self._capture_memory_snapshot() + end_model_to_time = time.time() + self.logger.debug( + f"Moved model '{cache_entry.key}' from {source_device} to" + f" {target_device} in {(end_model_to_time-start_model_to_time):.2f}s. " + f"Estimated model size: {(cache_entry.size/GIG):.3f} GB.\n" + f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}" + ) + + if ( + snapshot_before is not None + and snapshot_after is not None + and snapshot_before.vram is not None + and snapshot_after.vram is not None + ): + vram_change = abs(snapshot_before.vram - snapshot_after.vram) + + # If the estimated model size does not match the change in VRAM, log a warning. + if not math.isclose( + vram_change, + cache_entry.size, + rel_tol=0.1, + abs_tol=10 * MB, + ): + self.logger.debug( + f"Moving model '{cache_entry.key}' from {source_device} to" + f" {target_device} caused an unexpected change in VRAM usage. The model's" + " estimated size may be incorrect. Estimated model size:" + f" {(cache_entry.size/GIG):.3f} GB.\n" + f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}" + ) + + def print_cuda_stats(self) -> None: + """Log CUDA diagnostics.""" + vram = "%4.2fG" % (torch.cuda.memory_allocated() / GIG) + ram = "%4.2fG" % (self.cache_size() / GIG) + + in_ram_models = 0 + in_vram_models = 0 + locked_in_vram_models = 0 + for cache_record in self._cached_models.values(): + if hasattr(cache_record.model, "device"): + if cache_record.model.device == self.storage_device: + in_ram_models += 1 + else: + in_vram_models += 1 + if cache_record.locked: + locked_in_vram_models += 1 + + self.logger.debug( + f"Current VRAM/RAM usage: {vram}/{ram}; models_in_ram/models_in_vram(locked) =" + f" {in_ram_models}/{in_vram_models}({locked_in_vram_models})" + ) + + def make_room(self, model_size: int) -> None: + """Make enough room in the cache to accommodate a new model of indicated size.""" + # calculate how much memory this model will require + # multiplier = 2 if self.precision==torch.float32 else 1 + bytes_needed = model_size + maximum_size = self.max_cache_size * GIG # stored in GB, convert to bytes + current_size = self.cache_size() + + if current_size + bytes_needed > maximum_size: + self.logger.debug( + f"Max cache size exceeded: {(current_size/GIG):.2f}/{self.max_cache_size:.2f} GB, need an additional" + f" {(bytes_needed/GIG):.2f} GB" + ) + + self.logger.debug(f"Before making_room: cached_models={len(self._cached_models)}") + + pos = 0 + models_cleared = 0 + while current_size + bytes_needed > maximum_size and pos < len(self._cache_stack): + model_key = self._cache_stack[pos] + cache_entry = self._cached_models[model_key] + + refs = sys.getrefcount(cache_entry.model) + + # HACK: This is a workaround for a memory-management issue that we haven't tracked down yet.
We are directly + # going against the advice in the Python docs by using `gc.get_referrers(...)` in this way: + # https://docs.python.org/3/library/gc.html#gc.get_referrers + + # manually clear local variable references of just finished function calls + # for some reason Python doesn't want to collect it even by gc.collect() immediately + if refs > 2: + while True: + cleared = False + for referrer in gc.get_referrers(cache_entry.model): + if type(referrer).__name__ == "frame": + # RuntimeError: cannot clear an executing frame + with suppress(RuntimeError): + referrer.clear() + cleared = True + # break + + # repeat if referrers changed (due to frame clear), else exit loop + if cleared: + gc.collect() + else: + break + + device = cache_entry.model.device if hasattr(cache_entry.model, "device") else None + self.logger.debug( + f"Model: {model_key}, locks: {cache_entry._locks}, device: {device}, loaded: {cache_entry.loaded}," + f" refs: {refs}" + ) + + # Expected refs: + # 1 from cache_entry + # 1 from getrefcount function + # 1 from onnx runtime object + if not cache_entry.locked and refs <= (3 if "onnx" in model_key else 2): + self.logger.debug( + f"Removing {model_key} from RAM cache to free at least {(model_size/GIG):.2f} GB (-{(cache_entry.size/GIG):.2f} GB)" + ) + current_size -= cache_entry.size + models_cleared += 1 + del self._cache_stack[pos] + del self._cached_models[model_key] + del cache_entry + + else: + pos += 1 + + if models_cleared > 0: + # There would likely be some 'garbage' to be collected regardless of whether a model was cleared or not, but + # there is a significant time cost to calling `gc.collect()`, so we want to use it sparingly. (The time cost + # is high even if no garbage gets collected.) + # + # Calling gc.collect(...) when a model is cleared seems like a good middle-ground: + # - If models had to be cleared, it's a signal that we are close to our memory limit. + # - If models were cleared, there's a good chance that there's a significant amount of garbage to be + # collected. + # + # Keep in mind that gc is only responsible for handling reference cycles. Most objects should be cleaned up + # immediately when their reference count hits 0. + gc.collect() + + torch.cuda.empty_cache() + if choose_torch_device() == torch.device("mps"): + mps.empty_cache() + + self.logger.debug(f"After making room: cached_models={len(self._cached_models)}") + + def _check_free_vram(self, target_device: torch.device, needed_size: int) -> None: + if target_device.type != "cuda": + return + vram_device = ( # mem_get_info() needs an indexed device + target_device if target_device.index is not None else torch.device(str(target_device), index=0) + ) + free_mem, _ = torch.cuda.mem_get_info(torch.device(vram_device)) + if needed_size > free_mem: + raise torch.cuda.OutOfMemoryError diff --git a/invokeai/backend/model_manager/load/model_cache/model_locker.py b/invokeai/backend/model_manager/load/model_cache/model_locker.py new file mode 100644 index 0000000000..81dca346e5 --- /dev/null +++ b/invokeai/backend/model_manager/load/model_cache/model_locker.py @@ -0,0 +1,64 @@ +""" +Base class and implementation of a class that moves models in and out of VRAM.
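The eviction heuristic in `make_room()` above budgets for the fact that `sys.getrefcount()` reports one extra, temporary reference for its own argument; that is why the expected counts are 2 (or 3 for ONNX) rather than 1 or 2. A tiny sanity check of that behaviour, with a dict standing in for a cache record:

```python
# Hedged sketch of the reference counting that make_room() relies on.
import sys

model = object()
cache_entry = {"model": model}  # one reference held by the stand-in cache record
# References seen here: the `model` local, the dict value, and the temporary
# argument reference created by getrefcount() itself (CPython behaviour).
assert sys.getrefcount(cache_entry["model"]) >= 3
```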
+""" + +import torch + +from invokeai.backend.model_manager import AnyModel + +from .model_cache_base import CacheRecord, ModelCacheBase, ModelLockerBase + + +class ModelLocker(ModelLockerBase): + """Internal class that mediates movement in and out of GPU.""" + + def __init__(self, cache: ModelCacheBase[AnyModel], cache_entry: CacheRecord[AnyModel]): + """ + Initialize the model locker. + + :param cache: The ModelCache object + :param cache_entry: The entry in the model cache + """ + self._cache = cache + self._cache_entry = cache_entry + + @property + def model(self) -> AnyModel: + """Return the model without moving it around.""" + return self._cache_entry.model + + def lock(self) -> AnyModel: + """Move the model into the execution device (GPU) and lock it.""" + if not hasattr(self.model, "to"): + return self.model + + # NOTE that the model has to have the to() method in order for this code to move it into GPU! + self._cache_entry.lock() + + try: + if self._cache.lazy_offloading: + self._cache.offload_unlocked_models(self._cache_entry.size) + + self._cache.move_model_to_device(self._cache_entry, self._cache.execution_device) + self._cache_entry.loaded = True + + self._cache.logger.debug(f"Locking {self._cache_entry.key} in {self._cache.execution_device}") + self._cache.print_cuda_stats() + except torch.cuda.OutOfMemoryError: + self._cache.logger.warning("Insufficient GPU memory to load model. Aborting") + self._cache_entry.unlock() + raise + except Exception: + self._cache_entry.unlock() + raise + return self.model + + def unlock(self) -> None: + """Call upon exit from context.""" + if not hasattr(self.model, "to"): + return + + self._cache_entry.unlock() + if not self._cache.lazy_offloading: + self._cache.offload_unlocked_models(self._cache_entry.size) + self._cache.print_cuda_stats() diff --git a/invokeai/backend/model_manager/load/model_loader_registry.py b/invokeai/backend/model_manager/load/model_loader_registry.py new file mode 100644 index 0000000000..e7747650d4 --- /dev/null +++ b/invokeai/backend/model_manager/load/model_loader_registry.py @@ -0,0 +1,123 @@ +# Copyright (c) 2024 Lincoln D. Stein and the InvokeAI Development team +""" +This module implements a system in which model loaders register the +type, base and format of models that they know how to load. + +Use like this: + + cls, model_config, submodel_type = ModelLoaderRegistry.get_implementation(model_config, submodel_type) # type: ignore + loaded_model = cls( + app_config=app_config, + logger=logger, + ram_cache=ram_cache, + convert_cache=convert_cache + ).load_model(model_config, submodel_type) + +""" + +import hashlib +from abc import ABC, abstractmethod +from pathlib import Path +from typing import Callable, Dict, Optional, Tuple, Type + +from ..config import ( + AnyModelConfig, + BaseModelType, + ModelConfigBase, + ModelFormat, + ModelType, + SubModelType, + VaeCheckpointConfig, + VaeDiffusersConfig, +) +from . 
import ModelLoaderBase + + +class ModelLoaderRegistryBase(ABC): + """This class allows model loaders to register their type, base and format.""" + + @classmethod + @abstractmethod + def register( + cls, type: ModelType, format: ModelFormat, base: BaseModelType = BaseModelType.Any + ) -> Callable[[Type[ModelLoaderBase]], Type[ModelLoaderBase]]: + """Define a decorator which registers the subclass of loader.""" + + @classmethod + @abstractmethod + def get_implementation( + cls, config: AnyModelConfig, submodel_type: Optional[SubModelType] + ) -> Tuple[Type[ModelLoaderBase], ModelConfigBase, Optional[SubModelType]]: + """ + Get subclass of ModelLoaderBase registered to handle base and type. + + Parameters: + :param config: Model configuration record, as returned by ModelRecordService + :param submodel_type: Submodel to fetch (main models only) + :return: tuple(loader_class, model_config, submodel_type) + + Note that the returned model config may be different from the one passed + in, in the event that a submodel type is provided. + """ + + +class ModelLoaderRegistry: + """ + This class allows model loaders to register their type, base and format. + """ + + _registry: Dict[str, Type[ModelLoaderBase]] = {} + + @classmethod + def register( + cls, type: ModelType, format: ModelFormat, base: BaseModelType = BaseModelType.Any + ) -> Callable[[Type[ModelLoaderBase]], Type[ModelLoaderBase]]: + """Define a decorator which registers the subclass of loader.""" + + def decorator(subclass: Type[ModelLoaderBase]) -> Type[ModelLoaderBase]: + key = cls._to_registry_key(base, type, format) + if key in cls._registry: + raise Exception( + f"{subclass.__name__} is trying to register as a loader for {base}/{type}/{format}, but this type of model has already been registered by {cls._registry[key].__name__}" + ) + cls._registry[key] = subclass + return subclass + + return decorator + + @classmethod + def get_implementation( + cls, config: AnyModelConfig, submodel_type: Optional[SubModelType] + ) -> Tuple[Type[ModelLoaderBase], ModelConfigBase, Optional[SubModelType]]: + """Get subclass of ModelLoaderBase registered to handle base and type.""" + # We have to handle VAE overrides here because this will change the model type and the corresponding implementation returned + conf2, submodel_type = cls._handle_subtype_overrides(config, submodel_type) + + key1 = cls._to_registry_key(conf2.base, conf2.type, conf2.format) # for a specific base type + key2 = cls._to_registry_key(BaseModelType.Any, conf2.type, conf2.format) # with wildcard Any + implementation = cls._registry.get(key1) or cls._registry.get(key2) + if not implementation: + raise NotImplementedError( + f"No subclass of LoadedModel is registered for base={config.base}, type={config.type}, format={config.format}" + ) + return implementation, conf2, submodel_type + + @classmethod + def _handle_subtype_overrides( + cls, config: AnyModelConfig, submodel_type: Optional[SubModelType] + ) -> Tuple[ModelConfigBase, Optional[SubModelType]]: + if submodel_type == SubModelType.Vae and hasattr(config, "vae") and config.vae is not None: + model_path = Path(config.vae) + config_class = ( + VaeCheckpointConfig if model_path.suffix in [".pt", ".safetensors", ".ckpt"] else VaeDiffusersConfig + ) + hash = hashlib.md5(model_path.as_posix().encode("utf-8")).hexdigest() + new_conf = config_class(path=model_path.as_posix(), name=model_path.stem, base=config.base, key=hash) + submodel_type = None + else: + new_conf = config + return new_conf, submodel_type + + @staticmethod + def 
_to_registry_key(base: BaseModelType, type: ModelType, format: ModelFormat) -> str: + return "-".join([base.value, type.value, format.value]) diff --git a/invokeai/backend/model_manager/load/model_loaders/__init__.py b/invokeai/backend/model_manager/load/model_loaders/__init__.py new file mode 100644 index 0000000000..962cba5481 --- /dev/null +++ b/invokeai/backend/model_manager/load/model_loaders/__init__.py @@ -0,0 +1,3 @@ +""" +Init file for model_loaders. +""" diff --git a/invokeai/backend/model_manager/load/model_loaders/controlnet.py b/invokeai/backend/model_manager/load/model_loaders/controlnet.py new file mode 100644 index 0000000000..43393f5a84 --- /dev/null +++ b/invokeai/backend/model_manager/load/model_loaders/controlnet.py @@ -0,0 +1,62 @@ +# Copyright (c) 2024, Lincoln D. Stein and the InvokeAI Development Team +"""Class for ControlNet model loading in InvokeAI.""" + +from pathlib import Path + +import safetensors +import torch + +from invokeai.backend.model_manager import ( + AnyModelConfig, + BaseModelType, + ModelFormat, + ModelType, +) +from invokeai.backend.model_manager.convert_ckpt_to_diffusers import convert_controlnet_to_diffusers + +from .. import ModelLoaderRegistry +from .generic_diffusers import GenericDiffusersLoader + + +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ControlNet, format=ModelFormat.Diffusers) +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ControlNet, format=ModelFormat.Checkpoint) +class ControlnetLoader(GenericDiffusersLoader): + """Class to load ControlNet models.""" + + def _needs_conversion(self, config: AnyModelConfig, model_path: Path, dest_path: Path) -> bool: + if config.format != ModelFormat.Checkpoint: + return False + elif ( + dest_path.exists() + and (dest_path / "config.json").stat().st_mtime >= (config.last_modified or 0.0) + and (dest_path / "config.json").stat().st_mtime >= model_path.stat().st_mtime + ): + return False + else: + return True + + def _convert_model(self, config: AnyModelConfig, model_path: Path, output_path: Path) -> Path: + if config.base not in {BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2}: + raise Exception(f"ControlNet conversion not supported for model type: {config.base}") + else: + assert hasattr(config, "config") + config_file = config.config + + if model_path.suffix == ".safetensors": + checkpoint = safetensors.torch.load_file(model_path, device="cpu") + else: + checkpoint = torch.load(model_path, map_location="cpu") + + # sometimes weights are hidden under "state_dict", and sometimes not + if "state_dict" in checkpoint: + checkpoint = checkpoint["state_dict"] + + convert_controlnet_to_diffusers( + model_path, + output_path, + original_config_file=self._app_config.root_path / config_file, + image_size=512, + scan_needed=True, + from_safetensors=model_path.suffix == ".safetensors", + ) + return output_path diff --git a/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py b/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py new file mode 100644 index 0000000000..9a9b25aec5 --- /dev/null +++ b/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py @@ -0,0 +1,90 @@ +# Copyright (c) 2024, Lincoln D. 
Stein and the InvokeAI Development Team +"""Class for simple diffusers model loading in InvokeAI.""" + +import sys +from pathlib import Path +from typing import Any, Dict, Optional + +from diffusers import ConfigMixin, ModelMixin + +from invokeai.backend.model_manager import ( + AnyModel, + BaseModelType, + InvalidModelConfigException, + ModelFormat, + ModelRepoVariant, + ModelType, + SubModelType, +) + +from .. import ModelLoader, ModelLoaderRegistry + + +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.CLIPVision, format=ModelFormat.Diffusers) +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.T2IAdapter, format=ModelFormat.Diffusers) +class GenericDiffusersLoader(ModelLoader): + """Class to load simple diffusers models.""" + + def _load_model( + self, + model_path: Path, + model_variant: Optional[ModelRepoVariant] = None, + submodel_type: Optional[SubModelType] = None, + ) -> AnyModel: + model_class = self.get_hf_load_class(model_path) + if submodel_type is not None: + raise Exception(f"There are no submodels in models of type {model_class}") + variant = model_variant.value if model_variant else None + result: AnyModel = model_class.from_pretrained(model_path, torch_dtype=self._torch_dtype, variant=variant) # type: ignore + return result + + # TO DO: Add exception handling + def get_hf_load_class(self, model_path: Path, submodel_type: Optional[SubModelType] = None) -> ModelMixin: + """Given the model path and submodel, returns the diffusers ModelMixin subclass needed to load.""" + if submodel_type: + try: + config = self._load_diffusers_config(model_path, config_name="model_index.json") + module, class_name = config[submodel_type.value] + result = self._hf_definition_to_type(module=module, class_name=class_name) + except KeyError as e: + raise InvalidModelConfigException( + f'The "{submodel_type}" submodel is not available for this model.' 
+ ) from e + else: + try: + config = self._load_diffusers_config(model_path, config_name="config.json") + class_name = config.get("_class_name", None) + if class_name: + result = self._hf_definition_to_type(module="diffusers", class_name=class_name) + if config.get("model_type", None) == "clip_vision_model": + class_name = config.get("architectures") + assert class_name is not None + result = self._hf_definition_to_type(module="transformers", class_name=class_name[0]) + if not class_name: + raise InvalidModelConfigException("Unable to decipher the load class from the given config.json") + except KeyError as e: + raise InvalidModelConfigException("An expected config.json file is missing from this model.") from e + return result + + # TO DO: Add exception handling + def _hf_definition_to_type(self, module: str, class_name: str) -> ModelMixin: # fix with correct type + if module in ["diffusers", "transformers"]: + res_type = sys.modules[module] + else: + res_type = sys.modules["diffusers"].pipelines + result: ModelMixin = getattr(res_type, class_name) + return result + + def _load_diffusers_config(self, model_path: Path, config_name: str = "config.json") -> Dict[str, Any]: + return ConfigLoader.load_config(model_path, config_name=config_name) + + +class ConfigLoader(ConfigMixin): + """Subclass of ConfigMixin for loading diffusers configuration files.""" + + @classmethod + def load_config(cls, *args: Any, **kwargs: Any) -> Dict[str, Any]: + """Load a diffusers ConfigMixin configuration.""" + cls.config_name = kwargs.pop("config_name") + # Diffusers doesn't provide typing info + return super().load_config(*args, **kwargs) # type: ignore diff --git a/invokeai/backend/model_manager/load/model_loaders/ip_adapter.py b/invokeai/backend/model_manager/load/model_loaders/ip_adapter.py new file mode 100644 index 0000000000..7d25e9d218 --- /dev/null +++ b/invokeai/backend/model_manager/load/model_loaders/ip_adapter.py @@ -0,0 +1,38 @@ +# Copyright (c) 2024, Lincoln D. Stein and the InvokeAI Development Team +"""Class for IP Adapter model loading in InvokeAI.""" + +from pathlib import Path +from typing import Optional + +import torch + +from invokeai.backend.ip_adapter.ip_adapter import build_ip_adapter +from invokeai.backend.model_manager import ( + AnyModel, + BaseModelType, + ModelFormat, + ModelRepoVariant, + ModelType, + SubModelType, +) +from invokeai.backend.model_manager.load import ModelLoader, ModelLoaderRegistry + + +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.IPAdapter, format=ModelFormat.InvokeAI) +class IPAdapterInvokeAILoader(ModelLoader): + """Class to load IP Adapter diffusers models.""" + + def _load_model( + self, + model_path: Path, + model_variant: Optional[ModelRepoVariant] = None, + submodel_type: Optional[SubModelType] = None, + ) -> AnyModel: + if submodel_type is not None: + raise ValueError("There are no submodels in an IP-Adapter model.") + model = build_ip_adapter( + ip_adapter_ckpt_path=model_path / "ip_adapter.bin", + device=torch.device("cpu"), + dtype=self._torch_dtype, + ) + return model diff --git a/invokeai/backend/model_manager/load/model_loaders/lora.py b/invokeai/backend/model_manager/load/model_loaders/lora.py new file mode 100644 index 0000000000..e308531a4f --- /dev/null +++ b/invokeai/backend/model_manager/load/model_loaders/lora.py @@ -0,0 +1,77 @@ +# Copyright (c) 2024, Lincoln D. 
Stein and the InvokeAI Development Team +"""Class for LoRA model loading in InvokeAI.""" + +from logging import Logger +from pathlib import Path +from typing import Optional, Tuple + +from invokeai.app.services.config import InvokeAIAppConfig +from invokeai.backend.lora import LoRAModelRaw +from invokeai.backend.model_manager import ( + AnyModel, + AnyModelConfig, + BaseModelType, + ModelFormat, + ModelRepoVariant, + ModelType, + SubModelType, +) +from invokeai.backend.model_manager.load.convert_cache import ModelConvertCacheBase +from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase + +from .. import ModelLoader, ModelLoaderRegistry + + +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.Lora, format=ModelFormat.Diffusers) +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.Lora, format=ModelFormat.Lycoris) +class LoraLoader(ModelLoader): + """Class to load LoRA models.""" + + # We cheat a little bit to get access to the model base + def __init__( + self, + app_config: InvokeAIAppConfig, + logger: Logger, + ram_cache: ModelCacheBase[AnyModel], + convert_cache: ModelConvertCacheBase, + ): + """Initialize the loader.""" + super().__init__(app_config, logger, ram_cache, convert_cache) + self._model_base: Optional[BaseModelType] = None + + def _load_model( + self, + model_path: Path, + model_variant: Optional[ModelRepoVariant] = None, + submodel_type: Optional[SubModelType] = None, + ) -> AnyModel: + if submodel_type is not None: + raise ValueError("There are no submodels in a LoRA model.") + assert self._model_base is not None + model = LoRAModelRaw.from_checkpoint( + file_path=model_path, + dtype=self._torch_dtype, + base_model=self._model_base, + ) + return model + + # override + def _get_model_path( + self, config: AnyModelConfig, submodel_type: Optional[SubModelType] = None + ) -> Tuple[Path, AnyModelConfig, Optional[SubModelType]]: + self._model_base = ( + config.base + ) # cheating a little - we remember this variable for use in the subsequent call to _load_model() + + model_base_path = self._app_config.models_path + model_path = model_base_path / config.path + + if config.format == ModelFormat.Diffusers: + for ext in ["safetensors", "bin"]: # return the path to the weights file inside the folder + path = model_base_path / config.path / f"pytorch_lora_weights.{ext}" + if path.exists(): + model_path = path + break + + result = model_path.resolve(), config, submodel_type + return result diff --git a/invokeai/backend/model_manager/load/model_loaders/onnx.py b/invokeai/backend/model_manager/load/model_loaders/onnx.py new file mode 100644 index 0000000000..38f0274acc --- /dev/null +++ b/invokeai/backend/model_manager/load/model_loaders/onnx.py @@ -0,0 +1,42 @@ +# Copyright (c) 2024, Lincoln D. Stein and the InvokeAI Development Team +"""Class for Onnx model loading in InvokeAI.""" + +# This should work the same as Stable Diffusion pipelines +from pathlib import Path +from typing import Optional + +from invokeai.backend.model_manager import ( + AnyModel, + BaseModelType, + ModelFormat, + ModelRepoVariant, + ModelType, + SubModelType, +) + +from ..
import ModelLoaderRegistry +from .generic_diffusers import GenericDiffusersLoader + + +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ONNX, format=ModelFormat.Onnx) +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.ONNX, format=ModelFormat.Olive) +class OnnyxDiffusersModel(GenericDiffusersLoader): + """Class to load onnx models.""" + + def _load_model( + self, + model_path: Path, + model_variant: Optional[ModelRepoVariant] = None, + submodel_type: Optional[SubModelType] = None, + ) -> AnyModel: + if submodel_type is None: + raise Exception("A submodel type must be provided when loading onnx pipelines.") + load_class = self.get_hf_load_class(model_path, submodel_type) + variant = model_variant.value if model_variant else None + model_path = model_path / submodel_type.value + result: AnyModel = load_class.from_pretrained( + model_path, + torch_dtype=self._torch_dtype, + variant=variant, + ) # type: ignore + return result diff --git a/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py new file mode 100644 index 0000000000..0f0483d8dc --- /dev/null +++ b/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py @@ -0,0 +1,93 @@ +# Copyright (c) 2024, Lincoln D. Stein and the InvokeAI Development Team +"""Class for StableDiffusion model loading in InvokeAI.""" + +from pathlib import Path +from typing import Optional + +from diffusers import StableDiffusionInpaintPipeline, StableDiffusionPipeline + +from invokeai.backend.model_manager import ( + AnyModel, + AnyModelConfig, + BaseModelType, + ModelFormat, + ModelRepoVariant, + ModelType, + ModelVariantType, + SubModelType, +) +from invokeai.backend.model_manager.config import MainCheckpointConfig +from invokeai.backend.model_manager.convert_ckpt_to_diffusers import convert_ckpt_to_diffusers + +from ..
import ModelLoaderRegistry +from .generic_diffusers import GenericDiffusersLoader + + +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.Main, format=ModelFormat.Diffusers) +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.Main, format=ModelFormat.Checkpoint) +class StableDiffusionDiffusersModel(GenericDiffusersLoader): + """Class to load main models.""" + + model_base_to_model_type = { + BaseModelType.StableDiffusion1: "FrozenCLIPEmbedder", + BaseModelType.StableDiffusion2: "FrozenOpenCLIPEmbedder", + BaseModelType.StableDiffusionXL: "SDXL", + BaseModelType.StableDiffusionXLRefiner: "SDXL-Refiner", + } + + def _load_model( + self, + model_path: Path, + model_variant: Optional[ModelRepoVariant] = None, + submodel_type: Optional[SubModelType] = None, + ) -> AnyModel: + if submodel_type is None: + raise Exception("A submodel type must be provided when loading main pipelines.") + load_class = self.get_hf_load_class(model_path, submodel_type) + variant = model_variant.value if model_variant else None + model_path = model_path / submodel_type.value + result: AnyModel = load_class.from_pretrained( + model_path, + torch_dtype=self._torch_dtype, + variant=variant, + ) # type: ignore + return result + + def _needs_conversion(self, config: AnyModelConfig, model_path: Path, dest_path: Path) -> bool: + if config.format != ModelFormat.Checkpoint: + return False + elif ( + dest_path.exists() + and (dest_path / "model_index.json").stat().st_mtime >= (config.last_modified or 0.0) + and (dest_path / "model_index.json").stat().st_mtime >= model_path.stat().st_mtime + ): + return False + else: + return True + + def _convert_model(self, config: AnyModelConfig, model_path: Path, output_path: Path) -> Path: + assert isinstance(config, MainCheckpointConfig) + variant = config.variant + base = config.base + pipeline_class = ( + StableDiffusionInpaintPipeline if variant == ModelVariantType.Inpaint else StableDiffusionPipeline + ) + + config_file = config.config + + self._logger.info(f"Converting {model_path} to diffusers format") + convert_ckpt_to_diffusers( + model_path, + output_path, + model_type=self.model_base_to_model_type[base], + model_version=base, + model_variant=variant, + original_config_file=self._app_config.root_path / config_file, + extract_ema=True, + scan_needed=True, + pipeline_class=pipeline_class, + from_safetensors=model_path.suffix == ".safetensors", + precision=self._torch_dtype, + load_safety_checker=False, + ) + return output_path diff --git a/invokeai/backend/model_manager/load/model_loaders/textual_inversion.py b/invokeai/backend/model_manager/load/model_loaders/textual_inversion.py new file mode 100644 index 0000000000..fb9204a701 --- /dev/null +++ b/invokeai/backend/model_manager/load/model_loaders/textual_inversion.py @@ -0,0 +1,56 @@ +# Copyright (c) 2024, Lincoln D. Stein and the InvokeAI Development Team +"""Class for TI model loading in InvokeAI.""" + +from pathlib import Path +from typing import Optional, Tuple + +from invokeai.backend.model_manager import ( + AnyModel, + AnyModelConfig, + BaseModelType, + ModelFormat, + ModelRepoVariant, + ModelType, + SubModelType, +) +from invokeai.backend.textual_inversion import TextualInversionModelRaw + +from ..
import ModelLoader, ModelLoaderRegistry + + +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.TextualInversion, format=ModelFormat.EmbeddingFile) +@ModelLoaderRegistry.register( + base=BaseModelType.Any, type=ModelType.TextualInversion, format=ModelFormat.EmbeddingFolder +) +class TextualInversionLoader(ModelLoader): + """Class to load TI models.""" + + def _load_model( + self, + model_path: Path, + model_variant: Optional[ModelRepoVariant] = None, + submodel_type: Optional[SubModelType] = None, + ) -> AnyModel: + if submodel_type is not None: + raise ValueError("There are no submodels in a TI model.") + model = TextualInversionModelRaw.from_checkpoint( + file_path=model_path, + dtype=self._torch_dtype, + ) + return model + + # override + def _get_model_path( + self, config: AnyModelConfig, submodel_type: Optional[SubModelType] = None + ) -> Tuple[Path, AnyModelConfig, Optional[SubModelType]]: + model_path = self._app_config.models_path / config.path + + if config.format == ModelFormat.EmbeddingFolder: + path = model_path / "learned_embeds.bin" + else: + path = model_path + + if not path.exists(): + raise OSError(f"The embedding file at {path} was not found") + + return path, config, submodel_type diff --git a/invokeai/backend/model_manager/load/model_loaders/vae.py b/invokeai/backend/model_manager/load/model_loaders/vae.py new file mode 100644 index 0000000000..7ade1494eb --- /dev/null +++ b/invokeai/backend/model_manager/load/model_loaders/vae.py @@ -0,0 +1,68 @@ +# Copyright (c) 2024, Lincoln D. Stein and the InvokeAI Development Team +"""Class for VAE model loading in InvokeAI.""" + +from pathlib import Path + +import safetensors +import torch +from omegaconf import DictConfig, OmegaConf + +from invokeai.backend.model_manager import ( + AnyModelConfig, + BaseModelType, + ModelFormat, + ModelType, +) +from invokeai.backend.model_manager.convert_ckpt_to_diffusers import convert_ldm_vae_to_diffusers + +from .. import ModelLoaderRegistry +from .generic_diffusers import GenericDiffusersLoader + + +@ModelLoaderRegistry.register(base=BaseModelType.Any, type=ModelType.Vae, format=ModelFormat.Diffusers) +@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion1, type=ModelType.Vae, format=ModelFormat.Checkpoint) +@ModelLoaderRegistry.register(base=BaseModelType.StableDiffusion2, type=ModelType.Vae, format=ModelFormat.Checkpoint) +class VaeLoader(GenericDiffusersLoader): + """Class to load VAE models.""" + + def _needs_conversion(self, config: AnyModelConfig, model_path: Path, dest_path: Path) -> bool: + if config.format != ModelFormat.Checkpoint: + return False + elif ( + dest_path.exists() + and (dest_path / "config.json").stat().st_mtime >= (config.last_modified or 0.0) + and (dest_path / "config.json").stat().st_mtime >= model_path.stat().st_mtime + ): + return False + else: + return True + + def _convert_model(self, config: AnyModelConfig, model_path: Path, output_path: Path) -> Path: + # TO DO: check whether sdxl VAE models convert. 
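+        # (As written, only SD-1 and SD-2 checkpoint VAEs are handled; any other base raises below.)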
+ if config.base not in {BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2}: + raise Exception(f"Vae conversion not supported for model type: {config.base}") + else: + config_file = ( + "v1-inference.yaml" if config.base == BaseModelType.StableDiffusion1 else "v2-inference-v.yaml" + ) + + if model_path.suffix == ".safetensors": + checkpoint = safetensors.torch.load_file(model_path, device="cpu") + else: + checkpoint = torch.load(model_path, map_location="cpu") + + # sometimes weights are hidden under "state_dict", and sometimes not + if "state_dict" in checkpoint: + checkpoint = checkpoint["state_dict"] + + ckpt_config = OmegaConf.load(self._app_config.legacy_conf_path / config_file) + assert isinstance(ckpt_config, DictConfig) + + vae_model = convert_ldm_vae_to_diffusers( + checkpoint=checkpoint, + vae_config=ckpt_config, + image_size=512, + ) + vae_model.to(self._torch_dtype) # set precision appropriately + vae_model.save_pretrained(output_path, safe_serialization=True) + return output_path diff --git a/invokeai/backend/model_manager/load/model_util.py b/invokeai/backend/model_manager/load/model_util.py new file mode 100644 index 0000000000..c55eee48fa --- /dev/null +++ b/invokeai/backend/model_manager/load/model_util.py @@ -0,0 +1,113 @@ +# Copyright (c) 2024 The InvokeAI Development Team +"""Various utility functions needed by the loader and caching system.""" + +import json +from pathlib import Path +from typing import Optional + +import torch +from diffusers import DiffusionPipeline + +from invokeai.backend.model_manager.config import AnyModel +from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel + + +def calc_model_size_by_data(model: AnyModel) -> int: + """Get size of a model in memory in bytes.""" + if isinstance(model, DiffusionPipeline): + return _calc_pipeline_by_data(model) + elif isinstance(model, torch.nn.Module): + return _calc_model_by_data(model) + elif isinstance(model, IAIOnnxRuntimeModel): + return _calc_onnx_model_by_data(model) + else: + return 0 + + +def _calc_pipeline_by_data(pipeline: DiffusionPipeline) -> int: + res = 0 + assert hasattr(pipeline, "components") + for submodel_key in pipeline.components.keys(): + submodel = getattr(pipeline, submodel_key) + if submodel is not None and isinstance(submodel, torch.nn.Module): + res += _calc_model_by_data(submodel) + return res + + +def _calc_model_by_data(model: torch.nn.Module) -> int: + mem_params = sum([param.nelement() * param.element_size() for param in model.parameters()]) + mem_bufs = sum([buf.nelement() * buf.element_size() for buf in model.buffers()]) + mem: int = mem_params + mem_bufs # in bytes + return mem + + +def _calc_onnx_model_by_data(model: IAIOnnxRuntimeModel) -> int: + tensor_size = model.tensors.size() * 2 # The session doubles this + mem = tensor_size # in bytes + return mem + + +def calc_model_size_by_fs(model_path: Path, subfolder: Optional[str] = None, variant: Optional[str] = None) -> int: + """Estimate the size of a model on disk in bytes.""" + if model_path.is_file(): + return model_path.stat().st_size + + if subfolder is not None: + model_path = model_path / subfolder + + # this can happen when, for example, the safety checker is not downloaded. + if not model_path.exists(): + return 0 + + all_files = [f for f in model_path.iterdir() if (model_path / f).is_file()] + + fp16_files = {f for f in all_files if ".fp16." in f.name or ".fp16-" in f.name} + bit8_files = {f for f in all_files if ".8bit." 
in f.name or ".8bit-" in f.name} + other_files = set(all_files) - fp16_files - bit8_files + + if not variant: # ModelRepoVariant.DEFAULT evaluates to empty string for compatibility with HF + files = other_files + elif variant == "fp16": + files = fp16_files + elif variant == "8bit": + files = bit8_files + else: + raise NotImplementedError(f"Unknown variant: {variant}") + + # try to read the total size from an index file, if one exists + index_postfix = ".index.json" + if variant is not None: + index_postfix = f".index.{variant}.json" + + for file in files: + if not file.name.endswith(index_postfix): + continue + try: + with open(model_path / file, "r") as f: + index_data = json.loads(f.read()) + return int(index_data["metadata"]["total_size"]) + except Exception: + pass + + # calculate the size of the model files if there is no index file + formats = [ + (".safetensors",), # safetensors + (".bin",), # torch + (".onnx", ".pb"), # onnx + (".msgpack",), # flax + (".ckpt",), # tf + (".h5",), # tf2 + ] + + for file_format in formats: + model_files = [f for f in files if f.suffix in file_format] + if len(model_files) == 0: + continue + + model_size = 0 + for model_file in model_files: + file_stats = (model_path / model_file).stat() + model_size += file_stats.st_size + return model_size + + return 0 # scheduler/feature_extractor/tokenizer - components that are never loaded to the GPU diff --git a/invokeai/backend/model_management/model_load_optimizations.py b/invokeai/backend/model_manager/load/optimizations.py similarity index 64% rename from invokeai/backend/model_management/model_load_optimizations.py rename to invokeai/backend/model_manager/load/optimizations.py index a46d262175..030fcfa639 100644 --- a/invokeai/backend/model_management/model_load_optimizations.py +++ b/invokeai/backend/model_manager/load/optimizations.py @@ -1,16 +1,16 @@ from contextlib import contextmanager +from typing import Any, Generator import torch -def _no_op(*args, **kwargs): +def _no_op(*args: Any, **kwargs: Any) -> None: pass @contextmanager -def skip_torch_weight_init(): - """A context manager that monkey-patches several of the common torch layers (torch.nn.Linear, torch.nn.Conv1d, etc.) - to skip weight initialization. +def skip_torch_weight_init() -> Generator[None, None, None]: + """Monkey patch several of the common torch layers (torch.nn.Linear, torch.nn.Conv1d, etc.) to skip weight initialization. By default, `torch.nn.Linear` and `torch.nn.ConvNd` layers initialize their weights (according to a particular distribution) when __init__ is called. This weight initialization step can take a significant amount of time, and is @@ -18,13 +18,14 @@ def skip_torch_weight_init(): monkey-patches common torch layers to skip the weight initialization step.
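+
+    A rough usage sketch (`MyModule` and `load_weights` are hypothetical stand-ins
+    for a large torch.nn.Module and the code that fills in its real weights):
+
+        with skip_torch_weight_init():
+            model = MyModule()   # constructed without the usual weight-init cost
+        load_weights(model)      # the real weights overwrite the uninitialized ones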
""" torch_modules = [torch.nn.Linear, torch.nn.modules.conv._ConvNd, torch.nn.Embedding] - saved_functions = [m.reset_parameters for m in torch_modules] + saved_functions = [hasattr(m, "reset_parameters") and m.reset_parameters for m in torch_modules] try: for torch_module in torch_modules: + assert hasattr(torch_module, "reset_parameters") torch_module.reset_parameters = _no_op - yield None finally: for torch_module, saved_function in zip(torch_modules, saved_functions, strict=True): + assert hasattr(torch_module, "reset_parameters") torch_module.reset_parameters = saved_function diff --git a/invokeai/backend/model_manager/merge.py b/invokeai/backend/model_manager/merge.py index 2c94af4af3..1a3b9cb7de 100644 --- a/invokeai/backend/model_manager/merge.py +++ b/invokeai/backend/model_manager/merge.py @@ -1,7 +1,7 @@ """ invokeai.backend.model_manager.merge exports: merge_diffusion_models() -- combine multiple models by location and return a pipeline object -merge_diffusion_models_and_commit() -- combine multiple models by ModelManager ID and write to models.yaml +merge_diffusion_models_and_commit() -- combine multiple models by ModelManager ID and write to the models tables Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team """ @@ -13,7 +13,7 @@ from typing import Any, List, Optional, Set import torch from diffusers import AutoPipelineForText2Image -from diffusers import logging as dlogging +from diffusers.utils import logging as dlogging from invokeai.app.services.model_install import ModelInstallServiceBase from invokeai.backend.util.devices import choose_torch_device, torch_dtype @@ -39,10 +39,7 @@ class ModelMerger(object): def __init__(self, installer: ModelInstallServiceBase): """ - Initialize a ModelMerger object. - - :param store: Underlying storage manager for the running process. - :param config: InvokeAIAppConfig object (if not provided, default will be selected). + Initialize a ModelMerger object with the model installer. """ self._installer = installer @@ -79,7 +76,7 @@ class ModelMerger(object): custom_pipeline="checkpoint_merger", torch_dtype=dtype, variant=variant, - ) + ) # type: ignore merged_pipe = pipe.merge( pretrained_model_name_or_path_list=model_paths, alpha=alpha, @@ -104,7 +101,7 @@ class ModelMerger(object): **kwargs: Any, ) -> AnyModelConfig: """ - :param models: up to three models, designated by their InvokeAI models.yaml model name + :param models: up to three models, designated by their registered InvokeAI model name :param merged_model_name: name for new model :param alpha: The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. 
A 0.8 alpha would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2 diff --git a/invokeai/backend/model_manager/metadata/__init__.py b/invokeai/backend/model_manager/metadata/__init__.py index 672e378c7f..56e741cd00 100644 --- a/invokeai/backend/model_manager/metadata/__init__.py +++ b/invokeai/backend/model_manager/metadata/__init__.py @@ -18,7 +18,8 @@ assert isinstance(data, CivitaiMetadata) if data.allow_commercial_use: print("Commercial use of this model is allowed") """ -from .fetch import CivitaiMetadataFetch, HuggingFaceMetadataFetch + +from .fetch import CivitaiMetadataFetch, HuggingFaceMetadataFetch, ModelMetadataFetchBase from .metadata_base import ( AnyModelRepoMetadata, AnyModelRepoMetadataValidator, @@ -31,7 +32,6 @@ from .metadata_base import ( RemoteModelFile, UnknownMetadataException, ) -from .metadata_store import ModelMetadataStore __all__ = [ "AnyModelRepoMetadata", @@ -42,7 +42,7 @@ __all__ = [ "HuggingFaceMetadata", "HuggingFaceMetadataFetch", "LicenseRestrictions", - "ModelMetadataStore", + "ModelMetadataFetchBase", "BaseMetadata", "ModelMetadataWithFiles", "RemoteModelFile", diff --git a/invokeai/backend/model_manager/metadata/fetch/civitai.py b/invokeai/backend/model_manager/metadata/fetch/civitai.py index 6e41d6f11b..393b7552ad 100644 --- a/invokeai/backend/model_manager/metadata/fetch/civitai.py +++ b/invokeai/backend/model_manager/metadata/fetch/civitai.py @@ -32,6 +32,8 @@ import requests from pydantic.networks import AnyHttpUrl from requests.sessions import Session +from invokeai.backend.model_manager import ModelRepoVariant + from ..metadata_base import ( AnyModelRepoMetadata, CivitaiMetadata, @@ -82,10 +84,13 @@ class CivitaiMetadataFetch(ModelMetadataFetchBase): return self.from_civitai_versionid(int(version_id)) raise UnknownMetadataException(f"The url '{url}' does not match any known Civitai URL patterns") - def from_id(self, id: str) -> AnyModelRepoMetadata: + def from_id(self, id: str, variant: Optional[ModelRepoVariant] = None) -> AnyModelRepoMetadata: """ Given a Civitai model version ID, return a ModelRepoMetadata object. + :param id: An ID. + :param variant: A model variant from the ModelRepoVariant enum (currently ignored) + May raise an `UnknownMetadataException`.
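+
+        A rough usage sketch (the version ID shown is hypothetical):
+
+            fetcher = CivitaiMetadataFetch()
+            metadata = fetcher.from_id("123456")
+            assert isinstance(metadata, CivitaiMetadata)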
""" return self.from_civitai_versionid(int(id)) @@ -155,7 +160,7 @@ class CivitaiMetadataFetch(ModelMetadataFetchBase): nsfw=model_json["nsfw"], restrictions=LicenseRestrictions( AllowNoCredit=model_json["allowNoCredit"], - AllowCommercialUse=CommercialUsage(model_json["allowCommercialUse"]), + AllowCommercialUse={CommercialUsage(x) for x in model_json["allowCommercialUse"]}, AllowDerivatives=model_json["allowDerivatives"], AllowDifferentLicense=model_json["allowDifferentLicense"], ), diff --git a/invokeai/backend/model_manager/metadata/fetch/fetch_base.py b/invokeai/backend/model_manager/metadata/fetch/fetch_base.py index 58b65b6947..5d75493b92 100644 --- a/invokeai/backend/model_manager/metadata/fetch/fetch_base.py +++ b/invokeai/backend/model_manager/metadata/fetch/fetch_base.py @@ -18,7 +18,9 @@ from typing import Optional from pydantic.networks import AnyHttpUrl from requests.sessions import Session -from ..metadata_base import AnyModelRepoMetadata, AnyModelRepoMetadataValidator +from invokeai.backend.model_manager import ModelRepoVariant + +from ..metadata_base import AnyModelRepoMetadata, AnyModelRepoMetadataValidator, BaseMetadata class ModelMetadataFetchBase(ABC): @@ -45,10 +47,13 @@ class ModelMetadataFetchBase(ABC): pass @abstractmethod - def from_id(self, id: str) -> AnyModelRepoMetadata: + def from_id(self, id: str, variant: Optional[ModelRepoVariant] = None) -> AnyModelRepoMetadata: """ Given an ID for a model, return a ModelMetadata object. + :param id: An ID. + :param variant: A model variant from the ModelRepoVariant enum. + This method will raise a `UnknownMetadataException` in the event that the requested model's metadata is not found at the provided id. """ @@ -57,5 +62,5 @@ class ModelMetadataFetchBase(ABC): @classmethod def from_json(cls, json: str) -> AnyModelRepoMetadata: """Given the JSON representation of the metadata, return the corresponding Pydantic object.""" - metadata = AnyModelRepoMetadataValidator.validate_json(json) + metadata: BaseMetadata = AnyModelRepoMetadataValidator.validate_json(json) # type: ignore return metadata diff --git a/invokeai/backend/model_manager/metadata/fetch/huggingface.py b/invokeai/backend/model_manager/metadata/fetch/huggingface.py index 5d1eb0cc9e..6f04e8713b 100644 --- a/invokeai/backend/model_manager/metadata/fetch/huggingface.py +++ b/invokeai/backend/model_manager/metadata/fetch/huggingface.py @@ -19,10 +19,12 @@ from typing import Optional import requests from huggingface_hub import HfApi, configure_http_backend, hf_hub_url -from huggingface_hub.utils._errors import RepositoryNotFoundError +from huggingface_hub.utils._errors import RepositoryNotFoundError, RevisionNotFoundError from pydantic.networks import AnyHttpUrl from requests.sessions import Session +from invokeai.backend.model_manager import ModelRepoVariant + from ..metadata_base import ( AnyModelRepoMetadata, HuggingFaceMetadata, @@ -53,12 +55,22 @@ class HuggingFaceMetadataFetch(ModelMetadataFetchBase): metadata = HuggingFaceMetadata.model_validate_json(json) return metadata - def from_id(self, id: str) -> AnyModelRepoMetadata: + def from_id(self, id: str, variant: Optional[ModelRepoVariant] = None) -> AnyModelRepoMetadata: """Return a HuggingFaceMetadata object given the model's repo_id.""" - try: - model_info = HfApi().model_info(repo_id=id, files_metadata=True) - except RepositoryNotFoundError as excp: - raise UnknownMetadataException(f"'{id}' not found. 
See trace for details.") from excp + # Little loop which tries fetching a revision corresponding to the selected variant. + # If not available, then set variant to None and get the default. + # If this too fails, raise exception. + model_info = None + while not model_info: + try: + model_info = HfApi().model_info(repo_id=id, files_metadata=True, revision=variant) + except RepositoryNotFoundError as excp: + raise UnknownMetadataException(f"'{id}' not found. See trace for details.") from excp + except RevisionNotFoundError: + if variant is None: + raise + else: + variant = None _, name = id.split("/") return HuggingFaceMetadata( @@ -70,7 +82,7 @@ tags=model_info.tags, files=[ RemoteModelFile( - url=hf_hub_url(id, x.rfilename), + url=hf_hub_url(id, x.rfilename, revision=variant), path=Path(name, x.rfilename), size=x.size, sha256=x.lfs.get("sha256") if x.lfs else None, diff --git a/invokeai/backend/model_manager/metadata/metadata_base.py b/invokeai/backend/model_manager/metadata/metadata_base.py index 5aa883d26d..379369f9f5 100644 --- a/invokeai/backend/model_manager/metadata/metadata_base.py +++ b/invokeai/backend/model_manager/metadata/metadata_base.py @@ -54,8 +54,8 @@ class LicenseRestrictions(BaseModel): AllowDifferentLicense: bool = Field( description="if true, derivatives of this model may be redistributed under a different license", default=False ) - AllowCommercialUse: CommercialUsage = Field( - description="Type of commercial use allowed or 'No' if no commercial use is allowed.", default_factory=set + AllowCommercialUse: Optional[Set[CommercialUsage] | CommercialUsage] = Field( - description="Type of commercial use allowed or 'No' if no commercial use is allowed.", default_factory=set + description="Type of commercial use allowed, or None if no commercial use is allowed.", default=None ) @@ -139,7 +139,13 @@ class CivitaiMetadata(ModelMetadataWithFiles): @property def allow_commercial_use(self) -> bool: """Return True if commercial use is allowed.""" - return self.restrictions.AllowCommercialUse != CommercialUsage("None") + if self.restrictions.AllowCommercialUse is None: + return False + else: + # accommodate schema change + acu = self.restrictions.AllowCommercialUse + commercial_usage = acu if isinstance(acu, set) else {acu} + return CommercialUsage.No not in commercial_usage @property def allow_derivatives(self) -> bool: @@ -184,7 +190,6 @@ class HuggingFaceMetadata(ModelMetadataWithFiles): [x.path for x in self.files], variant, subfolder ) # all files in the model prefix = f"{subfolder}/" if subfolder else "" - # the next step reads model_index.json to determine which subdirectories belong # to the model if Path(f"{prefix}model_index.json") in paths: diff --git a/invokeai/backend/model_manager/probe.py b/invokeai/backend/model_manager/probe.py index cd048d2fe7..11b8f46951 100644 --- a/invokeai/backend/model_manager/probe.py +++ b/invokeai/backend/model_manager/probe.py @@ -7,9 +7,7 @@ import safetensors.torch import torch from picklescan.scanner import scan_file_path -from invokeai.backend.model_management.models.base import read_checkpoint_meta -from invokeai.backend.model_management.models.ip_adapter import IPAdapterModelFormat -from invokeai.backend.model_management.util import lora_token_vector_length +import invokeai.backend.util.logging as logger from invokeai.backend.util.util import SilenceWarnings from .config import ( @@ -18,18 +16,24 @@ from .config import ( InvalidModelConfigException, ModelConfigFactory, ModelFormat, + ModelRepoVariant, ModelType, ModelVariantType, SchedulerPredictionType, ) from .hash import FastModelHash +from 
.util.model_util import lora_token_vector_length, read_checkpoint_meta CkptType = Dict[str, Any] LEGACY_CONFIGS: Dict[BaseModelType, Dict[ModelVariantType, Union[str, Dict[SchedulerPredictionType, str]]]] = { BaseModelType.StableDiffusion1: { - ModelVariantType.Normal: "v1-inference.yaml", + ModelVariantType.Normal: { + SchedulerPredictionType.Epsilon: "v1-inference.yaml", + SchedulerPredictionType.VPrediction: "v1-inference-v.yaml", + }, ModelVariantType.Inpaint: "v1-inpainting-inference.yaml", + ModelVariantType.Depth: "v2-midas-inference.yaml", }, BaseModelType.StableDiffusion2: { ModelVariantType.Normal: { @@ -72,6 +76,10 @@ class ProbeBase(object): """Get model scheduler prediction type.""" return None + def get_image_encoder_model_id(self) -> Optional[str]: + """Get image encoder (IP adapters only).""" + return None + class ModelProbe(object): PROBES: Dict[str, Dict[ModelType, type[ProbeBase]]] = { @@ -147,6 +155,7 @@ class ModelProbe(object): fields["base"] = fields.get("base") or probe.get_base_type() fields["variant"] = fields.get("variant") or probe.get_variant_type() fields["prediction_type"] = fields.get("prediction_type") or probe.get_scheduler_prediction_type() + fields["image_encoder_model_id"] = fields.get("image_encoder_model_id") or probe.get_image_encoder_model_id() fields["name"] = fields.get("name") or cls.get_model_name(model_path) fields["description"] = ( fields.get("description") or f"{fields['base'].value} {fields['type'].value} model {fields['name']}" @@ -155,6 +164,9 @@ class ModelProbe(object): fields["original_hash"] = fields.get("original_hash") or hash fields["current_hash"] = fields.get("current_hash") or hash + if format_type == ModelFormat.Diffusers and hasattr(probe, "get_repo_variant"): + fields["repo_variant"] = fields.get("repo_variant") or probe.get_repo_variant() + # additional fields needed for main and controlnet models if fields["type"] in [ModelType.Main, ModelType.ControlNet] and fields["format"] == ModelFormat.Checkpoint: fields["config"] = cls._get_checkpoint_config_path( @@ -176,7 +188,7 @@ class ModelProbe(object): and fields["prediction_type"] == SchedulerPredictionType.VPrediction ) - model_info = ModelConfigFactory.make_config(fields) + model_info = ModelConfigFactory.make_config(fields) # , key=fields.get("key", None)) return model_info @classmethod @@ -477,6 +489,21 @@ class FolderProbeBase(ProbeBase): def get_format(self) -> ModelFormat: return ModelFormat("diffusers") + def get_repo_variant(self) -> ModelRepoVariant: + # get all files ending in .bin or .safetensors + weight_files = list(self.model_path.glob("**/*.safetensors")) + weight_files.extend(list(self.model_path.glob("**/*.bin"))) + for x in weight_files: + if ".fp16" in x.suffixes: + return ModelRepoVariant.FP16 + if "openvino_model" in x.name: + return ModelRepoVariant.OPENVINO + if "flax_model" in x.name: + return ModelRepoVariant.FLAX + if x.suffix == ".onnx": + return ModelRepoVariant.ONNX + return ModelRepoVariant.DEFAULT + class PipelineFolderProbe(FolderProbeBase): def get_base_type(self) -> BaseModelType: @@ -567,13 +594,20 @@ class TextualInversionFolderProbe(FolderProbeBase): return TextualInversionCheckpointProbe(path).get_base_type() -class ONNXFolderProbe(FolderProbeBase): +class ONNXFolderProbe(PipelineFolderProbe): + def get_base_type(self) -> BaseModelType: + # Due to the way the installer is set up, the configuration file for safetensors + # will come along for the ride if both the onnx and safetensors forms + # share the same directory. 
We take advantage of this here. + if (self.model_path / "unet" / "config.json").exists(): + return super().get_base_type() + else: + logger.warning('Base type probing is not implemented for ONNX models. Assuming "sd-1"') + return BaseModelType.StableDiffusion1 + def get_format(self) -> ModelFormat: return ModelFormat("onnx") - def get_base_type(self) -> BaseModelType: - return BaseModelType.StableDiffusion1 - def get_variant_type(self) -> ModelVariantType: return ModelVariantType.Normal @@ -617,8 +651,8 @@ class LoRAFolderProbe(FolderProbeBase): class IPAdapterFolderProbe(FolderProbeBase): - def get_format(self) -> IPAdapterModelFormat: - return IPAdapterModelFormat.InvokeAI.value + def get_format(self) -> ModelFormat: + return ModelFormat.InvokeAI def get_base_type(self) -> BaseModelType: model_file = self.model_path / "ip_adapter.bin" @@ -638,6 +672,14 @@ class IPAdapterFolderProbe(FolderProbeBase): f"IP-Adapter had unexpected cross-attention dimension: {cross_attention_dim}." ) + def get_image_encoder_model_id(self) -> Optional[str]: + encoder_id_path = self.model_path / "image_encoder.txt" + if not encoder_id_path.exists(): + return None + with open(encoder_id_path, "r") as f: + image_encoder_model = f.readline().strip() + return image_encoder_model + class CLIPVisionFolderProbe(FolderProbeBase): def get_base_type(self) -> BaseModelType: diff --git a/invokeai/backend/model_manager/search.py b/invokeai/backend/model_manager/search.py index 4cc3caebe4..7e89c394b1 100644 --- a/invokeai/backend/model_manager/search.py +++ b/invokeai/backend/model_manager/search.py @@ -22,14 +22,16 @@ Example usage: import os from abc import ABC, abstractmethod +from logging import Logger from pathlib import Path from typing import Callable, Optional, Set, Union from pydantic import BaseModel, Field +from invokeai.app.services.config import InvokeAIAppConfig from invokeai.backend.util.logging import InvokeAILogger -default_logger = InvokeAILogger.get_logger() +default_logger: Logger = InvokeAILogger.get_logger() class SearchStats(BaseModel): @@ -56,7 +58,7 @@ class ModelSearchBase(ABC, BaseModel): on_model_found : Optional[Callable[[Path], bool]] = Field(default=None, description="Called when a model is found.") # noqa E221 on_search_completed : Optional[Callable[[Set[Path]], None]] = Field(default=None, description="Called when search is complete.") # noqa E221 stats : SearchStats = Field(default_factory=SearchStats, description="Summary statistics after search") # noqa E221 - logger : InvokeAILogger = Field(default=default_logger, description="Logger instance.") # noqa E221 + logger : Logger = Field(default=default_logger, description="Logger instance.") # noqa E221 # fmt: on class Config: @@ -115,76 +117,73 @@ class ModelSearch(ModelSearchBase): # returns all models that have 'anime' in the path """ - models_found: Set[Path] = Field(default=None) - scanned_dirs: Set[Path] = Field(default=None) - pruned_paths: Set[Path] = Field(default=None) + models_found: Set[Path] = Field(default_factory=set) + config: InvokeAIAppConfig = InvokeAIAppConfig.get_config() def search_started(self) -> None: self.models_found = set() - self.scanned_dirs = set() - self.pruned_paths = set() if self.on_search_started: self.on_search_started(self._directory) def model_found(self, model: Path) -> None: self.stats.models_found += 1 - if not self.on_model_found or self.on_model_found(model): + if self.on_model_found is None or self.on_model_found(model): self.stats.models_filtered += 1 self.models_found.add(model) def 
search_completed(self) -> None: - if self.on_search_completed: - self.on_search_completed(self._models_found) + if self.on_search_completed is not None: + self.on_search_completed(self.models_found) def search(self, directory: Union[Path, str]) -> Set[Path]: self._directory = Path(directory) + if not self._directory.is_absolute(): + self._directory = self.config.models_path / self._directory self.stats = SearchStats() # zero out self.search_started() # This will initialize _models_found to empty - self._walk_directory(directory) + self._walk_directory(self._directory) self.search_completed() return self.models_found - def _walk_directory(self, path: Union[Path, str]) -> None: - for root, dirs, files in os.walk(path, followlinks=True): - # don't descend into directories that start with a "." - # to avoid the Mac .DS_STORE issue. - if str(Path(root).name).startswith("."): - self.pruned_paths.add(Path(root)) - if any(Path(root).is_relative_to(x) for x in self.pruned_paths): - continue + def _walk_directory(self, path: Union[Path, str], max_depth: int = 20) -> None: + absolute_path = Path(path) + if ( + len(absolute_path.parts) - len(self._directory.parts) > max_depth + or not absolute_path.exists() + or absolute_path.parent in self.models_found + ): + return + entries = os.scandir(absolute_path.as_posix()) + entries = [entry for entry in entries if not entry.name.startswith(".")] + dirs = [entry for entry in entries if entry.is_dir()] + file_names = [entry.name for entry in entries if entry.is_file()] + if any( + x in file_names + for x in [ + "config.json", + "model_index.json", + "learned_embeds.bin", + "pytorch_lora_weights.bin", + "image_encoder.txt", + ] + ): + try: + self.model_found(absolute_path) + return + except KeyboardInterrupt: + raise + except Exception as e: + self.logger.warning(str(e)) + return - self.stats.items_scanned += len(dirs) + len(files) - for d in dirs: - path = Path(root) / d - if path.parent in self.scanned_dirs: - self.scanned_dirs.add(path) - continue - if any( - (path / x).exists() - for x in [ - "config.json", - "model_index.json", - "learned_embeds.bin", - "pytorch_lora_weights.bin", - "image_encoder.txt", - ] - ): - self.scanned_dirs.add(path) - try: - self.model_found(path) - except KeyboardInterrupt: - raise - except Exception as e: - self.logger.warning(str(e)) + for n in file_names: + if n.endswith((".ckpt", ".bin", ".pth", ".safetensors", ".pt")): + try: + self.model_found(absolute_path / n) + except KeyboardInterrupt: + raise + except Exception as e: + self.logger.warning(str(e)) - for f in files: - path = Path(root) / f - if path.parent in self.scanned_dirs: - continue - if path.suffix in {".ckpt", ".bin", ".pth", ".safetensors", ".pt"}: - try: - self.model_found(path) - except KeyboardInterrupt: - raise - except Exception as e: - self.logger.warning(str(e)) + for d in dirs: + self._walk_directory(absolute_path / d) diff --git a/invokeai/backend/model_manager/util/libc_util.py b/invokeai/backend/model_manager/util/libc_util.py new file mode 100644 index 0000000000..ef1ac2f8a4 --- /dev/null +++ b/invokeai/backend/model_manager/util/libc_util.py @@ -0,0 +1,76 @@ +import ctypes + + +class Struct_mallinfo2(ctypes.Structure): + """A ctypes Structure that matches the libc mallinfo2 struct. 
+ + Docs: + - https://man7.org/linux/man-pages/man3/mallinfo.3.html + - https://www.gnu.org/software/libc/manual/html_node/Statistics-of-Malloc.html + + struct mallinfo2 { + size_t arena; /* Non-mmapped space allocated (bytes) */ + size_t ordblks; /* Number of free chunks */ + size_t smblks; /* Number of free fastbin blocks */ + size_t hblks; /* Number of mmapped regions */ + size_t hblkhd; /* Space allocated in mmapped regions (bytes) */ + size_t usmblks; /* See below */ + size_t fsmblks; /* Space in freed fastbin blocks (bytes) */ + size_t uordblks; /* Total allocated space (bytes) */ + size_t fordblks; /* Total free space (bytes) */ + size_t keepcost; /* Top-most, releasable space (bytes) */ + }; + """ + + _fields_ = [ + ("arena", ctypes.c_size_t), + ("ordblks", ctypes.c_size_t), + ("smblks", ctypes.c_size_t), + ("hblks", ctypes.c_size_t), + ("hblkhd", ctypes.c_size_t), + ("usmblks", ctypes.c_size_t), + ("fsmblks", ctypes.c_size_t), + ("uordblks", ctypes.c_size_t), + ("fordblks", ctypes.c_size_t), + ("keepcost", ctypes.c_size_t), + ] + + def __str__(self) -> str: + s = "" + s += f"{'arena': <10}= {(self.arena/2**30):15.5f} # Non-mmapped space allocated (GB) (uordblks + fordblks)\n" + s += f"{'ordblks': <10}= {(self.ordblks): >15} # Number of free chunks\n" + s += f"{'smblks': <10}= {(self.smblks): >15} # Number of free fastbin blocks \n" + s += f"{'hblks': <10}= {(self.hblks): >15} # Number of mmapped regions \n" + s += f"{'hblkhd': <10}= {(self.hblkhd/2**30):15.5f} # Space allocated in mmapped regions (GB)\n" + s += f"{'usmblks': <10}= {(self.usmblks): >15} # Unused\n" + s += f"{'fsmblks': <10}= {(self.fsmblks/2**30):15.5f} # Space in freed fastbin blocks (GB)\n" + s += ( + f"{'uordblks': <10}= {(self.uordblks/2**30):15.5f} # Space used by in-use allocations (non-mmapped)" + " (GB)\n" + ) + s += f"{'fordblks': <10}= {(self.fordblks/2**30):15.5f} # Space in free blocks (non-mmapped) (GB)\n" + s += f"{'keepcost': <10}= {(self.keepcost/2**30):15.5f} # Top-most, releasable space (GB)\n" + return s + + +class LibcUtil: + """A utility class for interacting with the C Standard Library (`libc`) via ctypes. + + Note that this class will raise on __init__() if 'libc.so.6' can't be found. Take care to handle environments where + this shared library is not available. + + TODO: Improve cross-OS compatibility of this class. + """ + + def __init__(self) -> None: + self._libc = ctypes.cdll.LoadLibrary("libc.so.6") + + def mallinfo2(self) -> Struct_mallinfo2: + """Calls `libc` `mallinfo2`. 
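+
+        A rough usage sketch (Linux with glibc >= 2.33 only):
+
+            info = LibcUtil().mallinfo2()
+            print(info)  # Struct_mallinfo2.__str__ formats the heap statistics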
+ + Docs: https://man7.org/linux/man-pages/man3/mallinfo.3.html + """ + mallinfo2 = self._libc.mallinfo2 + mallinfo2.restype = Struct_mallinfo2 + result: Struct_mallinfo2 = mallinfo2() + return result diff --git a/invokeai/backend/model_management/util.py b/invokeai/backend/model_manager/util/model_util.py similarity index 56% rename from invokeai/backend/model_management/util.py rename to invokeai/backend/model_manager/util/model_util.py index f4737d9f0b..2e448520e5 100644 --- a/invokeai/backend/model_management/util.py +++ b/invokeai/backend/model_manager/util/model_util.py @@ -1,15 +1,71 @@ -# Copyright (c) 2023 The InvokeAI Development Team -"""Utilities used by the Model Manager""" +"""Utilities for parsing model files, used mostly by probe.py""" + +import json +from pathlib import Path +from typing import Dict, Optional, Union + +import safetensors +import torch +from picklescan.scanner import scan_file_path -def lora_token_vector_length(checkpoint: dict) -> int: +def _fast_safetensors_reader(path: str) -> Dict[str, torch.Tensor]: + checkpoint = {} + device = torch.device("meta") + with open(path, "rb") as f: + definition_len = int.from_bytes(f.read(8), "little") + definition_json = f.read(definition_len) + definition = json.loads(definition_json) + + if "__metadata__" in definition and definition["__metadata__"].get("format", "pt") not in { + "pt", + "torch", + "pytorch", + }: + raise Exception("Only PyTorch safetensors files are supported") + definition.pop("__metadata__", None) + + for key, info in definition.items(): + dtype = { + "I8": torch.int8, + "I16": torch.int16, + "I32": torch.int32, + "I64": torch.int64, + "F16": torch.float16, + "F32": torch.float32, + "F64": torch.float64, + }[info["dtype"]] + + checkpoint[key] = torch.empty(info["shape"], dtype=dtype, device=device) + + return checkpoint + + +def read_checkpoint_meta(path: Union[str, Path], scan: bool = False) -> Dict[str, torch.Tensor]: + if str(path).endswith(".safetensors"): + try: + path_str = path.as_posix() if isinstance(path, Path) else path + checkpoint = _fast_safetensors_reader(path_str) + except Exception: + # TODO: create an issue for supporting the "meta" device here? + checkpoint = safetensors.torch.load_file(path, device="cpu") + else: + if scan: + scan_result = scan_file_path(path) + if scan_result.infected_files != 0: + raise Exception(f'The model file "{path}" is potentially infected by malware. Aborting import.') + checkpoint = torch.load(path, map_location=torch.device("meta")) + return checkpoint + + +def lora_token_vector_length(checkpoint: Dict[str, torch.Tensor]) -> Optional[int]: + """ Given a checkpoint in memory, return the lora token vector length :param checkpoint: The checkpoint """ + def _get_shape_1(key: str, tensor: torch.Tensor, checkpoint: Dict[str, torch.Tensor]) -> Optional[int]: + lora_token_vector_length = None + if "." 
not in key: diff --git a/invokeai/backend/model_manager/util/select_hf_files.py b/invokeai/backend/model_manager/util/select_hf_files.py index 6976059044..2fd7a3721a 100644 --- a/invokeai/backend/model_manager/util/select_hf_files.py +++ b/invokeai/backend/model_manager/util/select_hf_files.py @@ -36,23 +36,37 @@ def filter_files( """ variant = variant or ModelRepoVariant.DEFAULT paths: List[Path] = [] + root = files[0].parts[0] + + # if the subfolder is a single file, then bypass the selection and just return it + if subfolder and subfolder.suffix in [".safetensors", ".bin", ".onnx", ".xml", ".pth", ".pt", ".ckpt", ".msgpack"]: + return [root / subfolder] # Start by filtering on model file extensions, discarding images, docs, etc for file in files: if file.name.endswith((".json", ".txt")): paths.append(file) - elif file.name.endswith(("learned_embeds.bin", "ip_adapter.bin", "lora_weights.safetensors")): + elif file.name.endswith( + ( + "learned_embeds.bin", + "ip_adapter.bin", + "lora_weights.safetensors", + "weights.pb", + "onnx_data", + ) + ): paths.append(file) # BRITTLENESS WARNING!! # Diffusers models always seem to have "model" in their name, and the regex filter below is applied to avoid # downloading random checkpoints that might also be in the repo. However there is no guarantee # that a checkpoint doesn't contain "model" in its name, and no guarantee that future diffusers models - # will adhere to this naming convention, so this is an area of brittleness. + # will adhere to this naming convention, so this is an area to be careful of. elif re.search(r"model(\.[^.]+)?\.(safetensors|bin|onnx|xml|pth|pt|ckpt|msgpack)$", file.name): paths.append(file) # limit search to subfolder if requested if subfolder: + subfolder = root / subfolder paths = [x for x in paths if x.parent == Path(subfolder)] # _filter_by_variant uniquifies the paths and returns a set @@ -64,7 +78,7 @@ def _filter_by_variant(files: List[Path], variant: ModelRepoVariant) -> Set[Path result = set() basenames: Dict[Path, Path] = {} for path in files: - if path.suffix == ".onnx": + if path.suffix in [".onnx", ".pb", ".onnx_data"]: if variant == ModelRepoVariant.ONNX: result.add(path) diff --git a/invokeai/backend/model_management/lora.py b/invokeai/backend/model_patcher.py similarity index 79% rename from invokeai/backend/model_management/lora.py rename to invokeai/backend/model_patcher.py index d72f55794d..76271fc025 100644 --- a/invokeai/backend/model_management/lora.py +++ b/invokeai/backend/model_patcher.py @@ -1,21 +1,24 @@ +# Copyright (c) 2024 Ryan Dick, Lincoln D. 
Stein, and the InvokeAI Development Team +"""These classes implement model patching with LoRAs and Textual Inversions.""" + from __future__ import annotations import pickle from contextlib import contextmanager -from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, Iterator, List, Optional, Tuple, Union import numpy as np import torch -from compel.embeddings_provider import BaseTextualInversionManager -from diffusers.models import UNet2DConditionModel -from safetensors.torch import load_file -from transformers import CLIPTextModel, CLIPTokenizer +from diffusers import OnnxRuntimeModel, UNet2DConditionModel +from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from invokeai.app.shared.models import FreeUConfig -from invokeai.backend.model_management.model_load_optimizations import skip_torch_weight_init +from invokeai.backend.model_manager import AnyModel +from invokeai.backend.model_manager.load.optimizations import skip_torch_weight_init +from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel -from .models.lora import LoRAModel +from .lora import LoRAModelRaw +from .textual_inversion import TextualInversionManager, TextualInversionModelRaw """ loras = [ @@ -62,8 +65,8 @@ class ModelPatcher: def apply_lora_unet( cls, unet: UNet2DConditionModel, - loras: List[Tuple[LoRAModel, float]], - ): + loras: Iterator[Tuple[LoRAModelRaw, float]], + ) -> None: with cls.apply_lora(unet, loras, "lora_unet_"): yield @@ -72,8 +75,8 @@ class ModelPatcher: def apply_lora_text_encoder( cls, text_encoder: CLIPTextModel, - loras: List[Tuple[LoRAModel, float]], - ): + loras: Iterator[Tuple[LoRAModelRaw, float]], + ) -> None: with cls.apply_lora(text_encoder, loras, "lora_te_"): yield @@ -82,8 +85,8 @@ class ModelPatcher: def apply_sdxl_lora_text_encoder( cls, text_encoder: CLIPTextModel, - loras: List[Tuple[LoRAModel, float]], - ): + loras: List[Tuple[LoRAModelRaw, float]], + ) -> None: with cls.apply_lora(text_encoder, loras, "lora_te1_"): yield @@ -92,8 +95,8 @@ class ModelPatcher: def apply_sdxl_lora_text_encoder2( cls, text_encoder: CLIPTextModel, - loras: List[Tuple[LoRAModel, float]], - ): + loras: List[Tuple[LoRAModelRaw, float]], + ) -> None: with cls.apply_lora(text_encoder, loras, "lora_te2_"): yield @@ -101,10 +104,10 @@ class ModelPatcher: @contextmanager def apply_lora( cls, - model: torch.nn.Module, - loras: List[Tuple[LoRAModel, float]], + model: AnyModel, + loras: Iterator[Tuple[LoRAModelRaw, float]], prefix: str, - ): + ) -> None: original_weights = {} try: with torch.no_grad(): @@ -121,6 +124,7 @@ class ModelPatcher: # 2. From an API perspective, there's no reason that the `ModelPatcher` should be aware of the # intricacies of Stable Diffusion key resolution. It should just expect the input LoRA # weights to have valid keys. + assert isinstance(model, torch.nn.Module) module_key, module = cls._resolve_lora_key(model, layer_key, prefix) # All of the LoRA weight calculations will be done on the same device as the module weight. @@ -141,17 +145,21 @@ class ModelPatcher: # TODO(ryand): Using torch.autocast(...) over explicit casting may offer a speed benefit on CUDA # devices here. Experimentally, it was found to be very slow on CPU. More investigation needed. layer_weight = layer.get_weight(module.weight) * (lora_weight * layer_scale) - layer.to(device="cpu") + layer.to(device=torch.device("cpu")) + assert isinstance(layer_weight, torch.Tensor) # mypy thinks layer_weight is a float|Any ??! 
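# --- Editor's sketch (not part of the diff): the save/patch/restore pattern that
# apply_lora implements above, reduced to a single nn.Linear. `delta` and `scale`
# stand in for a LoRA layer's weight contribution; they are illustrative names,
# not InvokeAI API.
from contextlib import contextmanager
from typing import Iterator

import torch


@contextmanager
def patch_weight(module: torch.nn.Module, delta: torch.Tensor, scale: float) -> Iterator[None]:
    original = module.weight.detach().clone()  # saved like original_weights above
    try:
        with torch.no_grad():
            # cast the patch to the module's dtype, as apply_lora does with layer_weight
            module.weight += (delta * scale).to(dtype=module.weight.dtype)
        yield
    finally:
        with torch.no_grad():
            module.weight.copy_(original)  # always restored, even if inference raises


linear = torch.nn.Linear(4, 4)
with patch_weight(linear, torch.ones_like(linear.weight), scale=0.5):
    pass  # run inference with the patched weight here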
if module.weight.shape != layer_weight.shape: # TODO: debug on lycoris + assert hasattr(layer_weight, "reshape") layer_weight = layer_weight.reshape(module.weight.shape) + assert isinstance(layer_weight, torch.Tensor) # mypy thinks layer_weight is a float|Any ??! module.weight += layer_weight.to(dtype=dtype) yield # wait for context manager exit finally: + assert hasattr(model, "get_submodule") # mypy not picking up fact that torch.nn.Module has get_submodule() with torch.no_grad(): for module_key, weight in original_weights.items(): model.get_submodule(module_key).weight.copy_(weight) @@ -161,9 +169,9 @@ class ModelPatcher: def apply_ti( cls, tokenizer: CLIPTokenizer, - text_encoder: CLIPTextModel, - ti_list: List[Tuple[str, Any]], - ) -> Tuple[CLIPTokenizer, TextualInversionManager]: + text_encoder: Union[CLIPTextModel, CLIPTextModelWithProjection], + ti_list: List[Tuple[str, TextualInversionModelRaw]], + ) -> Iterator[Tuple[CLIPTokenizer, TextualInversionManager]]: init_tokens_count = None new_tokens_added = None @@ -187,13 +195,13 @@ class ModelPatcher: ti_manager = TextualInversionManager(ti_tokenizer) init_tokens_count = text_encoder.resize_token_embeddings(None, pad_to_multiple_of).num_embeddings - def _get_trigger(ti_name, index): + def _get_trigger(ti_name: str, index: int) -> str: trigger = ti_name if index > 0: trigger += f"-!pad-{i}" return f"<{trigger}>" - def _get_ti_embedding(model_embeddings, ti): + def _get_ti_embedding(model_embeddings: torch.nn.Module, ti: TextualInversionModelRaw) -> torch.Tensor: # for SDXL models, select the embedding that matches the text encoder's dimensions if ti.embedding_2 is not None: return ( @@ -221,6 +229,7 @@ class ModelPatcher: model_embeddings = text_encoder.get_input_embeddings() for ti_name, ti in ti_list: + assert isinstance(ti, TextualInversionModelRaw) ti_embedding = _get_ti_embedding(text_encoder.get_input_embeddings(), ti) ti_tokens = [] @@ -257,9 +266,9 @@ class ModelPatcher: @contextmanager def apply_clip_skip( cls, - text_encoder: CLIPTextModel, + text_encoder: Union[CLIPTextModel, CLIPTextModelWithProjection], clip_skip: int, - ): + ) -> None: skipped_layers = [] try: for _i in range(clip_skip): @@ -277,9 +286,10 @@ class ModelPatcher: cls, unet: UNet2DConditionModel, freeu_config: Optional[FreeUConfig] = None, - ): + ) -> None: did_apply_freeu = False try: + assert hasattr(unet, "enable_freeu") # mypy doesn't pick up this attribute? if freeu_config is not None: unet.enable_freeu(b1=freeu_config.b1, b2=freeu_config.b2, s1=freeu_config.s1, s2=freeu_config.s2) did_apply_freeu = True @@ -287,109 +297,19 @@ class ModelPatcher: yield finally: + assert hasattr(unet, "disable_freeu") # mypy doesn't pick up this attribute? 
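# --- Editor's sketch (not part of the diff): the clip-skip trick used by
# apply_clip_skip above, as a standalone helper. It removes the last `n` layers
# from an nn.ModuleList and re-appends them on exit; the attribute path CLIP uses
# (text_model.encoder.layers) is an assumption about the caller.
from contextlib import contextmanager
from typing import Iterator

import torch


@contextmanager
def skip_last_layers(layers: torch.nn.ModuleList, n: int) -> Iterator[None]:
    skipped = []
    try:
        for _ in range(n):
            skipped.append(layers[-1])
            del layers[-1]  # nn.ModuleList supports __delitem__
        yield
    finally:
        for module in reversed(skipped):
            layers.append(module)  # restore in the original order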
if did_apply_freeu: unet.disable_freeu() -class TextualInversionModel: - embedding: torch.Tensor # [n, 768]|[n, 1280] - embedding_2: Optional[torch.Tensor] = None # [n, 768]|[n, 1280] - for SDXL models - - @classmethod - def from_checkpoint( - cls, - file_path: Union[str, Path], - device: Optional[torch.device] = None, - dtype: Optional[torch.dtype] = None, - ): - if not isinstance(file_path, Path): - file_path = Path(file_path) - - result = cls() # TODO: - - if file_path.suffix == ".safetensors": - state_dict = load_file(file_path.absolute().as_posix(), device="cpu") - else: - state_dict = torch.load(file_path, map_location="cpu") - - # both v1 and v2 format embeddings - # difference mostly in metadata - if "string_to_param" in state_dict: - if len(state_dict["string_to_param"]) > 1: - print( - f'Warn: Embedding "{file_path.name}" contains multiple tokens, which is not supported. The first', - " token will be used.", - ) - - result.embedding = next(iter(state_dict["string_to_param"].values())) - - # v3 (easynegative) - elif "emb_params" in state_dict: - result.embedding = state_dict["emb_params"] - - # v5(sdxl safetensors file) - elif "clip_g" in state_dict and "clip_l" in state_dict: - result.embedding = state_dict["clip_g"] - result.embedding_2 = state_dict["clip_l"] - - # v4(diffusers bin files) - else: - result.embedding = next(iter(state_dict.values())) - - if len(result.embedding.shape) == 1: - result.embedding = result.embedding.unsqueeze(0) - - if not isinstance(result.embedding, torch.Tensor): - raise ValueError(f"Invalid embeddings file: {file_path.name}") - - return result - - -class TextualInversionManager(BaseTextualInversionManager): - pad_tokens: Dict[int, List[int]] - tokenizer: CLIPTokenizer - - def __init__(self, tokenizer: CLIPTokenizer): - self.pad_tokens = {} - self.tokenizer = tokenizer - - def expand_textual_inversion_token_ids_if_necessary(self, token_ids: list[int]) -> list[int]: - if len(self.pad_tokens) == 0: - return token_ids - - if token_ids[0] == self.tokenizer.bos_token_id: - raise ValueError("token_ids must not start with bos_token_id") - if token_ids[-1] == self.tokenizer.eos_token_id: - raise ValueError("token_ids must not end with eos_token_id") - - new_token_ids = [] - for token_id in token_ids: - new_token_ids.append(token_id) - if token_id in self.pad_tokens: - new_token_ids.extend(self.pad_tokens[token_id]) - - # Do not exceed the max model input size - # The -2 here is compensating for compensate compel.embeddings_provider.get_token_ids(), - # which first removes and then adds back the start and end tokens. 
- max_length = list(self.tokenizer.max_model_input_sizes.values())[0] - 2 - if len(new_token_ids) > max_length: - new_token_ids = new_token_ids[0:max_length] - - return new_token_ids - - class ONNXModelPatcher: - from diffusers import OnnxRuntimeModel - - from .models.base import IAIOnnxRuntimeModel - @classmethod @contextmanager def apply_lora_unet( cls, unet: OnnxRuntimeModel, - loras: List[Tuple[LoRAModel, float]], - ): + loras: Iterator[Tuple[LoRAModelRaw, float]], + ) -> None: with cls.apply_lora(unet, loras, "lora_unet_"): yield @@ -398,8 +318,8 @@ class ONNXModelPatcher: def apply_lora_text_encoder( cls, text_encoder: OnnxRuntimeModel, - loras: List[Tuple[LoRAModel, float]], - ): + loras: List[Tuple[LoRAModelRaw, float]], + ) -> None: with cls.apply_lora(text_encoder, loras, "lora_te_"): yield @@ -410,9 +330,9 @@ class ONNXModelPatcher: def apply_lora( cls, model: IAIOnnxRuntimeModel, - loras: List[Tuple[LoRAModel, float]], + loras: List[Tuple[LoRAModelRaw, float]], prefix: str, - ): + ) -> None: from .models.base import IAIOnnxRuntimeModel if not isinstance(model, IAIOnnxRuntimeModel): @@ -421,7 +341,7 @@ class ONNXModelPatcher: orig_weights = {} try: - blended_loras = {} + blended_loras: Dict[str, torch.Tensor] = {} for lora, lora_weight in loras: for layer_key, layer in lora.layers.items(): @@ -432,7 +352,7 @@ class ONNXModelPatcher: layer_key = layer_key.replace(prefix, "") # TODO: rewrite to pass original tensor weight(required by ia3) layer_weight = layer.get_weight(None).detach().cpu().numpy() * lora_weight - if layer_key is blended_loras: + if layer_key in blended_loras: blended_loras[layer_key] += layer_weight else: blended_loras[layer_key] = layer_weight @@ -499,7 +419,7 @@ class ONNXModelPatcher: tokenizer: CLIPTokenizer, text_encoder: IAIOnnxRuntimeModel, ti_list: List[Tuple[str, Any]], - ) -> Tuple[CLIPTokenizer, TextualInversionManager]: + ) -> Iterator[Tuple[CLIPTokenizer, TextualInversionManager]]: from .models.base import IAIOnnxRuntimeModel if not isinstance(text_encoder, IAIOnnxRuntimeModel): @@ -517,7 +437,7 @@ class ONNXModelPatcher: ti_tokenizer = pickle.loads(pickle.dumps(tokenizer)) ti_manager = TextualInversionManager(ti_tokenizer) - def _get_trigger(ti_name, index): + def _get_trigger(ti_name: str, index: int) -> str: trigger = ti_name if index > 0: trigger += f"-!pad-{i}" diff --git a/invokeai/backend/onnx/onnx_runtime.py b/invokeai/backend/onnx/onnx_runtime.py new file mode 100644 index 0000000000..8916865dd5 --- /dev/null +++ b/invokeai/backend/onnx/onnx_runtime.py @@ -0,0 +1,218 @@ +# Copyright (c) 2024 The InvokeAI Development Team +import os +import sys +from pathlib import Path +from typing import Any, List, Optional, Tuple, Union + +import numpy as np +import onnx +from onnx import numpy_helper +from onnxruntime import InferenceSession, SessionOptions, get_available_providers + +from ..raw_model import RawModel + +ONNX_WEIGHTS_NAME = "model.onnx" + + +# NOTE FROM LS: This was copied from Stalker's original implementation. 
+# I have not yet gone through and fixed all the type hints +class IAIOnnxRuntimeModel(RawModel): + class _tensor_access: + def __init__(self, model): # type: ignore + self.model = model + self.indexes = {} + for idx, obj in enumerate(self.model.proto.graph.initializer): + self.indexes[obj.name] = idx + + def __getitem__(self, key: str): # type: ignore + value = self.model.proto.graph.initializer[self.indexes[key]] + return numpy_helper.to_array(value) + + def __setitem__(self, key: str, value: np.ndarray): # type: ignore + new_node = numpy_helper.from_array(value) + # set_external_data(new_node, location="in-memory-location") + new_node.name = key + # new_node.ClearField("raw_data") + del self.model.proto.graph.initializer[self.indexes[key]] + self.model.proto.graph.initializer.insert(self.indexes[key], new_node) + # self.model.data[key] = OrtValue.ortvalue_from_numpy(value) + + # __delitem__ + + def __contains__(self, key: str) -> bool: + # membership is tracked by the name -> index mapping built in __init__ + return key in self.indexes + + def items(self) -> List[Tuple[str, Any]]: # fixme + raise NotImplementedError("tensor.items") + # return [(obj.name, obj) for obj in self.raw_proto] + + def keys(self) -> List[str]: + return list(self.indexes.keys()) + + def values(self) -> List[Any]: # fixme + raise NotImplementedError("tensor.values") + # return [obj for obj in self.raw_proto] + + def size(self) -> int: + bytesSum = 0 + for node in self.model.proto.graph.initializer: + bytesSum += sys.getsizeof(node.raw_data) + return bytesSum + + class _access_helper: + def __init__(self, raw_proto): # type: ignore + self.indexes = {} + self.raw_proto = raw_proto + for idx, obj in enumerate(raw_proto): + self.indexes[obj.name] = idx + + def __getitem__(self, key: str): # type: ignore + return self.raw_proto[self.indexes[key]] + + def __setitem__(self, key: str, value): # type: ignore + index = self.indexes[key] + del self.raw_proto[index] + self.raw_proto.insert(index, value) + + # __delitem__ + + def __contains__(self, key: str) -> bool: + return key in self.indexes + + def items(self) -> List[Tuple[str, Any]]: + return [(obj.name, obj) for obj in self.raw_proto] + + def keys(self) -> List[str]: + return list(self.indexes.keys()) + + def values(self) -> List[Any]: # fixme + return list(self.raw_proto) + + def __init__(self, model_path: str, provider: Optional[str]): + self.path = model_path + self.session = None + self.provider = provider + """ + self.data_path = self.path + "_data" + if not os.path.exists(self.data_path): + print(f"Moving model tensors to separate file: {self.data_path}") + tmp_proto = onnx.load(model_path, load_external_data=True) + onnx.save_model(tmp_proto, self.path, save_as_external_data=True, all_tensors_to_one_file=True, location=os.path.basename(self.data_path), size_threshold=1024, convert_attribute=False) + del tmp_proto + gc.collect() + + self.proto = onnx.load(model_path, load_external_data=False) + """ + + self.proto = onnx.load(model_path, load_external_data=True) + # self.data = dict() + # for tensor in self.proto.graph.initializer: + # name = tensor.name + + # if tensor.HasField("raw_data"): + # npt = numpy_helper.to_array(tensor) + # orv = OrtValue.ortvalue_from_numpy(npt) + # # self.data[name] = orv + # # set_external_data(tensor, location="in-memory-location") + # tensor.name = name + # # tensor.ClearField("raw_data") + + self.nodes = self._access_helper(self.proto.graph.node) # type: ignore + # self.initializers = self._access_helper(self.proto.graph.initializer) + # print(self.proto.graph.input) + # print(self.proto.graph.initializer)
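# --- Editor's sketch (not part of the diff): the TensorProto round trip that
# _tensor_access.__getitem__/__setitem__ perform, shown on a tiny in-memory graph
# so the snippet is self-contained. The graph itself is illustrative.
import numpy as np
import onnx
from onnx import helper, numpy_helper

weight = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w")
node = helper.make_node("Identity", ["w"], ["y"])
graph = helper.make_graph(
    [node], "g", inputs=[],
    outputs=[helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [2, 2])],
    initializer=[weight],
)
model = helper.make_model(graph)

arr = numpy_helper.to_array(model.graph.initializer[0])  # TensorProto -> np.ndarray
patched = numpy_helper.from_array(arr * 0.5, name="w")   # np.ndarray -> TensorProto
del model.graph.initializer[0]
model.graph.initializer.insert(0, patched)               # replace in place, as __setitem__ does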
+ + self.tensors = self._tensor_access(self) # type: ignore + + # TODO: integrate with model manager/cache + def create_session(self, height=None, width=None): + if self.session is None or self.session_width != width or self.session_height != height: + # onnx.save(self.proto, "tmp.onnx") + # onnx.save_model(self.proto, "tmp.onnx", save_as_external_data=True, all_tensors_to_one_file=True, location="tmp.onnx_data", size_threshold=1024, convert_attribute=False) + # TODO: a way to get the weights after they have been moved outside of the model proto + # (trimmed_model, external_data) = buffer_external_data_tensors(self.proto) + sess = SessionOptions() + # self._external_data.update(**external_data) + # sess.add_external_initializers(list(self.data.keys()), list(self.data.values())) + # sess.enable_profiling = True + + # sess.intra_op_num_threads = 1 + # sess.inter_op_num_threads = 1 + # sess.execution_mode = ExecutionMode.ORT_SEQUENTIAL + # sess.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL + # sess.enable_cpu_mem_arena = True + # sess.enable_mem_pattern = True + # sess.add_session_config_entry("session.intra_op.use_xnnpack_threadpool", "1") ########### It's the key code + self.session_height = height + self.session_width = width + if height and width: + sess.add_free_dimension_override_by_name("unet_sample_batch", 2) + sess.add_free_dimension_override_by_name("unet_sample_channels", 4) + sess.add_free_dimension_override_by_name("unet_hidden_batch", 2) + sess.add_free_dimension_override_by_name("unet_hidden_sequence", 77) + sess.add_free_dimension_override_by_name("unet_sample_height", self.session_height) + sess.add_free_dimension_override_by_name("unet_sample_width", self.session_width) + sess.add_free_dimension_override_by_name("unet_time_batch", 1) + providers = [] + if self.provider: + providers.append(self.provider) + else: + providers = get_available_providers() + if "TensorrtExecutionProvider" in providers: + providers.remove("TensorrtExecutionProvider") + self.session = InferenceSession(self.proto.SerializeToString(), providers=providers, sess_options=sess) + # self.session = InferenceSession("tmp.onnx", providers=[self.provider], sess_options=self.sess_options) + # self.io_binding = self.session.io_binding() + + def release_session(self): + self.session = None + import gc + + gc.collect() + return + + def __call__(self, **kwargs): + if self.session is None: + raise Exception("create_session() must be called before running the model") + + inputs = {k: np.array(v) for k, v in kwargs.items()} + # output_names = self.session.get_outputs() + # for k in inputs: + # self.io_binding.bind_cpu_input(k, inputs[k]) + # for name in output_names: + # self.io_binding.bind_output(name.name) + # self.session.run_with_iobinding(self.io_binding, None) + # return self.io_binding.copy_outputs_to_cpu() + return self.session.run(None, inputs) + + # compatibility with the diffusers load code + @classmethod + def from_pretrained( + cls, + model_id: Union[str, Path], + subfolder: Optional[Union[str, Path]] = None, + file_name: Optional[str] = None, + provider: Optional[str] = None, + sess_options: Optional["SessionOptions"] = None, + **kwargs: Any, + ) -> Any: # fixme + file_name = file_name or ONNX_WEIGHTS_NAME + + if os.path.isdir(model_id): + model_path = model_id + if subfolder is not None: + model_path = os.path.join(model_path, subfolder) + model_path = os.path.join(model_path, file_name) + + else:
model_path = model_id + + # load model from local directory + if not os.path.isfile(model_path): + raise Exception(f"Model not found: {model_path}") + + # TODO: session options + return cls(str(model_path), provider=provider) diff --git a/invokeai/backend/raw_model.py b/invokeai/backend/raw_model.py new file mode 100644 index 0000000000..d0dc50c456 --- /dev/null +++ b/invokeai/backend/raw_model.py @@ -0,0 +1,15 @@ +"""Base class for 'Raw' models. + +The RawModel class is the base class of LoRAModelRaw and TextualInversionModelRaw, +and is used for type checking of calls to the model patcher. Its main purpose +is to avoid circular import issues when lora.py tries to import BaseModelType +from invokeai.backend.model_manager.config, and the latter tries to import LoRAModelRaw +from lora.py. + +The term 'raw' was introduced to describe a wrapper around a torch.nn.Module +that adds additional methods and attributes. +""" + + +class RawModel: + """Base class for 'Raw' model wrappers.""" diff --git a/invokeai/backend/stable_diffusion/__init__.py b/invokeai/backend/stable_diffusion/__init__.py index 111458dfd5..ed6782eefa 100644 --- a/invokeai/backend/stable_diffusion/__init__.py +++ b/invokeai/backend/stable_diffusion/__init__.py @@ -1,5 +1,14 @@ """ Initialization file for the invokeai.backend.stable_diffusion package """ + from .diffusers_pipeline import PipelineIntermediateState, StableDiffusionGeneratorPipeline # noqa: F401 from .diffusion import InvokeAIDiffuserComponent # noqa: F401 +from .seamless import set_seamless # noqa: F401 + +__all__ = [ + "PipelineIntermediateState", + "StableDiffusionGeneratorPipeline", + "InvokeAIDiffuserComponent", + "set_seamless", +] diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index b13a1271eb..8f5268f473 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -87,6 +87,7 @@ class AddsMaskGuidance: mask_latents: torch.FloatTensor scheduler: SchedulerMixin noise: torch.Tensor + gradient_mask: bool def __call__(self, step_output: Union[BaseOutput, SchedulerOutput], t: torch.Tensor, conditioning) -> BaseOutput: output_class = step_output.__class__ # We'll create a new one with masked data. @@ -122,7 +123,12 @@ class AddsMaskGuidance: # TODO: Do we need to also apply scheduler.scale_model_input? Or is add_noise appropriately scaled already?
# mask_latents = self.scheduler.scale_model_input(mask_latents, t) mask_latents = einops.repeat(mask_latents, "b c h w -> (repeat b) c h w", repeat=batch_size) - masked_input = torch.lerp(mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype)) + if self.gradient_mask: + threshold = t.item() / self.scheduler.config.num_train_timesteps + mask_bool = mask > threshold # I don't know when mask got inverted, but it did + masked_input = torch.where(mask_bool, latents, mask_latents) + else: + masked_input = torch.lerp(mask_latents.to(dtype=latents.dtype), latents, mask.to(dtype=latents.dtype)) return masked_input @@ -325,6 +331,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): t2i_adapter_data: Optional[list[T2IAdapterData]] = None, mask: Optional[torch.Tensor] = None, masked_latents: Optional[torch.Tensor] = None, + gradient_mask: Optional[bool] = False, seed: Optional[int] = None, ) -> torch.Tensor: if init_timestep.shape[0] == 0: @@ -365,7 +372,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): self._unet_forward, mask, masked_latents ) else: - additional_guidance.append(AddsMaskGuidance(mask, orig_latents, self.scheduler, noise)) + additional_guidance.append(AddsMaskGuidance(mask, orig_latents, self.scheduler, noise, gradient_mask)) try: latents = self.generate_latents_from_embeddings( @@ -383,7 +390,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): self.invokeai_diffuser.model_forward_callback = self._unet_forward # restore unmasked part - if mask is not None: + if mask is not None and not gradient_mask: latents = torch.lerp(orig_latents, latents.to(dtype=orig_latents.dtype), mask.to(dtype=orig_latents.dtype)) return latents diff --git a/invokeai/backend/stable_diffusion/diffusion/__init__.py b/invokeai/backend/stable_diffusion/diffusion/__init__.py index b7be1294ba..854d127a36 100644 --- a/invokeai/backend/stable_diffusion/diffusion/__init__.py +++ b/invokeai/backend/stable_diffusion/diffusion/__init__.py @@ -1 +1,5 @@ +""" +Initialization file for invokeai.backend.stable_diffusion.diffusion +""" + from .shared_invokeai_diffusion import InvokeAIDiffuserComponent # noqa: F401 diff --git a/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py b/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py index 7276a3fc53..e4b27ad04d 100644 --- a/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py +++ b/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py @@ -33,6 +33,11 @@ class BasicConditioningInfo: return self +@dataclass +class ConditioningFieldData: + conditionings: List[BasicConditioningInfo] + + @dataclass class SDXLConditioningInfo(BasicConditioningInfo): """SDXL text conditioning information produced by Compel.""" diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index 00ec43dd6b..674a091a2f 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -378,7 +378,6 @@ class InvokeAIDiffuserComponent: """Runs the conditioned and unconditioned UNet forward passes sequentially for lower memory usage at the cost of slower execution speed. """ - # Since we are running the conditioned and unconditioned passes sequentially, we need to split the ControlNet # and T2I-Adapter residuals into two chunks.
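# --- Editor's sketch (not part of the diff): the two masking modes added to
# AddsMaskGuidance above. Shapes, the example timestep, and the 1000-step
# scheduler length are illustrative.
import torch

latents = torch.randn(1, 4, 8, 8)       # current denoising latents
mask_latents = torch.randn(1, 4, 8, 8)  # noised latents of the original image
mask = torch.rand(1, 1, 8, 8)           # 1.0 = denoise freely, 0.0 = keep original

# gradient_mask=False: soft per-pixel linear blend
soft = torch.lerp(mask_latents, latents, mask)

# gradient_mask=True: binarize against a per-step threshold, so regions with low
# mask values are frozen earlier as t decreases
t, num_train_timesteps = 500, 1000
threshold = t / num_train_timesteps
hard = torch.where(mask > threshold, latents, mask_latents)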
uncond_down_block, cond_down_block = None, None diff --git a/invokeai/backend/stable_diffusion/schedulers/__init__.py b/invokeai/backend/stable_diffusion/schedulers/__init__.py index a4e9dbf9da..0b780d3ee2 100644 --- a/invokeai/backend/stable_diffusion/schedulers/__init__.py +++ b/invokeai/backend/stable_diffusion/schedulers/__init__.py @@ -1 +1,3 @@ from .schedulers import SCHEDULER_MAP # noqa: F401 + +__all__ = ["SCHEDULER_MAP"] diff --git a/invokeai/backend/stable_diffusion/seamless.py b/invokeai/backend/stable_diffusion/seamless.py new file mode 100644 index 0000000000..fb9112b56d --- /dev/null +++ b/invokeai/backend/stable_diffusion/seamless.py @@ -0,0 +1,84 @@ +from __future__ import annotations + +from contextlib import contextmanager +from typing import Callable, List, Union + +import torch.nn as nn +from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL +from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel + + +def _conv_forward_asymmetric(self, input, weight, bias): + """ + Patch for Conv2d._conv_forward that supports asymmetric padding + """ + working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"]) + working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"]) + return nn.functional.conv2d( + working, + weight, + bias, + self.stride, + nn.modules.utils._pair(0), + self.dilation, + self.groups, + ) + + +@contextmanager +def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL], seamless_axes: List[str]): + # Callable: (input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor + to_restore: list[tuple[nn.Conv2d | nn.ConvTranspose2d, Callable]] = [] + try: + # Hard coded to skip down block layers, allowing for seamless tiling at the expense of prompt adherence + skipped_layers = 1 + for m_name, m in model.named_modules(): + if not isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): + continue + + if isinstance(model, UNet2DConditionModel) and m_name.startswith("down_blocks.") and ".resnets." 
in m_name: + # down_blocks.1.resnets.1.conv1 + _, block_num, _, resnet_num, submodule_name = m_name.split(".") + block_num = int(block_num) + resnet_num = int(resnet_num) + + if block_num >= len(model.down_blocks) - skipped_layers: + continue + + # Skip the second resnet (could be configurable) + if resnet_num > 0: + continue + + # Skip the conv2 submodule of each resnet (could be configurable) + if submodule_name == "conv2": + continue + + m.asymmetric_padding_mode = {} + m.asymmetric_padding = {} + m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant" + m.asymmetric_padding["x"] = ( + m._reversed_padding_repeated_twice[0], + m._reversed_padding_repeated_twice[1], + 0, + 0, + ) + m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant" + m.asymmetric_padding["y"] = ( + 0, + 0, + m._reversed_padding_repeated_twice[2], + m._reversed_padding_repeated_twice[3], + ) + + to_restore.append((m, m._conv_forward)) + m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d) + + yield + + finally: + for module, orig_conv_forward in to_restore: + module._conv_forward = orig_conv_forward + if hasattr(module, "asymmetric_padding_mode"): + del module.asymmetric_padding_mode + if hasattr(module, "asymmetric_padding"): + del module.asymmetric_padding diff --git a/invokeai/backend/textual_inversion.py b/invokeai/backend/textual_inversion.py new file mode 100644 index 0000000000..f7390979bb --- /dev/null +++ b/invokeai/backend/textual_inversion.py @@ -0,0 +1,100 @@ +"""Textual Inversion wrapper class.""" + +from pathlib import Path +from typing import Dict, List, Optional, Union + +import torch +from compel.embeddings_provider import BaseTextualInversionManager +from safetensors.torch import load_file +from transformers import CLIPTokenizer +from typing_extensions import Self + +from .raw_model import RawModel + + +class TextualInversionModelRaw(RawModel): + embedding: torch.Tensor # [n, 768]|[n, 1280] + embedding_2: Optional[torch.Tensor] = None # [n, 768]|[n, 1280] - for SDXL models + + @classmethod + def from_checkpoint( + cls, + file_path: Union[str, Path], + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ) -> Self: + if not isinstance(file_path, Path): + file_path = Path(file_path) + + result = cls() # TODO: + + if file_path.suffix == ".safetensors": + state_dict = load_file(file_path.absolute().as_posix(), device="cpu") + else: + state_dict = torch.load(file_path, map_location="cpu") + + # both v1 and v2 format embeddings + # difference mostly in metadata + if "string_to_param" in state_dict: + if len(state_dict["string_to_param"]) > 1: + print( + f'Warn: Embedding "{file_path.name}" contains multiple tokens, which is not supported. The first', + " token will be used.", + ) + + result.embedding = next(iter(state_dict["string_to_param"].values())) + + # v3 (easynegative) + elif "emb_params" in state_dict: + result.embedding = state_dict["emb_params"] + + # v5 (SDXL safetensors files) + elif "clip_g" in state_dict and "clip_l" in state_dict: + result.embedding = state_dict["clip_g"] + result.embedding_2 = state_dict["clip_l"] + + # v4 (diffusers bin files) + else: + result.embedding = next(iter(state_dict.values())) + + if len(result.embedding.shape) == 1: + result.embedding = result.embedding.unsqueeze(0) + + if not isinstance(result.embedding, torch.Tensor): + raise ValueError(f"Invalid embeddings file: {file_path.name}") + + return result + +
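# --- Editor's sketch (not part of the diff): the pad-token expansion performed
# by the TextualInversionManager below. Each trigger token id is followed by the
# ids of its "-!pad-N" companions, then the list is capped at the model's max
# input length. All ids here are made up.
pad_tokens = {1001: [1002, 1003]}  # trigger token id -> ids of its padding tokens
token_ids = [5, 1001, 9]

new_token_ids = []
for token_id in token_ids:
    new_token_ids.append(token_id)
    new_token_ids.extend(pad_tokens.get(token_id, []))

max_length = 77 - 2  # leave room for the BOS/EOS tokens that compel strips and re-adds
new_token_ids = new_token_ids[:max_length]
assert new_token_ids == [5, 1001, 1002, 1003, 9]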
+# no type hints for BaseTextualInversionManager? +class TextualInversionManager(BaseTextualInversionManager): # type: ignore + pad_tokens: Dict[int, List[int]] + tokenizer: CLIPTokenizer + + def __init__(self, tokenizer: CLIPTokenizer): + self.pad_tokens = {} + self.tokenizer = tokenizer + + def expand_textual_inversion_token_ids_if_necessary(self, token_ids: list[int]) -> list[int]: + if len(self.pad_tokens) == 0: + return token_ids + + if token_ids[0] == self.tokenizer.bos_token_id: + raise ValueError("token_ids must not start with bos_token_id") + if token_ids[-1] == self.tokenizer.eos_token_id: + raise ValueError("token_ids must not end with eos_token_id") + + new_token_ids = [] + for token_id in token_ids: + new_token_ids.append(token_id) + if token_id in self.pad_tokens: + new_token_ids.extend(self.pad_tokens[token_id]) + + # Do not exceed the max model input size + # The -2 here is compensating for compel.embeddings_provider.get_token_ids(), + # which first removes and then adds back the start and end tokens. + max_length = list(self.tokenizer.max_model_input_sizes.values())[0] - 2 + if len(new_token_ids) > max_length: + new_token_ids = new_token_ids[0:max_length] + + return new_token_ids diff --git a/invokeai/backend/tiles/tiles.py b/invokeai/backend/tiles/tiles.py index 3c400fc87c..2757dadba2 100644 --- a/invokeai/backend/tiles/tiles.py +++ b/invokeai/backend/tiles/tiles.py @@ -3,7 +3,7 @@ from typing import Union import numpy as np -from invokeai.app.invocations.latent import LATENT_SCALE_FACTOR +from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR from invokeai.backend.tiles.utils import TBLR, Tile, paste, seam_blend diff --git a/invokeai/backend/training/__init__.py b/invokeai/backend/training/__init__.py index ed3ceb90ec..6b5aa7327d 100644 --- a/invokeai/backend/training/__init__.py +++ b/invokeai/backend/training/__init__.py @@ -1,4 +1,5 @@ """ Initialization file for invokeai.backend.training """ + from .textual_inversion_training import do_textual_inversion_training, parse_args # noqa: F401 diff --git a/invokeai/backend/training/textual_inversion_training.py b/invokeai/backend/training/textual_inversion_training.py index e31ce959c2..7ddcf14367 100644 --- a/invokeai/backend/training/textual_inversion_training.py +++ b/invokeai/backend/training/textual_inversion_training.py @@ -120,7 +120,7 @@ def parse_args() -> Namespace: "--model", type=str, default="sd-1/main/stable-diffusion-v1-5", - help="Name of the diffusers model to train against, as defined in configs/models.yaml.", + help="Name of the diffusers model to train against.", ) model_group.add_argument( "--revision", @@ -858,9 +858,9 @@ def do_textual_inversion_training( # Let's make sure we don't update any embedding weights besides the newly added token index_no_updates = torch.arange(len(tokenizer)) != placeholder_token_id with torch.no_grad(): - accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[ - index_no_updates - ] = orig_embeds_params[index_no_updates] + accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = ( + orig_embeds_params[index_no_updates] + ) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: diff --git a/invokeai/backend/util/__init__.py b/invokeai/backend/util/__init__.py index 87ae1480f5..ee6793376f 100644 --- a/invokeai/backend/util/__init__.py +++ b/invokeai/backend/util/__init__.py @@ -1,6 +1,7 @@ """ Initialization file for invokeai.backend.util """ + from .attention import auto_detect_slice_size # noqa:
F401 from .devices import ( # noqa: F401 CPU_DEVICE, @@ -12,6 +13,22 @@ from .devices import ( # noqa: F401 torch_dtype, ) from .logging import InvokeAILogger -from .util import Chdir, ask_user, download_with_resume, instantiate_from_config, url_attachment_name # noqa: F401 +from .util import ( # TO DO: Clean this up; remove the unused symbols + GIG, + Chdir, + ask_user, # noqa + directory_size, + download_with_resume, + instantiate_from_config, # noqa + url_attachment_name, # noqa +) -__all__ = ["Chdir", "InvokeAILogger", "choose_precision", "choose_torch_device"] +__all__ = [ + "GIG", + "directory_size", + "Chdir", + "download_with_resume", + "InvokeAILogger", + "choose_precision", + "choose_torch_device", +] diff --git a/invokeai/backend/util/attention.py b/invokeai/backend/util/attention.py index 910933044e..88dc6e5cec 100644 --- a/invokeai/backend/util/attention.py +++ b/invokeai/backend/util/attention.py @@ -3,6 +3,7 @@ Utility routine used for autodetection of optimal slice size for attention mechanism. """ + import psutil import torch diff --git a/invokeai/backend/util/devices.py b/invokeai/backend/util/devices.py index d6d3ad727f..a83d1045f7 100644 --- a/invokeai/backend/util/devices.py +++ b/invokeai/backend/util/devices.py @@ -1,7 +1,7 @@ from __future__ import annotations from contextlib import nullcontext -from typing import Union +from typing import Literal, Optional, Union import torch from torch import autocast @@ -29,12 +29,19 @@ def choose_torch_device() -> torch.device: return torch.device(config.device) -def choose_precision(device: torch.device) -> str: - """Returns an appropriate precision for the given torch device""" +# We are in transition here from using a single global AppConfig to allowing multiple +# configurations. It is strongly recommended to pass the app_config to this function. +def choose_precision( + device: torch.device, app_config: Optional[InvokeAIAppConfig] = None +) -> Literal["float32", "float16", "bfloat16"]: + """Return an appropriate precision for the given torch device.""" + app_config = app_config or config if device.type == "cuda": device_name = torch.cuda.get_device_name(device) if not ("GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name): - if config.precision == "bfloat16": + if app_config.precision == "float32": + return "float32" + elif app_config.precision == "bfloat16": return "bfloat16" else: return "float16" @@ -43,8 +50,14 @@ def choose_precision(device: torch.device) -> str: return "float32" -def torch_dtype(device: torch.device) -> torch.dtype: - precision = choose_precision(device) +# We are in transition here from using a single global AppConfig to allowing multiple +# configurations. It is strongly recommended to pass the app_config to this function. 
+def torch_dtype( + device: Optional[torch.device] = None, + app_config: Optional[InvokeAIAppConfig] = None, +) -> torch.dtype: + device = device or choose_torch_device() + precision = choose_precision(device, app_config) if precision == "float16": return torch.float16 if precision == "bfloat16": diff --git a/invokeai/backend/util/silence_warnings.py b/invokeai/backend/util/silence_warnings.py new file mode 100644 index 0000000000..4c566ba759 --- /dev/null +++ b/invokeai/backend/util/silence_warnings.py @@ -0,0 +1,29 @@ +"""Context class to silence transformers and diffusers warnings.""" + +import warnings +from typing import Any + +from diffusers import logging as diffusers_logging +from transformers import logging as transformers_logging + + +class SilenceWarnings(object): + """Use in context to temporarily turn off warnings from transformers & diffusers modules. + + with SilenceWarnings(): + # do something + """ + + def __init__(self) -> None: + self.transformers_verbosity = transformers_logging.get_verbosity() + self.diffusers_verbosity = diffusers_logging.get_verbosity() + + def __enter__(self) -> None: + transformers_logging.set_verbosity_error() + diffusers_logging.set_verbosity_error() + warnings.simplefilter("ignore") + + def __exit__(self, *args: Any) -> None: + transformers_logging.set_verbosity(self.transformers_verbosity) + diffusers_logging.set_verbosity(self.diffusers_verbosity) + warnings.simplefilter("default") diff --git a/invokeai/backend/util/test_utils.py b/invokeai/backend/util/test_utils.py index 09b9de9e98..0d76c4633c 100644 --- a/invokeai/backend/util/test_utils.py +++ b/invokeai/backend/util/test_utils.py @@ -5,10 +5,9 @@ from typing import Optional, Union import pytest import torch -from invokeai.app.services.config.config_default import InvokeAIAppConfig -from invokeai.backend.install.model_install_backend import ModelInstall -from invokeai.backend.model_management.model_manager import ModelInfo -from invokeai.backend.model_management.models.base import BaseModelType, ModelNotFoundException, ModelType, SubModelType +from invokeai.app.services.model_manager import ModelManagerServiceBase +from invokeai.app.services.model_records import UnknownModelException +from invokeai.backend.model_manager import BaseModelType, LoadedModel, ModelType, SubModelType @pytest.fixture(scope="session") @@ -16,31 +15,20 @@ def torch_device(): return "cuda" if torch.cuda.is_available() else "cpu" -@pytest.fixture(scope="module") -def model_installer(): - """A global ModelInstall pytest fixture to be used by many tests.""" - # HACK(ryand): InvokeAIAppConfig.get_config() returns a singleton config object. This can lead to weird interactions - # between tests that need to alter the config. For example, some tests change the 'root' directory in the config, - # which can cause `install_and_load_model(...)` to re-download the model unnecessarily. As a temporary workaround, - # we pass a kwarg to get_config, which causes the config to be re-loaded. To fix this properly, we should stop using - # a singleton. - return ModelInstall(InvokeAIAppConfig.get_config(log_level="info")) - - def install_and_load_model( - model_installer: ModelInstall, + model_manager: ModelManagerServiceBase, model_path_id_or_url: Union[str, Path], model_name: str, base_model: BaseModelType, model_type: ModelType, submodel_type: Optional[SubModelType] = None, -) -> ModelInfo: - """Install a model if it is not already installed, then get the ModelInfo for that model. 
+) -> LoadedModel: +"""Install a model if it is not already installed, then get the LoadedModel for that model. This is intended as a utility function for tests. Args: - model_installer (ModelInstall): The model installer. + model_manager (ModelManagerServiceBase): The model manager. model_path_id_or_url (Union[str, Path]): The path, HF ID, URL, etc. where the model can be installed from if it is not already installed. model_name (str): The model name, forwarded to ModelManager.get_model(...). @@ -49,18 +37,25 @@ def install_and_load_model( submodel_type (Optional[SubModelType]): The submodel type, forwarded to ModelManager.get_model(...). Returns: - ModelInfo + LoadedModel """ - # If the requested model is already installed, return its ModelInfo. - with contextlib.suppress(ModelNotFoundException): - return model_installer.mgr.get_model(model_name, base_model, model_type, submodel_type) + # If the requested model is already installed, return its LoadedModel + with contextlib.suppress(UnknownModelException): + # TODO: Replace with wrapper call + loaded_model: LoadedModel = model_manager.load_model_by_attr( + model_name=model_name, base_model=base_model, model_type=model_type + ) + return loaded_model # Install the requested model. - model_installer.heuristic_import(model_path_id_or_url) + job = model_manager.install.heuristic_import(model_path_id_or_url) + model_manager.install.wait_for_job(job, timeout=10) + assert job.complete try: - return model_installer.mgr.get_model(model_name, base_model, model_type, submodel_type) - except ModelNotFoundException as e: + loaded_model = model_manager.load_model_by_config(job.config_out) + return loaded_model + except UnknownModelException as e: raise Exception( "Failed to get model info after installing it. There could be a mismatch between the requested model and" f" the installation id ('{model_path_id_or_url}'). Error: {e}" diff --git a/invokeai/backend/util/util.py b/invokeai/backend/util/util.py index 13751e2770..ac7a64e807 100644 --- a/invokeai/backend/util/util.py +++ b/invokeai/backend/util/util.py @@ -24,6 +24,22 @@ import invokeai.backend.util.logging as logger from .devices import torch_dtype +# actual size of a gig +GIG = 1073741824 + + +def directory_size(directory: Path) -> int: + """ + Return the aggregate size of all files in a directory (bytes). + """ + total = 0 + for root, dirs, files in os.walk(directory): + for f in files: + total += Path(root, f).stat().st_size + for d in dirs: + total += Path(root, d).stat().st_size + return total + def log_txt_as_img(wh, xc, size=10): # wh a tuple of (width, height) @@ -324,14 +340,17 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path logger.error(f"ERROR DOWNLOADING {url}: {resp.text}") return None - with open(dest, open_mode) as file, tqdm( - desc=str(dest), - initial=exist_size, - total=content_length, - unit="iB", - unit_scale=True, - unit_divisor=1000, - ) as bar: + with ( + open(dest, open_mode) as file, + tqdm( + desc=str(dest), + initial=exist_size, + total=content_length, + unit="iB", + unit_scale=True, + unit_divisor=1000, + ) as bar, + ): for data in resp.iter_content(chunk_size=1024): size = file.write(data) bar.update(size) diff --git a/invokeai/configs/INITIAL_MODELS.yaml b/invokeai/configs/INITIAL_MODELS.yaml index c230665e3a..8ad788fba7 100644 --- a/invokeai/configs/INITIAL_MODELS.yaml +++ b/invokeai/configs/INITIAL_MODELS.yaml @@ -1,153 +1,157 @@ # This file predefines a few models that the user may want to install.
sd-1/main/stable-diffusion-v1-5: description: Stable Diffusion version 1.5 diffusers model (4.27 GB) - repo_id: runwayml/stable-diffusion-v1-5 + source: runwayml/stable-diffusion-v1-5 recommended: True default: True sd-1/main/stable-diffusion-v1-5-inpainting: description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB) - repo_id: runwayml/stable-diffusion-inpainting + source: runwayml/stable-diffusion-inpainting recommended: True sd-2/main/stable-diffusion-2-1: description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB) - repo_id: stabilityai/stable-diffusion-2-1 + source: stabilityai/stable-diffusion-2-1 recommended: False sd-2/main/stable-diffusion-2-inpainting: description: Stable Diffusion version 2.0 inpainting model (5.21 GB) - repo_id: stabilityai/stable-diffusion-2-inpainting + source: stabilityai/stable-diffusion-2-inpainting recommended: False sdxl/main/stable-diffusion-xl-base-1-0: description: Stable Diffusion XL base model (12 GB) - repo_id: stabilityai/stable-diffusion-xl-base-1.0 + source: stabilityai/stable-diffusion-xl-base-1.0 recommended: True sdxl-refiner/main/stable-diffusion-xl-refiner-1-0: description: Stable Diffusion XL refiner model (12 GB) - repo_id: stabilityai/stable-diffusion-xl-refiner-1.0 + source: stabilityai/stable-diffusion-xl-refiner-1.0 recommended: False -sdxl/vae/sdxl-1-0-vae-fix: - description: Fine tuned version of the SDXL-1.0 VAE - repo_id: madebyollin/sdxl-vae-fp16-fix +sdxl/vae/sdxl-vae-fp16-fix: + description: Version of the SDXL-1.0 VAE that works in half precision mode + source: madebyollin/sdxl-vae-fp16-fix recommended: True sd-1/main/Analog-Diffusion: description: An SD-1.5 model trained on diverse analog photographs (2.13 GB) - repo_id: wavymulder/Analog-Diffusion + source: wavymulder/Analog-Diffusion recommended: False -sd-1/main/Deliberate_v5: +sd-1/main/Deliberate: description: Versatile model that produces detailed images up to 768px (4.27 GB) - path: https://huggingface.co/XpucT/Deliberate/resolve/main/Deliberate_v5.safetensors + source: https://huggingface.co/XpucT/Deliberate/resolve/main/Deliberate_v5.safetensors?download=true recommended: False sd-1/main/Dungeons-and-Diffusion: description: Dungeons & Dragons characters (2.13 GB) - repo_id: 0xJustin/Dungeons-and-Diffusion + source: 0xJustin/Dungeons-and-Diffusion recommended: False sd-1/main/dreamlike-photoreal-2: description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB) - repo_id: dreamlike-art/dreamlike-photoreal-2.0 + source: dreamlike-art/dreamlike-photoreal-2.0 recommended: False sd-1/main/Inkpunk-Diffusion: description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB) - repo_id: Envvi/Inkpunk-Diffusion + source: Envvi/Inkpunk-Diffusion recommended: False sd-1/main/openjourney: description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB) - repo_id: prompthero/openjourney + source: prompthero/openjourney recommended: False sd-1/main/seek.art_MEGA: - repo_id: coreco/seek.art_MEGA + source: coreco/seek.art_MEGA description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB) recommended: False sd-1/main/trinart_stable_diffusion_v2: description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB) - repo_id: naclbit/trinart_stable_diffusion_v2 + source: naclbit/trinart_stable_diffusion_v2 recommended: False 
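# --- Editor's sketch (not part of the diff): reading an entry from this file
# under the new schema, where keys are "base/type/name" paths and `source`
# replaces the old `repo_id`/`path` fields. The loader is illustrative, not
# InvokeAI's actual install code, and assumes PyYAML is available.
import yaml

entry = yaml.safe_load(
    "sd-1/controlnet/canny:\n"
    "  source: lllyasviel/control_v11p_sd15_canny\n"
    "  recommended: True\n"
)
for key, attrs in entry.items():
    base, model_type, name = key.split("/", 2)
    print(base, model_type, name, attrs["source"], attrs.get("recommended", False))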
sd-1/controlnet/qrcode_monster: - repo_id: monster-labs/control_v1p_sd15_qrcode_monster + source: monster-labs/control_v1p_sd15_qrcode_monster subfolder: v2 sd-1/controlnet/canny: - repo_id: lllyasviel/control_v11p_sd15_canny + source: lllyasviel/control_v11p_sd15_canny recommended: True sd-1/controlnet/inpaint: - repo_id: lllyasviel/control_v11p_sd15_inpaint + source: lllyasviel/control_v11p_sd15_inpaint sd-1/controlnet/mlsd: - repo_id: lllyasviel/control_v11p_sd15_mlsd + source: lllyasviel/control_v11p_sd15_mlsd sd-1/controlnet/depth: - repo_id: lllyasviel/control_v11f1p_sd15_depth + source: lllyasviel/control_v11f1p_sd15_depth recommended: True sd-1/controlnet/normal_bae: - repo_id: lllyasviel/control_v11p_sd15_normalbae + source: lllyasviel/control_v11p_sd15_normalbae sd-1/controlnet/seg: - repo_id: lllyasviel/control_v11p_sd15_seg + source: lllyasviel/control_v11p_sd15_seg sd-1/controlnet/lineart: - repo_id: lllyasviel/control_v11p_sd15_lineart + source: lllyasviel/control_v11p_sd15_lineart recommended: True sd-1/controlnet/lineart_anime: - repo_id: lllyasviel/control_v11p_sd15s2_lineart_anime + source: lllyasviel/control_v11p_sd15s2_lineart_anime sd-1/controlnet/openpose: - repo_id: lllyasviel/control_v11p_sd15_openpose + source: lllyasviel/control_v11p_sd15_openpose recommended: True sd-1/controlnet/scribble: - repo_id: lllyasviel/control_v11p_sd15_scribble + source: lllyasviel/control_v11p_sd15_scribble recommended: False sd-1/controlnet/softedge: - repo_id: lllyasviel/control_v11p_sd15_softedge + source: lllyasviel/control_v11p_sd15_softedge sd-1/controlnet/shuffle: - repo_id: lllyasviel/control_v11e_sd15_shuffle + source: lllyasviel/control_v11e_sd15_shuffle sd-1/controlnet/tile: - repo_id: lllyasviel/control_v11f1e_sd15_tile + source: lllyasviel/control_v11f1e_sd15_tile sd-1/controlnet/ip2p: - repo_id: lllyasviel/control_v11e_sd15_ip2p + source: lllyasviel/control_v11e_sd15_ip2p sd-1/t2i_adapter/canny-sd15: - repo_id: TencentARC/t2iadapter_canny_sd15v2 + source: TencentARC/t2iadapter_canny_sd15v2 sd-1/t2i_adapter/sketch-sd15: - repo_id: TencentARC/t2iadapter_sketch_sd15v2 + source: TencentARC/t2iadapter_sketch_sd15v2 sd-1/t2i_adapter/depth-sd15: - repo_id: TencentARC/t2iadapter_depth_sd15v2 + source: TencentARC/t2iadapter_depth_sd15v2 sd-1/t2i_adapter/zoedepth-sd15: - repo_id: TencentARC/t2iadapter_zoedepth_sd15v1 + source: TencentARC/t2iadapter_zoedepth_sd15v1 sdxl/t2i_adapter/canny-sdxl: - repo_id: TencentARC/t2i-adapter-canny-sdxl-1.0 + source: TencentARC/t2i-adapter-canny-sdxl-1.0 sdxl/t2i_adapter/zoedepth-sdxl: - repo_id: TencentARC/t2i-adapter-depth-zoe-sdxl-1.0 + source: TencentARC/t2i-adapter-depth-zoe-sdxl-1.0 sdxl/t2i_adapter/lineart-sdxl: - repo_id: TencentARC/t2i-adapter-lineart-sdxl-1.0 + source: TencentARC/t2i-adapter-lineart-sdxl-1.0 sdxl/t2i_adapter/sketch-sdxl: - repo_id: TencentARC/t2i-adapter-sketch-sdxl-1.0 + source: TencentARC/t2i-adapter-sketch-sdxl-1.0 sd-1/embedding/EasyNegative: - path: https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors + source: https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors recommended: True -sd-1/embedding/ahx-beta-453407d: - repo_id: sd-concepts-library/ahx-beta-453407d + description: A textual inversion to use in the negative prompt to reduce bad anatomy +sd-1/lora/FlatColor: + source: https://civitai.com/models/6433/loraflatcolor + recommended: True + description: A LoRA that generates scenery using solid blocks of color sd-1/lora/Ink scenery: - path: 
https://civitai.com/api/download/models/83390 + source: https://civitai.com/api/download/models/83390 + description: Generate india ink-like landscapes sd-1/ip_adapter/ip_adapter_sd15: - repo_id: InvokeAI/ip_adapter_sd15 + source: InvokeAI/ip_adapter_sd15 recommended: True requires: - InvokeAI/ip_adapter_sd_image_encoder description: IP-Adapter for SD 1.5 models sd-1/ip_adapter/ip_adapter_plus_sd15: - repo_id: InvokeAI/ip_adapter_plus_sd15 + source: InvokeAI/ip_adapter_plus_sd15 recommended: False requires: - InvokeAI/ip_adapter_sd_image_encoder description: Refined IP-Adapter for SD 1.5 models sd-1/ip_adapter/ip_adapter_plus_face_sd15: - repo_id: InvokeAI/ip_adapter_plus_face_sd15 + source: InvokeAI/ip_adapter_plus_face_sd15 recommended: False requires: - InvokeAI/ip_adapter_sd_image_encoder description: Refined IP-Adapter for SD 1.5 models, adapted for faces sdxl/ip_adapter/ip_adapter_sdxl: - repo_id: InvokeAI/ip_adapter_sdxl + source: InvokeAI/ip_adapter_sdxl recommended: False requires: - InvokeAI/ip_adapter_sdxl_image_encoder description: IP-Adapter for SDXL models any/clip_vision/ip_adapter_sd_image_encoder: - repo_id: InvokeAI/ip_adapter_sd_image_encoder + source: InvokeAI/ip_adapter_sd_image_encoder recommended: False description: Required model for using IP-Adapters with SD-1/2 models any/clip_vision/ip_adapter_sdxl_image_encoder: - repo_id: InvokeAI/ip_adapter_sdxl_image_encoder + source: InvokeAI/ip_adapter_sdxl_image_encoder recommended: False description: Required model for using IP-Adapters with SDXL models diff --git a/invokeai/configs/INITIAL_MODELS2.yaml b/invokeai/configs/INITIAL_MODELS2.yaml deleted file mode 100644 index ca2283ab81..0000000000 --- a/invokeai/configs/INITIAL_MODELS2.yaml +++ /dev/null @@ -1,157 +0,0 @@ -# This file predefines a few models that the user may want to install. 
-sd-1/main/stable-diffusion-v1-5: - description: Stable Diffusion version 1.5 diffusers model (4.27 GB) - source: runwayml/stable-diffusion-v1-5 - recommended: True - default: True -sd-1/main/stable-diffusion-v1-5-inpainting: - description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB) - source: runwayml/stable-diffusion-inpainting - recommended: True -sd-2/main/stable-diffusion-2-1: - description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB) - source: stabilityai/stable-diffusion-2-1 - recommended: False -sd-2/main/stable-diffusion-2-inpainting: - description: Stable Diffusion version 2.0 inpainting model (5.21 GB) - source: stabilityai/stable-diffusion-2-inpainting - recommended: False -sdxl/main/stable-diffusion-xl-base-1-0: - description: Stable Diffusion XL base model (12 GB) - source: stabilityai/stable-diffusion-xl-base-1.0 - recommended: True -sdxl-refiner/main/stable-diffusion-xl-refiner-1-0: - description: Stable Diffusion XL refiner model (12 GB) - source: stabilityai/stable-diffusion-xl-refiner-1.0 - recommended: False -sdxl/vae/sdxl-vae-fp16-fix: - description: Version of the SDXL-1.0 VAE that works in half precision mode - source: madebyollin/sdxl-vae-fp16-fix - recommended: True -sd-1/main/Analog-Diffusion: - description: An SD-1.5 model trained on diverse analog photographs (2.13 GB) - source: wavymulder/Analog-Diffusion - recommended: False -sd-1/main/Deliberate: - description: Versatile model that produces detailed images up to 768px (4.27 GB) - source: XpucT/Deliberate - recommended: False -sd-1/main/Dungeons-and-Diffusion: - description: Dungeons & Dragons characters (2.13 GB) - source: 0xJustin/Dungeons-and-Diffusion - recommended: False -sd-1/main/dreamlike-photoreal-2: - description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB) - source: dreamlike-art/dreamlike-photoreal-2.0 - recommended: False -sd-1/main/Inkpunk-Diffusion: - description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB) - source: Envvi/Inkpunk-Diffusion - recommended: False -sd-1/main/openjourney: - description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB) - source: prompthero/openjourney - recommended: False -sd-1/main/seek.art_MEGA: - source: coreco/seek.art_MEGA - description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB) - recommended: False -sd-1/main/trinart_stable_diffusion_v2: - description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB) - source: naclbit/trinart_stable_diffusion_v2 - recommended: False -sd-1/controlnet/qrcode_monster: - source: monster-labs/control_v1p_sd15_qrcode_monster - subfolder: v2 -sd-1/controlnet/canny: - source: lllyasviel/control_v11p_sd15_canny - recommended: True -sd-1/controlnet/inpaint: - source: lllyasviel/control_v11p_sd15_inpaint -sd-1/controlnet/mlsd: - source: lllyasviel/control_v11p_sd15_mlsd -sd-1/controlnet/depth: - source: lllyasviel/control_v11f1p_sd15_depth - recommended: True -sd-1/controlnet/normal_bae: - source: lllyasviel/control_v11p_sd15_normalbae -sd-1/controlnet/seg: - source: lllyasviel/control_v11p_sd15_seg -sd-1/controlnet/lineart: - source: lllyasviel/control_v11p_sd15_lineart - recommended: True -sd-1/controlnet/lineart_anime: - source: lllyasviel/control_v11p_sd15s2_lineart_anime -sd-1/controlnet/openpose: - source: lllyasviel/control_v11p_sd15_openpose - recommended: True 
-sd-1/controlnet/scribble: - source: lllyasviel/control_v11p_sd15_scribble - recommended: False -sd-1/controlnet/softedge: - source: lllyasviel/control_v11p_sd15_softedge -sd-1/controlnet/shuffle: - source: lllyasviel/control_v11e_sd15_shuffle -sd-1/controlnet/tile: - source: lllyasviel/control_v11f1e_sd15_tile -sd-1/controlnet/ip2p: - source: lllyasviel/control_v11e_sd15_ip2p -sd-1/t2i_adapter/canny-sd15: - source: TencentARC/t2iadapter_canny_sd15v2 -sd-1/t2i_adapter/sketch-sd15: - source: TencentARC/t2iadapter_sketch_sd15v2 -sd-1/t2i_adapter/depth-sd15: - source: TencentARC/t2iadapter_depth_sd15v2 -sd-1/t2i_adapter/zoedepth-sd15: - source: TencentARC/t2iadapter_zoedepth_sd15v1 -sdxl/t2i_adapter/canny-sdxl: - source: TencentARC/t2i-adapter-canny-sdxl-1.0 -sdxl/t2i_adapter/zoedepth-sdxl: - source: TencentARC/t2i-adapter-depth-zoe-sdxl-1.0 -sdxl/t2i_adapter/lineart-sdxl: - source: TencentARC/t2i-adapter-lineart-sdxl-1.0 -sdxl/t2i_adapter/sketch-sdxl: - source: TencentARC/t2i-adapter-sketch-sdxl-1.0 -sd-1/embedding/EasyNegative: - source: https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors - recommended: True - description: A textual inversion to use in the negative prompt to reduce bad anatomy -sd-1/lora/FlatColor: - source: https://civitai.com/models/6433/loraflatcolor - recommended: True - description: A LoRA that generates scenery using solid blocks of color -sd-1/lora/Ink scenery: - source: https://civitai.com/api/download/models/83390 - description: Generate india ink-like landscapes -sd-1/ip_adapter/ip_adapter_sd15: - source: InvokeAI/ip_adapter_sd15 - recommended: True - requires: - - InvokeAI/ip_adapter_sd_image_encoder - description: IP-Adapter for SD 1.5 models -sd-1/ip_adapter/ip_adapter_plus_sd15: - source: InvokeAI/ip_adapter_plus_sd15 - recommended: False - requires: - - InvokeAI/ip_adapter_sd_image_encoder - description: Refined IP-Adapter for SD 1.5 models -sd-1/ip_adapter/ip_adapter_plus_face_sd15: - source: InvokeAI/ip_adapter_plus_face_sd15 - recommended: False - requires: - - InvokeAI/ip_adapter_sd_image_encoder - description: Refined IP-Adapter for SD 1.5 models, adapted for faces -sdxl/ip_adapter/ip_adapter_sdxl: - source: InvokeAI/ip_adapter_sdxl - recommended: False - requires: - - InvokeAI/ip_adapter_sdxl_image_encoder - description: IP-Adapter for SDXL models -any/clip_vision/ip_adapter_sd_image_encoder: - source: InvokeAI/ip_adapter_sd_image_encoder - recommended: False - description: Required model for using IP-Adapters with SD-1/2 models -any/clip_vision/ip_adapter_sdxl_image_encoder: - source: InvokeAI/ip_adapter_sdxl_image_encoder - recommended: False - description: Required model for using IP-Adapters with SDXL models diff --git a/invokeai/configs/models.yaml.example b/invokeai/configs/models.yaml.example deleted file mode 100644 index 98f8f77e62..0000000000 --- a/invokeai/configs/models.yaml.example +++ /dev/null @@ -1,47 +0,0 @@ -# This file describes the alternative machine learning models -# available to InvokeAI script. -# -# To add a new model, follow the examples below. Each -# model requires a model config file, a weights file, -# and the width and height of the images it -# was trained on. 
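The header comments of the deleted models.yaml.example spell out the legacy record: every checkpoint entry names a model config file, a weights file, and the width and height the model was trained on (diffusers-format entries carry a repo_id instead). A small sketch of checking those required fields, assuming the file parses to a plain dict; check_legacy_stanza is illustrative, not InvokeAI code:

def check_legacy_stanza(stanza: dict) -> list[str]:
    """Return the required legacy checkpoint fields missing from one models.yaml entry."""
    required = ("config", "weights", "width", "height")
    return [field for field in required if field not in stanza]


# e.g. check_legacy_stanza({"weights": "models/ldm/sd-v1-4.ckpt"}) == ["config", "width", "height"]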
-diffusers-1.4: - description: 🤗🧨 Stable Diffusion v1.4 - format: diffusers - repo_id: CompVis/stable-diffusion-v1-4 -diffusers-1.5: - description: 🤗🧨 Stable Diffusion v1.5 - format: diffusers - repo_id: runwayml/stable-diffusion-v1-5 - default: true -diffusers-1.5+mse: - description: 🤗🧨 Stable Diffusion v1.5 + MSE-finetuned VAE - format: diffusers - repo_id: runwayml/stable-diffusion-v1-5 - vae: - repo_id: stabilityai/sd-vae-ft-mse -diffusers-inpainting-1.5: - description: 🤗🧨 inpainting for Stable Diffusion v1.5 - format: diffusers - repo_id: runwayml/stable-diffusion-inpainting -stable-diffusion-1.5: - description: The newest Stable Diffusion version 1.5 weight file (4.27 GB) - weights: models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt - config: configs/stable-diffusion/v1-inference.yaml - width: 512 - height: 512 - vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt -stable-diffusion-1.4: - description: Stable Diffusion inference model version 1.4 - config: configs/stable-diffusion/v1-inference.yaml - weights: models/ldm/stable-diffusion-v1/sd-v1-4.ckpt - vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt - width: 512 - height: 512 -inpainting-1.5: - weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt - config: configs/stable-diffusion/v1-inpainting-inference.yaml - vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt - description: RunwayML SD 1.5 model optimized for inpainting - width: 512 - height: 512 diff --git a/invokeai/frontend/CLI/__init__.py b/invokeai/frontend/CLI/__init__.py index f8864bbe66..63d8d66246 100644 --- a/invokeai/frontend/CLI/__init__.py +++ b/invokeai/frontend/CLI/__init__.py @@ -1,4 +1,5 @@ """ Initialization file for invokeai.frontend.CLI """ + from .CLI import main as invokeai_command_line_interface # noqa: F401 diff --git a/invokeai/frontend/install/invokeai_configure.py b/invokeai/frontend/install/invokeai_configure.py index 2a62430b69..73132ee064 100644 --- a/invokeai/frontend/install/invokeai_configure.py +++ b/invokeai/frontend/install/invokeai_configure.py @@ -1,6 +1,7 @@ """ Wrapper for invokeai.backend.configure.invokeai_configure """ + from ...backend.install.invokeai_configure import main as invokeai_configure # noqa: F401 __all__ = ["invokeai_configure"] diff --git a/invokeai/frontend/install/invokeai_update.py b/invokeai/frontend/install/invokeai_update.py index 06c3520a89..32a6208e6b 100644 --- a/invokeai/frontend/install/invokeai_update.py +++ b/invokeai/frontend/install/invokeai_update.py @@ -2,6 +2,7 @@ Minimalist updater script. Prompts user for the tag or branch to update to and runs pip install . """ + import os import platform from distutils.version import LooseVersion diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index e23538ffd6..2f7fd0a1d0 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -6,47 +6,45 @@ """ This is the npyscreen frontend to the model installation application. -The work is actually done in backend code in model_install_backend.py. +It is currently named model_install2.py, but will ultimately replace model_install.py. 
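The docstring change above summarizes the rest of this diff: model_install.py no longer delegates to model_install_backend.py and is rebuilt around InstallHelper and the ModelInstallServiceBase service. A rough sketch of the resulting install flow, assuming the signatures that appear later in this diff (InstallHelper(config, logger), InstallSelections, UnifiedModelInfo, add_or_delete); orientation only, not a verbatim excerpt:

from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.install.install_helper import InstallHelper, InstallSelections, UnifiedModelInfo
from invokeai.backend.util.logging import InvokeAILogger

config = InvokeAIAppConfig.get_config()
helper = InstallHelper(config, InvokeAILogger.get_logger())

# Sources may be HuggingFace repo_ids, URLs, or local paths.
selections = InstallSelections(
    install_models=[UnifiedModelInfo(source="runwayml/stable-diffusion-v1-5")],
    remove_models=[],
)
helper.add_or_delete(selections)  # apply the whole batch of installs/removals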
""" import argparse import curses -import logging import sys -import textwrap import traceback +import warnings from argparse import Namespace -from multiprocessing import Process -from multiprocessing.connection import Connection, Pipe -from pathlib import Path from shutil import get_terminal_size -from typing import Optional +from typing import Any, Dict, List, Optional, Set import npyscreen import torch from npyscreen import widget from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.backend.install.model_install_backend import InstallSelections, ModelInstall, SchedulerPredictionType -from invokeai.backend.model_management import ModelManager, ModelType +from invokeai.app.services.model_install import ModelInstallServiceBase +from invokeai.backend.install.install_helper import InstallHelper, InstallSelections, UnifiedModelInfo +from invokeai.backend.model_manager import ModelType from invokeai.backend.util import choose_precision, choose_torch_device from invokeai.backend.util.logging import InvokeAILogger from invokeai.frontend.install.widgets import ( MIN_COLS, MIN_LINES, - BufferBox, CenteredTitleText, CyclingForm, MultiSelectColumns, SingleSelectColumns, TextBox, WindowTooSmallException, - select_stable_diffusion_config_file, set_min_terminal_size, ) +warnings.filterwarnings("ignore", category=UserWarning) # noqa: E402 config = InvokeAIAppConfig.get_config() -logger = InvokeAILogger.get_logger() +logger = InvokeAILogger.get_logger("ModelInstallService") +# logger.setLevel("WARNING") +# logger.setLevel('DEBUG') # build a table mapping all non-printable characters to None # for stripping control characters @@ -58,44 +56,42 @@ MAX_OTHER_MODELS = 72 def make_printable(s: str) -> str: - """Replace non-printable characters in a string""" + """Replace non-printable characters in a string.""" return s.translate(NOPRINT_TRANS_TABLE) class addModelsForm(CyclingForm, npyscreen.FormMultiPage): + """Main form for interactive TUI.""" + # for responsive resizing set to False, but this seems to cause a crash! FIX_MINIMUM_SIZE_WHEN_CREATED = True # for persistence current_tab = 0 - def __init__(self, parentApp, name, multipage=False, *args, **keywords): + def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, multipage: bool = False, **keywords: Any): self.multipage = multipage self.subprocess = None - super().__init__(parentApp=parentApp, name=name, *args, **keywords) # noqa: B026 # TODO: maybe this is bad? + super().__init__(parentApp=parentApp, name=name, **keywords) - def create(self): + def create(self) -> None: + self.installer = self.parentApp.install_helper.installer + self.model_labels = self._get_model_labels() self.keypress_timeout = 10 self.counter = 0 self.subprocess_connection = None - if not config.model_conf_path.exists(): - with open(config.model_conf_path, "w") as file: - print("# InvokeAI model configuration file", file=file) - self.installer = ModelInstall(config) - self.all_models = self.installer.all_models() - self.starter_models = self.installer.starter_models() - self.model_labels = self._get_model_labels() window_width, window_height = get_terminal_size() - self.nextrely -= 1 + # npyscreen has no typing hints + self.nextrely -= 1 # type: ignore self.add_widget_intelligent( npyscreen.FixedText, value="Use ctrl-N and ctrl-P to move to the ext and

revious fields. Cursor keys navigate, and selects.", editable=False, color="CAUTION", ) - self.nextrely += 1 + self.nextrely += 1 # type: ignore self.tabs = self.add_widget_intelligent( SingleSelectColumns, values=[ @@ -115,9 +111,9 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): ) self.tabs.on_changed = self._toggle_tables - top_of_table = self.nextrely + top_of_table = self.nextrely # type: ignore self.starter_pipelines = self.add_starter_pipelines() - bottom_of_table = self.nextrely + bottom_of_table = self.nextrely # type: ignore self.nextrely = top_of_table self.pipeline_models = self.add_pipeline_widgets( @@ -162,15 +158,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): self.nextrely = bottom_of_table + 1 - self.monitor = self.add_widget_intelligent( - BufferBox, - name="Log Messages", - editable=False, - max_height=6, - ) - self.nextrely += 1 - done_label = "APPLY CHANGES" back_label = "BACK" cancel_label = "CANCEL" current_position = self.nextrely @@ -186,14 +174,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): npyscreen.ButtonPress, name=cancel_label, when_pressed_function=self.on_cancel ) self.nextrely = current_position - self.ok_button = self.add_widget_intelligent( - npyscreen.ButtonPress, - name=done_label, - relx=(window_width - len(done_label)) // 2, - when_pressed_function=self.on_execute, - ) - label = "APPLY CHANGES & EXIT" + label = "APPLY CHANGES" self.nextrely = current_position self.done = self.add_widget_intelligent( npyscreen.ButtonPress, @@ -210,17 +192,16 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): ############# diffusers tab ########## def add_starter_pipelines(self) -> dict[str, npyscreen.widget]: """Add widgets responsible for selecting diffusers models""" - widgets = {} - models = self.all_models - starters = self.starter_models - starter_model_labels = self.model_labels + widgets: Dict[str, npyscreen.widget] = {} - self.installed_models = sorted([x for x in starters if models[x].installed]) + all_models = self.all_models # master dict of all models, indexed by key + model_list = [x for x in self.starter_models if all_models[x].type in ["main", "vae"]] + model_labels = [self.model_labels[x] for x in model_list] widgets.update( label1=self.add_widget_intelligent( CenteredTitleText, - name="Select from a starter set of Stable Diffusion models from HuggingFace.", + name="Select from a starter set of Stable Diffusion models from HuggingFace and Civitae.", editable=False, labelColor="CAUTION", ) @@ -230,23 +211,24 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): # if user has already installed some initial models, then don't patronize them # by showing more recommendations show_recommended = len(self.installed_models) == 0 - keys = [x for x in models.keys() if x in starters] + + checked = [ + model_list.index(x) + for x in model_list + if (show_recommended and all_models[x].recommended) or all_models[x].installed + ] widgets.update( models_selected=self.add_widget_intelligent( MultiSelectColumns, columns=1, name="Install Starter Models", - values=[starter_model_labels[x] for x in keys], - value=[ - keys.index(x) - for x in keys - if (show_recommended and models[x].recommended) or (x in self.installed_models) - ], - max_height=len(starters) + 1, + values=model_labels, + value=checked, + max_height=len(model_list) + 1, relx=4, scroll_exit=True, ), - models=keys, + models=model_list, ) self.nextrely += 1 @@ -257,14 +239,18 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): self, 
model_type: ModelType, window_width: int = 120, - install_prompt: str = None, - exclude: set = None, + install_prompt: Optional[str] = None, + exclude: Optional[Set[str]] = None, ) -> dict[str, npyscreen.widget]: """Generic code to create model selection widgets""" if exclude is None: exclude = set() - widgets = {} - model_list = [x for x in self.all_models if self.all_models[x].model_type == model_type and x not in exclude] + widgets: Dict[str, npyscreen.widget] = {} + all_models = self.all_models + model_list = sorted( + [x for x in all_models if all_models[x].type == model_type and x not in exclude], + key=lambda x: all_models[x].name or "", + ) model_labels = [self.model_labels[x] for x in model_list] show_recommended = len(self.installed_models) == 0 @@ -300,7 +286,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): value=[ model_list.index(x) for x in model_list - if (show_recommended and self.all_models[x].recommended) or self.all_models[x].installed + if (show_recommended and all_models[x].recommended) or all_models[x].installed ], max_height=len(model_list) // columns + 1, relx=4, @@ -324,7 +310,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): download_ids=self.add_widget_intelligent( TextBox, name="Additional URLs, or HuggingFace repo_ids to install (Space separated. Use shift-control-V to paste):", - max_height=4, + max_height=6, scroll_exit=True, editable=True, ) @@ -349,13 +335,13 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): return widgets - def resize(self): + def resize(self) -> None: super().resize() if s := self.starter_pipelines.get("models_selected"): - keys = [x for x in self.all_models.keys() if x in self.starter_models] - s.values = [self.model_labels[x] for x in keys] + if model_list := self.starter_pipelines.get("models"): + s.values = [self.model_labels[x] for x in model_list] - def _toggle_tables(self, value=None): + def _toggle_tables(self, value: List[int]) -> None: selected_tab = value[0] widgets = [ self.starter_pipelines, @@ -385,17 +371,18 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): self.display() def _get_model_labels(self) -> dict[str, str]: + """Return a list of trimmed labels for all models.""" window_width, window_height = get_terminal_size() checkbox_width = 4 spacing_width = 2 + result = {} models = self.all_models - label_width = max([len(models[x].name) for x in models]) + label_width = max([len(models[x].name or "") for x in self.starter_models]) description_width = window_width - label_width - checkbox_width - spacing_width - result = {} - for x in models.keys(): - description = models[x].description + for key in self.all_models: + description = models[key].description description = ( description[0 : description_width - 3] + "..." 
if description and len(description) > description_width @@ -403,7 +390,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): if description else "" ) - result[x] = f"%-{label_width}s %s" % (models[x].name, description) + result[key] = f"%-{label_width}s %s" % (models[key].name, description) + return result def _get_columns(self) -> int: @@ -413,50 +401,40 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): def confirm_deletions(self, selections: InstallSelections) -> bool: remove_models = selections.remove_models - if len(remove_models) > 0: - mods = "\n".join([ModelManager.parse_key(x)[0] for x in remove_models]) - return npyscreen.notify_ok_cancel( + if remove_models: + model_names = [self.all_models[x].name or "" for x in remove_models] + mods = "\n".join(model_names) + is_ok = npyscreen.notify_ok_cancel( f"These unchecked models will be deleted from disk. Continue?\n---------\n{mods}" ) + assert isinstance(is_ok, bool) # npyscreen doesn't have return type annotations + return is_ok else: return True - def on_execute(self): - self.marshall_arguments() - app = self.parentApp - if not self.confirm_deletions(app.install_selections): - return + @property + def all_models(self) -> Dict[str, UnifiedModelInfo]: + # npyscreen doesn't have typing hints + return self.parentApp.install_helper.all_models # type: ignore - self.monitor.entry_widget.buffer(["Processing..."], scroll_end=True) - self.ok_button.hidden = True - self.display() + @property + def starter_models(self) -> List[str]: + return self.parentApp.install_helper._starter_models # type: ignore - # TO DO: Spawn a worker thread, not a subprocess - parent_conn, child_conn = Pipe() - p = Process( - target=process_and_execute, - kwargs={ - "opt": app.program_opts, - "selections": app.install_selections, - "conn_out": child_conn, - }, - ) - p.start() - child_conn.close() - self.subprocess_connection = parent_conn - self.subprocess = p - app.install_selections = InstallSelections() + @property + def installed_models(self) -> List[str]: + return self.parentApp.install_helper._installed_models # type: ignore - def on_back(self): + def on_back(self) -> None: self.parentApp.switchFormPrevious() self.editing = False - def on_cancel(self): + def on_cancel(self) -> None: self.parentApp.setNextForm(None) self.parentApp.user_cancelled = True self.editing = False - def on_done(self): + def on_done(self) -> None: self.marshall_arguments() if not self.confirm_deletions(self.parentApp.install_selections): return @@ -464,77 +442,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): self.parentApp.user_cancelled = False self.editing = False - ########## This routine monitors the child process that is performing model installation and removal ##### - def while_waiting(self): - """Called during idle periods.
Main task is to update the Log Messages box with messages - from the child process that does the actual installation/removal""" - c = self.subprocess_connection - if not c: - return - - monitor_widget = self.monitor.entry_widget - while c.poll(): - try: - data = c.recv_bytes().decode("utf-8") - data.strip("\n") - - # processing child is requesting user input to select the - # right configuration file - if data.startswith("*need v2 config"): - _, model_path, *_ = data.split(":", 2) - self._return_v2_config(model_path) - - # processing child is done - elif data == "*done*": - self._close_subprocess_and_regenerate_form() - break - - # update the log message box - else: - data = make_printable(data) - data = data.replace("[A", "") - monitor_widget.buffer( - textwrap.wrap( - data, - width=monitor_widget.width, - subsequent_indent=" ", - ), - scroll_end=True, - ) - self.display() - except (EOFError, OSError): - self.subprocess_connection = None - - def _return_v2_config(self, model_path: str): - c = self.subprocess_connection - model_name = Path(model_path).name - message = select_stable_diffusion_config_file(model_name=model_name) - c.send_bytes(message.encode("utf-8")) - - def _close_subprocess_and_regenerate_form(self): - app = self.parentApp - self.subprocess_connection.close() - self.subprocess_connection = None - self.monitor.entry_widget.buffer(["** Action Complete **"]) - self.display() - - # rebuild the form, saving and restoring some of the fields that need to be preserved. - saved_messages = self.monitor.entry_widget.values - - app.main_form = app.addForm( - "MAIN", - addModelsForm, - name="Install Stable Diffusion Models", - multipage=self.multipage, - ) - app.switchForm("MAIN") - - app.main_form.monitor.entry_widget.values = saved_messages - app.main_form.monitor.entry_widget.buffer([""], scroll_end=True) - # app.main_form.pipeline_models['autoload_directory'].value = autoload_dir - # app.main_form.pipeline_models['autoscan_on_startup'].value = autoscan - - def marshall_arguments(self): + def marshall_arguments(self) -> None: """ Assemble arguments and store as attributes of the application: .starter_models: dict of model names to install from INITIAL_CONFIGURE.yaml @@ -547,7 +455,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): selections = self.parentApp.install_selections all_models = self.all_models - # Defined models (in INITIAL_CONFIG.yaml or models.yaml) to add/remove + # Defined models (in INITIAL_CONFIG.yaml or invokeai.db) to add/remove ui_sections = [ self.starter_pipelines, self.pipeline_models, @@ -564,46 +472,24 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): models_to_install = [x for x in selected if not self.all_models[x].installed] models_to_remove = [x for x in section["models"] if x not in selected and self.all_models[x].installed] selections.remove_models.extend(models_to_remove) - selections.install_models.extend( - all_models[x].path or all_models[x].repo_id - for x in models_to_install - if all_models[x].path or all_models[x].repo_id - ) + selections.install_models.extend([all_models[x] for x in models_to_install]) # models located in the 'download_ids" section for section in ui_sections: if downloads := section.get("download_ids"): - selections.install_models.extend(downloads.value.split()) - - # NOT NEEDED - DONE IN BACKEND NOW - # # special case for the ipadapter_models. If any of the adapters are - # # chosen, then we add the corresponding encoder(s) to the install list. 
- # section = self.ipadapter_models - # if section.get("models_selected"): - # selected_adapters = [ - # self.all_models[section["models"][x]].name for x in section.get("models_selected").value - # ] - # encoders = [] - # if any(["sdxl" in x for x in selected_adapters]): - # encoders.append("ip_adapter_sdxl_image_encoder") - # if any(["sd15" in x for x in selected_adapters]): - # encoders.append("ip_adapter_sd_image_encoder") - # for encoder in encoders: - # key = f"any/clip_vision/{encoder}" - # repo_id = f"InvokeAI/{encoder}" - # if key not in self.all_models: - # selections.install_models.append(repo_id) + models = [UnifiedModelInfo(source=x) for x in downloads.value.split()] + selections.install_models.extend(models) -class AddModelApplication(npyscreen.NPSAppManaged): - def __init__(self, opt): +class AddModelApplication(npyscreen.NPSAppManaged): # type: ignore + def __init__(self, opt: Namespace, install_helper: InstallHelper): super().__init__() self.program_opts = opt self.user_cancelled = False - # self.autoload_pending = True self.install_selections = InstallSelections() + self.install_helper = install_helper - def onStart(self): + def onStart(self) -> None: npyscreen.setTheme(npyscreen.Themes.DefaultTheme) self.main_form = self.addForm( "MAIN", @@ -613,138 +499,62 @@ class AddModelApplication(npyscreen.NPSAppManaged): ) -class StderrToMessage: - def __init__(self, connection: Connection): - self.connection = connection - - def write(self, data: str): - self.connection.send_bytes(data.encode("utf-8")) - - def flush(self): - pass +def list_models(installer: ModelInstallServiceBase, model_type: ModelType): + """Print out all models of type model_type.""" + models = installer.record_store.search_by_attr(model_type=model_type) + print(f"Installed models of type `{model_type}`:") + for model in models: + path = (config.models_path / model.path).resolve() + print(f"{model.name:40}{model.base.value:5}{model.type.value:8}{model.format.value:12}{path}") # -------------------------------------------------------- -def ask_user_for_prediction_type(model_path: Path, tui_conn: Connection = None) -> SchedulerPredictionType: - if tui_conn: - logger.debug("Waiting for user response...") - return _ask_user_for_pt_tui(model_path, tui_conn) - else: - return _ask_user_for_pt_cmdline(model_path) - - -def _ask_user_for_pt_cmdline(model_path: Path) -> Optional[SchedulerPredictionType]: - choices = [SchedulerPredictionType.Epsilon, SchedulerPredictionType.VPrediction, None] - print( - f""" -Please select the scheduler prediction type of the checkpoint named {model_path.name}: -[1] "epsilon" - most v1.5 models and v2 models trained on 512 pixel images -[2] "vprediction" - v2 models trained on 768 pixel images and a few v1.5 models -[3] Accept the best guess; you can fix it in the Web UI later -""" - ) - choice = None - ok = False - while not ok: - try: - choice = input("select [3]> ").strip() - if not choice: - return None - choice = choices[int(choice) - 1] - ok = True - except (ValueError, IndexError): - print(f"{choice} is not a valid choice") - except EOFError: - return - return choice - - -def _ask_user_for_pt_tui(model_path: Path, tui_conn: Connection) -> SchedulerPredictionType: - tui_conn.send_bytes(f"*need v2 config for:{model_path}".encode("utf-8")) - # note that we don't do any status checking here - response = tui_conn.recv_bytes().decode("utf-8") - if response is None: - return None - elif response == "epsilon": - return SchedulerPredictionType.epsilon - elif response == "v": - return 
SchedulerPredictionType.VPrediction - elif response == "guess": - return None - else: - return None - - -# -------------------------------------------------------- -def process_and_execute( - opt: Namespace, - selections: InstallSelections, - conn_out: Connection = None, -): - # need to reinitialize config in subprocess - config = InvokeAIAppConfig.get_config() - args = ["--root", opt.root] if opt.root else [] - config.parse_args(args) - - # set up so that stderr is sent to conn_out - if conn_out: - translator = StderrToMessage(conn_out) - sys.stderr = translator - sys.stdout = translator - logger = InvokeAILogger.get_logger() - logger.handlers.clear() - logger.addHandler(logging.StreamHandler(translator)) - - installer = ModelInstall(config, prediction_type_helper=lambda x: ask_user_for_prediction_type(x, conn_out)) - installer.install(selections) - - if conn_out: - conn_out.send_bytes("*done*".encode("utf-8")) - conn_out.close() - - -# -------------------------------------------------------- -def select_and_download_models(opt: Namespace): +def select_and_download_models(opt: Namespace) -> None: + """Prompt user for install/delete selections and execute.""" precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device())) + # unsure how to avoid a typing complaint in the next line: config.precision is an enumerated Literal config.precision = precision - installer = ModelInstall(config, prediction_type_helper=ask_user_for_prediction_type) + install_helper = InstallHelper(config, logger) + installer = install_helper.installer + if opt.list_models: - installer.list_models(opt.list_models) + list_models(installer, opt.list_models) + elif opt.add or opt.delete: - selections = InstallSelections(install_models=opt.add or [], remove_models=opt.delete or []) - installer.install(selections) + selections = InstallSelections( + install_models=[UnifiedModelInfo(source=x) for x in (opt.add or [])], remove_models=opt.delete or [] + ) + install_helper.add_or_delete(selections) + elif opt.default_only: - selections = InstallSelections(install_models=installer.default_model()) - installer.install(selections) + default_model = install_helper.default_model() + assert default_model is not None + selections = InstallSelections(install_models=[default_model]) + install_helper.add_or_delete(selections) + elif opt.yes_to_all: - selections = InstallSelections(install_models=installer.recommended_models()) - installer.install(selections) + selections = InstallSelections(install_models=install_helper.recommended_models()) + install_helper.add_or_delete(selections) # this is where the TUI is called else: - # needed to support the probe() method running under a subprocess - torch.multiprocessing.set_start_method("spawn") - if not set_min_terminal_size(MIN_COLS, MIN_LINES): raise WindowTooSmallException( "Could not increase terminal size. Try running again with a larger window or smaller font size." 
) - installApp = AddModelApplication(opt) + installApp = AddModelApplication(opt, install_helper) try: installApp.run() - except KeyboardInterrupt as e: - if hasattr(installApp, "main_form"): - if installApp.main_form.subprocess and installApp.main_form.subprocess.is_alive(): - logger.info("Terminating subprocesses") - installApp.main_form.subprocess.terminate() - installApp.main_form.subprocess = None - raise e - process_and_execute(opt, installApp.install_selections) + except KeyboardInterrupt: + print("Aborted...") + sys.exit(-1) + + install_helper.add_or_delete(installApp.install_selections) # ------------------------------------- -def main(): +def main() -> None: parser = argparse.ArgumentParser(description="InvokeAI model downloader") parser.add_argument( "--add", @@ -754,7 +564,7 @@ def main(): parser.add_argument( "--delete", nargs="*", - help="List of names of models to idelete", + help="List of names of models to delete. Use type:name to disambiguate, as in `controlnet:my_model`", ) parser.add_argument( "--full-precision", @@ -781,14 +591,6 @@ def main(): choices=[x.value for x in ModelType], help="list installed models", ) - parser.add_argument( - "--config_file", - "-c", - dest="config_file", - type=str, - default=None, - help="path to configuration file to create", - ) parser.add_argument( "--root_dir", dest="root", diff --git a/invokeai/frontend/install/model_install2.py b/invokeai/frontend/install/model_install2.py deleted file mode 100644 index 6eb480c8d9..0000000000 --- a/invokeai/frontend/install/model_install2.py +++ /dev/null @@ -1,645 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein) -# Before running stable-diffusion on an internet-isolated machine, -# run this script from one with internet connectivity. The -# two machines must share a common .cache directory. - -""" -This is the npyscreen frontend to the model installation application. -It is currently named model_install2.py, but will ultimately replace model_install.py. 
-""" - -import argparse -import curses -import sys -import traceback -import warnings -from argparse import Namespace -from shutil import get_terminal_size -from typing import Any, Dict, List, Optional, Set - -import npyscreen -import torch -from npyscreen import widget - -from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.app.services.model_install import ModelInstallService -from invokeai.backend.install.install_helper import InstallHelper, InstallSelections, UnifiedModelInfo -from invokeai.backend.model_manager import ModelType -from invokeai.backend.util import choose_precision, choose_torch_device -from invokeai.backend.util.logging import InvokeAILogger -from invokeai.frontend.install.widgets import ( - MIN_COLS, - MIN_LINES, - CenteredTitleText, - CyclingForm, - MultiSelectColumns, - SingleSelectColumns, - TextBox, - WindowTooSmallException, - set_min_terminal_size, -) - -warnings.filterwarnings("ignore", category=UserWarning) # noqa: E402 -config = InvokeAIAppConfig.get_config() -logger = InvokeAILogger.get_logger("ModelInstallService") -logger.setLevel("WARNING") -# logger.setLevel('DEBUG') - -# build a table mapping all non-printable characters to None -# for stripping control characters -# from https://stackoverflow.com/questions/92438/stripping-non-printable-characters-from-a-string-in-python -NOPRINT_TRANS_TABLE = {i: None for i in range(0, sys.maxunicode + 1) if not chr(i).isprintable()} - -# maximum number of installed models we can display before overflowing vertically -MAX_OTHER_MODELS = 72 - - -def make_printable(s: str) -> str: - """Replace non-printable characters in a string.""" - return s.translate(NOPRINT_TRANS_TABLE) - - -class addModelsForm(CyclingForm, npyscreen.FormMultiPage): - """Main form for interactive TUI.""" - - # for responsive resizing set to False, but this seems to cause a crash! - FIX_MINIMUM_SIZE_WHEN_CREATED = True - - # for persistence - current_tab = 0 - - def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, multipage: bool = False, **keywords: Any): - self.multipage = multipage - self.subprocess = None - super().__init__(parentApp=parentApp, name=name, **keywords) - - def create(self) -> None: - self.installer = self.parentApp.install_helper.installer - self.model_labels = self._get_model_labels() - self.keypress_timeout = 10 - self.counter = 0 - self.subprocess_connection = None - - window_width, window_height = get_terminal_size() - - # npyscreen has no typing hints - self.nextrely -= 1 # type: ignore - self.add_widget_intelligent( - npyscreen.FixedText, - value="Use ctrl-N and ctrl-P to move to the ext and

revious fields. Cursor keys navigate, and selects.", - editable=False, - color="CAUTION", - ) - self.nextrely += 1 # type: ignore - self.tabs = self.add_widget_intelligent( - SingleSelectColumns, - values=[ - "STARTERS", - "MAINS", - "CONTROLNETS", - "T2I-ADAPTERS", - "IP-ADAPTERS", - "LORAS", - "TI EMBEDDINGS", - ], - value=[self.current_tab], - columns=7, - max_height=2, - relx=8, - scroll_exit=True, - ) - self.tabs.on_changed = self._toggle_tables - - top_of_table = self.nextrely # type: ignore - self.starter_pipelines = self.add_starter_pipelines() - bottom_of_table = self.nextrely # type: ignore - - self.nextrely = top_of_table - self.pipeline_models = self.add_pipeline_widgets( - model_type=ModelType.Main, window_width=window_width, exclude=self.starter_models - ) - # self.pipeline_models['autoload_pending'] = True - bottom_of_table = max(bottom_of_table, self.nextrely) - - self.nextrely = top_of_table - self.controlnet_models = self.add_model_widgets( - model_type=ModelType.ControlNet, - window_width=window_width, - ) - bottom_of_table = max(bottom_of_table, self.nextrely) - - self.nextrely = top_of_table - self.t2i_models = self.add_model_widgets( - model_type=ModelType.T2IAdapter, - window_width=window_width, - ) - bottom_of_table = max(bottom_of_table, self.nextrely) - self.nextrely = top_of_table - self.ipadapter_models = self.add_model_widgets( - model_type=ModelType.IPAdapter, - window_width=window_width, - ) - bottom_of_table = max(bottom_of_table, self.nextrely) - - self.nextrely = top_of_table - self.lora_models = self.add_model_widgets( - model_type=ModelType.Lora, - window_width=window_width, - ) - bottom_of_table = max(bottom_of_table, self.nextrely) - - self.nextrely = top_of_table - self.ti_models = self.add_model_widgets( - model_type=ModelType.TextualInversion, - window_width=window_width, - ) - bottom_of_table = max(bottom_of_table, self.nextrely) - - self.nextrely = bottom_of_table + 1 - - self.nextrely += 1 - back_label = "BACK" - cancel_label = "CANCEL" - current_position = self.nextrely - if self.multipage: - self.back_button = self.add_widget_intelligent( - npyscreen.ButtonPress, - name=back_label, - when_pressed_function=self.on_back, - ) - else: - self.nextrely = current_position - self.cancel_button = self.add_widget_intelligent( - npyscreen.ButtonPress, name=cancel_label, when_pressed_function=self.on_cancel - ) - self.nextrely = current_position - - label = "APPLY CHANGES" - self.nextrely = current_position - self.done = self.add_widget_intelligent( - npyscreen.ButtonPress, - name=label, - relx=window_width - len(label) - 15, - when_pressed_function=self.on_done, - ) - - # This restores the selected page on return from an installation - for _i in range(1, self.current_tab + 1): - self.tabs.h_cursor_line_down(1) - self._toggle_tables([self.current_tab]) - - ############# diffusers tab ########## - def add_starter_pipelines(self) -> dict[str, npyscreen.widget]: - """Add widgets responsible for selecting diffusers models""" - widgets: Dict[str, npyscreen.widget] = {} - - all_models = self.all_models # master dict of all models, indexed by key - model_list = [x for x in self.starter_models if all_models[x].type in ["main", "vae"]] - model_labels = [self.model_labels[x] for x in model_list] - - widgets.update( - label1=self.add_widget_intelligent( - CenteredTitleText, - name="Select from a starter set of Stable Diffusion models from HuggingFace and Civitae.", - editable=False, - labelColor="CAUTION", - ) - ) - - self.nextrely -= 1 - # if user has already 
installed some initial models, then don't patronize them - # by showing more recommendations - show_recommended = len(self.installed_models) == 0 - - checked = [ - model_list.index(x) - for x in model_list - if (show_recommended and all_models[x].recommended) or all_models[x].installed - ] - widgets.update( - models_selected=self.add_widget_intelligent( - MultiSelectColumns, - columns=1, - name="Install Starter Models", - values=model_labels, - value=checked, - max_height=len(model_list) + 1, - relx=4, - scroll_exit=True, - ), - models=model_list, - ) - - self.nextrely += 1 - return widgets - - ############# Add a set of model install widgets ######## - def add_model_widgets( - self, - model_type: ModelType, - window_width: int = 120, - install_prompt: Optional[str] = None, - exclude: Optional[Set[str]] = None, - ) -> dict[str, npyscreen.widget]: - """Generic code to create model selection widgets""" - if exclude is None: - exclude = set() - widgets: Dict[str, npyscreen.widget] = {} - all_models = self.all_models - model_list = sorted( - [x for x in all_models if all_models[x].type == model_type and x not in exclude], - key=lambda x: all_models[x].name or "", - ) - model_labels = [self.model_labels[x] for x in model_list] - - show_recommended = len(self.installed_models) == 0 - truncated = False - if len(model_list) > 0: - max_width = max([len(x) for x in model_labels]) - columns = window_width // (max_width + 8) # 8 characters for "[x] " and padding - columns = min(len(model_list), columns) or 1 - prompt = ( - install_prompt - or f"Select the desired {model_type.value.title()} models to install. Unchecked models will be purged from disk." - ) - - widgets.update( - label1=self.add_widget_intelligent( - CenteredTitleText, - name=prompt, - editable=False, - labelColor="CAUTION", - ) - ) - - if len(model_labels) > MAX_OTHER_MODELS: - model_labels = model_labels[0:MAX_OTHER_MODELS] - truncated = True - - widgets.update( - models_selected=self.add_widget_intelligent( - MultiSelectColumns, - columns=columns, - name=f"Install {model_type} Models", - values=model_labels, - value=[ - model_list.index(x) - for x in model_list - if (show_recommended and all_models[x].recommended) or all_models[x].installed - ], - max_height=len(model_list) // columns + 1, - relx=4, - scroll_exit=True, - ), - models=model_list, - ) - - if truncated: - widgets.update( - warning_message=self.add_widget_intelligent( - npyscreen.FixedText, - value=f"Too many models to display (max={MAX_OTHER_MODELS}). Some are not displayed.", - editable=False, - color="CAUTION", - ) - ) - - self.nextrely += 1 - widgets.update( - download_ids=self.add_widget_intelligent( - TextBox, - name="Additional URLs, or HuggingFace repo_ids to install (Space separated. Use shift-control-V to paste):", - max_height=6, - scroll_exit=True, - editable=True, - ) - ) - return widgets - - ### Tab for arbitrary diffusers widgets ### - def add_pipeline_widgets( - self, - model_type: ModelType = ModelType.Main, - window_width: int = 120, - **kwargs, - ) -> dict[str, npyscreen.widget]: - """Similar to add_model_widgets() but adds some additional widgets at the bottom - to support the autoload directory""" - widgets = self.add_model_widgets( - model_type=model_type, - window_width=window_width, - install_prompt=f"Installed {model_type.value.title()} models. Unchecked models in the InvokeAI root directory will be deleted. 
Enter URLs, paths or repo_ids to import.", - **kwargs, - ) - - return widgets - - def resize(self) -> None: - super().resize() - if s := self.starter_pipelines.get("models_selected"): - if model_list := self.starter_pipelines.get("models"): - s.values = [self.model_labels[x] for x in model_list] - - def _toggle_tables(self, value: List[int]) -> None: - selected_tab = value[0] - widgets = [ - self.starter_pipelines, - self.pipeline_models, - self.controlnet_models, - self.t2i_models, - self.ipadapter_models, - self.lora_models, - self.ti_models, - ] - - for group in widgets: - for _k, v in group.items(): - try: - v.hidden = True - v.editable = False - except Exception: - pass - for _k, v in widgets[selected_tab].items(): - try: - v.hidden = False - if not isinstance(v, (npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)): - v.editable = True - except Exception: - pass - self.__class__.current_tab = selected_tab # for persistence - self.display() - - def _get_model_labels(self) -> dict[str, str]: - """Return a list of trimmed labels for all models.""" - window_width, window_height = get_terminal_size() - checkbox_width = 4 - spacing_width = 2 - result = {} - - models = self.all_models - label_width = max([len(models[x].name or "") for x in self.starter_models]) - description_width = window_width - label_width - checkbox_width - spacing_width - - for key in self.all_models: - description = models[key].description - description = ( - description[0 : description_width - 3] + "..." - if description and len(description) > description_width - else description - if description - else "" - ) - result[key] = f"%-{label_width}s %s" % (models[key].name, description) - - return result - - def _get_columns(self) -> int: - window_width, window_height = get_terminal_size() - cols = 4 if window_width > 240 else 3 if window_width > 160 else 2 if window_width > 80 else 1 - return min(cols, len(self.installed_models)) - - def confirm_deletions(self, selections: InstallSelections) -> bool: - remove_models = selections.remove_models - if remove_models: - model_names = [self.all_models[x].name or "" for x in remove_models] - mods = "\n".join(model_names) - is_ok = npyscreen.notify_ok_cancel( - f"These unchecked models will be deleted from disk. 
Continue?\n---------\n{mods}" - ) - assert isinstance(is_ok, bool) # npyscreen doesn't have return type annotations - return is_ok - else: - return True - - @property - def all_models(self) -> Dict[str, UnifiedModelInfo]: - # npyscreen doesn't having typing hints - return self.parentApp.install_helper.all_models # type: ignore - - @property - def starter_models(self) -> List[str]: - return self.parentApp.install_helper._starter_models # type: ignore - - @property - def installed_models(self) -> List[str]: - return self.parentApp.install_helper._installed_models # type: ignore - - def on_back(self) -> None: - self.parentApp.switchFormPrevious() - self.editing = False - - def on_cancel(self) -> None: - self.parentApp.setNextForm(None) - self.parentApp.user_cancelled = True - self.editing = False - - def on_done(self) -> None: - self.marshall_arguments() - if not self.confirm_deletions(self.parentApp.install_selections): - return - self.parentApp.setNextForm(None) - self.parentApp.user_cancelled = False - self.editing = False - - def marshall_arguments(self) -> None: - """ - Assemble arguments and store as attributes of the application: - .starter_models: dict of model names to install from INITIAL_CONFIGURE.yaml - True => Install - False => Remove - .scan_directory: Path to a directory of models to scan and import - .autoscan_on_startup: True if invokeai should scan and import at startup time - .import_model_paths: list of URLs, repo_ids and file paths to import - """ - selections = self.parentApp.install_selections - all_models = self.all_models - - # Defined models (in INITIAL_CONFIG.yaml or models.yaml) to add/remove - ui_sections = [ - self.starter_pipelines, - self.pipeline_models, - self.controlnet_models, - self.t2i_models, - self.ipadapter_models, - self.lora_models, - self.ti_models, - ] - for section in ui_sections: - if "models_selected" not in section: - continue - selected = {section["models"][x] for x in section["models_selected"].value} - models_to_install = [x for x in selected if not self.all_models[x].installed] - models_to_remove = [x for x in section["models"] if x not in selected and self.all_models[x].installed] - selections.remove_models.extend(models_to_remove) - selections.install_models.extend([all_models[x] for x in models_to_install]) - - # models located in the 'download_ids" section - for section in ui_sections: - if downloads := section.get("download_ids"): - models = [UnifiedModelInfo(source=x) for x in downloads.value.split()] - selections.install_models.extend(models) - - -class AddModelApplication(npyscreen.NPSAppManaged): # type: ignore - def __init__(self, opt: Namespace, install_helper: InstallHelper): - super().__init__() - self.program_opts = opt - self.user_cancelled = False - self.install_selections = InstallSelections() - self.install_helper = install_helper - - def onStart(self) -> None: - npyscreen.setTheme(npyscreen.Themes.DefaultTheme) - self.main_form = self.addForm( - "MAIN", - addModelsForm, - name="Install Stable Diffusion Models", - cycle_widgets=False, - ) - - -def list_models(installer: ModelInstallService, model_type: ModelType): - """Print out all models of type model_type.""" - models = installer.record_store.search_by_attr(model_type=model_type) - print(f"Installed models of type `{model_type}`:") - for model in models: - path = (config.models_path / model.path).resolve() - print(f"{model.name:40}{model.base.value:14}{path}") - - -# -------------------------------------------------------- -def select_and_download_models(opt: 
Namespace) -> None: - """Prompt user for install/delete selections and execute.""" - precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device())) - # unsure how to avoid a typing complaint in the next line: config.precision is an enumerated Literal - config.precision = precision # type: ignore - install_helper = InstallHelper(config, logger) - installer = install_helper.installer - - if opt.list_models: - list_models(installer, opt.list_models) - - elif opt.add or opt.delete: - selections = InstallSelections( - install_models=[UnifiedModelInfo(source=x) for x in (opt.add or [])], remove_models=opt.delete or [] - ) - install_helper.add_or_delete(selections) - - elif opt.default_only: - selections = InstallSelections(install_models=[install_helper.default_model()]) - install_helper.add_or_delete(selections) - - elif opt.yes_to_all: - selections = InstallSelections(install_models=install_helper.recommended_models()) - install_helper.add_or_delete(selections) - - # this is where the TUI is called - else: - if not set_min_terminal_size(MIN_COLS, MIN_LINES): - raise WindowTooSmallException( - "Could not increase terminal size. Try running again with a larger window or smaller font size." - ) - - installApp = AddModelApplication(opt, install_helper) - try: - installApp.run() - except KeyboardInterrupt: - print("Aborted...") - sys.exit(-1) - - install_helper.add_or_delete(installApp.install_selections) - - -# ------------------------------------- -def main() -> None: - parser = argparse.ArgumentParser(description="InvokeAI model downloader") - parser.add_argument( - "--add", - nargs="*", - help="List of URLs, local paths or repo_ids of models to install", - ) - parser.add_argument( - "--delete", - nargs="*", - help="List of names of models to delete. Use type:name to disambiguate, as in `controlnet:my_model`", - ) - parser.add_argument( - "--full-precision", - dest="full_precision", - action=argparse.BooleanOptionalAction, - type=bool, - default=False, - help="use 32-bit weights instead of faster 16-bit weights", - ) - parser.add_argument( - "--yes", - "-y", - dest="yes_to_all", - action="store_true", - help='answer "yes" to all prompts', - ) - parser.add_argument( - "--default_only", - action="store_true", - help="Only install the default model", - ) - parser.add_argument( - "--list-models", - choices=[x.value for x in ModelType], - help="list installed models", - ) - parser.add_argument( - "--root_dir", - dest="root", - type=str, - default=None, - help="path to root of install directory", - ) - opt = parser.parse_args() - - invoke_args = [] - if opt.root: - invoke_args.extend(["--root", opt.root]) - if opt.full_precision: - invoke_args.extend(["--precision", "float32"]) - config.parse_args(invoke_args) - logger = InvokeAILogger().get_logger(config=config) - - if not config.model_conf_path.exists(): - logger.info("Your InvokeAI root directory is not set up. Calling invokeai-configure.") - from invokeai.frontend.install.invokeai_configure import invokeai_configure - - invokeai_configure() - sys.exit(0) - - try: - select_and_download_models(opt) - except AssertionError as e: - logger.error(e) - sys.exit(-1) - except KeyboardInterrupt: - curses.nocbreak() - curses.echo() - curses.endwin() - logger.info("Goodbye! Come back soon.") - except WindowTooSmallException as e: - logger.error(str(e)) - except widget.NotEnoughSpaceForWidget as e: - if str(e).startswith("Height of 1 allocated"): - logger.error("Insufficient vertical space for the interface. 
Please make your window taller and try again") - input("Press any key to continue...") - except Exception as e: - if str(e).startswith("addwstr"): - logger.error( - "Insufficient horizontal space for the interface. Please make your window wider and try again." - ) - else: - print(f"An exception has occurred: {str(e)} Details:") - print(traceback.format_exc(), file=sys.stderr) - input("Press any key to continue...") - - -# ------------------------------------- -if __name__ == "__main__": - main() diff --git a/invokeai/frontend/install/widgets.py b/invokeai/frontend/install/widgets.py index 5905ae29da..49ca1e3583 100644 --- a/invokeai/frontend/install/widgets.py +++ b/invokeai/frontend/install/widgets.py @@ -1,6 +1,7 @@ """ Widget class definitions used by model_select.py, merge_diffusers.py and textual_inversion.py """ + import curses import math import os @@ -267,6 +268,17 @@ class SingleSelectWithChanged(npyscreen.SelectOne): self.on_changed(self.value) +class CheckboxWithChanged(npyscreen.Checkbox): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.on_changed = None + + def whenToggled(self): + super().whenToggled() + if self.on_changed: + self.on_changed(self.value) + + class SingleSelectColumnsSimple(SelectColumnBase, SingleSelectWithChanged): """Row of radio buttons. Spacebar to select.""" diff --git a/invokeai/frontend/merge/__init__.py b/invokeai/frontend/merge/__init__.py index a18da9c0d4..4e56b146f4 100644 --- a/invokeai/frontend/merge/__init__.py +++ b/invokeai/frontend/merge/__init__.py @@ -1,4 +1,5 @@ """ Initialization file for invokeai.frontend.merge """ + from .merge_diffusers import main as invokeai_merge_diffusers # noqa: F401 diff --git a/invokeai/frontend/merge/merge_diffusers.py b/invokeai/frontend/merge/merge_diffusers.py index 92b98b52f9..78b5df859b 100644 --- a/invokeai/frontend/merge/merge_diffusers.py +++ b/invokeai/frontend/merge/merge_diffusers.py @@ -4,22 +4,43 @@ used to merge 2-3 models together and create a new InvokeAI-registered diffusion Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team """ + import argparse import curses +import re import sys from argparse import Namespace from pathlib import Path -from typing import List +from typing import List, Optional, Tuple import npyscreen from npyscreen import widget -import invokeai.backend.util.logging as logger from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.backend.model_management import BaseModelType, ModelManager, ModelMerger, ModelType +from invokeai.app.services.download import DownloadQueueService +from invokeai.app.services.image_files.image_files_disk import DiskImageFileStorage +from invokeai.app.services.model_install import ModelInstallService +from invokeai.app.services.model_metadata import ModelMetadataStoreSQL +from invokeai.app.services.model_records import ModelRecordServiceBase, ModelRecordServiceSQL +from invokeai.app.services.shared.sqlite.sqlite_util import init_db +from invokeai.backend.model_manager import ( + BaseModelType, + ModelFormat, + ModelType, + ModelVariantType, +) +from invokeai.backend.model_manager.merge import ModelMerger +from invokeai.backend.util.logging import InvokeAILogger from invokeai.frontend.install.widgets import FloatTitleSlider, SingleSelectColumns, TextBox config = InvokeAIAppConfig.get_config() +logger = InvokeAILogger.get_logger() + +BASE_TYPES = [ + (BaseModelType.StableDiffusion1, "Models Built on SD-1.x"), + (BaseModelType.StableDiffusion2, "Models Built on SD-2.x"), + 
(BaseModelType.StableDiffusionXL, "Models Built on SDXL"), +] def _parse_args() -> Namespace: @@ -48,7 +69,7 @@ def _parse_args() -> Namespace: parser.add_argument( "--base_model", type=str, - choices=[x.value for x in BaseModelType], + choices=[x[0].value for x in BASE_TYPES], help="The base model shared by the models to be merged", ) parser.add_argument( @@ -98,17 +119,17 @@ class mergeModelsForm(npyscreen.FormMultiPageAction): super().__init__(parentApp, name) @property - def model_manager(self): - return self.parentApp.model_manager + def record_store(self): + return self.parentApp.record_store def afterEditing(self): self.parentApp.setNextForm(None) def create(self): window_height, window_width = curses.initscr().getmaxyx() - - self.model_names = self.get_model_names() self.current_base = 0 + self.models = self.get_models(BASE_TYPES[self.current_base][0]) + self.model_names = [x[1] for x in self.models] max_width = max([len(x) for x in self.model_names]) max_width += 6 horizontal_layout = max_width * 3 < window_width @@ -128,11 +149,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction): self.nextrely += 1 self.base_select = self.add_widget_intelligent( SingleSelectColumns, - values=[ - "Models Built on SD-1.x", - "Models Built on SD-2.x", - "Models Built on SDXL", - ], + values=[x[1] for x in BASE_TYPES], value=[self.current_base], columns=4, max_height=2, @@ -263,21 +280,20 @@ class mergeModelsForm(npyscreen.FormMultiPageAction): sys.exit(0) def marshall_arguments(self) -> dict: - model_names = self.model_names + model_keys = [x[0] for x in self.models] models = [ - model_names[self.model1.value[0]], - model_names[self.model2.value[0]], + model_keys[self.model1.value[0]], + model_keys[self.model2.value[0]], ] if self.model3.value[0] > 0: - models.append(model_names[self.model3.value[0] - 1]) + models.append(model_keys[self.model3.value[0] - 1]) interp = "add_difference" else: interp = self.interpolations[self.merge_method.value[0]] - bases = ["sd-1", "sd-2", "sdxl"] args = { - "model_names": models, - "base_model": BaseModelType(bases[self.base_select.value[0]]), + "model_keys": models, + "base_model": tuple(BaseModelType)[self.base_select.value[0]], "alpha": self.alpha.value, "interp": interp, "force": self.force.value, @@ -311,18 +327,18 @@ class mergeModelsForm(npyscreen.FormMultiPageAction): else: return True - def get_model_names(self, base_model: BaseModelType = BaseModelType.StableDiffusion1) -> List[str]: - model_names = [ - info["model_name"] - for info in self.model_manager.list_models(model_type=ModelType.Main, base_model=base_model) - if info["model_format"] == "diffusers" + def get_models(self, base_model: Optional[BaseModelType] = None) -> List[Tuple[str, str]]: # key to name + models = [ + (x.key, x.name) + for x in self.record_store.search_by_attr(model_type=ModelType.Main, base_model=base_model) + if x.format == ModelFormat("diffusers") and x.variant == ModelVariantType("normal") ] - return sorted(model_names) + return sorted(models, key=lambda x: x[1]) - def _populate_models(self, value=None): - bases = ["sd-1", "sd-2", "sdxl"] - base_model = BaseModelType(bases[value[0]]) - self.model_names = self.get_model_names(base_model) + def _populate_models(self, value: List[int]): + base_model = BASE_TYPES[value[0]][0] + self.models = self.get_models(base_model) + self.model_names = [x[1] for x in self.models] models_plus_none = self.model_names.copy() models_plus_none.insert(0, "None") @@ -334,24 +350,24 @@ class mergeModelsForm(npyscreen.FormMultiPageAction): class 
Mergeapp(npyscreen.NPSAppManaged): -    def __init__(self, model_manager: ModelManager): +    def __init__(self, record_store: ModelRecordServiceBase): super().__init__() -        self.model_manager = model_manager +        self.record_store = record_store def onStart(self): npyscreen.setTheme(npyscreen.Themes.ElegantTheme) self.main = self.addForm("MAIN", mergeModelsForm, name="Merge Models Settings") -def run_gui(args: Namespace): -    model_manager = ModelManager(config.model_conf_path) -    mergeapp = Mergeapp(model_manager) +def run_gui(args: Namespace) -> None: +    record_store: ModelRecordServiceBase = get_config_store() +    mergeapp = Mergeapp(record_store) mergeapp.run() args = mergeapp.merge_arguments -    merger = ModelMerger(model_manager) +    merger = get_model_merger(record_store) merger.merge_diffusion_models_and_save(**args) -    logger.info(f'Models merged into new model: "{args["merged_model_name"]}".') +    merged_model_name = args["merged_model_name"] +    logger.info(f'Models merged into new model: "{merged_model_name}".') def run_cli(args: Namespace): @@ -364,20 +380,54 @@ def run_cli(args: Namespace): args.merged_model_name = "+".join(args.model_names) logger.info(f'No --merged_model_name provided. Defaulting to "{args.merged_model_name}"') -    model_manager = ModelManager(config.model_conf_path) +    record_store: ModelRecordServiceBase = get_config_store() assert ( -        not model_manager.model_exists(args.merged_model_name, args.base_model, ModelType.Main) or args.clobber +        len(record_store.search_by_attr(args.merged_model_name, args.base_model, ModelType.Main)) == 0 or args.clobber ), f'A model named "{args.merged_model_name}" already exists. Use --clobber to overwrite.' -    merger = ModelMerger(model_manager) -    merger.merge_diffusion_models_and_save(**vars(args)) +    merger = get_model_merger(record_store) +    model_keys = [] +    for name in args.model_names: +        if len(name) == 32 and re.match(r"^[0-9a-f]{32}$", name): +            model_keys.append(name) +        else: +            models = record_store.search_by_attr( +                model_name=name, model_type=ModelType.Main, base_model=BaseModelType(args.base_model) +            ) +            assert len(models) > 0, f"{name}: Unknown model" +            assert len(models) < 2, f"{name}: More than one model by this name. Please specify the model key instead."
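# Aside: a minimal runnable sketch of the key-vs-name test this loop performs.
# It assumes model keys are exactly 32 lowercase hex characters, as the length
# and pattern check above imply; "looks_like_model_key" is a hypothetical
# helper name, since the loop inlines the same logic.
import re

def looks_like_model_key(identifier: str) -> bool:
    # fullmatch anchors the pattern, so partial hex prefixes do not qualify
    return re.fullmatch(r"[0-9a-f]{32}", identifier) is not None

assert looks_like_model_key("0123456789abcdef0123456789abcdef")  # passed through as-is
assert not looks_like_model_key("stable-diffusion-1.5")  # resolved by name via search_by_attr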
+ model_keys.append(models[0].key) + + merger.merge_diffusion_models_and_save( + alpha=args.alpha, + model_keys=model_keys, + merged_model_name=args.merged_model_name, + interp=args.interp, + force=args.force, + ) logger.info(f'Models merged into new model: "{args.merged_model_name}".') +def get_config_store() -> ModelRecordServiceSQL: + output_path = config.output_path + assert output_path is not None + image_files = DiskImageFileStorage(output_path / "images") + db = init_db(config=config, logger=InvokeAILogger.get_logger(), image_files=image_files) + return ModelRecordServiceSQL(db, ModelMetadataStoreSQL(db)) + + +def get_model_merger(record_store: ModelRecordServiceBase) -> ModelMerger: + installer = ModelInstallService(app_config=config, record_store=record_store, download_queue=DownloadQueueService()) + installer.start() + return ModelMerger(installer) + + def main(): args = _parse_args() if args.root_dir: config.parse_args(["--root", str(args.root_dir)]) + else: + config.parse_args([]) try: if args.front_end: @@ -386,7 +436,7 @@ def main(): run_cli(args) except widget.NotEnoughSpaceForWidget as e: if str(e).startswith("Height of 1 allocated"): - logger.error("You need to have at least two diffusers models defined in models.yaml in order to merge") + logger.error("You need to have at least two diffusers models in order to merge") else: logger.error("Not enough room for the user interface. Try making this window larger.") sys.exit(-1) diff --git a/invokeai/frontend/merge/merge_diffusers2.py b/invokeai/frontend/merge/merge_diffusers2.py deleted file mode 100644 index b365198f87..0000000000 --- a/invokeai/frontend/merge/merge_diffusers2.py +++ /dev/null @@ -1,438 +0,0 @@ -""" -invokeai.frontend.merge exports a single function called merge_diffusion_models(). - -It merges 2-3 models together and create a new InvokeAI-registered diffusion model. - -Copyright (c) 2023-24 Lincoln Stein and the InvokeAI Development Team -""" -import argparse -import curses -import re -import sys -from argparse import Namespace -from pathlib import Path -from typing import List, Optional, Tuple - -import npyscreen -from npyscreen import widget - -import invokeai.backend.util.logging as logger -from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.app.services.model_install import ModelInstallServiceBase -from invokeai.app.services.model_records import ModelRecordServiceBase -from invokeai.backend.install.install_helper import initialize_installer -from invokeai.backend.model_manager import ( - BaseModelType, - ModelFormat, - ModelType, - ModelVariantType, -) -from invokeai.backend.model_manager.merge import ModelMerger -from invokeai.frontend.install.widgets import FloatTitleSlider, SingleSelectColumns, TextBox - -config = InvokeAIAppConfig.get_config() - -BASE_TYPES = [ - (BaseModelType.StableDiffusion1, "Models Built on SD-1.x"), - (BaseModelType.StableDiffusion2, "Models Built on SD-2.x"), - (BaseModelType.StableDiffusionXL, "Models Built on SDXL"), -] - - -def _parse_args() -> Namespace: - parser = argparse.ArgumentParser(description="InvokeAI model merging") - parser.add_argument( - "--root_dir", - type=Path, - default=config.root, - help="Path to the invokeai runtime directory", - ) - parser.add_argument( - "--front_end", - "--gui", - dest="front_end", - action="store_true", - default=False, - help="Activate the text-based graphical front end for collecting parameters. 
Aside from --root_dir, other parameters will be ignored.", - ) - parser.add_argument( - "--models", - dest="model_names", - type=str, - nargs="+", - help="Two to three model names to be merged", - ) - parser.add_argument( - "--base_model", - type=str, - choices=[x[0].value for x in BASE_TYPES], - help="The base model shared by the models to be merged", - ) - parser.add_argument( - "--merged_model_name", - "--destination", - dest="merged_model_name", - type=str, - help="Name of the output model. If not specified, will be the concatenation of the input model names.", - ) - parser.add_argument( - "--alpha", - type=float, - default=0.5, - help="The interpolation parameter, ranging from 0 to 1. It affects the ratio in which the checkpoints are merged. Higher values give more weight to the 2d and 3d models", - ) - parser.add_argument( - "--interpolation", - dest="interp", - type=str, - choices=["weighted_sum", "sigmoid", "inv_sigmoid", "add_difference"], - default="weighted_sum", - help='Interpolation method to use. If three models are present, only "add_difference" will work.', - ) - parser.add_argument( - "--force", - action="store_true", - help="Try to merge models even if they are incompatible with each other", - ) - parser.add_argument( - "--clobber", - "--overwrite", - dest="clobber", - action="store_true", - help="Overwrite the merged model if --merged_model_name already exists", - ) - return parser.parse_args() - - -# ------------------------- GUI HERE ------------------------- -class mergeModelsForm(npyscreen.FormMultiPageAction): - interpolations = ["weighted_sum", "sigmoid", "inv_sigmoid"] - - def __init__(self, parentApp, name): - self.parentApp = parentApp - self.ALLOW_RESIZE = True - self.FIX_MINIMUM_SIZE_WHEN_CREATED = False - super().__init__(parentApp, name) - - @property - def model_record_store(self) -> ModelRecordServiceBase: - installer: ModelInstallServiceBase = self.parentApp.installer - return installer.record_store - - def afterEditing(self) -> None: - self.parentApp.setNextForm(None) - - def create(self) -> None: - window_height, window_width = curses.initscr().getmaxyx() - self.current_base = 0 - self.models = self.get_models(BASE_TYPES[self.current_base][0]) - self.model_names = [x[1] for x in self.models] - max_width = max([len(x) for x in self.model_names]) - max_width += 6 - horizontal_layout = max_width * 3 < window_width - - self.add_widget_intelligent( - npyscreen.FixedText, - color="CONTROL", - value="Select two models to merge and optionally a third.", - editable=False, - ) - self.add_widget_intelligent( - npyscreen.FixedText, - color="CONTROL", - value="Use up and down arrows to move, to select an item, and to move from one field to the next.", - editable=False, - ) - self.nextrely += 1 - self.base_select = self.add_widget_intelligent( - SingleSelectColumns, - values=[x[1] for x in BASE_TYPES], - value=[self.current_base], - columns=4, - max_height=2, - relx=8, - scroll_exit=True, - ) - self.base_select.on_changed = self._populate_models - self.add_widget_intelligent( - npyscreen.FixedText, - value="MODEL 1", - color="GOOD", - editable=False, - rely=6 if horizontal_layout else None, - ) - self.model1 = self.add_widget_intelligent( - npyscreen.SelectOne, - values=self.model_names, - value=0, - max_height=len(self.model_names), - max_width=max_width, - scroll_exit=True, - rely=7, - ) - self.add_widget_intelligent( - npyscreen.FixedText, - value="MODEL 2", - color="GOOD", - editable=False, - relx=max_width + 3 if horizontal_layout else None, - rely=6 if 
horizontal_layout else None, - ) - self.model2 = self.add_widget_intelligent( - npyscreen.SelectOne, - name="(2)", - values=self.model_names, - value=1, - max_height=len(self.model_names), - max_width=max_width, - relx=max_width + 3 if horizontal_layout else None, - rely=7 if horizontal_layout else None, - scroll_exit=True, - ) - self.add_widget_intelligent( - npyscreen.FixedText, - value="MODEL 3", - color="GOOD", - editable=False, - relx=max_width * 2 + 3 if horizontal_layout else None, - rely=6 if horizontal_layout else None, - ) - models_plus_none = self.model_names.copy() - models_plus_none.insert(0, "None") - self.model3 = self.add_widget_intelligent( - npyscreen.SelectOne, - name="(3)", - values=models_plus_none, - value=0, - max_height=len(self.model_names) + 1, - max_width=max_width, - scroll_exit=True, - relx=max_width * 2 + 3 if horizontal_layout else None, - rely=7 if horizontal_layout else None, - ) - for m in [self.model1, self.model2, self.model3]: - m.when_value_edited = self.models_changed - self.merged_model_name = self.add_widget_intelligent( - TextBox, - name="Name for merged model:", - labelColor="CONTROL", - max_height=3, - value="", - scroll_exit=True, - ) - self.force = self.add_widget_intelligent( - npyscreen.Checkbox, - name="Force merge of models created by different diffusers library versions", - labelColor="CONTROL", - value=True, - scroll_exit=True, - ) - self.nextrely += 1 - self.merge_method = self.add_widget_intelligent( - npyscreen.TitleSelectOne, - name="Merge Method:", - values=self.interpolations, - value=0, - labelColor="CONTROL", - max_height=len(self.interpolations) + 1, - scroll_exit=True, - ) - self.alpha = self.add_widget_intelligent( - FloatTitleSlider, - name="Weight (alpha) to assign to second and third models:", - out_of=1.0, - step=0.01, - lowest=0, - value=0.5, - labelColor="CONTROL", - scroll_exit=True, - ) - self.model1.editing = True - - def models_changed(self) -> None: - models = self.model1.values - selected_model1 = self.model1.value[0] - selected_model2 = self.model2.value[0] - selected_model3 = self.model3.value[0] - merged_model_name = f"{models[selected_model1]}+{models[selected_model2]}" - self.merged_model_name.value = merged_model_name - - if selected_model3 > 0: - self.merge_method.values = ["add_difference ( A+(B-C) )"] - self.merged_model_name.value += f"+{models[selected_model3 -1]}" # In model3 there is one more element in the list (None). So we have to subtract one. 
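# Aside: a tiny runnable illustration of the index shift described in the
# comment above, using hypothetical model names. The third selector's value
# list is ["None"] plus the real names, so widget index i maps to
# model_names[i - 1] whenever i > 0.
model_names = ["model-a", "model-b"]
models_plus_none = ["None"] + model_names
selected = 2  # selector index, counting the leading "None" entry
assert models_plus_none[selected] == model_names[selected - 1]  # "model-b"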
- else: - self.merge_method.values = self.interpolations - self.merge_method.value = 0 - - def on_ok(self) -> None: - if self.validate_field_values() and self.check_for_overwrite(): - self.parentApp.setNextForm(None) - self.editing = False - self.parentApp.merge_arguments = self.marshall_arguments() - npyscreen.notify("Starting the merge...") - else: - self.editing = True - - def on_cancel(self) -> None: - sys.exit(0) - - def marshall_arguments(self) -> dict: - model_keys = [x[0] for x in self.models] - models = [ - model_keys[self.model1.value[0]], - model_keys[self.model2.value[0]], - ] - if self.model3.value[0] > 0: - models.append(model_keys[self.model3.value[0] - 1]) - interp = "add_difference" - else: - interp = self.interpolations[self.merge_method.value[0]] - - args = { - "model_keys": models, - "alpha": self.alpha.value, - "interp": interp, - "force": self.force.value, - "merged_model_name": self.merged_model_name.value, - } - return args - - def check_for_overwrite(self) -> bool: - model_out = self.merged_model_name.value - if model_out not in self.model_names: - return True - else: - result: bool = npyscreen.notify_yes_no( - f"The chosen merged model destination, {model_out}, is already in use. Overwrite?" - ) - return result - - def validate_field_values(self) -> bool: - bad_fields = [] - model_names = self.model_names - selected_models = {model_names[self.model1.value[0]], model_names[self.model2.value[0]]} - if self.model3.value[0] > 0: - selected_models.add(model_names[self.model3.value[0] - 1]) - if len(selected_models) < 2: - bad_fields.append(f"Please select two or three DIFFERENT models to compare. You selected {selected_models}") - if len(bad_fields) > 0: - message = "The following problems were detected and must be corrected:" - for problem in bad_fields: - message += f"\n* {problem}" - npyscreen.notify_confirm(message) - return False - else: - return True - - def get_models(self, base_model: Optional[BaseModelType] = None) -> List[Tuple[str, str]]: # key to name - models = [ - (x.key, x.name) - for x in self.model_record_store.search_by_attr(model_type=ModelType.Main, base_model=base_model) - if x.format == ModelFormat("diffusers") - and hasattr(x, "variant") - and x.variant == ModelVariantType("normal") - ] - return sorted(models, key=lambda x: x[1]) - - def _populate_models(self, value: List[int]) -> None: - base_model = BASE_TYPES[value[0]][0] - self.models = self.get_models(base_model) - self.model_names = [x[1] for x in self.models] - - models_plus_none = self.model_names.copy() - models_plus_none.insert(0, "None") - self.model1.values = self.model_names - self.model2.values = self.model_names - self.model3.values = models_plus_none - - self.display() - - -# npyscreen is untyped and causes mypy to get naggy -class Mergeapp(npyscreen.NPSAppManaged): # type: ignore - def __init__(self, installer: ModelInstallServiceBase): - """Initialize the npyscreen application.""" - super().__init__() - self.installer = installer - - def onStart(self) -> None: - npyscreen.setTheme(npyscreen.Themes.ElegantTheme) - self.main = self.addForm("MAIN", mergeModelsForm, name="Merge Models Settings") - - -def run_gui(args: Namespace) -> None: - installer = initialize_installer(config) - mergeapp = Mergeapp(installer) - mergeapp.run() - merge_args = mergeapp.merge_arguments - merger = ModelMerger(installer) - merger.merge_diffusion_models_and_save(**merge_args) - logger.info(f'Models merged into new model: "{merge_args.merged_model_name}".') - - -def run_cli(args: Namespace) -> None: - 
assert args.alpha >= 0 and args.alpha <= 1.0, "alpha must be between 0 and 1" - assert ( - args.model_names and len(args.model_names) >= 1 and len(args.model_names) <= 3 - ), "Please provide the --models argument to list 2 to 3 models to merge. Use --help for full usage." - - if not args.merged_model_name: - args.merged_model_name = "+".join(args.model_names) - logger.info(f'No --merged_model_name provided. Defaulting to "{args.merged_model_name}"') - - installer = initialize_installer(config) - store = installer.record_store - assert ( - len(store.search_by_attr(args.merged_model_name, args.base_model, ModelType.Main)) == 0 or args.clobber - ), f'A model named "{args.merged_model_name}" already exists. Use --clobber to overwrite.' - - merger = ModelMerger(installer) - model_keys = [] - for name in args.model_names: - if len(name) == 32 and re.match(r"^[0-9a-f]$", name): - model_keys.append(name) - else: - models = store.search_by_attr( - model_name=name, model_type=ModelType.Main, base_model=BaseModelType(args.base_model) - ) - assert len(models) > 0, f"{name}: Unknown model" - assert len(models) < 2, f"{name}: More than one model by this name. Please specify the model key instead." - model_keys.append(models[0].key) - - merger.merge_diffusion_models_and_save( - alpha=args.alpha, - model_keys=model_keys, - merged_model_name=args.merged_model_name, - interp=args.interp, - force=args.force, - ) - logger.info(f'Models merged into new model: "{args.merged_model_name}".') - - -def main() -> None: - args = _parse_args() - if args.root_dir: - config.parse_args(["--root", str(args.root_dir)]) - else: - config.parse_args([]) - - try: - if args.front_end: - run_gui(args) - else: - run_cli(args) - except widget.NotEnoughSpaceForWidget as e: - if str(e).startswith("Height of 1 allocated"): - logger.error("You need to have at least two diffusers models defined in models.yaml in order to merge") - else: - logger.error("Not enough room for the user interface. Try making this window larger.") - sys.exit(-1) - except Exception as e: - logger.error(str(e)) - sys.exit(-1) - except KeyboardInterrupt: - sys.exit(-1) - - -if __name__ == "__main__": - main() diff --git a/invokeai/frontend/training/__init__.py b/invokeai/frontend/training/__init__.py index d4eff2f7fd..7e002b4c03 100644 --- a/invokeai/frontend/training/__init__.py +++ b/invokeai/frontend/training/__init__.py @@ -1,4 +1,5 @@ """ Initialization file for invokeai.frontend.training """ + from .textual_inversion import main as invokeai_textual_inversion # noqa: F401 diff --git a/invokeai/frontend/training/textual_inversion.py b/invokeai/frontend/training/textual_inversion.py old mode 100755 new mode 100644 index 81b1081bb8..a7dc36515f --- a/invokeai/frontend/training/textual_inversion.py +++ b/invokeai/frontend/training/textual_inversion.py @@ -6,7 +6,6 @@ This is the frontend to "textual_inversion_training.py". 
Copyright (c) 2023-24 Lincoln Stein and the InvokeAI Development Team """ - import os import re import shutil @@ -261,7 +260,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction): def validate_field_values(self) -> bool: bad_fields = [] if self.model.value is None: -            bad_fields.append("Model Name must correspond to a known model in models.yaml") +            bad_fields.append("Model Name must correspond to a known model in invokeai.db") if not re.match("^[a-zA-Z0-9.-]+$", self.placeholder_token.value): bad_fields.append("Trigger term must only contain alphanumeric characters, the dot and hyphen") if self.train_data_dir.value is None: @@ -442,7 +441,7 @@ def main() -> None: pass except (widget.NotEnoughSpaceForWidget, Exception) as e: if str(e).startswith("Height of 1 allocated"): -            logger.error("You need to have at least one diffusers models defined in models.yaml in order to train") +            logger.error("You need to have at least one diffusers model defined in invokeai.db in order to train") elif str(e).startswith("addwstr"): logger.error("Not enough window space for the interface. Please make your window larger and try again.") else: diff --git a/invokeai/frontend/training/textual_inversion2.py b/invokeai/frontend/training/textual_inversion2.py deleted file mode 100644 index 81b1081bb8..0000000000 --- a/invokeai/frontend/training/textual_inversion2.py +++ /dev/null @@ -1,454 +0,0 @@ -#!/usr/bin/env python - -""" -This is the frontend to "textual_inversion_training.py". - -Copyright (c) 2023-24 Lincoln Stein and the InvokeAI Development Team -""" - - -import os -import re -import shutil -import sys -import traceback -from argparse import Namespace -from pathlib import Path -from typing import Dict, List, Optional, Tuple - -import npyscreen -from npyscreen import widget -from omegaconf import OmegaConf - -import invokeai.backend.util.logging as logger -from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.backend.install.install_helper import initialize_installer -from invokeai.backend.model_manager import ModelType -from invokeai.backend.training import do_textual_inversion_training, parse_args - -TRAINING_DATA = "text-inversion-training-data" -TRAINING_DIR = "text-inversion-output" -CONF_FILE = "preferences.conf" -config = None - - -class textualInversionForm(npyscreen.FormMultiPageAction): -    resolutions = [512, 768, 1024] -    lr_schedulers = [ -        "linear", -        "cosine", -        "cosine_with_restarts", -        "polynomial", -        "constant", -        "constant_with_warmup", -    ] -    precisions = ["no", "fp16", "bf16"] -    learnable_properties = ["object", "style"] - -    def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, saved_args: Optional[Dict[str, str]] = None): -        self.saved_args = saved_args or {} -        super().__init__(parentApp, name) - -    def afterEditing(self) -> None: -        self.parentApp.setNextForm(None) - -    def create(self) -> None: -        self.model_names, default = self.get_model_names() -        default_initializer_token = "★" -        default_placeholder_token = "" -        saved_args = self.saved_args - -        assert config is not None - -        try: -            default = self.model_names.index(saved_args["model"]) -        except Exception: -            pass - -        self.add_widget_intelligent( -            npyscreen.FixedText, -            value="Use ctrl-N and ctrl-P to move to the <N>ext and <P>
revious fields, cursor arrows to make a selection, and space to toggle checkboxes.", - editable=False, - ) - - self.model = self.add_widget_intelligent( - npyscreen.TitleSelectOne, - name="Model Name:", - values=sorted(self.model_names), - value=default, - max_height=len(self.model_names) + 1, - scroll_exit=True, - ) - self.placeholder_token = self.add_widget_intelligent( - npyscreen.TitleText, - name="Trigger Term:", - value="", # saved_args.get('placeholder_token',''), # to restore previous term - scroll_exit=True, - ) - self.placeholder_token.when_value_edited = self.initializer_changed - self.nextrely -= 1 - self.nextrelx += 30 - self.prompt_token = self.add_widget_intelligent( - npyscreen.FixedText, - name="Trigger term for use in prompt", - value="", - editable=False, - scroll_exit=True, - ) - self.nextrelx -= 30 - self.initializer_token = self.add_widget_intelligent( - npyscreen.TitleText, - name="Initializer:", - value=saved_args.get("initializer_token", default_initializer_token), - scroll_exit=True, - ) - self.resume_from_checkpoint = self.add_widget_intelligent( - npyscreen.Checkbox, - name="Resume from last saved checkpoint", - value=False, - scroll_exit=True, - ) - self.learnable_property = self.add_widget_intelligent( - npyscreen.TitleSelectOne, - name="Learnable property:", - values=self.learnable_properties, - value=self.learnable_properties.index(saved_args.get("learnable_property", "object")), - max_height=4, - scroll_exit=True, - ) - self.train_data_dir = self.add_widget_intelligent( - npyscreen.TitleFilename, - name="Data Training Directory:", - select_dir=True, - must_exist=False, - value=str( - saved_args.get( - "train_data_dir", - config.root_dir / TRAINING_DATA / default_placeholder_token, - ) - ), - scroll_exit=True, - ) - self.output_dir = self.add_widget_intelligent( - npyscreen.TitleFilename, - name="Output Destination Directory:", - select_dir=True, - must_exist=False, - value=str( - saved_args.get( - "output_dir", - config.root_dir / TRAINING_DIR / default_placeholder_token, - ) - ), - scroll_exit=True, - ) - self.resolution = self.add_widget_intelligent( - npyscreen.TitleSelectOne, - name="Image resolution (pixels):", - values=self.resolutions, - value=self.resolutions.index(saved_args.get("resolution", 512)), - max_height=4, - scroll_exit=True, - ) - self.center_crop = self.add_widget_intelligent( - npyscreen.Checkbox, - name="Center crop images before resizing to resolution", - value=saved_args.get("center_crop", False), - scroll_exit=True, - ) - self.mixed_precision = self.add_widget_intelligent( - npyscreen.TitleSelectOne, - name="Mixed Precision:", - values=self.precisions, - value=self.precisions.index(saved_args.get("mixed_precision", "fp16")), - max_height=4, - scroll_exit=True, - ) - self.num_train_epochs = self.add_widget_intelligent( - npyscreen.TitleSlider, - name="Number of training epochs:", - out_of=1000, - step=50, - lowest=1, - value=saved_args.get("num_train_epochs", 100), - scroll_exit=True, - ) - self.max_train_steps = self.add_widget_intelligent( - npyscreen.TitleSlider, - name="Max Training Steps:", - out_of=10000, - step=500, - lowest=1, - value=saved_args.get("max_train_steps", 3000), - scroll_exit=True, - ) - self.train_batch_size = self.add_widget_intelligent( - npyscreen.TitleSlider, - name="Batch Size (reduce if you run out of memory):", - out_of=50, - step=1, - lowest=1, - value=saved_args.get("train_batch_size", 8), - scroll_exit=True, - ) - self.gradient_accumulation_steps = self.add_widget_intelligent( - npyscreen.TitleSlider, 
- name="Gradient Accumulation Steps (may need to decrease this to resume from a checkpoint):", - out_of=10, - step=1, - lowest=1, - value=saved_args.get("gradient_accumulation_steps", 4), - scroll_exit=True, - ) - self.lr_warmup_steps = self.add_widget_intelligent( - npyscreen.TitleSlider, - name="Warmup Steps:", - out_of=100, - step=1, - lowest=0, - value=saved_args.get("lr_warmup_steps", 0), - scroll_exit=True, - ) - self.learning_rate = self.add_widget_intelligent( - npyscreen.TitleText, - name="Learning Rate:", - value=str( - saved_args.get("learning_rate", "5.0e-04"), - ), - scroll_exit=True, - ) - self.scale_lr = self.add_widget_intelligent( - npyscreen.Checkbox, - name="Scale learning rate by number GPUs, steps and batch size", - value=saved_args.get("scale_lr", True), - scroll_exit=True, - ) - self.enable_xformers_memory_efficient_attention = self.add_widget_intelligent( - npyscreen.Checkbox, - name="Use xformers acceleration", - value=saved_args.get("enable_xformers_memory_efficient_attention", False), - scroll_exit=True, - ) - self.lr_scheduler = self.add_widget_intelligent( - npyscreen.TitleSelectOne, - name="Learning rate scheduler:", - values=self.lr_schedulers, - max_height=7, - value=self.lr_schedulers.index(saved_args.get("lr_scheduler", "constant")), - scroll_exit=True, - ) - self.model.editing = True - - def initializer_changed(self) -> None: - placeholder = self.placeholder_token.value - self.prompt_token.value = f"(Trigger by using <{placeholder}> in your prompts)" - self.train_data_dir.value = str(config.root_dir / TRAINING_DATA / placeholder) - self.output_dir.value = str(config.root_dir / TRAINING_DIR / placeholder) - self.resume_from_checkpoint.value = Path(self.output_dir.value).exists() - - def on_ok(self): - if self.validate_field_values(): - self.parentApp.setNextForm(None) - self.editing = False - self.parentApp.ti_arguments = self.marshall_arguments() - npyscreen.notify("Launching textual inversion training. 
This will take a while...") - else: - self.editing = True - - def ok_cancel(self): - sys.exit(0) - - def validate_field_values(self) -> bool: - bad_fields = [] - if self.model.value is None: - bad_fields.append("Model Name must correspond to a known model in models.yaml") - if not re.match("^[a-zA-Z0-9.-]+$", self.placeholder_token.value): - bad_fields.append("Trigger term must only contain alphanumeric characters, the dot and hyphen") - if self.train_data_dir.value is None: - bad_fields.append("Data Training Directory cannot be empty") - if self.output_dir.value is None: - bad_fields.append("The Output Destination Directory cannot be empty") - if len(bad_fields) > 0: - message = "The following problems were detected and must be corrected:" - for problem in bad_fields: - message += f"\n* {problem}" - npyscreen.notify_confirm(message) - return False - else: - return True - - def get_model_names(self) -> Tuple[List[str], int]: - global config - assert config is not None - installer = initialize_installer(config) - store = installer.record_store - main_models = store.search_by_attr(model_type=ModelType.Main) - model_names = [f"{x.base.value}/{x.type.value}/{x.name}" for x in main_models if x.format == "diffusers"] - default = 0 - return (model_names, default) - - def marshall_arguments(self) -> dict: - args = {} - - # the choices - args.update( - model=self.model_names[self.model.value[0]], - resolution=self.resolutions[self.resolution.value[0]], - lr_scheduler=self.lr_schedulers[self.lr_scheduler.value[0]], - mixed_precision=self.precisions[self.mixed_precision.value[0]], - learnable_property=self.learnable_properties[self.learnable_property.value[0]], - ) - - # all the strings and booleans - for attr in ( - "initializer_token", - "placeholder_token", - "train_data_dir", - "output_dir", - "scale_lr", - "center_crop", - "enable_xformers_memory_efficient_attention", - ): - args[attr] = getattr(self, attr).value - - # all the integers - for attr in ( - "train_batch_size", - "gradient_accumulation_steps", - "num_train_epochs", - "max_train_steps", - "lr_warmup_steps", - ): - args[attr] = int(getattr(self, attr).value) - - # the floats (just one) - args.update(learning_rate=float(self.learning_rate.value)) - - # a special case - if self.resume_from_checkpoint.value and Path(self.output_dir.value).exists(): - args["resume_from_checkpoint"] = "latest" - - return args - - -class MyApplication(npyscreen.NPSAppManaged): - def __init__(self, saved_args: Optional[Dict[str, str]] = None): - super().__init__() - self.ti_arguments = None - self.saved_args = saved_args - - def onStart(self): - npyscreen.setTheme(npyscreen.Themes.DefaultTheme) - self.main = self.addForm( - "MAIN", - textualInversionForm, - name="Textual Inversion Settings", - saved_args=self.saved_args, - ) - - -def copy_to_embeddings_folder(args: Dict[str, str]) -> None: - """ - Copy learned_embeds.bin into the embeddings folder, and offer to - delete the full model and checkpoints. - """ - assert config is not None - source = Path(args["output_dir"], "learned_embeds.bin") - dest_dir_name = args["placeholder_token"].strip("<>") - destination = config.root_dir / "embeddings" / dest_dir_name - os.makedirs(destination, exist_ok=True) - logger.info(f"Training completed. Copying learned_embeds.bin into {str(destination)}") - shutil.copy(source, destination) - if (input("Delete training logs and intermediate checkpoints? 
[y] ") or "y").startswith(("y", "Y")): - shutil.rmtree(Path(args["output_dir"])) - else: - logger.info(f'Keeping {args["output_dir"]}') - - -def save_args(args: dict) -> None: - """ - Save the current argument values to an omegaconf file - """ - assert config is not None - dest_dir = config.root_dir / TRAINING_DIR - os.makedirs(dest_dir, exist_ok=True) - conf_file = dest_dir / CONF_FILE - conf = OmegaConf.create(args) - OmegaConf.save(config=conf, f=conf_file) - - -def previous_args() -> dict: - """ - Get the previous arguments used. - """ - assert config is not None - conf_file = config.root_dir / TRAINING_DIR / CONF_FILE - try: - conf = OmegaConf.load(conf_file) - conf["placeholder_token"] = conf["placeholder_token"].strip("<>") - except Exception: - conf = None - - return conf - - -def do_front_end() -> None: - global config - saved_args = previous_args() - myapplication = MyApplication(saved_args=saved_args) - myapplication.run() - - if my_args := myapplication.ti_arguments: - os.makedirs(my_args["output_dir"], exist_ok=True) - - # Automatically add angle brackets around the trigger - if not re.match("^<.+>$", my_args["placeholder_token"]): - my_args["placeholder_token"] = f"<{my_args['placeholder_token']}>" - - my_args["only_save_embeds"] = True - save_args(my_args) - - try: - print(my_args) - do_textual_inversion_training(config, **my_args) - copy_to_embeddings_folder(my_args) - except Exception as e: - logger.error("An exception occurred during training. The exception was:") - logger.error(str(e)) - logger.error("DETAILS:") - logger.error(traceback.format_exc()) - - -def main() -> None: - global config - - args: Namespace = parse_args() - config = InvokeAIAppConfig.get_config() - config.parse_args([]) - - # change root if needed - if args.root_dir: - config.root = args.root_dir - - try: - if args.front_end: - do_front_end() - else: - do_textual_inversion_training(config, **vars(args)) - except AssertionError as e: - logger.error(e) - sys.exit(-1) - except KeyboardInterrupt: - pass - except (widget.NotEnoughSpaceForWidget, Exception) as e: - if str(e).startswith("Height of 1 allocated"): - logger.error("You need to have at least one diffusers models defined in models.yaml in order to train") - elif str(e).startswith("addwstr"): - logger.error("Not enough window space for the interface. 
Please make your window larger and try again.") - else: - logger.error(e) - sys.exit(-1) - - -if __name__ == "__main__": - main() diff --git a/invokeai/frontend/web/.gitignore b/invokeai/frontend/web/.gitignore index 8e7ebc76a1..3e8a372bc7 100644 --- a/invokeai/frontend/web/.gitignore +++ b/invokeai/frontend/web/.gitignore @@ -41,3 +41,6 @@ stats.html # Yalc .yalc yalc.lock + +# vitest +tsconfig.vitest-temp.json \ No newline at end of file diff --git a/invokeai/frontend/web/.storybook/ReduxInit.tsx b/invokeai/frontend/web/.storybook/ReduxInit.tsx index 55d0132242..7d3f8e0d2b 100644 --- a/invokeai/frontend/web/.storybook/ReduxInit.tsx +++ b/invokeai/frontend/web/.storybook/ReduxInit.tsx @@ -10,13 +10,7 @@ export const ReduxInit = memo((props: PropsWithChildren) => { const dispatch = useAppDispatch(); useGlobalModifiersInit(); useEffect(() => { - dispatch( - modelChanged({ - model_name: 'test_model', - base_model: 'sd-1', - model_type: 'main', - }) - ); + dispatch(modelChanged({ key: 'test_model', base: 'sd-1' })); }, []); return props.children; diff --git a/invokeai/frontend/web/.unimportedrc.json b/invokeai/frontend/web/.unimportedrc.json deleted file mode 100644 index cf96610502..0000000000 --- a/invokeai/frontend/web/.unimportedrc.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "entry": ["src/main.tsx"], - "extensions": [".ts", ".tsx"], - "ignorePatterns": ["**/node_modules/**", "dist/**", "public/**", "**/*.stories.tsx", "config/**"], - "ignoreUnresolved": [], - "ignoreUnimported": ["src/i18.d.ts", "vite.config.ts", "src/vite-env.d.ts"], - "respectGitignore": true, - "ignoreUnused": [] -} diff --git a/invokeai/frontend/web/config/common.mts b/invokeai/frontend/web/config/common.mts deleted file mode 100644 index fd559cabd1..0000000000 --- a/invokeai/frontend/web/config/common.mts +++ /dev/null @@ -1,12 +0,0 @@ -import react from '@vitejs/plugin-react-swc'; -import { visualizer } from 'rollup-plugin-visualizer'; -import type { PluginOption, UserConfig } from 'vite'; -import eslint from 'vite-plugin-eslint'; -import tsconfigPaths from 'vite-tsconfig-paths'; - -export const commonPlugins: UserConfig['plugins'] = [ - react(), - eslint(), - tsconfigPaths(), - visualizer() as unknown as PluginOption, -]; diff --git a/invokeai/frontend/web/config/vite.app.config.mts b/invokeai/frontend/web/config/vite.app.config.mts deleted file mode 100644 index 9683ed26a4..0000000000 --- a/invokeai/frontend/web/config/vite.app.config.mts +++ /dev/null @@ -1,33 +0,0 @@ -import type { UserConfig } from 'vite'; - -import { commonPlugins } from './common.mjs'; - -export const appConfig: UserConfig = { - base: './', - plugins: [...commonPlugins], - build: { - chunkSizeWarningLimit: 1500, - }, - server: { - // Proxy HTTP requests to the flask server - proxy: { - // Proxy socket.io to the nodes socketio server - '/ws/socket.io': { - target: 'ws://127.0.0.1:9090', - ws: true, - }, - // Proxy openapi schema definiton - '/openapi.json': { - target: 'http://127.0.0.1:9090/openapi.json', - rewrite: (path) => path.replace(/^\/openapi.json/, ''), - changeOrigin: true, - }, - // proxy nodes api - '/api/v1': { - target: 'http://127.0.0.1:9090/api/v1', - rewrite: (path) => path.replace(/^\/api\/v1/, ''), - changeOrigin: true, - }, - }, - }, -}; diff --git a/invokeai/frontend/web/config/vite.package.config.mts b/invokeai/frontend/web/config/vite.package.config.mts deleted file mode 100644 index 3c05d52e00..0000000000 --- a/invokeai/frontend/web/config/vite.package.config.mts +++ /dev/null @@ -1,46 +0,0 @@ -import path from 'path'; 
-import type { UserConfig } from 'vite'; -import cssInjectedByJsPlugin from 'vite-plugin-css-injected-by-js'; -import dts from 'vite-plugin-dts'; - -import { commonPlugins } from './common.mjs'; - -export const packageConfig: UserConfig = { - base: './', - plugins: [ - ...commonPlugins, - dts({ - insertTypesEntry: true, - }), - cssInjectedByJsPlugin(), - ], - build: { - cssCodeSplit: true, - lib: { - entry: path.resolve(__dirname, '../src/index.ts'), - name: 'InvokeAIUI', - fileName: (format) => `invoke-ai-ui.${format}.js`, - }, - rollupOptions: { - external: ['react', 'react-dom', '@emotion/react', '@chakra-ui/react', '@invoke-ai/ui-library'], - output: { - globals: { - react: 'React', - 'react-dom': 'ReactDOM', - '@emotion/react': 'EmotionReact', - '@invoke-ai/ui-library': 'UiLibrary', - }, - }, - }, - }, - resolve: { - alias: { - app: path.resolve(__dirname, '../src/app'), - assets: path.resolve(__dirname, '../src/assets'), - common: path.resolve(__dirname, '../src/common'), - features: path.resolve(__dirname, '../src/features'), - services: path.resolve(__dirname, '../src/services'), - theme: path.resolve(__dirname, '../src/theme'), - }, - }, -}; diff --git a/invokeai/frontend/web/knip.ts b/invokeai/frontend/web/knip.ts new file mode 100644 index 0000000000..f2057fa904 --- /dev/null +++ b/invokeai/frontend/web/knip.ts @@ -0,0 +1,27 @@ +import type { KnipConfig } from 'knip'; + +const config: KnipConfig = { + ignore: [ + // This file is only used during debugging + 'src/app/store/middleware/debugLoggerMiddleware.ts', + // Autogenerated types - shouldn't ever touch these + 'src/services/api/schema.ts', + ], + ignoreBinaries: ['only-allow'], + rules: { + files: 'warn', + dependencies: 'warn', + unlisted: 'warn', + binaries: 'warn', + unresolved: 'warn', + exports: 'warn', + types: 'warn', + nsExports: 'warn', + nsTypes: 'warn', + enumMembers: 'warn', + classMembers: 'warn', + duplicates: 'warn', + }, +}; + +export default config; diff --git a/invokeai/frontend/web/package.json b/invokeai/frontend/web/package.json index e583169af7..a413604ea3 100644 --- a/invokeai/frontend/web/package.json +++ b/invokeai/frontend/web/package.json @@ -24,16 +24,18 @@ "build": "pnpm run lint && vite build", "typegen": "node scripts/typegen.js", "preview": "vite preview", - "lint:madge": "madge --circular src/main.tsx", + "lint:knip": "knip", + "lint:dpdm": "dpdm --no-warning --no-tree --transform --exit-code circular:1 src/main.tsx", "lint:eslint": "eslint --max-warnings=0 .", "lint:prettier": "prettier --check .", "lint:tsc": "tsc --noEmit", - "lint": "concurrently -g -n eslint,prettier,tsc,madge -c cyan,green,magenta,yellow \"pnpm run lint:eslint\" \"pnpm run lint:prettier\" \"pnpm run lint:tsc\" \"pnpm run lint:madge\"", - "fix": "eslint --fix . && prettier --log-level warn --write .", + "lint": "concurrently -g -c red,green,yellow,blue,magenta pnpm:lint:*", + "fix": "knip --fix && eslint --fix . 
&& prettier --log-level warn --write .", "preinstall": "npx only-allow pnpm", "storybook": "storybook dev -p 6006", "build-storybook": "storybook build", - "unimported": "npx unimported" + "test": "vitest", + "test:no-watch": "vitest --no-watch" }, "madge": { "excludeRegExp": [ @@ -52,56 +54,54 @@ "@chakra-ui/react-use-size": "^2.1.0", "@dagrejs/graphlib": "^2.1.13", "@dnd-kit/core": "^6.1.0", + "@dnd-kit/sortable": "^8.0.0", "@dnd-kit/utilities": "^3.2.2", "@fontsource-variable/inter": "^5.0.16", - "@invoke-ai/ui-library": "^0.0.18", - "@mantine/form": "6.0.21", - "@nanostores/react": "^0.7.1", - "@reduxjs/toolkit": "2.0.1", + "@invoke-ai/ui-library": "^0.0.21", + "@nanostores/react": "^0.7.2", + "@reduxjs/toolkit": "2.2.1", "@roarr/browser-log-writer": "^1.3.0", "chakra-react-select": "^4.7.6", "compare-versions": "^6.1.0", "dateformat": "^5.0.3", - "framer-motion": "^10.18.0", - "i18next": "^23.7.16", - "i18next-http-backend": "^2.4.2", + "framer-motion": "^11.0.6", + "i18next": "^23.10.0", + "i18next-http-backend": "^2.5.0", "idb-keyval": "^6.2.1", "jsondiffpatch": "^0.6.0", - "konva": "^9.3.1", + "konva": "^9.3.3", "lodash-es": "^4.17.21", - "nanostores": "^0.9.5", + "nanostores": "^0.10.0", "new-github-issue-url": "^1.0.0", - "overlayscrollbars": "^2.4.6", - "overlayscrollbars-react": "^0.5.3", - "query-string": "^8.1.0", + "overlayscrollbars": "^2.5.0", + "overlayscrollbars-react": "^0.5.4", + "query-string": "^9.0.0", "react": "^18.2.0", "react-colorful": "^5.6.1", "react-dom": "^18.2.0", "react-dropzone": "^14.2.3", "react-error-boundary": "^4.0.12", - "react-hook-form": "^7.49.3", - "react-hotkeys-hook": "4.4.4", - "react-i18next": "^14.0.0", + "react-hook-form": "^7.50.1", + "react-hotkeys-hook": "4.5.0", + "react-i18next": "^14.0.5", "react-icons": "^5.0.1", "react-konva": "^18.2.10", "react-redux": "9.1.0", - "react-resizable-panels": "^1.0.9", + "react-resizable-panels": "^2.0.11", "react-select": "5.8.0", - "react-textarea-autosize": "^8.5.3", - "react-use": "^17.4.3", - "react-virtuoso": "^4.6.2", - "reactflow": "^11.10.2", + "react-use": "^17.5.0", + "react-virtuoso": "^4.7.1", + "reactflow": "^11.10.4", "redux-dynamic-middlewares": "^2.2.0", "redux-remember": "^5.1.0", "roarr": "^7.21.0", "serialize-error": "^11.0.3", "socket.io-client": "^4.7.4", - "type-fest": "^4.9.0", "use-debounce": "^10.0.0", "use-image": "^1.1.1", "uuid": "^9.0.1", "zod": "^3.22.4", - "zod-validation-error": "^3.0.0" + "zod-validation-error": "^3.0.2" }, "peerDependencies": { "@chakra-ui/react": "^2.8.2", @@ -110,57 +110,42 @@ "ts-toolbelt": "^9.6.0" }, "devDependencies": { - "@arthurgeron/eslint-plugin-react-usememo": "^2.2.3", - "@invoke-ai/eslint-config-react": "^0.0.13", - "@invoke-ai/prettier-config-react": "^0.0.6", - "@storybook/addon-docs": "^7.6.10", - "@storybook/addon-essentials": "^7.6.10", - "@storybook/addon-interactions": "^7.6.10", - "@storybook/addon-links": "^7.6.10", - "@storybook/addon-storysource": "^7.6.10", - "@storybook/blocks": "^7.6.10", - "@storybook/manager-api": "^7.6.10", - "@storybook/react": "^7.6.10", - "@storybook/react-vite": "^7.6.10", - "@storybook/test": "^7.6.10", - "@storybook/theming": "^7.6.10", + "@invoke-ai/eslint-config-react": "^0.0.14", + "@invoke-ai/prettier-config-react": "^0.0.7", + "@storybook/addon-essentials": "^7.6.17", + "@storybook/addon-interactions": "^7.6.17", + "@storybook/addon-links": "^7.6.17", + "@storybook/addon-storysource": "^7.6.17", + "@storybook/manager-api": "^7.6.17", + "@storybook/react": "^7.6.17", + "@storybook/react-vite": 
"^7.6.17", + "@storybook/theming": "^7.6.17", "@types/dateformat": "^5.0.2", "@types/lodash-es": "^4.17.12", - "@types/node": "^20.11.5", - "@types/react": "^18.2.48", - "@types/react-dom": "^18.2.18", - "@types/uuid": "^9.0.7", - "@typescript-eslint/eslint-plugin": "^6.19.0", - "@typescript-eslint/parser": "^6.19.0", - "@vitejs/plugin-react-swc": "^3.5.0", + "@types/node": "^20.11.20", + "@types/react": "^18.2.59", + "@types/react-dom": "^18.2.19", + "@types/uuid": "^9.0.8", + "@vitejs/plugin-react-swc": "^3.6.0", "concurrently": "^8.2.2", - "eslint": "^8.56.0", - "eslint-config-prettier": "^9.1.0", + "dpdm": "^3.14.0", + "eslint": "^8.57.0", "eslint-plugin-i18next": "^6.0.3", - "eslint-plugin-import": "^2.29.1", "eslint-plugin-path": "^1.2.4", - "eslint-plugin-react": "^7.33.2", - "eslint-plugin-react-hooks": "^4.6.0", - "eslint-plugin-simple-import-sort": "^10.0.0", - "eslint-plugin-storybook": "^0.6.15", - "eslint-plugin-unused-imports": "^3.0.0", - "madge": "^6.1.0", + "knip": "^5.0.2", "openapi-types": "^12.1.3", - "openapi-typescript": "^6.7.3", - "prettier": "^3.2.4", + "openapi-typescript": "^6.7.4", + "prettier": "^3.2.5", "rollup-plugin-visualizer": "^5.12.0", - "storybook": "^7.6.10", + "storybook": "^7.6.17", "ts-toolbelt": "^9.6.0", + "tsafe": "^1.6.6", "typescript": "^5.3.3", - "vite": "^5.0.12", - "vite-plugin-css-injected-by-js": "^3.3.1", - "vite-plugin-dts": "^3.7.1", + "vite": "^5.1.4", + "vite-plugin-css-injected-by-js": "^3.4.0", + "vite-plugin-dts": "^3.7.3", "vite-plugin-eslint": "^1.8.1", - "vite-tsconfig-paths": "^4.3.1" - }, - "pnpm": { - "patchedDependencies": { - "reselect@5.0.1": "patches/reselect@5.0.1.patch" - } + "vite-tsconfig-paths": "^4.3.1", + "vitest": "^1.3.1" } } diff --git a/invokeai/frontend/web/pnpm-lock.yaml b/invokeai/frontend/web/pnpm-lock.yaml index ba76a61275..e1a1e4b741 100644 --- a/invokeai/frontend/web/pnpm-lock.yaml +++ b/invokeai/frontend/web/pnpm-lock.yaml @@ -4,15 +4,10 @@ settings: autoInstallPeers: true excludeLinksFromLockfile: false -patchedDependencies: - reselect@5.0.1: - hash: kvbgwzjyy4x4fnh7znyocvb75q - path: patches/reselect@5.0.1.patch - dependencies: '@chakra-ui/react': specifier: ^2.8.2 - version: 2.8.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(@types/react@18.2.48)(framer-motion@10.18.0)(react-dom@18.2.0)(react@18.2.0) + version: 2.8.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(@types/react@18.2.57)(framer-motion@11.0.5)(react-dom@18.2.0)(react@18.2.0) '@chakra-ui/react-use-size': specifier: ^2.1.0 version: 2.1.0(react@18.2.0) @@ -22,6 +17,9 @@ dependencies: '@dnd-kit/core': specifier: ^6.1.0 version: 6.1.0(react-dom@18.2.0)(react@18.2.0) + '@dnd-kit/sortable': + specifier: ^8.0.0 + version: 8.0.0(@dnd-kit/core@6.1.0)(react@18.2.0) '@dnd-kit/utilities': specifier: ^3.2.2 version: 3.2.2(react@18.2.0) @@ -29,23 +27,20 @@ dependencies: specifier: ^5.0.16 version: 5.0.16 '@invoke-ai/ui-library': - specifier: ^0.0.18 - version: 0.0.18(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.16)(@internationalized/date@3.5.1)(@types/react@18.2.48)(i18next@23.7.16)(react-dom@18.2.0)(react@18.2.0) - '@mantine/form': - specifier: 6.0.21 - version: 6.0.21(react@18.2.0) + specifier: ^0.0.21 + version: 
0.0.21(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.16)(@internationalized/date@3.5.2)(@types/react@18.2.59)(i18next@23.10.0)(react-dom@18.2.0)(react@18.2.0) '@nanostores/react': - specifier: ^0.7.1 - version: 0.7.1(nanostores@0.9.5)(react@18.2.0) + specifier: ^0.7.2 + version: 0.7.2(nanostores@0.10.0)(react@18.2.0) '@reduxjs/toolkit': - specifier: 2.0.1 - version: 2.0.1(react-redux@9.1.0)(react@18.2.0) + specifier: 2.2.1 + version: 2.2.1(react-redux@9.1.0)(react@18.2.0) '@roarr/browser-log-writer': specifier: ^1.3.0 version: 1.3.0 chakra-react-select: specifier: ^4.7.6 - version: 4.7.6(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.11.3)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + version: 4.7.6(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.11.3)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) compare-versions: specifier: ^6.1.0 version: 6.1.0 @@ -53,14 +48,14 @@ dependencies: specifier: ^5.0.3 version: 5.0.3 framer-motion: - specifier: ^10.18.0 - version: 10.18.0(react-dom@18.2.0)(react@18.2.0) + specifier: ^11.0.6 + version: 11.0.6(react-dom@18.2.0)(react@18.2.0) i18next: - specifier: ^23.7.16 - version: 23.7.16 + specifier: ^23.10.0 + version: 23.10.0 i18next-http-backend: - specifier: ^2.4.2 - version: 2.4.2 + specifier: ^2.5.0 + version: 2.5.0 idb-keyval: specifier: ^6.2.1 version: 6.2.1 @@ -68,26 +63,26 @@ dependencies: specifier: ^0.6.0 version: 0.6.0 konva: - specifier: ^9.3.1 - version: 9.3.1 + specifier: ^9.3.3 + version: 9.3.3 lodash-es: specifier: ^4.17.21 version: 4.17.21 nanostores: - specifier: ^0.9.5 - version: 0.9.5 + specifier: ^0.10.0 + version: 0.10.0 new-github-issue-url: specifier: ^1.0.0 version: 1.0.0 overlayscrollbars: - specifier: ^2.4.6 - version: 2.4.6 + specifier: ^2.5.0 + version: 2.5.0 overlayscrollbars-react: - specifier: ^0.5.3 - version: 0.5.3(overlayscrollbars@2.4.6)(react@18.2.0) + specifier: ^0.5.4 + version: 0.5.4(overlayscrollbars@2.5.0)(react@18.2.0) query-string: - specifier: ^8.1.0 - version: 8.1.0 + specifier: ^9.0.0 + version: 9.0.0 react: specifier: ^18.2.0 version: 18.2.0 @@ -104,41 +99,38 @@ dependencies: specifier: ^4.0.12 version: 4.0.12(react@18.2.0) react-hook-form: - specifier: ^7.49.3 - version: 7.49.3(react@18.2.0) + specifier: ^7.50.1 + version: 7.50.1(react@18.2.0) react-hotkeys-hook: - specifier: 4.4.4 - version: 4.4.4(react-dom@18.2.0)(react@18.2.0) + specifier: 4.5.0 + version: 4.5.0(react-dom@18.2.0)(react@18.2.0) react-i18next: - specifier: ^14.0.0 - version: 14.0.0(i18next@23.7.16)(react-dom@18.2.0)(react@18.2.0) + specifier: ^14.0.5 + version: 14.0.5(i18next@23.10.0)(react-dom@18.2.0)(react@18.2.0) react-icons: specifier: ^5.0.1 version: 5.0.1(react@18.2.0) react-konva: specifier: ^18.2.10 - version: 18.2.10(konva@9.3.1)(react-dom@18.2.0)(react@18.2.0) + version: 18.2.10(konva@9.3.3)(react-dom@18.2.0)(react@18.2.0) react-redux: specifier: 9.1.0 - version: 9.1.0(@types/react@18.2.48)(react@18.2.0)(redux@5.0.1) + version: 9.1.0(@types/react@18.2.59)(react@18.2.0)(redux@5.0.1) react-resizable-panels: - specifier: ^1.0.9 - version: 1.0.9(react-dom@18.2.0)(react@18.2.0) 
+ specifier: ^2.0.11 + version: 2.0.11(react-dom@18.2.0)(react@18.2.0) react-select: specifier: 5.8.0 - version: 5.8.0(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - react-textarea-autosize: - specifier: ^8.5.3 - version: 8.5.3(@types/react@18.2.48)(react@18.2.0) + version: 5.8.0(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) react-use: - specifier: ^17.4.3 - version: 17.4.3(react-dom@18.2.0)(react@18.2.0) + specifier: ^17.5.0 + version: 17.5.0(react-dom@18.2.0)(react@18.2.0) react-virtuoso: - specifier: ^4.6.2 - version: 4.6.2(react-dom@18.2.0)(react@18.2.0) + specifier: ^4.7.1 + version: 4.7.1(react-dom@18.2.0)(react@18.2.0) reactflow: - specifier: ^11.10.2 - version: 11.10.2(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + specifier: ^11.10.4 + version: 11.10.4(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) redux-dynamic-middlewares: specifier: ^2.2.0 version: 2.2.0 @@ -154,9 +146,6 @@ dependencies: socket.io-client: specifier: ^4.7.4 version: 4.7.4 - type-fest: - specifier: ^4.9.0 - version: 4.9.0 use-debounce: specifier: ^10.0.0 version: 10.0.0(react@18.2.0) @@ -170,52 +159,40 @@ dependencies: specifier: ^3.22.4 version: 3.22.4 zod-validation-error: - specifier: ^3.0.0 - version: 3.0.0(zod@3.22.4) + specifier: ^3.0.2 + version: 3.0.2(zod@3.22.4) devDependencies: - '@arthurgeron/eslint-plugin-react-usememo': - specifier: ^2.2.3 - version: 2.2.3 '@invoke-ai/eslint-config-react': - specifier: ^0.0.13 - version: 0.0.13(@typescript-eslint/eslint-plugin@6.19.0)(@typescript-eslint/parser@6.19.0)(eslint-config-prettier@9.1.0)(eslint-plugin-import@2.29.1)(eslint-plugin-react-hooks@4.6.0)(eslint-plugin-react-refresh@0.4.5)(eslint-plugin-react@7.33.2)(eslint-plugin-simple-import-sort@10.0.0)(eslint-plugin-storybook@0.6.15)(eslint-plugin-unused-imports@3.0.0)(eslint@8.56.0) + specifier: ^0.0.14 + version: 0.0.14(eslint@8.57.0)(prettier@3.2.5)(typescript@5.3.3) '@invoke-ai/prettier-config-react': - specifier: ^0.0.6 - version: 0.0.6(prettier@3.2.4) - '@storybook/addon-docs': - specifier: ^7.6.10 - version: 7.6.10(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + specifier: ^0.0.7 + version: 0.0.7(prettier@3.2.5) '@storybook/addon-essentials': - specifier: ^7.6.10 - version: 7.6.10(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + specifier: ^7.6.17 + version: 7.6.17(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) '@storybook/addon-interactions': - specifier: ^7.6.10 - version: 7.6.10 + specifier: ^7.6.17 + version: 7.6.17 '@storybook/addon-links': - specifier: ^7.6.10 - version: 7.6.10(react@18.2.0) + specifier: ^7.6.17 + version: 7.6.17(react@18.2.0) '@storybook/addon-storysource': - specifier: ^7.6.10 - version: 7.6.10 - '@storybook/blocks': - specifier: ^7.6.10 - version: 7.6.10(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + specifier: ^7.6.17 + version: 7.6.17 '@storybook/manager-api': - specifier: ^7.6.10 - version: 7.6.10(react-dom@18.2.0)(react@18.2.0) + specifier: ^7.6.17 + version: 7.6.17(react-dom@18.2.0)(react@18.2.0) '@storybook/react': - specifier: ^7.6.10 - version: 7.6.10(react-dom@18.2.0)(react@18.2.0)(typescript@5.3.3) + specifier: ^7.6.17 + version: 7.6.17(react-dom@18.2.0)(react@18.2.0)(typescript@5.3.3) '@storybook/react-vite': - specifier: ^7.6.10 - version: 7.6.10(react-dom@18.2.0)(react@18.2.0)(typescript@5.3.3)(vite@5.0.12) - '@storybook/test': - specifier: ^7.6.10 - version: 7.6.10 + specifier: ^7.6.17 + version: 
7.6.17(react-dom@18.2.0)(react@18.2.0)(typescript@5.3.3)(vite@5.1.4) '@storybook/theming': - specifier: ^7.6.10 - version: 7.6.10(react-dom@18.2.0)(react@18.2.0) + specifier: ^7.6.17 + version: 7.6.17(react-dom@18.2.0)(react@18.2.0) '@types/dateformat': specifier: ^5.0.2 version: 5.0.2 @@ -223,98 +200,80 @@ devDependencies: specifier: ^4.17.12 version: 4.17.12 '@types/node': - specifier: ^20.11.5 - version: 20.11.5 + specifier: ^20.11.20 + version: 20.11.20 '@types/react': - specifier: ^18.2.48 - version: 18.2.48 + specifier: ^18.2.59 + version: 18.2.59 '@types/react-dom': - specifier: ^18.2.18 - version: 18.2.18 + specifier: ^18.2.19 + version: 18.2.19 '@types/uuid': - specifier: ^9.0.7 - version: 9.0.7 - '@typescript-eslint/eslint-plugin': - specifier: ^6.19.0 - version: 6.19.0(@typescript-eslint/parser@6.19.0)(eslint@8.56.0)(typescript@5.3.3) - '@typescript-eslint/parser': - specifier: ^6.19.0 - version: 6.19.0(eslint@8.56.0)(typescript@5.3.3) + specifier: ^9.0.8 + version: 9.0.8 '@vitejs/plugin-react-swc': - specifier: ^3.5.0 - version: 3.5.0(vite@5.0.12) + specifier: ^3.6.0 + version: 3.6.0(vite@5.1.4) concurrently: specifier: ^8.2.2 version: 8.2.2 + dpdm: + specifier: ^3.14.0 + version: 3.14.0 eslint: - specifier: ^8.56.0 - version: 8.56.0 - eslint-config-prettier: - specifier: ^9.1.0 - version: 9.1.0(eslint@8.56.0) + specifier: ^8.57.0 + version: 8.57.0 eslint-plugin-i18next: specifier: ^6.0.3 version: 6.0.3 - eslint-plugin-import: - specifier: ^2.29.1 - version: 2.29.1(@typescript-eslint/parser@6.19.0)(eslint@8.56.0) eslint-plugin-path: specifier: ^1.2.4 - version: 1.2.4(eslint@8.56.0) - eslint-plugin-react: - specifier: ^7.33.2 - version: 7.33.2(eslint@8.56.0) - eslint-plugin-react-hooks: - specifier: ^4.6.0 - version: 4.6.0(eslint@8.56.0) - eslint-plugin-simple-import-sort: - specifier: ^10.0.0 - version: 10.0.0(eslint@8.56.0) - eslint-plugin-storybook: - specifier: ^0.6.15 - version: 0.6.15(eslint@8.56.0)(typescript@5.3.3) - eslint-plugin-unused-imports: - specifier: ^3.0.0 - version: 3.0.0(@typescript-eslint/eslint-plugin@6.19.0)(eslint@8.56.0) - madge: - specifier: ^6.1.0 - version: 6.1.0(typescript@5.3.3) + version: 1.2.4(eslint@8.57.0) + knip: + specifier: ^5.0.2 + version: 5.0.2(@types/node@20.11.20)(typescript@5.3.3) openapi-types: specifier: ^12.1.3 version: 12.1.3 openapi-typescript: - specifier: ^6.7.3 - version: 6.7.3 + specifier: ^6.7.4 + version: 6.7.4 prettier: - specifier: ^3.2.4 - version: 3.2.4 + specifier: ^3.2.5 + version: 3.2.5 rollup-plugin-visualizer: specifier: ^5.12.0 version: 5.12.0 storybook: - specifier: ^7.6.10 - version: 7.6.10 + specifier: ^7.6.17 + version: 7.6.17 ts-toolbelt: specifier: ^9.6.0 version: 9.6.0 + tsafe: + specifier: ^1.6.6 + version: 1.6.6 typescript: specifier: ^5.3.3 version: 5.3.3 vite: - specifier: ^5.0.12 - version: 5.0.12(@types/node@20.11.5) + specifier: ^5.1.4 + version: 5.1.4(@types/node@20.11.20) vite-plugin-css-injected-by-js: - specifier: ^3.3.1 - version: 3.3.1(vite@5.0.12) + specifier: ^3.4.0 + version: 3.4.0(vite@5.1.4) vite-plugin-dts: - specifier: ^3.7.1 - version: 3.7.1(@types/node@20.11.5)(typescript@5.3.3)(vite@5.0.12) + specifier: ^3.7.3 + version: 3.7.3(@types/node@20.11.20)(typescript@5.3.3)(vite@5.1.4) vite-plugin-eslint: specifier: ^1.8.1 - version: 1.8.1(eslint@8.56.0)(vite@5.0.12) + version: 1.8.1(eslint@8.57.0)(vite@5.1.4) vite-tsconfig-paths: specifier: ^4.3.1 - version: 4.3.1(typescript@5.3.3)(vite@5.0.12) + version: 4.3.1(typescript@5.3.3)(vite@5.1.4) + vitest: + specifier: ^1.3.1 + version: 
1.3.1(@types/node@20.11.20) packages: @@ -323,19 +282,15 @@ packages: engines: {node: '>=0.10.0'} dev: true - /@adobe/css-tools@4.3.2: - resolution: {integrity: sha512-DA5a1C0gD/pLOvhv33YMrbf2FK3oUzwNl9oOJqE4XVjuEtt6XIakRcsd7eLiOSPkp1kTRQGICTA8cKra/vFbjw==} - dev: true - /@ampproject/remapping@2.2.1: resolution: {integrity: sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==} engines: {node: '>=6.0.0'} dependencies: - '@jridgewell/gen-mapping': 0.3.3 - '@jridgewell/trace-mapping': 0.3.21 + '@jridgewell/gen-mapping': 0.3.4 + '@jridgewell/trace-mapping': 0.3.23 dev: true - /@ark-ui/anatomy@1.3.0(@internationalized/date@3.5.1): + /@ark-ui/anatomy@1.3.0(@internationalized/date@3.5.2): resolution: {integrity: sha512-1yG2MrzUlix6KthjQMCNiHnkXrWwEdFAX6D+HqGJaNu0XvaGul2J+wDNtjsdX+gxiWu1nXXEEOAWlFVYMUf65w==} dependencies: '@zag-js/accordion': 0.32.1 @@ -347,7 +302,7 @@ packages: '@zag-js/color-utils': 0.32.1 '@zag-js/combobox': 0.32.1 '@zag-js/date-picker': 0.32.1 - '@zag-js/date-utils': 0.32.1(@internationalized/date@3.5.1) + '@zag-js/date-utils': 0.32.1(@internationalized/date@3.5.2) '@zag-js/dialog': 0.32.1 '@zag-js/editable': 0.32.1 '@zag-js/file-upload': 0.32.1 @@ -374,13 +329,13 @@ packages: - '@internationalized/date' dev: false - /@ark-ui/react@1.3.0(@internationalized/date@3.5.1)(react-dom@18.2.0)(react@18.2.0): + /@ark-ui/react@1.3.0(@internationalized/date@3.5.2)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-JHjNoIX50+mUCTaEGMjfGQWGGi31pKsV646jZJlR/1xohpYJigzg8BvO97cTsVk8fwtur+cm11gz3Nf7f5QUnA==} peerDependencies: react: '>=18.0.0' react-dom: '>=18.0.0' dependencies: - '@ark-ui/anatomy': 1.3.0(@internationalized/date@3.5.1) + '@ark-ui/anatomy': 1.3.0(@internationalized/date@3.5.2) '@zag-js/accordion': 0.32.1 '@zag-js/avatar': 0.32.1 '@zag-js/carousel': 0.32.1 @@ -390,7 +345,7 @@ packages: '@zag-js/combobox': 0.32.1 '@zag-js/core': 0.32.1 '@zag-js/date-picker': 0.32.1 - '@zag-js/date-utils': 0.32.1(@internationalized/date@3.5.1) + '@zag-js/date-utils': 0.32.1(@internationalized/date@3.5.2) '@zag-js/dialog': 0.32.1 '@zag-js/editable': 0.32.1 '@zag-js/file-upload': 0.32.1 @@ -421,13 +376,6 @@ packages: - '@internationalized/date' dev: false - /@arthurgeron/eslint-plugin-react-usememo@2.2.3: - resolution: {integrity: sha512-YJG+8hULmhHAxztaANswpa9hWNqEOSvbZcbd6R/JQzyNlEZ49Xh97kqZGuJGZ74rrmULckEO1m3Jh5ctqrGA2A==} - dependencies: - minimatch: 9.0.3 - uuid: 9.0.1 - dev: true - /@aw-web-design/x-default-browser@1.4.126: resolution: {integrity: sha512-Xk1sIhyNC/esHGGVjL/niHLowM0csl/kFO5uawBy4IrWwy0o1G8LGt3jP6nmWGz+USxeeqbihAmp/oVZju6wug==} hasBin: true @@ -447,20 +395,20 @@ packages: engines: {node: '>=6.9.0'} dev: true - /@babel/core@7.23.7: - resolution: {integrity: sha512-+UpDgowcmqe36d4NwqvKsyPMlOLNGMsfMmQ5WGCu+siCe3t3dfe9njrzGfdN4qq+bcNUt0+Vw6haRxBOycs4dw==} + /@babel/core@7.23.9: + resolution: {integrity: sha512-5q0175NOjddqpvvzU+kDiSOAk4PfdO6FvwCWoQ6RO7rTzEe8vlo+4HVfcnAREhD4npMs0e9uZypjTwzZPCf/cw==} engines: {node: '>=6.9.0'} dependencies: '@ampproject/remapping': 2.2.1 '@babel/code-frame': 7.23.5 '@babel/generator': 7.23.6 '@babel/helper-compilation-targets': 7.23.6 - '@babel/helper-module-transforms': 7.23.3(@babel/core@7.23.7) - '@babel/helpers': 7.23.8 - '@babel/parser': 7.23.6 - '@babel/template': 7.22.15 - '@babel/traverse': 7.23.7 - '@babel/types': 7.23.6 + '@babel/helper-module-transforms': 7.23.3(@babel/core@7.23.9) + '@babel/helpers': 7.23.9 + '@babel/parser': 7.23.9 + '@babel/template': 7.23.9 + '@babel/traverse': 
7.23.9 + '@babel/types': 7.23.9 convert-source-map: 2.0.0 debug: 4.3.4 gensync: 1.0.0-beta.2 @@ -474,9 +422,9 @@ packages: resolution: {integrity: sha512-qrSfCYxYQB5owCmGLbl8XRpX1ytXlpueOb0N0UmQwA073KZxejgQTzAmJezxvpwQD9uGtK2shHdi55QT+MbjIw==} engines: {node: '>=6.9.0'} dependencies: - '@babel/types': 7.23.6 - '@jridgewell/gen-mapping': 0.3.3 - '@jridgewell/trace-mapping': 0.3.21 + '@babel/types': 7.23.9 + '@jridgewell/gen-mapping': 0.3.4 + '@jridgewell/trace-mapping': 0.3.23 jsesc: 2.5.2 dev: true @@ -484,14 +432,14 @@ packages: resolution: {integrity: sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==} engines: {node: '>=6.9.0'} dependencies: - '@babel/types': 7.23.6 + '@babel/types': 7.23.9 dev: true /@babel/helper-builder-binary-assignment-operator-visitor@7.22.15: resolution: {integrity: sha512-QkBXwGgaoC2GtGZRoma6kv7Szfv06khvhFav67ZExau2RaXzy8MpHSMO2PNoP2XtmQphJQRHFfg77Bq731Yizw==} engines: {node: '>=6.9.0'} dependencies: - '@babel/types': 7.23.6 + '@babel/types': 7.23.9 dev: true /@babel/helper-compilation-targets@7.23.6: @@ -500,62 +448,47 @@ packages: dependencies: '@babel/compat-data': 7.23.5 '@babel/helper-validator-option': 7.23.5 - browserslist: 4.22.2 + browserslist: 4.23.0 lru-cache: 5.1.1 semver: 6.3.1 dev: true - /@babel/helper-create-class-features-plugin@7.23.7(@babel/core@7.23.7): - resolution: {integrity: sha512-xCoqR/8+BoNnXOY7RVSgv6X+o7pmT5q1d+gGcRlXYkI+9B31glE4jeejhKVpA04O1AtzOt7OSQ6VYKP5FcRl9g==} + /@babel/helper-create-class-features-plugin@7.23.10(@babel/core@7.23.9): + resolution: {integrity: sha512-2XpP2XhkXzgxecPNEEK8Vz8Asj9aRxt08oKOqtiZoqV2UGZ5T+EkyP9sXQ9nwMxBIG34a7jmasVqoMop7VdPUw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-annotate-as-pure': 7.22.5 '@babel/helper-environment-visitor': 7.22.20 '@babel/helper-function-name': 7.23.0 '@babel/helper-member-expression-to-functions': 7.23.0 '@babel/helper-optimise-call-expression': 7.22.5 - '@babel/helper-replace-supers': 7.22.20(@babel/core@7.23.7) + '@babel/helper-replace-supers': 7.22.20(@babel/core@7.23.9) '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 '@babel/helper-split-export-declaration': 7.22.6 semver: 6.3.1 dev: true - /@babel/helper-create-regexp-features-plugin@7.22.15(@babel/core@7.23.7): + /@babel/helper-create-regexp-features-plugin@7.22.15(@babel/core@7.23.9): resolution: {integrity: sha512-29FkPLFjn4TPEa3RE7GpW+qbE8tlsu3jntNYNfcGsc49LphF1PQIiD+vMZ1z1xVOKt+93khA9tc2JBs3kBjA7w==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-annotate-as-pure': 7.22.5 regexpu-core: 5.3.2 semver: 6.3.1 dev: true - /@babel/helper-define-polyfill-provider@0.4.4(@babel/core@7.23.7): - resolution: {integrity: sha512-QcJMILQCu2jm5TFPGA3lCpJJTeEP+mqeXooG/NZbg/h5FTFi6V0+99ahlRsW8/kRLyb24LZVCCiclDedhLKcBA==} - peerDependencies: - '@babel/core': ^7.4.0 || ^8.0.0-0 <8.0.0 - dependencies: - '@babel/core': 7.23.7 - '@babel/helper-compilation-targets': 7.23.6 - '@babel/helper-plugin-utils': 7.22.5 - debug: 4.3.4 - lodash.debounce: 4.0.8 - resolve: 1.22.8 - transitivePeerDependencies: - - supports-color - dev: true - - /@babel/helper-define-polyfill-provider@0.5.0(@babel/core@7.23.7): + /@babel/helper-define-polyfill-provider@0.5.0(@babel/core@7.23.9): resolution: {integrity: sha512-NovQquuQLAQ5HuyjCz7WQP9MjRj7dx++yspwiyUiGl9ZyadHRSql1HZh5ogRd8W8w6YM6EQ/NTB8rgjLt5W65Q==} 
peerDependencies: '@babel/core': ^7.4.0 || ^8.0.0-0 <8.0.0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-compilation-targets': 7.23.6 '@babel/helper-plugin-utils': 7.22.5 debug: 4.3.4 @@ -574,37 +507,37 @@ packages: resolution: {integrity: sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==} engines: {node: '>=6.9.0'} dependencies: - '@babel/template': 7.22.15 - '@babel/types': 7.23.6 + '@babel/template': 7.23.9 + '@babel/types': 7.23.9 dev: true /@babel/helper-hoist-variables@7.22.5: resolution: {integrity: sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==} engines: {node: '>=6.9.0'} dependencies: - '@babel/types': 7.23.6 + '@babel/types': 7.23.9 dev: true /@babel/helper-member-expression-to-functions@7.23.0: resolution: {integrity: sha512-6gfrPwh7OuT6gZyJZvd6WbTfrqAo7vm4xCzAXOusKqq/vWdKXphTpj5klHKNmRUU6/QRGlBsyU9mAIPaWHlqJA==} engines: {node: '>=6.9.0'} dependencies: - '@babel/types': 7.23.6 + '@babel/types': 7.23.9 dev: true /@babel/helper-module-imports@7.22.15: resolution: {integrity: sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==} engines: {node: '>=6.9.0'} dependencies: - '@babel/types': 7.23.6 + '@babel/types': 7.23.9 - /@babel/helper-module-transforms@7.23.3(@babel/core@7.23.7): + /@babel/helper-module-transforms@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-7bBs4ED9OmswdfDzpz4MpWgSrV7FXlc3zIagvLFjS5H+Mk7Snr21vQ6QwrsoCGMfNC4e4LQPdoULEt4ykz0SRQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-environment-visitor': 7.22.20 '@babel/helper-module-imports': 7.22.15 '@babel/helper-simple-access': 7.22.5 @@ -616,7 +549,7 @@ packages: resolution: {integrity: sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==} engines: {node: '>=6.9.0'} dependencies: - '@babel/types': 7.23.6 + '@babel/types': 7.23.9 dev: true /@babel/helper-plugin-utils@7.22.5: @@ -624,25 +557,25 @@ packages: engines: {node: '>=6.9.0'} dev: true - /@babel/helper-remap-async-to-generator@7.22.20(@babel/core@7.23.7): + /@babel/helper-remap-async-to-generator@7.22.20(@babel/core@7.23.9): resolution: {integrity: sha512-pBGyV4uBqOns+0UvhsTO8qgl8hO89PmiDYv+/COyp1aeMcmfrfruz+/nCMFiYyFF/Knn0yfrC85ZzNFjembFTw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-annotate-as-pure': 7.22.5 '@babel/helper-environment-visitor': 7.22.20 '@babel/helper-wrap-function': 7.22.20 dev: true - /@babel/helper-replace-supers@7.22.20(@babel/core@7.23.7): + /@babel/helper-replace-supers@7.22.20(@babel/core@7.23.9): resolution: {integrity: sha512-qsW0In3dbwQUbK8kejJ4R7IHVGwHJlV6lpG6UA7a9hSa2YEiAib+N1T2kr6PEeUT+Fl7najmSOS6SmAwCHK6Tw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-environment-visitor': 7.22.20 '@babel/helper-member-expression-to-functions': 7.23.0 '@babel/helper-optimise-call-expression': 7.22.5 @@ -652,21 +585,21 @@ packages: resolution: {integrity: sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==} engines: {node: '>=6.9.0'} dependencies: - '@babel/types': 7.23.6 + '@babel/types': 7.23.9 dev: true /@babel/helper-skip-transparent-expression-wrappers@7.22.5: resolution: 
{integrity: sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==} engines: {node: '>=6.9.0'} dependencies: - '@babel/types': 7.23.6 + '@babel/types': 7.23.9 dev: true /@babel/helper-split-export-declaration@7.22.6: resolution: {integrity: sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==} engines: {node: '>=6.9.0'} dependencies: - '@babel/types': 7.23.6 + '@babel/types': 7.23.9 dev: true /@babel/helper-string-parser@7.23.4: @@ -687,17 +620,17 @@ packages: engines: {node: '>=6.9.0'} dependencies: '@babel/helper-function-name': 7.23.0 - '@babel/template': 7.22.15 - '@babel/types': 7.23.6 + '@babel/template': 7.23.9 + '@babel/types': 7.23.9 dev: true - /@babel/helpers@7.23.8: - resolution: {integrity: sha512-KDqYz4PiOWvDFrdHLPhKtCThtIcKVy6avWD2oG4GEvyQ+XDZwHD4YQd+H2vNMnq2rkdxsDkU82T+Vk8U/WXHRQ==} + /@babel/helpers@7.23.9: + resolution: {integrity: sha512-87ICKgU5t5SzOT7sBMfCOZQ2rHjRU+Pcb9BoILMYz600W6DkVRLFBPwQ18gwUVvggqXivaUakpnxWQGbpywbBQ==} engines: {node: '>=6.9.0'} dependencies: - '@babel/template': 7.22.15 - '@babel/traverse': 7.23.7 - '@babel/types': 7.23.6 + '@babel/template': 7.23.9 + '@babel/traverse': 7.23.9 + '@babel/types': 7.23.9 transitivePeerDependencies: - supports-color dev: true @@ -710,966 +643,966 @@ packages: chalk: 2.4.2 js-tokens: 4.0.0 - /@babel/parser@7.23.6: - resolution: {integrity: sha512-Z2uID7YJ7oNvAI20O9X0bblw7Qqs8Q2hFy0R9tAfnfLkp5MW0UH9eUvnDSnFwKZ0AvgS1ucqR4KzvVHgnke1VQ==} + /@babel/parser@7.23.9: + resolution: {integrity: sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA==} engines: {node: '>=6.0.0'} hasBin: true dependencies: - '@babel/types': 7.23.6 + '@babel/types': 7.23.9 dev: true - /@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@7.23.3(@babel/core@7.23.7): + /@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-iRkKcCqb7iGnq9+3G6rZ+Ciz5VywC4XNRHe57lKM+jOeYAoR0lVqdeeDRfh0tQcTfw/+vBhHn926FmQhLtlFLQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@7.23.3(@babel/core@7.23.7): + /@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-WwlxbfMNdVEpQjZmK5mhm7oSwD3dS6eU+Iwsi4Knl9wAletWem7kaRsGOG+8UEbRyqxY4SS5zvtfXwX+jMxUwQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.13.0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 - '@babel/plugin-transform-optional-chaining': 7.23.4(@babel/core@7.23.7) + '@babel/plugin-transform-optional-chaining': 7.23.4(@babel/core@7.23.9) dev: true - /@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@7.23.7(@babel/core@7.23.7): + /@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@7.23.7(@babel/core@7.23.9): resolution: {integrity: sha512-LlRT7HgaifEpQA1ZgLVOIJZZFVPWN5iReq/7/JixwBtwcoeVGDBD53ZV28rrsLYOZs1Y/EHhA8N/Z6aazHR8cw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-environment-visitor': 7.22.20 '@babel/helper-plugin-utils': 7.22.5 dev: true - 
/@babel/plugin-proposal-private-property-in-object@7.21.0-placeholder-for-preset-env.2(@babel/core@7.23.7): + /@babel/plugin-proposal-private-property-in-object@7.21.0-placeholder-for-preset-env.2(@babel/core@7.23.9): resolution: {integrity: sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 dev: true - /@babel/plugin-syntax-async-generators@7.8.4(@babel/core@7.23.7): + /@babel/plugin-syntax-async-generators@7.8.4(@babel/core@7.23.9): resolution: {integrity: sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.23.7): + /@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.23.9): resolution: {integrity: sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-class-static-block@7.14.5(@babel/core@7.23.7): + /@babel/plugin-syntax-class-static-block@7.14.5(@babel/core@7.23.9): resolution: {integrity: sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-dynamic-import@7.8.3(@babel/core@7.23.7): + /@babel/plugin-syntax-dynamic-import@7.8.3(@babel/core@7.23.9): resolution: {integrity: sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-export-namespace-from@7.8.3(@babel/core@7.23.7): + /@babel/plugin-syntax-export-namespace-from@7.8.3(@babel/core@7.23.9): resolution: {integrity: sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-flow@7.23.3(@babel/core@7.23.7): + /@babel/plugin-syntax-flow@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-YZiAIpkJAwQXBJLIQbRFayR5c+gJ35Vcz3bg954k7cd73zqjvhacJuL9RbrzPz8qPmZdgqP6EUKwy0PCNhaaPA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-import-assertions@7.23.3(@babel/core@7.23.7): + /@babel/plugin-syntax-import-assertions@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-lPgDSU+SJLK3xmFDTV2ZRQAiM7UuUjGidwBywFavObCiZc1BeAAcMtHJKUya92hPHO+at63JJPLygilZard8jw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-import-attributes@7.23.3(@babel/core@7.23.7): + /@babel/plugin-syntax-import-attributes@7.23.3(@babel/core@7.23.9): 
resolution: {integrity: sha512-pawnE0P9g10xgoP7yKr6CK63K2FMsTE+FZidZO/1PwRdzmAPVs+HS1mAURUsgaoxammTJvULUdIkEK0gOcU2tA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-import-meta@7.10.4(@babel/core@7.23.7): + /@babel/plugin-syntax-import-meta@7.10.4(@babel/core@7.23.9): resolution: {integrity: sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-json-strings@7.8.3(@babel/core@7.23.7): + /@babel/plugin-syntax-json-strings@7.8.3(@babel/core@7.23.9): resolution: {integrity: sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-jsx@7.23.3(@babel/core@7.23.7): + /@babel/plugin-syntax-jsx@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-EB2MELswq55OHUoRZLGg/zC7QWUKfNLpE57m/S2yr1uEneIgsTgrSzXP3NXEsMkVn76OlaVVnzN+ugObuYGwhg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-logical-assignment-operators@7.10.4(@babel/core@7.23.7): + /@babel/plugin-syntax-logical-assignment-operators@7.10.4(@babel/core@7.23.9): resolution: {integrity: sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-nullish-coalescing-operator@7.8.3(@babel/core@7.23.7): + /@babel/plugin-syntax-nullish-coalescing-operator@7.8.3(@babel/core@7.23.9): resolution: {integrity: sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-numeric-separator@7.10.4(@babel/core@7.23.7): + /@babel/plugin-syntax-numeric-separator@7.10.4(@babel/core@7.23.9): resolution: {integrity: sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.23.7): + /@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.23.9): resolution: {integrity: sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-optional-catch-binding@7.8.3(@babel/core@7.23.7): + /@babel/plugin-syntax-optional-catch-binding@7.8.3(@babel/core@7.23.9): resolution: {integrity: sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + 
'@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-optional-chaining@7.8.3(@babel/core@7.23.7): + /@babel/plugin-syntax-optional-chaining@7.8.3(@babel/core@7.23.9): resolution: {integrity: sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-private-property-in-object@7.14.5(@babel/core@7.23.7): + /@babel/plugin-syntax-private-property-in-object@7.14.5(@babel/core@7.23.9): resolution: {integrity: sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-top-level-await@7.14.5(@babel/core@7.23.7): + /@babel/plugin-syntax-top-level-await@7.14.5(@babel/core@7.23.9): resolution: {integrity: sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-typescript@7.23.3(@babel/core@7.23.7): + /@babel/plugin-syntax-typescript@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-9EiNjVJOMwCO+43TqoTrgQ8jMwcAd0sWyXi9RPfIsLTj4R2MADDDQXELhffaUx/uJv2AYcxBgPwH6j4TIA4ytQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-syntax-unicode-sets-regex@7.18.6(@babel/core@7.23.7): + /@babel/plugin-syntax-unicode-sets-regex@7.18.6(@babel/core@7.23.9): resolution: {integrity: sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 dependencies: - '@babel/core': 7.23.7 - '@babel/helper-create-regexp-features-plugin': 7.22.15(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/helper-create-regexp-features-plugin': 7.22.15(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-arrow-functions@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-arrow-functions@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-NzQcQrzaQPkaEwoTm4Mhyl8jI1huEL/WWIEvudjTCMJ9aBZNpsJbMASx7EQECtQQPS/DcnFpo0FIh3LvEO9cxQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-async-generator-functions@7.23.7(@babel/core@7.23.7): - resolution: {integrity: sha512-PdxEpL71bJp1byMG0va5gwQcXHxuEYC/BgI/e88mGTtohbZN28O5Yit0Plkkm/dBzCF/BxmbNcses1RH1T+urA==} + /@babel/plugin-transform-async-generator-functions@7.23.9(@babel/core@7.23.9): + resolution: {integrity: sha512-8Q3veQEDGe14dTYuwagbRtwxQDnytyg1JFu4/HwEMETeofocrB0U0ejBJIXoeG/t2oXZ8kzCyI0ZZfbT80VFNQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-environment-visitor': 7.22.20 '@babel/helper-plugin-utils': 7.22.5 - '@babel/helper-remap-async-to-generator': 7.22.20(@babel/core@7.23.7) - 
'@babel/plugin-syntax-async-generators': 7.8.4(@babel/core@7.23.7) + '@babel/helper-remap-async-to-generator': 7.22.20(@babel/core@7.23.9) + '@babel/plugin-syntax-async-generators': 7.8.4(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-async-to-generator@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-async-to-generator@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-A7LFsKi4U4fomjqXJlZg/u0ft/n8/7n7lpffUP/ZULx/DtV9SGlNKZolHH6PE8Xl1ngCc0M11OaeZptXVkfKSw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-module-imports': 7.22.15 '@babel/helper-plugin-utils': 7.22.5 - '@babel/helper-remap-async-to-generator': 7.22.20(@babel/core@7.23.7) + '@babel/helper-remap-async-to-generator': 7.22.20(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-block-scoped-functions@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-block-scoped-functions@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-vI+0sIaPIO6CNuM9Kk5VmXcMVRiOpDh7w2zZt9GXzmE/9KD70CUEVhvPR/etAeNK/FAEkhxQtXOzVF3EuRL41A==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-block-scoping@7.23.4(@babel/core@7.23.7): + /@babel/plugin-transform-block-scoping@7.23.4(@babel/core@7.23.9): resolution: {integrity: sha512-0QqbP6B6HOh7/8iNR4CQU2Th/bbRtBp4KS9vcaZd1fZ0wSh5Fyssg0UCIHwxh+ka+pNDREbVLQnHCMHKZfPwfw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-class-properties@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-class-properties@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-uM+AN8yCIjDPccsKGlw271xjJtGii+xQIF/uMPS8H15L12jZTsLfF4o5vNO7d/oUguOyfdikHGc/yi9ge4SGIg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 - '@babel/helper-create-class-features-plugin': 7.23.7(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/helper-create-class-features-plugin': 7.23.10(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-class-static-block@7.23.4(@babel/core@7.23.7): + /@babel/plugin-transform-class-static-block@7.23.4(@babel/core@7.23.9): resolution: {integrity: sha512-nsWu/1M+ggti1SOALj3hfx5FXzAY06fwPJsUZD4/A5e1bWi46VUIWtD+kOX6/IdhXGsXBWllLFDSnqSCdUNydQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.12.0 dependencies: - '@babel/core': 7.23.7 - '@babel/helper-create-class-features-plugin': 7.23.7(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/helper-create-class-features-plugin': 7.23.10(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 - '@babel/plugin-syntax-class-static-block': 7.14.5(@babel/core@7.23.7) + '@babel/plugin-syntax-class-static-block': 7.14.5(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-classes@7.23.8(@babel/core@7.23.7): + /@babel/plugin-transform-classes@7.23.8(@babel/core@7.23.9): resolution: {integrity: sha512-yAYslGsY1bX6Knmg46RjiCiNSwJKv2IUC8qOdYKqMMr0491SXFhcHqOdRDeCRohOOIzwN/90C6mQ9qAKgrP7dg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-annotate-as-pure': 7.22.5 '@babel/helper-compilation-targets': 7.23.6 
'@babel/helper-environment-visitor': 7.22.20 '@babel/helper-function-name': 7.23.0 '@babel/helper-plugin-utils': 7.22.5 - '@babel/helper-replace-supers': 7.22.20(@babel/core@7.23.7) + '@babel/helper-replace-supers': 7.22.20(@babel/core@7.23.9) '@babel/helper-split-export-declaration': 7.22.6 globals: 11.12.0 dev: true - /@babel/plugin-transform-computed-properties@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-computed-properties@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-dTj83UVTLw/+nbiHqQSFdwO9CbTtwq1DsDqm3CUEtDrZNET5rT5E6bIdTlOftDTDLMYxvxHNEYO4B9SLl8SLZw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 - '@babel/template': 7.22.15 + '@babel/template': 7.23.9 dev: true - /@babel/plugin-transform-destructuring@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-destructuring@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-n225npDqjDIr967cMScVKHXJs7rout1q+tt50inyBCPkyZ8KxeI6d+GIbSBTT/w/9WdlWDOej3V9HE5Lgk57gw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-dotall-regex@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-dotall-regex@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-vgnFYDHAKzFaTVp+mneDsIEbnJ2Np/9ng9iviHw3P/KVcgONxpNULEW/51Z/BaFojG2GI2GwwXck5uV1+1NOYQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 - '@babel/helper-create-regexp-features-plugin': 7.22.15(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/helper-create-regexp-features-plugin': 7.22.15(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-duplicate-keys@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-duplicate-keys@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-RrqQ+BQmU3Oyav3J+7/myfvRCq7Tbz+kKLLshUmMwNlDHExbGL7ARhajvoBJEvc+fCguPPu887N+3RRXBVKZUA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-dynamic-import@7.23.4(@babel/core@7.23.7): + /@babel/plugin-transform-dynamic-import@7.23.4(@babel/core@7.23.9): resolution: {integrity: sha512-V6jIbLhdJK86MaLh4Jpghi8ho5fGzt3imHOBu/x0jlBaPYqDoWz4RDXjmMOfnh+JWNaQleEAByZLV0QzBT4YQQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 - '@babel/plugin-syntax-dynamic-import': 7.8.3(@babel/core@7.23.7) + '@babel/plugin-syntax-dynamic-import': 7.8.3(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-exponentiation-operator@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-exponentiation-operator@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-5fhCsl1odX96u7ILKHBj4/Y8vipoqwsJMh4csSA8qFfxrZDEA4Ssku2DyNvMJSmZNOEBT750LfFPbtrnTP90BQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-builder-binary-assignment-operator-visitor': 7.22.15 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-export-namespace-from@7.23.4(@babel/core@7.23.7): + /@babel/plugin-transform-export-namespace-from@7.23.4(@babel/core@7.23.9): resolution: 
{integrity: sha512-GzuSBcKkx62dGzZI1WVgTWvkkz84FZO5TC5T8dl/Tht/rAla6Dg/Mz9Yhypg+ezVACf/rgDuQt3kbWEv7LdUDQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 - '@babel/plugin-syntax-export-namespace-from': 7.8.3(@babel/core@7.23.7) + '@babel/plugin-syntax-export-namespace-from': 7.8.3(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-flow-strip-types@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-flow-strip-types@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-26/pQTf9nQSNVJCrLB1IkHUKyPxR+lMrH2QDPG89+Znu9rAMbtrybdbWeE9bb7gzjmE5iXHEY+e0HUwM6Co93Q==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 - '@babel/plugin-syntax-flow': 7.23.3(@babel/core@7.23.7) + '@babel/plugin-syntax-flow': 7.23.3(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-for-of@7.23.6(@babel/core@7.23.7): + /@babel/plugin-transform-for-of@7.23.6(@babel/core@7.23.9): resolution: {integrity: sha512-aYH4ytZ0qSuBbpfhuofbg/e96oQ7U2w1Aw/UQmKT+1l39uEhUPoFS3fHevDc1G0OvewyDudfMKY1OulczHzWIw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 dev: true - /@babel/plugin-transform-function-name@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-function-name@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-I1QXp1LxIvt8yLaib49dRW5Okt7Q4oaxao6tFVKS/anCdEOMtYwWVKoiOA1p34GOWIZjUK0E+zCp7+l1pfQyiw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-compilation-targets': 7.23.6 '@babel/helper-function-name': 7.23.0 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-json-strings@7.23.4(@babel/core@7.23.7): + /@babel/plugin-transform-json-strings@7.23.4(@babel/core@7.23.9): resolution: {integrity: sha512-81nTOqM1dMwZ/aRXQ59zVubN9wHGqk6UtqRK+/q+ciXmRy8fSolhGVvG09HHRGo4l6fr/c4ZhXUQH0uFW7PZbg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 - '@babel/plugin-syntax-json-strings': 7.8.3(@babel/core@7.23.7) + '@babel/plugin-syntax-json-strings': 7.8.3(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-literals@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-literals@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-wZ0PIXRxnwZvl9AYpqNUxpZ5BiTGrYt7kueGQ+N5FiQ7RCOD4cm8iShd6S6ggfVIWaJf2EMk8eRzAh52RfP4rQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-logical-assignment-operators@7.23.4(@babel/core@7.23.7): + /@babel/plugin-transform-logical-assignment-operators@7.23.4(@babel/core@7.23.9): resolution: {integrity: sha512-Mc/ALf1rmZTP4JKKEhUwiORU+vcfarFVLfcFiolKUo6sewoxSEgl36ak5t+4WamRsNr6nzjZXQjM35WsU+9vbg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 - '@babel/plugin-syntax-logical-assignment-operators': 7.10.4(@babel/core@7.23.7) + 
'@babel/plugin-syntax-logical-assignment-operators': 7.10.4(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-member-expression-literals@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-member-expression-literals@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-sC3LdDBDi5x96LA+Ytekz2ZPk8i/Ck+DEuDbRAll5rknJ5XRTSaPKEYwomLcs1AA8wg9b3KjIQRsnApj+q51Ag==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-modules-amd@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-modules-amd@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-vJYQGxeKM4t8hYCKVBlZX/gtIY2I7mRGFNcm85sgXGMTBcoV3QdVtdpbcWEbzbfUIUZKwvgFT82mRvaQIebZzw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 - '@babel/helper-module-transforms': 7.23.3(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/helper-module-transforms': 7.23.3(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-modules-commonjs@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-modules-commonjs@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-aVS0F65LKsdNOtcz6FRCpE4OgsP2OFnW46qNxNIX9h3wuzaNcSQsJysuMwqSibC98HPrf2vCgtxKNwS0DAlgcA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 - '@babel/helper-module-transforms': 7.23.3(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/helper-module-transforms': 7.23.3(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 '@babel/helper-simple-access': 7.22.5 dev: true - /@babel/plugin-transform-modules-systemjs@7.23.3(@babel/core@7.23.7): - resolution: {integrity: sha512-ZxyKGTkF9xT9YJuKQRo19ewf3pXpopuYQd8cDXqNzc3mUNbOME0RKMoZxviQk74hwzfQsEe66dE92MaZbdHKNQ==} + /@babel/plugin-transform-modules-systemjs@7.23.9(@babel/core@7.23.9): + resolution: {integrity: sha512-KDlPRM6sLo4o1FkiSlXoAa8edLXFsKKIda779fbLrvmeuc3itnjCtaO6RrtoaANsIJANj+Vk1zqbZIMhkCAHVw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-hoist-variables': 7.22.5 - '@babel/helper-module-transforms': 7.23.3(@babel/core@7.23.7) + '@babel/helper-module-transforms': 7.23.3(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 '@babel/helper-validator-identifier': 7.22.20 dev: true - /@babel/plugin-transform-modules-umd@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-modules-umd@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-zHsy9iXX2nIsCBFPud3jKn1IRPWg3Ing1qOZgeKV39m1ZgIdpJqvlWVeiHBZC6ITRG0MfskhYe9cLgntfSFPIg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 - '@babel/helper-module-transforms': 7.23.3(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/helper-module-transforms': 7.23.3(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-named-capturing-groups-regex@7.22.5(@babel/core@7.23.7): + /@babel/plugin-transform-named-capturing-groups-regex@7.22.5(@babel/core@7.23.9): resolution: {integrity: sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 dependencies: - '@babel/core': 7.23.7 - '@babel/helper-create-regexp-features-plugin': 7.22.15(@babel/core@7.23.7) + '@babel/core': 
7.23.9 + '@babel/helper-create-regexp-features-plugin': 7.22.15(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-new-target@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-new-target@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-YJ3xKqtJMAT5/TIZnpAR3I+K+WaDowYbN3xyxI8zxx/Gsypwf9B9h0VB+1Nh6ACAAPRS5NSRje0uVv5i79HYGQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-nullish-coalescing-operator@7.23.4(@babel/core@7.23.7): + /@babel/plugin-transform-nullish-coalescing-operator@7.23.4(@babel/core@7.23.9): resolution: {integrity: sha512-jHE9EVVqHKAQx+VePv5LLGHjmHSJR76vawFPTdlxR/LVJPfOEGxREQwQfjuZEOPTwG92X3LINSh3M40Rv4zpVA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 - '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.23.7) + '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-numeric-separator@7.23.4(@babel/core@7.23.7): + /@babel/plugin-transform-numeric-separator@7.23.4(@babel/core@7.23.9): resolution: {integrity: sha512-mps6auzgwjRrwKEZA05cOwuDc9FAzoyFS4ZsG/8F43bTLf/TgkJg7QXOrPO1JO599iA3qgK9MXdMGOEC8O1h6Q==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 - '@babel/plugin-syntax-numeric-separator': 7.10.4(@babel/core@7.23.7) + '@babel/plugin-syntax-numeric-separator': 7.10.4(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-object-rest-spread@7.23.4(@babel/core@7.23.7): + /@babel/plugin-transform-object-rest-spread@7.23.4(@babel/core@7.23.9): resolution: {integrity: sha512-9x9K1YyeQVw0iOXJlIzwm8ltobIIv7j2iLyP2jIhEbqPRQ7ScNgwQufU2I0Gq11VjyG4gI4yMXt2VFags+1N3g==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: '@babel/compat-data': 7.23.5 - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-compilation-targets': 7.23.6 '@babel/helper-plugin-utils': 7.22.5 - '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.23.7) - '@babel/plugin-transform-parameters': 7.23.3(@babel/core@7.23.7) + '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.23.9) + '@babel/plugin-transform-parameters': 7.23.3(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-object-super@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-object-super@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-BwQ8q0x2JG+3lxCVFohg+KbQM7plfpBwThdW9A6TMtWwLsbDA01Ek2Zb/AgDN39BiZsExm4qrXxjk+P1/fzGrA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 - '@babel/helper-replace-supers': 7.22.20(@babel/core@7.23.7) + '@babel/helper-replace-supers': 7.22.20(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-optional-catch-binding@7.23.4(@babel/core@7.23.7): + /@babel/plugin-transform-optional-catch-binding@7.23.4(@babel/core@7.23.9): resolution: {integrity: sha512-XIq8t0rJPHf6Wvmbn9nFxU6ao4c7WhghTR5WyV8SrJfUFzyxhCm4nhC+iAp3HFhbAKLfYpgzhJ6t4XCtVwqO5A==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 
'@babel/helper-plugin-utils': 7.22.5 - '@babel/plugin-syntax-optional-catch-binding': 7.8.3(@babel/core@7.23.7) + '@babel/plugin-syntax-optional-catch-binding': 7.8.3(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-optional-chaining@7.23.4(@babel/core@7.23.7): + /@babel/plugin-transform-optional-chaining@7.23.4(@babel/core@7.23.9): resolution: {integrity: sha512-ZU8y5zWOfjM5vZ+asjgAPwDaBjJzgufjES89Rs4Lpq63O300R/kOz30WCLo6BxxX6QVEilwSlpClnG5cZaikTA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 - '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.23.7) + '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-parameters@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-parameters@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-09lMt6UsUb3/34BbECKVbVwrT9bO6lILWln237z7sLaWnMsTi7Yc9fhX5DLpkJzAGfaReXI22wP41SZmnAA3Vw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-private-methods@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-private-methods@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-UzqRcRtWsDMTLrRWFvUBDwmw06tCQH9Rl1uAjfh6ijMSmGYQ+fpdB+cnqRC8EMh5tuuxSv0/TejGL+7vyj+50g==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 - '@babel/helper-create-class-features-plugin': 7.23.7(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/helper-create-class-features-plugin': 7.23.10(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-private-property-in-object@7.23.4(@babel/core@7.23.7): + /@babel/plugin-transform-private-property-in-object@7.23.4(@babel/core@7.23.9): resolution: {integrity: sha512-9G3K1YqTq3F4Vt88Djx1UZ79PDyj+yKRnUy7cZGSMe+a7jkwD259uKKuUzQlPkGam7R+8RJwh5z4xO27fA1o2A==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-annotate-as-pure': 7.22.5 - '@babel/helper-create-class-features-plugin': 7.23.7(@babel/core@7.23.7) + '@babel/helper-create-class-features-plugin': 7.23.10(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 - '@babel/plugin-syntax-private-property-in-object': 7.14.5(@babel/core@7.23.7) + '@babel/plugin-syntax-private-property-in-object': 7.14.5(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-property-literals@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-property-literals@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-jR3Jn3y7cZp4oEWPFAlRsSWjxKe4PZILGBSd4nis1TsC5qeSpb+nrtihJuDhNI7QHiVbUaiXa0X2RZY3/TI6Nw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-react-jsx-self@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-react-jsx-self@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-qXRvbeKDSfwnlJnanVRp0SfuWE5DQhwQr5xtLBzp56Wabyo+4CMosF6Kfp+eOD/4FYpql64XVJ2W0pVLlJZxOQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 
dev: true - /@babel/plugin-transform-react-jsx-source@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-react-jsx-source@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-91RS0MDnAWDNvGC6Wio5XYkyWI39FMFO+JK9+4AlgaTH+yWwVTsw7/sn6LK0lH7c5F+TFkpv/3LfCJ1Ydwof/g==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-regenerator@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-regenerator@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-KP+75h0KghBMcVpuKisx3XTu9Ncut8Q8TuvGO4IhY+9D5DFEckQefOuIsB/gQ2tG71lCke4NMrtIPS8pOj18BQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 regenerator-transform: 0.15.2 dev: true - /@babel/plugin-transform-reserved-words@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-reserved-words@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-QnNTazY54YqgGxwIexMZva9gqbPa15t/x9VS+0fsEFWplwVpXYZivtgl43Z1vMpc1bdPP2PP8siFeVcnFvA3Cg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-shorthand-properties@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-shorthand-properties@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-ED2fgqZLmexWiN+YNFX26fx4gh5qHDhn1O2gvEhreLW2iI63Sqm4llRLCXALKrCnbN4Jy0VcMQZl/SAzqug/jg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-spread@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-spread@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-VvfVYlrlBVu+77xVTOAoxQ6mZbnIq5FM0aGBSFEcIh03qHf+zNqA4DC/3XMUozTg7bZV3e3mZQ0i13VB6v5yUg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 '@babel/helper-skip-transparent-expression-wrappers': 7.22.5 dev: true - /@babel/plugin-transform-sticky-regex@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-sticky-regex@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-HZOyN9g+rtvnOU3Yh7kSxXrKbzgrm5X4GncPY1QOquu7epga5MxKHVpYu2hvQnry/H+JjckSYRb93iNfsioAGg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-template-literals@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-template-literals@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-Flok06AYNp7GV2oJPZZcP9vZdszev6vPBkHLwxwSpaIqx75wn6mUd3UFWsSsA0l8nXAKkyCmL/sR02m8RYGeHg==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-typeof-symbol@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-typeof-symbol@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-4t15ViVnaFdrPC74be1gXBSMzXk3B4Us9lP7uLRQHTFpV5Dvt33pn+2MyyNxmN3VTTm3oTrZVMUmuw3oBnQ2oQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 
7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-typescript@7.23.6(@babel/core@7.23.7): + /@babel/plugin-transform-typescript@7.23.6(@babel/core@7.23.9): resolution: {integrity: sha512-6cBG5mBvUu4VUD04OHKnYzbuHNP8huDsD3EDqqpIpsswTDoqHCjLoHb6+QgsV1WsT2nipRqCPgxD3LXnEO7XfA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-annotate-as-pure': 7.22.5 - '@babel/helper-create-class-features-plugin': 7.23.7(@babel/core@7.23.7) + '@babel/helper-create-class-features-plugin': 7.23.10(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 - '@babel/plugin-syntax-typescript': 7.23.3(@babel/core@7.23.7) + '@babel/plugin-syntax-typescript': 7.23.3(@babel/core@7.23.9) dev: true - /@babel/plugin-transform-unicode-escapes@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-unicode-escapes@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-OMCUx/bU6ChE3r4+ZdylEqAjaQgHAgipgW8nsCfu5pGqDcFytVd91AwRvUJSBZDz0exPGgnjoqhgRYLRjFZc9Q==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-unicode-property-regex@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-unicode-property-regex@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-KcLIm+pDZkWZQAFJ9pdfmh89EwVfmNovFBcXko8szpBeF8z68kWIPeKlmSOkT9BXJxs2C0uk+5LxoxIv62MROA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 - '@babel/helper-create-regexp-features-plugin': 7.22.15(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/helper-create-regexp-features-plugin': 7.22.15(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-unicode-regex@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-unicode-regex@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-wMHpNA4x2cIA32b/ci3AfwNgheiva2W0WUKWTK7vBHBhDKfPsc5cFGNWm69WBqpwd86u1qwZ9PWevKqm1A3yAw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 - '@babel/helper-create-regexp-features-plugin': 7.22.15(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/helper-create-regexp-features-plugin': 7.22.15(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/plugin-transform-unicode-sets-regex@7.23.3(@babel/core@7.23.7): + /@babel/plugin-transform-unicode-sets-regex@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-W7lliA/v9bNR83Qc3q1ip9CQMZ09CcHDbHfbLRDNuAhn1Mvkr1ZNF7hPmztMQvtTGVLJ9m8IZqWsTkXOml8dbw==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0 dependencies: - '@babel/core': 7.23.7 - '@babel/helper-create-regexp-features-plugin': 7.22.15(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/helper-create-regexp-features-plugin': 7.22.15(@babel/core@7.23.9) '@babel/helper-plugin-utils': 7.22.5 dev: true - /@babel/preset-env@7.23.8(@babel/core@7.23.7): - resolution: {integrity: sha512-lFlpmkApLkEP6woIKprO6DO60RImpatTQKtz4sUcDjVcK8M8mQ4sZsuxaTMNOZf0sqAq/ReYW1ZBHnOQwKpLWA==} + /@babel/preset-env@7.23.9(@babel/core@7.23.9): + resolution: {integrity: sha512-3kBGTNBBk9DQiPoXYS0g0BYlwTQYUTifqgKTjxUwEUkduRT2QOa0FPGBJ+NROQhGyYO5BuTJwGvBnqKDykac6A==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: '@babel/compat-data': 7.23.5 - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 
'@babel/helper-compilation-targets': 7.23.6 '@babel/helper-plugin-utils': 7.22.5 '@babel/helper-validator-option': 7.23.5 - '@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly': 7.23.7(@babel/core@7.23.7) - '@babel/plugin-proposal-private-property-in-object': 7.21.0-placeholder-for-preset-env.2(@babel/core@7.23.7) - '@babel/plugin-syntax-async-generators': 7.8.4(@babel/core@7.23.7) - '@babel/plugin-syntax-class-properties': 7.12.13(@babel/core@7.23.7) - '@babel/plugin-syntax-class-static-block': 7.14.5(@babel/core@7.23.7) - '@babel/plugin-syntax-dynamic-import': 7.8.3(@babel/core@7.23.7) - '@babel/plugin-syntax-export-namespace-from': 7.8.3(@babel/core@7.23.7) - '@babel/plugin-syntax-import-assertions': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-syntax-import-attributes': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-syntax-import-meta': 7.10.4(@babel/core@7.23.7) - '@babel/plugin-syntax-json-strings': 7.8.3(@babel/core@7.23.7) - '@babel/plugin-syntax-logical-assignment-operators': 7.10.4(@babel/core@7.23.7) - '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.23.7) - '@babel/plugin-syntax-numeric-separator': 7.10.4(@babel/core@7.23.7) - '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.23.7) - '@babel/plugin-syntax-optional-catch-binding': 7.8.3(@babel/core@7.23.7) - '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.23.7) - '@babel/plugin-syntax-private-property-in-object': 7.14.5(@babel/core@7.23.7) - '@babel/plugin-syntax-top-level-await': 7.14.5(@babel/core@7.23.7) - '@babel/plugin-syntax-unicode-sets-regex': 7.18.6(@babel/core@7.23.7) - '@babel/plugin-transform-arrow-functions': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-async-generator-functions': 7.23.7(@babel/core@7.23.7) - '@babel/plugin-transform-async-to-generator': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-block-scoped-functions': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-block-scoping': 7.23.4(@babel/core@7.23.7) - '@babel/plugin-transform-class-properties': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-class-static-block': 7.23.4(@babel/core@7.23.7) - '@babel/plugin-transform-classes': 7.23.8(@babel/core@7.23.7) - '@babel/plugin-transform-computed-properties': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-destructuring': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-dotall-regex': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-duplicate-keys': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-dynamic-import': 7.23.4(@babel/core@7.23.7) - '@babel/plugin-transform-exponentiation-operator': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-export-namespace-from': 7.23.4(@babel/core@7.23.7) - '@babel/plugin-transform-for-of': 7.23.6(@babel/core@7.23.7) - '@babel/plugin-transform-function-name': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-json-strings': 7.23.4(@babel/core@7.23.7) - '@babel/plugin-transform-literals': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-logical-assignment-operators': 7.23.4(@babel/core@7.23.7) - '@babel/plugin-transform-member-expression-literals': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-modules-amd': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-modules-commonjs': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-modules-systemjs': 
7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-modules-umd': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-named-capturing-groups-regex': 7.22.5(@babel/core@7.23.7) - '@babel/plugin-transform-new-target': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-nullish-coalescing-operator': 7.23.4(@babel/core@7.23.7) - '@babel/plugin-transform-numeric-separator': 7.23.4(@babel/core@7.23.7) - '@babel/plugin-transform-object-rest-spread': 7.23.4(@babel/core@7.23.7) - '@babel/plugin-transform-object-super': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-optional-catch-binding': 7.23.4(@babel/core@7.23.7) - '@babel/plugin-transform-optional-chaining': 7.23.4(@babel/core@7.23.7) - '@babel/plugin-transform-parameters': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-private-methods': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-private-property-in-object': 7.23.4(@babel/core@7.23.7) - '@babel/plugin-transform-property-literals': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-regenerator': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-reserved-words': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-shorthand-properties': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-spread': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-sticky-regex': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-template-literals': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-typeof-symbol': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-unicode-escapes': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-unicode-property-regex': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-unicode-regex': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-unicode-sets-regex': 7.23.3(@babel/core@7.23.7) - '@babel/preset-modules': 0.1.6-no-external-plugins(@babel/core@7.23.7) - babel-plugin-polyfill-corejs2: 0.4.8(@babel/core@7.23.7) - babel-plugin-polyfill-corejs3: 0.8.7(@babel/core@7.23.7) - babel-plugin-polyfill-regenerator: 0.5.5(@babel/core@7.23.7) - core-js-compat: 3.35.0 + '@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly': 7.23.7(@babel/core@7.23.9) + '@babel/plugin-proposal-private-property-in-object': 7.21.0-placeholder-for-preset-env.2(@babel/core@7.23.9) + '@babel/plugin-syntax-async-generators': 7.8.4(@babel/core@7.23.9) + '@babel/plugin-syntax-class-properties': 7.12.13(@babel/core@7.23.9) + '@babel/plugin-syntax-class-static-block': 7.14.5(@babel/core@7.23.9) + '@babel/plugin-syntax-dynamic-import': 7.8.3(@babel/core@7.23.9) + '@babel/plugin-syntax-export-namespace-from': 7.8.3(@babel/core@7.23.9) + '@babel/plugin-syntax-import-assertions': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-syntax-import-attributes': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-syntax-import-meta': 7.10.4(@babel/core@7.23.9) + '@babel/plugin-syntax-json-strings': 7.8.3(@babel/core@7.23.9) + '@babel/plugin-syntax-logical-assignment-operators': 7.10.4(@babel/core@7.23.9) + '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.23.9) + '@babel/plugin-syntax-numeric-separator': 7.10.4(@babel/core@7.23.9) + '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.23.9) + '@babel/plugin-syntax-optional-catch-binding': 7.8.3(@babel/core@7.23.9) + '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.23.9) + 
'@babel/plugin-syntax-private-property-in-object': 7.14.5(@babel/core@7.23.9) + '@babel/plugin-syntax-top-level-await': 7.14.5(@babel/core@7.23.9) + '@babel/plugin-syntax-unicode-sets-regex': 7.18.6(@babel/core@7.23.9) + '@babel/plugin-transform-arrow-functions': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-async-generator-functions': 7.23.9(@babel/core@7.23.9) + '@babel/plugin-transform-async-to-generator': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-block-scoped-functions': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-block-scoping': 7.23.4(@babel/core@7.23.9) + '@babel/plugin-transform-class-properties': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-class-static-block': 7.23.4(@babel/core@7.23.9) + '@babel/plugin-transform-classes': 7.23.8(@babel/core@7.23.9) + '@babel/plugin-transform-computed-properties': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-destructuring': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-dotall-regex': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-duplicate-keys': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-dynamic-import': 7.23.4(@babel/core@7.23.9) + '@babel/plugin-transform-exponentiation-operator': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-export-namespace-from': 7.23.4(@babel/core@7.23.9) + '@babel/plugin-transform-for-of': 7.23.6(@babel/core@7.23.9) + '@babel/plugin-transform-function-name': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-json-strings': 7.23.4(@babel/core@7.23.9) + '@babel/plugin-transform-literals': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-logical-assignment-operators': 7.23.4(@babel/core@7.23.9) + '@babel/plugin-transform-member-expression-literals': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-modules-amd': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-modules-commonjs': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-modules-systemjs': 7.23.9(@babel/core@7.23.9) + '@babel/plugin-transform-modules-umd': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-named-capturing-groups-regex': 7.22.5(@babel/core@7.23.9) + '@babel/plugin-transform-new-target': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-nullish-coalescing-operator': 7.23.4(@babel/core@7.23.9) + '@babel/plugin-transform-numeric-separator': 7.23.4(@babel/core@7.23.9) + '@babel/plugin-transform-object-rest-spread': 7.23.4(@babel/core@7.23.9) + '@babel/plugin-transform-object-super': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-optional-catch-binding': 7.23.4(@babel/core@7.23.9) + '@babel/plugin-transform-optional-chaining': 7.23.4(@babel/core@7.23.9) + '@babel/plugin-transform-parameters': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-private-methods': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-private-property-in-object': 7.23.4(@babel/core@7.23.9) + '@babel/plugin-transform-property-literals': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-regenerator': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-reserved-words': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-shorthand-properties': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-spread': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-sticky-regex': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-template-literals': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-typeof-symbol': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-unicode-escapes': 7.23.3(@babel/core@7.23.9) + 
'@babel/plugin-transform-unicode-property-regex': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-unicode-regex': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-unicode-sets-regex': 7.23.3(@babel/core@7.23.9) + '@babel/preset-modules': 0.1.6-no-external-plugins(@babel/core@7.23.9) + babel-plugin-polyfill-corejs2: 0.4.8(@babel/core@7.23.9) + babel-plugin-polyfill-corejs3: 0.9.0(@babel/core@7.23.9) + babel-plugin-polyfill-regenerator: 0.5.5(@babel/core@7.23.9) + core-js-compat: 3.36.0 semver: 6.3.1 transitivePeerDependencies: - supports-color dev: true - /@babel/preset-flow@7.23.3(@babel/core@7.23.7): + /@babel/preset-flow@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-7yn6hl8RIv+KNk6iIrGZ+D06VhVY35wLVf23Cz/mMu1zOr7u4MMP4j0nZ9tLf8+4ZFpnib8cFYgB/oYg9hfswA==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 '@babel/helper-validator-option': 7.23.5 - '@babel/plugin-transform-flow-strip-types': 7.23.3(@babel/core@7.23.7) + '@babel/plugin-transform-flow-strip-types': 7.23.3(@babel/core@7.23.9) dev: true - /@babel/preset-modules@0.1.6-no-external-plugins(@babel/core@7.23.7): + /@babel/preset-modules@0.1.6-no-external-plugins(@babel/core@7.23.9): resolution: {integrity: sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==} peerDependencies: '@babel/core': ^7.0.0-0 || ^8.0.0-0 <8.0.0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 - '@babel/types': 7.23.6 + '@babel/types': 7.23.9 esutils: 2.0.3 dev: true - /@babel/preset-typescript@7.23.3(@babel/core@7.23.7): + /@babel/preset-typescript@7.23.3(@babel/core@7.23.9): resolution: {integrity: sha512-17oIGVlqz6CchO9RFYn5U6ZpWRZIngayYCtrPRSgANSwC2V1Jb+iP74nVxzzXJte8b8BYxrL1yY96xfhTBrNNQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@babel/helper-plugin-utils': 7.22.5 '@babel/helper-validator-option': 7.23.5 - '@babel/plugin-syntax-jsx': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-modules-commonjs': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-typescript': 7.23.6(@babel/core@7.23.7) + '@babel/plugin-syntax-jsx': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-modules-commonjs': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-typescript': 7.23.6(@babel/core@7.23.9) dev: true - /@babel/register@7.23.7(@babel/core@7.23.7): + /@babel/register@7.23.7(@babel/core@7.23.9): resolution: {integrity: sha512-EjJeB6+kvpk+Y5DAkEAmbOBEFkh9OASx0huoEkqYTFxAZHzOAX2Oh5uwAUuL2rUddqfM0SA+KPXV2TbzoZ2kvQ==} engines: {node: '>=6.9.0'} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 clone-deep: 4.0.1 find-cache-dir: 2.1.0 make-dir: 2.1.0 @@ -1681,36 +1614,23 @@ packages: resolution: {integrity: sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==} dev: true - /@babel/runtime@7.23.6: - resolution: {integrity: sha512-zHd0eUrf5GZoOWVCXp6koAKQTfZV07eit6bGPmJgnZdnSAvvZee6zniW2XMF7Cmc4ISOOnPy3QaSiIJGJkVEDQ==} + /@babel/runtime@7.23.9: + resolution: {integrity: sha512-0CX6F+BI2s9dkUqr08KFrAIZgNFj75rdBU/DjCyYLIaV/quFjkk6T+EJ2LkZHyZTbEV4L5p97mNkUsHl2wLFAw==} engines: {node: '>=6.9.0'} dependencies: regenerator-runtime: 0.14.1 - /@babel/runtime@7.23.7: - resolution: {integrity: 
sha512-w06OXVOFso7LcbzMiDGt+3X7Rh7Ho8MmgPoWU3rarH+8upf+wSU/grlGbWzQyr3DkdN6ZeuMFjpdwW0Q+HxobA==} - engines: {node: '>=6.9.0'} - dependencies: - regenerator-runtime: 0.14.1 - dev: false - - /@babel/runtime@7.23.8: - resolution: {integrity: sha512-Y7KbAP984rn1VGMbGqKmBLio9V7y5Je9GvU4rQPCPinCyNfUcToxIXl06d59URp/F3LwinvODxab5N/G6qggkw==} - engines: {node: '>=6.9.0'} - dependencies: - regenerator-runtime: 0.14.1 - - /@babel/template@7.22.15: - resolution: {integrity: sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==} + /@babel/template@7.23.9: + resolution: {integrity: sha512-+xrD2BWLpvHKNmX2QbpdpsBaWnRxahMwJjO+KZk2JOElj5nSmKezyS1B4u+QbHMTX69t4ukm6hh9lsYQ7GHCKA==} engines: {node: '>=6.9.0'} dependencies: '@babel/code-frame': 7.23.5 - '@babel/parser': 7.23.6 - '@babel/types': 7.23.6 + '@babel/parser': 7.23.9 + '@babel/types': 7.23.9 dev: true - /@babel/traverse@7.23.7: - resolution: {integrity: sha512-tY3mM8rH9jM0YHFGyfC0/xf+SB5eKUu7HPj7/k3fpi9dAlsMc5YbQvDi0Sh2QTPXqMhyaAtzAr807TIyfQrmyg==} + /@babel/traverse@7.23.9: + resolution: {integrity: sha512-I/4UJ9vs90OkBtY6iiiTORVMyIhJ4kAVmsKo9KFc8UOxMeUfi2hvtIBsET5u9GizXE6/GFSuKCTNfgCswuEjRg==} engines: {node: '>=6.9.0'} dependencies: '@babel/code-frame': 7.23.5 @@ -1719,16 +1639,16 @@ packages: '@babel/helper-function-name': 7.23.0 '@babel/helper-hoist-variables': 7.22.5 '@babel/helper-split-export-declaration': 7.22.6 - '@babel/parser': 7.23.6 - '@babel/types': 7.23.6 + '@babel/parser': 7.23.9 + '@babel/types': 7.23.9 debug: 4.3.4 globals: 11.12.0 transitivePeerDependencies: - supports-color dev: true - /@babel/types@7.23.6: - resolution: {integrity: sha512-+uarb83brBzPKN38NX1MkB6vb6+mwvR6amUulqAE7ccQw1pEl+bCia9TbdG1lsnFP7lZySvUn37CHyXQdfTwzg==} + /@babel/types@7.23.9: + resolution: {integrity: sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q==} engines: {node: '>=6.9.0'} dependencies: '@babel/helper-string-parser': 7.23.4 @@ -1758,6 +1678,25 @@ packages: react: 18.2.0 dev: false + /@chakra-ui/accordion@2.3.1(@chakra-ui/system@2.6.2)(framer-motion@11.0.5)(react@18.2.0): + resolution: {integrity: sha512-FSXRm8iClFyU+gVaXisOSEw0/4Q+qZbFRiuhIAkVU6Boj0FxAMrlo9a8AV5TuF77rgaHytCdHk0Ng+cyUijrag==} + peerDependencies: + '@chakra-ui/system': '>=2.0.0' + framer-motion: '>=4.0.0' + react: '>=18' + dependencies: + '@chakra-ui/descendant': 3.1.0(react@18.2.0) + '@chakra-ui/icon': 3.2.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/react-context': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-controllable-state': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.2.0) + '@chakra-ui/shared-utils': 2.0.5 + '@chakra-ui/system': 2.6.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(react@18.2.0) + '@chakra-ui/transition': 2.1.0(framer-motion@11.0.5)(react@18.2.0) + framer-motion: 11.0.5(react-dom@18.2.0)(react@18.2.0) + react: 18.2.0 + dev: false + /@chakra-ui/alert@2.2.2(@chakra-ui/system@2.6.2)(react@18.2.0): resolution: {integrity: sha512-jHg4LYMRNOJH830ViLuicjb3F+v6iriE/2G5T+Sd0Hna04nukNJ1MxUmBPE+vI22me2dIflfelu2v9wdB6Pojw==} peerDependencies: @@ -1912,7 +1851,7 @@ packages: '@emotion/react': '>=10.0.35' react: '>=18' dependencies: - '@emotion/react': 11.11.3(@types/react@18.2.48)(react@18.2.0) + '@emotion/react': 11.11.3(@types/react@18.2.59)(react@18.2.0) react: 18.2.0 dev: false @@ -1953,14 +1892,26 @@ packages: resolution: {integrity: sha512-IGM/yGUHS+8TOQrZGpAKOJl/xGBrmRYJrmbHfUE7zrG3PpQyXvbLDP1M+RggkCFVgHlJi2wpYIf0QtQlU0XZfw==} dev: false - 
/@chakra-ui/focus-lock@2.1.0(@types/react@18.2.48)(react@18.2.0): + /@chakra-ui/focus-lock@2.1.0(@types/react@18.2.57)(react@18.2.0): resolution: {integrity: sha512-EmGx4PhWGjm4dpjRqM4Aa+rCWBxP+Rq8Uc/nAVnD4YVqkEhBkrPTpui2lnjsuxqNaZ24fIAZ10cF1hlpemte/w==} peerDependencies: react: '>=18' dependencies: '@chakra-ui/dom-utils': 2.1.0 react: 18.2.0 - react-focus-lock: 2.9.6(@types/react@18.2.48)(react@18.2.0) + react-focus-lock: 2.11.1(@types/react@18.2.57)(react@18.2.0) + transitivePeerDependencies: + - '@types/react' + dev: false + + /@chakra-ui/focus-lock@2.1.0(@types/react@18.2.59)(react@18.2.0): + resolution: {integrity: sha512-EmGx4PhWGjm4dpjRqM4Aa+rCWBxP+Rq8Uc/nAVnD4YVqkEhBkrPTpui2lnjsuxqNaZ24fIAZ10cF1hlpemte/w==} + peerDependencies: + react: '>=18' + dependencies: + '@chakra-ui/dom-utils': 2.1.0 + react: 18.2.0 + react-focus-lock: 2.11.1(@types/react@18.2.59)(react@18.2.0) transitivePeerDependencies: - '@types/react' dev: false @@ -2109,7 +2060,61 @@ packages: react: 18.2.0 dev: false - /@chakra-ui/modal@2.3.1(@chakra-ui/system@2.6.2)(@types/react@18.2.48)(framer-motion@10.18.0)(react-dom@18.2.0)(react@18.2.0): + /@chakra-ui/menu@2.2.1(@chakra-ui/system@2.6.2)(framer-motion@11.0.5)(react@18.2.0): + resolution: {integrity: sha512-lJS7XEObzJxsOwWQh7yfG4H8FzFPRP5hVPN/CL+JzytEINCSBvsCDHrYPQGp7jzpCi8vnTqQQGQe0f8dwnXd2g==} + peerDependencies: + '@chakra-ui/system': '>=2.0.0' + framer-motion: '>=4.0.0' + react: '>=18' + dependencies: + '@chakra-ui/clickable': 2.1.0(react@18.2.0) + '@chakra-ui/descendant': 3.1.0(react@18.2.0) + '@chakra-ui/lazy-utils': 2.0.5 + '@chakra-ui/popper': 3.1.0(react@18.2.0) + '@chakra-ui/react-children-utils': 2.0.6(react@18.2.0) + '@chakra-ui/react-context': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-animation-state': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-controllable-state': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-disclosure': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-focus-effect': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-outside-click': 2.2.0(react@18.2.0) + '@chakra-ui/react-use-update-effect': 2.1.0(react@18.2.0) + '@chakra-ui/shared-utils': 2.0.5 + '@chakra-ui/system': 2.6.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(react@18.2.0) + '@chakra-ui/transition': 2.1.0(framer-motion@11.0.5)(react@18.2.0) + framer-motion: 11.0.5(react-dom@18.2.0)(react@18.2.0) + react: 18.2.0 + dev: false + + /@chakra-ui/menu@2.2.1(@chakra-ui/system@2.6.2)(framer-motion@11.0.6)(react@18.2.0): + resolution: {integrity: sha512-lJS7XEObzJxsOwWQh7yfG4H8FzFPRP5hVPN/CL+JzytEINCSBvsCDHrYPQGp7jzpCi8vnTqQQGQe0f8dwnXd2g==} + peerDependencies: + '@chakra-ui/system': '>=2.0.0' + framer-motion: '>=4.0.0' + react: '>=18' + dependencies: + '@chakra-ui/clickable': 2.1.0(react@18.2.0) + '@chakra-ui/descendant': 3.1.0(react@18.2.0) + '@chakra-ui/lazy-utils': 2.0.5 + '@chakra-ui/popper': 3.1.0(react@18.2.0) + '@chakra-ui/react-children-utils': 2.0.6(react@18.2.0) + '@chakra-ui/react-context': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-animation-state': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-controllable-state': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-disclosure': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-focus-effect': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-outside-click': 2.2.0(react@18.2.0) + '@chakra-ui/react-use-update-effect': 2.1.0(react@18.2.0) + '@chakra-ui/shared-utils': 2.0.5 + '@chakra-ui/system': 
2.6.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(react@18.2.0) + '@chakra-ui/transition': 2.1.0(framer-motion@11.0.6)(react@18.2.0) + framer-motion: 11.0.6(react-dom@18.2.0)(react@18.2.0) + react: 18.2.0 + dev: false + + /@chakra-ui/modal@2.3.1(@chakra-ui/system@2.6.2)(@types/react@18.2.57)(framer-motion@11.0.5)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-TQv1ZaiJMZN+rR9DK0snx/OPwmtaGH1HbZtlYt4W4s6CzyK541fxLRTjIXfEzIGpvNW+b6VFuFjbcR78p4DEoQ==} peerDependencies: '@chakra-ui/system': '>=2.0.0' @@ -2118,7 +2123,33 @@ packages: react-dom: '>=18' dependencies: '@chakra-ui/close-button': 2.1.1(@chakra-ui/system@2.6.2)(react@18.2.0) - '@chakra-ui/focus-lock': 2.1.0(@types/react@18.2.48)(react@18.2.0) + '@chakra-ui/focus-lock': 2.1.0(@types/react@18.2.57)(react@18.2.0) + '@chakra-ui/portal': 2.1.0(react-dom@18.2.0)(react@18.2.0) + '@chakra-ui/react-context': 2.1.0(react@18.2.0) + '@chakra-ui/react-types': 2.0.7(react@18.2.0) + '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.2.0) + '@chakra-ui/shared-utils': 2.0.5 + '@chakra-ui/system': 2.6.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(react@18.2.0) + '@chakra-ui/transition': 2.1.0(framer-motion@11.0.5)(react@18.2.0) + aria-hidden: 1.2.3 + framer-motion: 11.0.5(react-dom@18.2.0)(react@18.2.0) + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + react-remove-scroll: 2.5.7(@types/react@18.2.57)(react@18.2.0) + transitivePeerDependencies: + - '@types/react' + dev: false + + /@chakra-ui/modal@2.3.1(@chakra-ui/system@2.6.2)(@types/react@18.2.59)(framer-motion@10.18.0)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-TQv1ZaiJMZN+rR9DK0snx/OPwmtaGH1HbZtlYt4W4s6CzyK541fxLRTjIXfEzIGpvNW+b6VFuFjbcR78p4DEoQ==} + peerDependencies: + '@chakra-ui/system': '>=2.0.0' + framer-motion: '>=4.0.0' + react: '>=18' + react-dom: '>=18' + dependencies: + '@chakra-ui/close-button': 2.1.1(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/focus-lock': 2.1.0(@types/react@18.2.59)(react@18.2.0) '@chakra-ui/portal': 2.1.0(react-dom@18.2.0)(react@18.2.0) '@chakra-ui/react-context': 2.1.0(react@18.2.0) '@chakra-ui/react-types': 2.0.7(react@18.2.0) @@ -2130,7 +2161,7 @@ packages: framer-motion: 10.18.0(react-dom@18.2.0)(react@18.2.0) react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - react-remove-scroll: 2.5.7(@types/react@18.2.48)(react@18.2.0) + react-remove-scroll: 2.5.7(@types/react@18.2.59)(react@18.2.0) transitivePeerDependencies: - '@types/react' dev: false @@ -2204,6 +2235,29 @@ packages: react: 18.2.0 dev: false + /@chakra-ui/popover@2.2.1(@chakra-ui/system@2.6.2)(framer-motion@11.0.5)(react@18.2.0): + resolution: {integrity: sha512-K+2ai2dD0ljvJnlrzesCDT9mNzLifE3noGKZ3QwLqd/K34Ym1W/0aL1ERSynrcG78NKoXS54SdEzkhCZ4Gn/Zg==} + peerDependencies: + '@chakra-ui/system': '>=2.0.0' + framer-motion: '>=4.0.0' + react: '>=18' + dependencies: + '@chakra-ui/close-button': 2.1.1(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/lazy-utils': 2.0.5 + '@chakra-ui/popper': 3.1.0(react@18.2.0) + '@chakra-ui/react-context': 2.1.0(react@18.2.0) + '@chakra-ui/react-types': 2.0.7(react@18.2.0) + '@chakra-ui/react-use-animation-state': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-disclosure': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-focus-effect': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-focus-on-pointer-down': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.2.0) + '@chakra-ui/shared-utils': 2.0.5 + '@chakra-ui/system': 2.6.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(react@18.2.0) + framer-motion: 
11.0.5(react-dom@18.2.0)(react@18.2.0) + react: 18.2.0 + dev: false + /@chakra-ui/popper@3.1.0(react@18.2.0): resolution: {integrity: sha512-ciDdpdYbeFG7og6/6J8lkTFxsSvwTdMLFkpVylAF6VNC22jssiWfquj2eyD4rJnzkRFPvIWJq8hvbfhsm+AjSg==} peerDependencies: @@ -2251,8 +2305,8 @@ packages: '@chakra-ui/react-env': 3.1.0(react@18.2.0) '@chakra-ui/system': 2.6.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(react@18.2.0) '@chakra-ui/utils': 2.0.15 - '@emotion/react': 11.11.3(@types/react@18.2.48)(react@18.2.0) - '@emotion/styled': 11.11.0(@emotion/react@11.11.3)(@types/react@18.2.48)(react@18.2.0) + '@emotion/react': 11.11.3(@types/react@18.2.59)(react@18.2.0) + '@emotion/styled': 11.11.0(@emotion/react@11.11.3)(@types/react@18.2.59)(react@18.2.0) react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: false @@ -2468,7 +2522,78 @@ packages: react: 18.2.0 dev: false - /@chakra-ui/react@2.8.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(@types/react@18.2.48)(framer-motion@10.18.0)(react-dom@18.2.0)(react@18.2.0): + /@chakra-ui/react@2.8.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(@types/react@18.2.57)(framer-motion@11.0.5)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-Hn0moyxxyCDKuR9ywYpqgX8dvjqwu9ArwpIb9wHNYjnODETjLwazgNIliCVBRcJvysGRiV51U2/JtJVrpeCjUQ==} + peerDependencies: + '@emotion/react': ^11.0.0 + '@emotion/styled': ^11.0.0 + framer-motion: '>=4.0.0' + react: '>=18' + react-dom: '>=18' + dependencies: + '@chakra-ui/accordion': 2.3.1(@chakra-ui/system@2.6.2)(framer-motion@11.0.5)(react@18.2.0) + '@chakra-ui/alert': 2.2.2(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/avatar': 2.3.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/breadcrumb': 2.2.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/button': 2.1.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/card': 2.2.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/checkbox': 2.3.2(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/close-button': 2.1.1(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/control-box': 2.1.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/counter': 2.1.0(react@18.2.0) + '@chakra-ui/css-reset': 2.3.0(@emotion/react@11.11.3)(react@18.2.0) + '@chakra-ui/editable': 3.1.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/focus-lock': 2.1.0(@types/react@18.2.57)(react@18.2.0) + '@chakra-ui/form-control': 2.2.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/hooks': 2.2.1(react@18.2.0) + '@chakra-ui/icon': 3.2.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/image': 2.1.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/input': 2.1.2(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/layout': 2.3.1(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/live-region': 2.1.0(react@18.2.0) + '@chakra-ui/media-query': 3.3.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/menu': 2.2.1(@chakra-ui/system@2.6.2)(framer-motion@11.0.5)(react@18.2.0) + '@chakra-ui/modal': 2.3.1(@chakra-ui/system@2.6.2)(@types/react@18.2.57)(framer-motion@11.0.5)(react-dom@18.2.0)(react@18.2.0) + '@chakra-ui/number-input': 2.1.2(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/pin-input': 2.1.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/popover': 2.2.1(@chakra-ui/system@2.6.2)(framer-motion@11.0.5)(react@18.2.0) + '@chakra-ui/popper': 3.1.0(react@18.2.0) + '@chakra-ui/portal': 2.1.0(react-dom@18.2.0)(react@18.2.0) + '@chakra-ui/progress': 2.2.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/provider': 
2.4.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(react-dom@18.2.0)(react@18.2.0) + '@chakra-ui/radio': 2.1.2(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/react-env': 3.1.0(react@18.2.0) + '@chakra-ui/select': 2.1.2(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/skeleton': 2.1.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/skip-nav': 2.1.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/slider': 2.1.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/spinner': 2.1.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/stat': 2.1.1(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/stepper': 2.3.1(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/styled-system': 2.9.2 + '@chakra-ui/switch': 2.1.2(@chakra-ui/system@2.6.2)(framer-motion@11.0.5)(react@18.2.0) + '@chakra-ui/system': 2.6.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(react@18.2.0) + '@chakra-ui/table': 2.1.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/tabs': 3.0.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/tag': 3.1.1(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/textarea': 2.1.2(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/theme': 3.3.1(@chakra-ui/styled-system@2.9.2) + '@chakra-ui/theme-utils': 2.0.21 + '@chakra-ui/toast': 7.0.2(@chakra-ui/system@2.6.2)(framer-motion@11.0.5)(react-dom@18.2.0)(react@18.2.0) + '@chakra-ui/tooltip': 2.3.1(@chakra-ui/system@2.6.2)(framer-motion@11.0.5)(react-dom@18.2.0)(react@18.2.0) + '@chakra-ui/transition': 2.1.0(framer-motion@11.0.5)(react@18.2.0) + '@chakra-ui/utils': 2.0.15 + '@chakra-ui/visually-hidden': 2.2.0(@chakra-ui/system@2.6.2)(react@18.2.0) + '@emotion/react': 11.11.3(@types/react@18.2.57)(react@18.2.0) + '@emotion/styled': 11.11.0(@emotion/react@11.11.3)(@types/react@18.2.57)(react@18.2.0) + framer-motion: 11.0.5(react-dom@18.2.0)(react@18.2.0) + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + transitivePeerDependencies: + - '@types/react' + dev: false + + /@chakra-ui/react@2.8.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(@types/react@18.2.59)(framer-motion@10.18.0)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-Hn0moyxxyCDKuR9ywYpqgX8dvjqwu9ArwpIb9wHNYjnODETjLwazgNIliCVBRcJvysGRiV51U2/JtJVrpeCjUQ==} peerDependencies: '@emotion/react': ^11.0.0 @@ -2489,7 +2614,7 @@ packages: '@chakra-ui/counter': 2.1.0(react@18.2.0) '@chakra-ui/css-reset': 2.3.0(@emotion/react@11.11.3)(react@18.2.0) '@chakra-ui/editable': 3.1.0(@chakra-ui/system@2.6.2)(react@18.2.0) - '@chakra-ui/focus-lock': 2.1.0(@types/react@18.2.48)(react@18.2.0) + '@chakra-ui/focus-lock': 2.1.0(@types/react@18.2.59)(react@18.2.0) '@chakra-ui/form-control': 2.2.0(@chakra-ui/system@2.6.2)(react@18.2.0) '@chakra-ui/hooks': 2.2.1(react@18.2.0) '@chakra-ui/icon': 3.2.0(@chakra-ui/system@2.6.2)(react@18.2.0) @@ -2499,7 +2624,7 @@ packages: '@chakra-ui/live-region': 2.1.0(react@18.2.0) '@chakra-ui/media-query': 3.3.0(@chakra-ui/system@2.6.2)(react@18.2.0) '@chakra-ui/menu': 2.2.1(@chakra-ui/system@2.6.2)(framer-motion@10.18.0)(react@18.2.0) - '@chakra-ui/modal': 2.3.1(@chakra-ui/system@2.6.2)(@types/react@18.2.48)(framer-motion@10.18.0)(react-dom@18.2.0)(react@18.2.0) + '@chakra-ui/modal': 2.3.1(@chakra-ui/system@2.6.2)(@types/react@18.2.59)(framer-motion@10.18.0)(react-dom@18.2.0)(react@18.2.0) '@chakra-ui/number-input': 2.1.2(@chakra-ui/system@2.6.2)(react@18.2.0) '@chakra-ui/pin-input': 2.1.0(@chakra-ui/system@2.6.2)(react@18.2.0) '@chakra-ui/popover': 2.2.1(@chakra-ui/system@2.6.2)(framer-motion@10.18.0)(react@18.2.0) @@ -2530,8 
+2655,8 @@ packages: '@chakra-ui/transition': 2.1.0(framer-motion@10.18.0)(react@18.2.0) '@chakra-ui/utils': 2.0.15 '@chakra-ui/visually-hidden': 2.2.0(@chakra-ui/system@2.6.2)(react@18.2.0) - '@emotion/react': 11.11.3(@types/react@18.2.48)(react@18.2.0) - '@emotion/styled': 11.11.0(@emotion/react@11.11.3)(@types/react@18.2.48)(react@18.2.0) + '@emotion/react': 11.11.3(@types/react@18.2.59)(react@18.2.0) + '@emotion/styled': 11.11.0(@emotion/react@11.11.3)(@types/react@18.2.59)(react@18.2.0) framer-motion: 10.18.0(react-dom@18.2.0)(react@18.2.0) react: 18.2.0 react-dom: 18.2.0(react@18.2.0) @@ -2657,6 +2782,20 @@ packages: react: 18.2.0 dev: false + /@chakra-ui/switch@2.1.2(@chakra-ui/system@2.6.2)(framer-motion@11.0.5)(react@18.2.0): + resolution: {integrity: sha512-pgmi/CC+E1v31FcnQhsSGjJnOE2OcND4cKPyTE+0F+bmGm48Q/b5UmKD9Y+CmZsrt/7V3h8KNczowupfuBfIHA==} + peerDependencies: + '@chakra-ui/system': '>=2.0.0' + framer-motion: '>=4.0.0' + react: '>=18' + dependencies: + '@chakra-ui/checkbox': 2.3.2(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/shared-utils': 2.0.5 + '@chakra-ui/system': 2.6.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(react@18.2.0) + framer-motion: 11.0.5(react-dom@18.2.0)(react@18.2.0) + react: 18.2.0 + dev: false + /@chakra-ui/system@2.6.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(react@18.2.0): resolution: {integrity: sha512-EGtpoEjLrUu4W1fHD+a62XR+hzC5YfsWm+6lO0Kybcga3yYEij9beegO0jZgug27V+Rf7vns95VPVP6mFd/DEQ==} peerDependencies: @@ -2670,8 +2809,8 @@ packages: '@chakra-ui/styled-system': 2.9.2 '@chakra-ui/theme-utils': 2.0.21 '@chakra-ui/utils': 2.0.15 - '@emotion/react': 11.11.3(@types/react@18.2.48)(react@18.2.0) - '@emotion/styled': 11.11.0(@emotion/react@11.11.3)(@types/react@18.2.48)(react@18.2.0) + '@emotion/react': 11.11.3(@types/react@18.2.59)(react@18.2.0) + '@emotion/styled': 11.11.0(@emotion/react@11.11.3)(@types/react@18.2.59)(react@18.2.0) react: 18.2.0 react-fast-compare: 3.2.2 dev: false @@ -2785,6 +2924,29 @@ packages: react-dom: 18.2.0(react@18.2.0) dev: false + /@chakra-ui/toast@7.0.2(@chakra-ui/system@2.6.2)(framer-motion@11.0.5)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-yvRP8jFKRs/YnkuE41BVTq9nB2v/KDRmje9u6dgDmE5+1bFt3bwjdf9gVbif4u5Ve7F7BGk5E093ARRVtvLvXA==} + peerDependencies: + '@chakra-ui/system': 2.6.2 + framer-motion: '>=4.0.0' + react: '>=18' + react-dom: '>=18' + dependencies: + '@chakra-ui/alert': 2.2.2(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/close-button': 2.1.1(@chakra-ui/system@2.6.2)(react@18.2.0) + '@chakra-ui/portal': 2.1.0(react-dom@18.2.0)(react@18.2.0) + '@chakra-ui/react-context': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-timeout': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-update-effect': 2.1.0(react@18.2.0) + '@chakra-ui/shared-utils': 2.0.5 + '@chakra-ui/styled-system': 2.9.2 + '@chakra-ui/system': 2.6.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(react@18.2.0) + '@chakra-ui/theme': 3.3.1(@chakra-ui/styled-system@2.9.2) + framer-motion: 11.0.5(react-dom@18.2.0)(react@18.2.0) + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: false + /@chakra-ui/tooltip@2.3.1(@chakra-ui/system@2.6.2)(framer-motion@10.18.0)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-Rh39GBn/bL4kZpuEMPPRwYNnccRCL+w9OqamWHIB3Qboxs6h8cOyXfIdGxjo72lvhu1QI/a4KFqkM3St+WfC0A==} peerDependencies: @@ -2807,6 +2969,28 @@ packages: react-dom: 18.2.0(react@18.2.0) dev: false + /@chakra-ui/tooltip@2.3.1(@chakra-ui/system@2.6.2)(framer-motion@11.0.5)(react-dom@18.2.0)(react@18.2.0): + 
resolution: {integrity: sha512-Rh39GBn/bL4kZpuEMPPRwYNnccRCL+w9OqamWHIB3Qboxs6h8cOyXfIdGxjo72lvhu1QI/a4KFqkM3St+WfC0A==} + peerDependencies: + '@chakra-ui/system': '>=2.0.0' + framer-motion: '>=4.0.0' + react: '>=18' + react-dom: '>=18' + dependencies: + '@chakra-ui/dom-utils': 2.1.0 + '@chakra-ui/popper': 3.1.0(react@18.2.0) + '@chakra-ui/portal': 2.1.0(react-dom@18.2.0)(react@18.2.0) + '@chakra-ui/react-types': 2.0.7(react@18.2.0) + '@chakra-ui/react-use-disclosure': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-event-listener': 2.1.0(react@18.2.0) + '@chakra-ui/react-use-merge-refs': 2.1.0(react@18.2.0) + '@chakra-ui/shared-utils': 2.0.5 + '@chakra-ui/system': 2.6.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(react@18.2.0) + framer-motion: 11.0.5(react-dom@18.2.0)(react@18.2.0) + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + dev: false + /@chakra-ui/transition@2.1.0(framer-motion@10.18.0)(react@18.2.0): resolution: {integrity: sha512-orkT6T/Dt+/+kVwJNy7zwJ+U2xAZ3EU7M3XCs45RBvUnZDr/u9vdmaM/3D/rOpmQJWgQBwKPJleUXrYWUagEDQ==} peerDependencies: @@ -2818,6 +3002,28 @@ packages: react: 18.2.0 dev: false + /@chakra-ui/transition@2.1.0(framer-motion@11.0.5)(react@18.2.0): + resolution: {integrity: sha512-orkT6T/Dt+/+kVwJNy7zwJ+U2xAZ3EU7M3XCs45RBvUnZDr/u9vdmaM/3D/rOpmQJWgQBwKPJleUXrYWUagEDQ==} + peerDependencies: + framer-motion: '>=4.0.0' + react: '>=18' + dependencies: + '@chakra-ui/shared-utils': 2.0.5 + framer-motion: 11.0.5(react-dom@18.2.0)(react@18.2.0) + react: 18.2.0 + dev: false + + /@chakra-ui/transition@2.1.0(framer-motion@11.0.6)(react@18.2.0): + resolution: {integrity: sha512-orkT6T/Dt+/+kVwJNy7zwJ+U2xAZ3EU7M3XCs45RBvUnZDr/u9vdmaM/3D/rOpmQJWgQBwKPJleUXrYWUagEDQ==} + peerDependencies: + framer-motion: '>=4.0.0' + react: '>=18' + dependencies: + '@chakra-ui/shared-utils': 2.0.5 + framer-motion: 11.0.6(react-dom@18.2.0)(react@18.2.0) + react: 18.2.0 + dev: false + /@chakra-ui/utils@2.0.15: resolution: {integrity: sha512-El4+jL0WSaYYs+rJbuYFDbjmfCcfGDmRY95GO4xwzit6YAPZBLcR65rOEwLps+XWluZTy1xdMrusg/hW0c1aAA==} dependencies: @@ -2849,14 +3055,6 @@ packages: engines: {node: '>17.0.0'} dev: false - /@dependents/detective-less@3.0.2: - resolution: {integrity: sha512-1YUvQ+e0eeTWAHoN8Uz2x2U37jZs6IGutiIE5LXId7cxfUGhtZjzxE06FdUiuiRrW+UE0vNCdSNPH2lY4dQCOQ==} - engines: {node: '>=12'} - dependencies: - gonzales-pe: 4.3.0 - node-source-walk: 5.0.2 - dev: true - /@discoveryjs/json-ext@0.5.7: resolution: {integrity: sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==} engines: {node: '>=10.0.0'} @@ -2884,6 +3082,18 @@ packages: tslib: 2.6.2 dev: false + /@dnd-kit/sortable@8.0.0(@dnd-kit/core@6.1.0)(react@18.2.0): + resolution: {integrity: sha512-U3jk5ebVXe1Lr7c2wU7SBZjcWdQP+j7peHJfCspnA81enlu88Mgd7CC8Q+pub9ubP7eKVETzJW+IBAhsqbSu/g==} + peerDependencies: + '@dnd-kit/core': ^6.1.0 + react: '>=16.8.0' + dependencies: + '@dnd-kit/core': 6.1.0(react-dom@18.2.0)(react@18.2.0) + '@dnd-kit/utilities': 3.2.2(react@18.2.0) + react: 18.2.0 + tslib: 2.6.2 + dev: false + /@dnd-kit/utilities@3.2.2(react@18.2.0): resolution: {integrity: sha512-+MKAJEOfaBe5SmV6t34p80MMKhjvUz0vRrvVJbPT0WElzaOJ/1xs+D+KDv+tD/NE5ujfrChEcshd4fLn0wpiqg==} peerDependencies: @@ -2897,7 +3107,7 @@ packages: resolution: {integrity: sha512-m4HEDZleaaCH+XgDDsPF15Ht6wTLsgDTeR3WYj9Q/k76JtWhrJjcP4+/XlG8LGT/Rol9qUfOIztXeA84ATpqPQ==} dependencies: '@babel/helper-module-imports': 7.22.15 - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 '@emotion/hash': 0.9.1 '@emotion/memoize': 0.8.1 
'@emotion/serialize': 1.1.3 @@ -2947,7 +3157,7 @@ packages: resolution: {integrity: sha512-W2P2c/VRW1/1tLox0mVUalvnWXxavmv/Oum2aPsRcoDJuob75FC3Y8FbpfLwUegRcxINtGUMPq0tFCvYNTBXNA==} dev: false - /@emotion/react@11.11.3(@types/react@18.2.48)(react@18.2.0): + /@emotion/react@11.11.3(@types/react@18.2.57)(react@18.2.0): resolution: {integrity: sha512-Cnn0kuq4DoONOMcnoVsTOR8E+AdnKFf//6kUWc4LCdnxj31pZWn7rIULd6Y7/Js1PiPHzn7SKCM9vB/jBni8eA==} peerDependencies: '@types/react': '*' @@ -2956,14 +3166,35 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 '@emotion/babel-plugin': 11.11.0 '@emotion/cache': 11.11.0 '@emotion/serialize': 1.1.3 '@emotion/use-insertion-effect-with-fallbacks': 1.0.1(react@18.2.0) '@emotion/utils': 1.2.1 '@emotion/weak-memoize': 0.3.1 - '@types/react': 18.2.48 + '@types/react': 18.2.57 + hoist-non-react-statics: 3.3.2 + react: 18.2.0 + dev: false + + /@emotion/react@11.11.3(@types/react@18.2.59)(react@18.2.0): + resolution: {integrity: sha512-Cnn0kuq4DoONOMcnoVsTOR8E+AdnKFf//6kUWc4LCdnxj31pZWn7rIULd6Y7/Js1PiPHzn7SKCM9vB/jBni8eA==} + peerDependencies: + '@types/react': '*' + react: '>=16.8.0' + peerDependenciesMeta: + '@types/react': + optional: true + dependencies: + '@babel/runtime': 7.23.9 + '@emotion/babel-plugin': 11.11.0 + '@emotion/cache': 11.11.0 + '@emotion/serialize': 1.1.3 + '@emotion/use-insertion-effect-with-fallbacks': 1.0.1(react@18.2.0) + '@emotion/utils': 1.2.1 + '@emotion/weak-memoize': 0.3.1 + '@types/react': 18.2.59 hoist-non-react-statics: 3.3.2 react: 18.2.0 dev: false @@ -2982,7 +3213,7 @@ packages: resolution: {integrity: sha512-0QBtGvaqtWi+nx6doRwDdBIzhNdZrXUppvTM4dtZZWEGTXL/XE/yJxLMGlDT1Gt+UHH5IX1n+jkXyytE/av7OA==} dev: false - /@emotion/styled@11.11.0(@emotion/react@11.11.3)(@types/react@18.2.48)(react@18.2.0): + /@emotion/styled@11.11.0(@emotion/react@11.11.3)(@types/react@18.2.57)(react@18.2.0): resolution: {integrity: sha512-hM5Nnvu9P3midq5aaXj4I+lnSfNi7Pmd4EWk1fOZ3pxookaQTNew6bp4JaCBYM4HVFZF9g7UjJmsUmC2JlxOng==} peerDependencies: '@emotion/react': ^11.0.0-rc.0 @@ -2992,14 +3223,35 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 '@emotion/babel-plugin': 11.11.0 '@emotion/is-prop-valid': 1.2.1 - '@emotion/react': 11.11.3(@types/react@18.2.48)(react@18.2.0) + '@emotion/react': 11.11.3(@types/react@18.2.57)(react@18.2.0) '@emotion/serialize': 1.1.3 '@emotion/use-insertion-effect-with-fallbacks': 1.0.1(react@18.2.0) '@emotion/utils': 1.2.1 - '@types/react': 18.2.48 + '@types/react': 18.2.57 + react: 18.2.0 + dev: false + + /@emotion/styled@11.11.0(@emotion/react@11.11.3)(@types/react@18.2.59)(react@18.2.0): + resolution: {integrity: sha512-hM5Nnvu9P3midq5aaXj4I+lnSfNi7Pmd4EWk1fOZ3pxookaQTNew6bp4JaCBYM4HVFZF9g7UjJmsUmC2JlxOng==} + peerDependencies: + '@emotion/react': ^11.0.0-rc.0 + '@types/react': '*' + react: '>=16.8.0' + peerDependenciesMeta: + '@types/react': + optional: true + dependencies: + '@babel/runtime': 7.23.9 + '@emotion/babel-plugin': 11.11.0 + '@emotion/is-prop-valid': 1.2.1 + '@emotion/react': 11.11.3(@types/react@18.2.59)(react@18.2.0) + '@emotion/serialize': 1.1.3 + '@emotion/use-insertion-effect-with-fallbacks': 1.0.1(react@18.2.0) + '@emotion/utils': 1.2.1 + '@types/react': 18.2.59 react: 18.2.0 dev: false @@ -3022,8 +3274,33 @@ packages: resolution: {integrity: sha512-EsBwpc7hBUJWAsNPBmJy4hxWx12v6bshQsldrVmjxJoc3isbxhOrF2IcCpaXxfvq03NwkI7sbsOLXbYuqF/8Ww==} dev: false - /@esbuild/aix-ppc64@0.19.11: - 
resolution: {integrity: sha512-FnzU0LyE3ySQk7UntJO4+qIiQgI7KoODnZg5xzXIrFJlKd2P2gwHsHY4927xj9y5PJmJSzULiUCWmv7iWnNa7g==} + /@ericcornelissen/bash-parser@0.5.2: + resolution: {integrity: sha512-4pIMTa1nEFfMXitv7oaNEWOdM+zpOZavesa5GaiWTgda6Zk32CFGxjUp/iIaN0PwgUW1yTq/fztSjbpE8SLGZQ==} + engines: {node: '>=4'} + dependencies: + array-last: 1.3.0 + babylon: 6.18.0 + compose-function: 3.0.3 + deep-freeze: 0.0.1 + filter-iterator: 0.0.1 + filter-obj: 1.1.0 + has-own-property: 0.1.0 + identity-function: 1.0.0 + is-iterable: 1.1.1 + iterable-lookahead: 1.0.0 + lodash.curry: 4.1.1 + magic-string: 0.16.0 + map-obj: 2.0.0 + object-pairs: 0.1.0 + object-values: 1.0.0 + reverse-arguments: 1.0.0 + shell-quote-word: 1.0.1 + to-pascal-case: 1.0.0 + unescape-js: 1.1.4 + dev: true + + /@esbuild/aix-ppc64@0.19.12: + resolution: {integrity: sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA==} engines: {node: '>=12'} cpu: [ppc64] os: [aix] @@ -3040,8 +3317,8 @@ packages: dev: true optional: true - /@esbuild/android-arm64@0.19.11: - resolution: {integrity: sha512-aiu7K/5JnLj//KOnOfEZ0D90obUkRzDMyqd/wNAUQ34m4YUPVhRZpnqKV9uqDGxT7cToSDnIHsGooyIczu9T+Q==} + /@esbuild/android-arm64@0.19.12: + resolution: {integrity: sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA==} engines: {node: '>=12'} cpu: [arm64] os: [android] @@ -3058,8 +3335,8 @@ packages: dev: true optional: true - /@esbuild/android-arm@0.19.11: - resolution: {integrity: sha512-5OVapq0ClabvKvQ58Bws8+wkLCV+Rxg7tUVbo9xu034Nm536QTII4YzhaFriQ7rMrorfnFKUsArD2lqKbFY4vw==} + /@esbuild/android-arm@0.19.12: + resolution: {integrity: sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w==} engines: {node: '>=12'} cpu: [arm] os: [android] @@ -3076,8 +3353,8 @@ packages: dev: true optional: true - /@esbuild/android-x64@0.19.11: - resolution: {integrity: sha512-eccxjlfGw43WYoY9QgB82SgGgDbibcqyDTlk3l3C0jOVHKxrjdc9CTwDUQd0vkvYg5um0OH+GpxYvp39r+IPOg==} + /@esbuild/android-x64@0.19.12: + resolution: {integrity: sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew==} engines: {node: '>=12'} cpu: [x64] os: [android] @@ -3094,8 +3371,8 @@ packages: dev: true optional: true - /@esbuild/darwin-arm64@0.19.11: - resolution: {integrity: sha512-ETp87DRWuSt9KdDVkqSoKoLFHYTrkyz2+65fj9nfXsaV3bMhTCjtQfw3y+um88vGRKRiF7erPrh/ZuIdLUIVxQ==} + /@esbuild/darwin-arm64@0.19.12: + resolution: {integrity: sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g==} engines: {node: '>=12'} cpu: [arm64] os: [darwin] @@ -3112,8 +3389,8 @@ packages: dev: true optional: true - /@esbuild/darwin-x64@0.19.11: - resolution: {integrity: sha512-fkFUiS6IUK9WYUO/+22omwetaSNl5/A8giXvQlcinLIjVkxwTLSktbF5f/kJMftM2MJp9+fXqZ5ezS7+SALp4g==} + /@esbuild/darwin-x64@0.19.12: + resolution: {integrity: sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A==} engines: {node: '>=12'} cpu: [x64] os: [darwin] @@ -3130,8 +3407,8 @@ packages: dev: true optional: true - /@esbuild/freebsd-arm64@0.19.11: - resolution: {integrity: sha512-lhoSp5K6bxKRNdXUtHoNc5HhbXVCS8V0iZmDvyWvYq9S5WSfTIHU2UGjcGt7UeS6iEYp9eeymIl5mJBn0yiuxA==} + /@esbuild/freebsd-arm64@0.19.12: + resolution: {integrity: sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA==} engines: {node: '>=12'} cpu: [arm64] os: [freebsd] @@ -3148,8 +3425,8 @@ packages: dev: true optional: true - 
/@esbuild/freebsd-x64@0.19.11: - resolution: {integrity: sha512-JkUqn44AffGXitVI6/AbQdoYAq0TEullFdqcMY/PCUZ36xJ9ZJRtQabzMA+Vi7r78+25ZIBosLTOKnUXBSi1Kw==} + /@esbuild/freebsd-x64@0.19.12: + resolution: {integrity: sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg==} engines: {node: '>=12'} cpu: [x64] os: [freebsd] @@ -3166,8 +3443,8 @@ packages: dev: true optional: true - /@esbuild/linux-arm64@0.19.11: - resolution: {integrity: sha512-LneLg3ypEeveBSMuoa0kwMpCGmpu8XQUh+mL8XXwoYZ6Be2qBnVtcDI5azSvh7vioMDhoJFZzp9GWp9IWpYoUg==} + /@esbuild/linux-arm64@0.19.12: + resolution: {integrity: sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA==} engines: {node: '>=12'} cpu: [arm64] os: [linux] @@ -3184,8 +3461,8 @@ packages: dev: true optional: true - /@esbuild/linux-arm@0.19.11: - resolution: {integrity: sha512-3CRkr9+vCV2XJbjwgzjPtO8T0SZUmRZla+UL1jw+XqHZPkPgZiyWvbDvl9rqAN8Zl7qJF0O/9ycMtjU67HN9/Q==} + /@esbuild/linux-arm@0.19.12: + resolution: {integrity: sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w==} engines: {node: '>=12'} cpu: [arm] os: [linux] @@ -3202,8 +3479,8 @@ packages: dev: true optional: true - /@esbuild/linux-ia32@0.19.11: - resolution: {integrity: sha512-caHy++CsD8Bgq2V5CodbJjFPEiDPq8JJmBdeyZ8GWVQMjRD0sU548nNdwPNvKjVpamYYVL40AORekgfIubwHoA==} + /@esbuild/linux-ia32@0.19.12: + resolution: {integrity: sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA==} engines: {node: '>=12'} cpu: [ia32] os: [linux] @@ -3220,8 +3497,8 @@ packages: dev: true optional: true - /@esbuild/linux-loong64@0.19.11: - resolution: {integrity: sha512-ppZSSLVpPrwHccvC6nQVZaSHlFsvCQyjnvirnVjbKSHuE5N24Yl8F3UwYUUR1UEPaFObGD2tSvVKbvR+uT1Nrg==} + /@esbuild/linux-loong64@0.19.12: + resolution: {integrity: sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA==} engines: {node: '>=12'} cpu: [loong64] os: [linux] @@ -3238,8 +3515,8 @@ packages: dev: true optional: true - /@esbuild/linux-mips64el@0.19.11: - resolution: {integrity: sha512-B5x9j0OgjG+v1dF2DkH34lr+7Gmv0kzX6/V0afF41FkPMMqaQ77pH7CrhWeR22aEeHKaeZVtZ6yFwlxOKPVFyg==} + /@esbuild/linux-mips64el@0.19.12: + resolution: {integrity: sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w==} engines: {node: '>=12'} cpu: [mips64el] os: [linux] @@ -3256,8 +3533,8 @@ packages: dev: true optional: true - /@esbuild/linux-ppc64@0.19.11: - resolution: {integrity: sha512-MHrZYLeCG8vXblMetWyttkdVRjQlQUb/oMgBNurVEnhj4YWOr4G5lmBfZjHYQHHN0g6yDmCAQRR8MUHldvvRDA==} + /@esbuild/linux-ppc64@0.19.12: + resolution: {integrity: sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg==} engines: {node: '>=12'} cpu: [ppc64] os: [linux] @@ -3274,8 +3551,8 @@ packages: dev: true optional: true - /@esbuild/linux-riscv64@0.19.11: - resolution: {integrity: sha512-f3DY++t94uVg141dozDu4CCUkYW+09rWtaWfnb3bqe4w5NqmZd6nPVBm+qbz7WaHZCoqXqHz5p6CM6qv3qnSSQ==} + /@esbuild/linux-riscv64@0.19.12: + resolution: {integrity: sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg==} engines: {node: '>=12'} cpu: [riscv64] os: [linux] @@ -3292,8 +3569,8 @@ packages: dev: true optional: true - /@esbuild/linux-s390x@0.19.11: - resolution: {integrity: sha512-A5xdUoyWJHMMlcSMcPGVLzYzpcY8QP1RtYzX5/bS4dvjBGVxdhuiYyFwp7z74ocV7WDc0n1harxmpq2ePOjI0Q==} + /@esbuild/linux-s390x@0.19.12: + 
resolution: {integrity: sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg==} engines: {node: '>=12'} cpu: [s390x] os: [linux] @@ -3310,8 +3587,8 @@ packages: dev: true optional: true - /@esbuild/linux-x64@0.19.11: - resolution: {integrity: sha512-grbyMlVCvJSfxFQUndw5mCtWs5LO1gUlwP4CDi4iJBbVpZcqLVT29FxgGuBJGSzyOxotFG4LoO5X+M1350zmPA==} + /@esbuild/linux-x64@0.19.12: + resolution: {integrity: sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg==} engines: {node: '>=12'} cpu: [x64] os: [linux] @@ -3328,8 +3605,8 @@ packages: dev: true optional: true - /@esbuild/netbsd-x64@0.19.11: - resolution: {integrity: sha512-13jvrQZJc3P230OhU8xgwUnDeuC/9egsjTkXN49b3GcS5BKvJqZn86aGM8W9pd14Kd+u7HuFBMVtrNGhh6fHEQ==} + /@esbuild/netbsd-x64@0.19.12: + resolution: {integrity: sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA==} engines: {node: '>=12'} cpu: [x64] os: [netbsd] @@ -3346,8 +3623,8 @@ packages: dev: true optional: true - /@esbuild/openbsd-x64@0.19.11: - resolution: {integrity: sha512-ysyOGZuTp6SNKPE11INDUeFVVQFrhcNDVUgSQVDzqsqX38DjhPEPATpid04LCoUr2WXhQTEZ8ct/EgJCUDpyNw==} + /@esbuild/openbsd-x64@0.19.12: + resolution: {integrity: sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw==} engines: {node: '>=12'} cpu: [x64] os: [openbsd] @@ -3364,8 +3641,8 @@ packages: dev: true optional: true - /@esbuild/sunos-x64@0.19.11: - resolution: {integrity: sha512-Hf+Sad9nVwvtxy4DXCZQqLpgmRTQqyFyhT3bZ4F2XlJCjxGmRFF0Shwn9rzhOYRB61w9VMXUkxlBy56dk9JJiQ==} + /@esbuild/sunos-x64@0.19.12: + resolution: {integrity: sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA==} engines: {node: '>=12'} cpu: [x64] os: [sunos] @@ -3382,8 +3659,8 @@ packages: dev: true optional: true - /@esbuild/win32-arm64@0.19.11: - resolution: {integrity: sha512-0P58Sbi0LctOMOQbpEOvOL44Ne0sqbS0XWHMvvrg6NE5jQ1xguCSSw9jQeUk2lfrXYsKDdOe6K+oZiwKPilYPQ==} + /@esbuild/win32-arm64@0.19.12: + resolution: {integrity: sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A==} engines: {node: '>=12'} cpu: [arm64] os: [win32] @@ -3400,8 +3677,8 @@ packages: dev: true optional: true - /@esbuild/win32-ia32@0.19.11: - resolution: {integrity: sha512-6YOrWS+sDJDmshdBIQU+Uoyh7pQKrdykdefC1avn76ss5c+RN6gut3LZA4E2cH5xUEp5/cA0+YxRaVtRAb0xBg==} + /@esbuild/win32-ia32@0.19.12: + resolution: {integrity: sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ==} engines: {node: '>=12'} cpu: [ia32] os: [win32] @@ -3418,8 +3695,8 @@ packages: dev: true optional: true - /@esbuild/win32-x64@0.19.11: - resolution: {integrity: sha512-vfkhltrjCAb603XaFhqhAF4LGDi2M4OrCRrFusyQ+iTLQ/o60QQXxc9cZC/FFpihBI9N1Grn6SMKVJ4KP7Fuiw==} + /@esbuild/win32-x64@0.19.12: + resolution: {integrity: sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA==} engines: {node: '>=12'} cpu: [x64] os: [win32] @@ -3427,13 +3704,13 @@ packages: dev: true optional: true - /@eslint-community/eslint-utils@4.4.0(eslint@8.56.0): + /@eslint-community/eslint-utils@4.4.0(eslint@8.57.0): resolution: {integrity: sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 dependencies: - eslint: 8.56.0 + eslint: 8.57.0 eslint-visitor-keys: 3.4.3 dev: true 
@@ -3450,7 +3727,7 @@ packages: debug: 4.3.4 espree: 9.6.1 globals: 13.24.0 - ignore: 5.3.0 + ignore: 5.3.1 import-fresh: 3.3.0 js-yaml: 4.1.0 minimatch: 3.1.2 @@ -3459,8 +3736,8 @@ packages: - supports-color dev: true - /@eslint/js@8.56.0: - resolution: {integrity: sha512-gMsVel9D7f2HLkBma9VbtzZRehRogVRfbr++f06nL2vnCGCNlzOD+/MUov/F4p8myyAHspEhVobgjpX64q5m6A==} + /@eslint/js@8.57.0: + resolution: {integrity: sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dev: true @@ -3473,45 +3750,35 @@ packages: engines: {node: '>=14'} dev: true - /@floating-ui/core@1.5.2: - resolution: {integrity: sha512-Ii3MrfY/GAIN3OhXNzpCKaLxHQfJF9qvwq/kEJYdqDxeIHa01K8sldugal6TmeeXl+WMvhv9cnVzUTaFFJF09A==} - dependencies: - '@floating-ui/utils': 0.1.6 - dev: false - - /@floating-ui/core@1.5.3: - resolution: {integrity: sha512-O0WKDOo0yhJuugCx6trZQj5jVJ9yR0ystG2JaNAemYUWce+pmM6WUEFIibnWyEJKdrDxhm75NoSRME35FNaM/Q==} + /@floating-ui/core@1.6.0: + resolution: {integrity: sha512-PcF++MykgmTj3CIyOQbKA/hDzOAiqI3mhuoN44WRCopIs1sgoDoU4oty4Jtqaj/y3oDU6fnVSm4QG0a3t5i0+g==} dependencies: '@floating-ui/utils': 0.2.1 - /@floating-ui/dom@1.5.3: - resolution: {integrity: sha512-ClAbQnEqJAKCJOEbbLo5IUlZHkNszqhuxS4fHAVxRPXPya6Ysf2G8KypnYcOTpx6I8xcgF9bbHb6g/2KpbV8qA==} - dependencies: - '@floating-ui/core': 1.5.2 - '@floating-ui/utils': 0.1.6 - dev: false - /@floating-ui/dom@1.5.4: resolution: {integrity: sha512-jByEsHIY+eEdCjnTVu+E3ephzTOzkQ8hgUfGwos+bg7NlH33Zc5uO+QHz1mrQUOgIKKDD1RtS201P9NvAfq3XQ==} dependencies: - '@floating-ui/core': 1.5.3 + '@floating-ui/core': 1.6.0 + '@floating-ui/utils': 0.2.1 + dev: false + + /@floating-ui/dom@1.6.3: + resolution: {integrity: sha512-RnDthu3mzPlQ31Ss/BTwQ1zjzIhr3lk1gZB1OC56h/1vEtaXkESrOqL5fQVMfXpwGtRwX+YsZBdyHtJMQnkArw==} + dependencies: + '@floating-ui/core': 1.6.0 '@floating-ui/utils': 0.2.1 - /@floating-ui/react-dom@2.0.6(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-IB8aCRFxr8nFkdYZgH+Otd9EVQPJoynxeFRGTB8voPoZMRWo8XjYuCRgpI1btvuKY69XMiLnW+ym7zoBHM90Rw==} + /@floating-ui/react-dom@2.0.8(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-HOdqOt3R3OGeTKidaLvJKcgg75S6tibQ3Tif4eyd91QnIJWr0NLvoXFpJA/j8HqkFSL68GDca9AuyWEHlhyClw==} peerDependencies: react: '>=16.8.0' react-dom: '>=16.8.0' dependencies: - '@floating-ui/dom': 1.5.4 + '@floating-ui/dom': 1.6.3 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true - /@floating-ui/utils@0.1.6: - resolution: {integrity: sha512-OfX7E2oUDYxtBvsuS4e/jSn4Q9Qb6DzgeYtsAdkPZ47znpoNsMgZw0+tVijiv3uGNR6dgNlty6r9rzIzHjtd/A==} - dev: false - /@floating-ui/utils@0.2.1: resolution: {integrity: sha512-9TANp6GPoMtYzQdt54kfAyMmz1+osLlXdg2ENroU7zzrtflTLrrC/lgrIfaSe+Wu0b89GKccT7vxXA0MoAIO+Q==} @@ -3519,11 +3786,11 @@ packages: resolution: {integrity: sha512-k+BUNqksTL+AN+o+OV7ILeiE9B5M5X+/jA7LWvCwjbV9ovXTqZyKRhA/x7uYv/ml8WQ0XNLBM7cRFIx4jW0/hg==} dev: false - /@humanwhocodes/config-array@0.11.13: - resolution: {integrity: sha512-JSBDMiDKSzQVngfRjOdFXgFfklaXI4K9nLF49Auh21lmBWRLIK3+xTErTWD4KU54pb6coM6ESE7Awz/FNU3zgQ==} + /@humanwhocodes/config-array@0.11.14: + resolution: {integrity: sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==} engines: {node: '>=10.10.0'} dependencies: - '@humanwhocodes/object-schema': 2.0.1 + '@humanwhocodes/object-schema': 2.0.2 debug: 4.3.4 minimatch: 3.1.2 transitivePeerDependencies: @@ -3535,88 +3802,86 @@ packages: engines: {node: '>=12.22'} dev: true - 
/@humanwhocodes/object-schema@2.0.1: - resolution: {integrity: sha512-dvuCeX5fC9dXgJn9t+X5atfmgQAzUOWqS1254Gh0m6i8wKd10ebXkfNKiRK+1GWi/yTvvLDHpoxLr0xxxeslWw==} + /@humanwhocodes/object-schema@2.0.2: + resolution: {integrity: sha512-6EwiSjwWYP7pTckG6I5eyFANjPhmPjUX9JRLUSfNPC7FX7zK9gyZAfUEaECL6ALTpGX5AjnBq3C9XmVWPitNpw==} dev: true - /@internationalized/date@3.5.1: - resolution: {integrity: sha512-LUQIfwU9e+Fmutc/DpRTGXSdgYZLBegi4wygCWDSVmUdLTaMHsQyASDiJtREwanwKuQLq0hY76fCJ9J/9I2xOQ==} + /@internationalized/date@3.5.2: + resolution: {integrity: sha512-vo1yOMUt2hzp63IutEaTUxROdvQg1qlMRsbCvbay2AK2Gai7wIgCyK5weEX3nHkiLgo4qCXHijFNC/ILhlRpOQ==} dependencies: - '@swc/helpers': 0.5.3 + '@swc/helpers': 0.5.6 dev: false - /@internationalized/number@3.5.0: - resolution: {integrity: sha512-ZY1BW8HT9WKYvaubbuqXbbDdHhOUMfE2zHHFJeTppid0S+pc8HtdIxFxaYMsGjCb4UsF+MEJ4n2TfU7iHnUK8w==} + /@internationalized/number@3.5.1: + resolution: {integrity: sha512-N0fPU/nz15SwR9IbfJ5xaS9Ss/O5h1sVXMZf43vc9mxEG48ovglvvzBjF53aHlq20uoR6c+88CrIXipU/LSzwg==} dependencies: - '@swc/helpers': 0.5.3 + '@swc/helpers': 0.5.6 dev: false - /@invoke-ai/eslint-config-react@0.0.13(@typescript-eslint/eslint-plugin@6.19.0)(@typescript-eslint/parser@6.19.0)(eslint-config-prettier@9.1.0)(eslint-plugin-import@2.29.1)(eslint-plugin-react-hooks@4.6.0)(eslint-plugin-react-refresh@0.4.5)(eslint-plugin-react@7.33.2)(eslint-plugin-simple-import-sort@10.0.0)(eslint-plugin-storybook@0.6.15)(eslint-plugin-unused-imports@3.0.0)(eslint@8.56.0): - resolution: {integrity: sha512-dfo9k+wPHdvpy1z6ABoYXR/Ttzs1FAnbC46ttIxVhZuqDq8K5cLWznivrOfl7f0hJb8Cb8HiuQb4pHDxhHBDqA==} + /@invoke-ai/eslint-config-react@0.0.14(eslint@8.57.0)(prettier@3.2.5)(typescript@5.3.3): + resolution: {integrity: sha512-6ZUY9zgdDhv2WUoLdDKOQdU9ImnH0CBOFtRlOaNOh34IOsNRfn+JA7wqA0PKnkiNrlfPkIQWhn4GRJp68NT5bw==} peerDependencies: - '@typescript-eslint/eslint-plugin': ^6.19.0 - '@typescript-eslint/parser': ^6.19.0 eslint: ^8.56.0 - eslint-config-prettier: ^9.1.0 - eslint-plugin-import: ^2.29.1 - eslint-plugin-react: ^7.33.2 - eslint-plugin-react-hooks: ^4.6.0 - eslint-plugin-react-refresh: ^0.4.5 - eslint-plugin-simple-import-sort: ^10.0.0 - eslint-plugin-storybook: ^0.6.15 - eslint-plugin-unused-imports: ^3.0.0 + prettier: ^3.2.5 + typescript: ^5.3.3 dependencies: - '@typescript-eslint/eslint-plugin': 6.19.0(@typescript-eslint/parser@6.19.0)(eslint@8.56.0)(typescript@5.3.3) - '@typescript-eslint/parser': 6.19.0(eslint@8.56.0)(typescript@5.3.3) - eslint: 8.56.0 - eslint-config-prettier: 9.1.0(eslint@8.56.0) - eslint-plugin-import: 2.29.1(@typescript-eslint/parser@6.19.0)(eslint@8.56.0) - eslint-plugin-react: 7.33.2(eslint@8.56.0) - eslint-plugin-react-hooks: 4.6.0(eslint@8.56.0) - eslint-plugin-react-refresh: 0.4.5(eslint@8.56.0) - eslint-plugin-simple-import-sort: 10.0.0(eslint@8.56.0) - eslint-plugin-storybook: 0.6.15(eslint@8.56.0)(typescript@5.3.3) - eslint-plugin-unused-imports: 3.0.0(@typescript-eslint/eslint-plugin@6.19.0)(eslint@8.56.0) + '@typescript-eslint/eslint-plugin': 7.1.0(@typescript-eslint/parser@7.1.0)(eslint@8.57.0)(typescript@5.3.3) + '@typescript-eslint/parser': 7.1.0(eslint@8.57.0)(typescript@5.3.3) + eslint: 8.57.0 + eslint-config-prettier: 9.1.0(eslint@8.57.0) + eslint-plugin-import: 2.29.1(@typescript-eslint/parser@7.1.0)(eslint@8.57.0) + eslint-plugin-react: 7.33.2(eslint@8.57.0) + eslint-plugin-react-hooks: 4.6.0(eslint@8.57.0) + eslint-plugin-react-refresh: 0.4.5(eslint@8.57.0) + eslint-plugin-simple-import-sort: 12.0.0(eslint@8.57.0) + eslint-plugin-storybook: 
0.8.0(eslint@8.57.0)(typescript@5.3.3) + eslint-plugin-unused-imports: 3.1.0(@typescript-eslint/eslint-plugin@7.1.0)(eslint@8.57.0) + prettier: 3.2.5 + typescript: 5.3.3 + transitivePeerDependencies: + - eslint-import-resolver-typescript + - eslint-import-resolver-webpack + - supports-color dev: true - /@invoke-ai/prettier-config-react@0.0.6(prettier@3.2.4): - resolution: {integrity: sha512-qHE6GAw/Aka/8TLTN9U1U+8pxjaFe5irDv/uSgzqmrBR1rGiVyMp19pEficWRRt+03zYdquiiDjTmoabWQxY0Q==} + /@invoke-ai/prettier-config-react@0.0.7(prettier@3.2.5): + resolution: {integrity: sha512-vQeWzqwih116TBlIJII93L8ictj6uv7PxcSlAGNZrzG2UcaCFMsQqKCsB/qio26uihgv/EtvN6XAF96SnE0TKw==} peerDependencies: - prettier: ^3.2.4 + prettier: ^3.2.5 dependencies: - prettier: 3.2.4 + prettier: 3.2.5 dev: true - /@invoke-ai/ui-library@0.0.18(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.16)(@internationalized/date@3.5.1)(@types/react@18.2.48)(i18next@23.7.16)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-Yme+2+pzYy3TPb7ZT0hYmBwahH29ZRSVIxLKSexh3BsbJXbTzGssRQU78QvK6Ymxemgbso3P8Rs+IW0zNhQKjQ==} + /@invoke-ai/ui-library@0.0.21(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@fontsource-variable/inter@5.0.16)(@internationalized/date@3.5.2)(@types/react@18.2.59)(i18next@23.10.0)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-tCvgkBPDt0gNq+8IcR03e/Mw7R8Mb/SMXTqx3FEIxlTQEo93A/D38dKXeDCzTdx4sQ+sknfB+JLBbHs6sg5hhQ==} peerDependencies: '@fontsource-variable/inter': ^5.0.16 react: ^18.2.0 react-dom: ^18.2.0 dependencies: - '@ark-ui/react': 1.3.0(@internationalized/date@3.5.1)(react-dom@18.2.0)(react@18.2.0) + '@ark-ui/react': 1.3.0(@internationalized/date@3.5.2)(react-dom@18.2.0)(react@18.2.0) '@chakra-ui/anatomy': 2.2.2 '@chakra-ui/icons': 2.1.1(@chakra-ui/system@2.6.2)(react@18.2.0) '@chakra-ui/layout': 2.3.1(@chakra-ui/system@2.6.2)(react@18.2.0) '@chakra-ui/portal': 2.1.0(react-dom@18.2.0)(react@18.2.0) - '@chakra-ui/react': 2.8.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(@types/react@18.2.48)(framer-motion@10.18.0)(react-dom@18.2.0)(react@18.2.0) + '@chakra-ui/react': 2.8.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(@types/react@18.2.59)(framer-motion@10.18.0)(react-dom@18.2.0)(react@18.2.0) '@chakra-ui/styled-system': 2.9.2 '@chakra-ui/theme-tools': 2.1.2(@chakra-ui/styled-system@2.9.2) - '@emotion/react': 11.11.3(@types/react@18.2.48)(react@18.2.0) - '@emotion/styled': 11.11.0(@emotion/react@11.11.3)(@types/react@18.2.48)(react@18.2.0) + '@emotion/react': 11.11.3(@types/react@18.2.59)(react@18.2.0) + '@emotion/styled': 11.11.0(@emotion/react@11.11.3)(@types/react@18.2.59)(react@18.2.0) '@fontsource-variable/inter': 5.0.16 - '@nanostores/react': 0.7.1(nanostores@0.9.5)(react@18.2.0) - chakra-react-select: 4.7.6(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.11.3)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + '@nanostores/react': 0.7.2(nanostores@0.9.5)(react@18.2.0) + chakra-react-select: 
4.7.6(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.11.3)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) framer-motion: 10.18.0(react-dom@18.2.0)(react@18.2.0) lodash-es: 4.17.21 nanostores: 0.9.5 - overlayscrollbars: 2.4.7 - overlayscrollbars-react: 0.5.4(overlayscrollbars@2.4.7)(react@18.2.0) + overlayscrollbars: 2.5.0 + overlayscrollbars-react: 0.5.4(overlayscrollbars@2.5.0)(react@18.2.0) react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - react-i18next: 14.0.1(i18next@23.7.16)(react-dom@18.2.0)(react@18.2.0) + react-i18next: 14.0.5(i18next@23.10.0)(react-dom@18.2.0)(react@18.2.0) react-icons: 5.0.1(react@18.2.0) - react-select: 5.8.0(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + react-select: 5.8.0(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) transitivePeerDependencies: - '@chakra-ui/form-control' - '@chakra-ui/icon' @@ -3669,9 +3934,9 @@ packages: resolution: {integrity: sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 '@jest/types': 29.6.3 - '@jridgewell/trace-mapping': 0.3.21 + '@jridgewell/trace-mapping': 0.3.23 babel-plugin-istanbul: 6.1.1 chalk: 4.1.2 convert-source-map: 2.0.0 @@ -3694,7 +3959,7 @@ packages: dependencies: '@types/istanbul-lib-coverage': 2.0.6 '@types/istanbul-reports': 3.0.4 - '@types/node': 20.11.5 + '@types/node': 20.11.20 '@types/yargs': 16.0.9 chalk: 4.1.2 dev: true @@ -3706,12 +3971,12 @@ packages: '@jest/schemas': 29.6.3 '@types/istanbul-lib-coverage': 2.0.6 '@types/istanbul-reports': 3.0.4 - '@types/node': 20.11.5 + '@types/node': 20.11.20 '@types/yargs': 17.0.32 chalk: 4.1.2 dev: true - /@joshwooding/vite-plugin-react-docgen-typescript@0.3.0(typescript@5.3.3)(vite@5.0.12): + /@joshwooding/vite-plugin-react-docgen-typescript@0.3.0(typescript@5.3.3)(vite@5.1.4): resolution: {integrity: sha512-2D6y7fNvFmsLmRt6UCOFJPvFoPMJGT0Uh1Wg0RaigUp7kdQPs6yYn8Dmx6GZkOH/NW0yMTwRz/p0SRMMRo50vA==} peerDependencies: typescript: '>= 4.3.x' @@ -3725,20 +3990,20 @@ packages: magic-string: 0.27.0 react-docgen-typescript: 2.2.2(typescript@5.3.3) typescript: 5.3.3 - vite: 5.0.12(@types/node@20.11.5) + vite: 5.1.4(@types/node@20.11.20) dev: true - /@jridgewell/gen-mapping@0.3.3: - resolution: {integrity: sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==} + /@jridgewell/gen-mapping@0.3.4: + resolution: {integrity: sha512-Oud2QPM5dHviZNn4y/WhhYKSXksv+1xLEIsNrAbGcFzUN3ubqWRFT5gwPchNc5NuzILOU4tPBDTZ4VwhL8Y7cw==} engines: {node: '>=6.0.0'} dependencies: '@jridgewell/set-array': 1.1.2 '@jridgewell/sourcemap-codec': 1.4.15 - '@jridgewell/trace-mapping': 0.3.21 + '@jridgewell/trace-mapping': 0.3.23 dev: true - /@jridgewell/resolve-uri@3.1.1: - resolution: {integrity: sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==} + /@jridgewell/resolve-uri@3.1.2: + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} engines: {node: '>=6.0.0'} dev: true @@ -3750,10 +4015,10 @@ packages: /@jridgewell/sourcemap-codec@1.4.15: resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} - /@jridgewell/trace-mapping@0.3.21: - resolution: {integrity: 
sha512-SRfKmRe1KvYnxjEMtxEr+J4HIeMX5YBg/qhRHpxEIGjhX1rshcHlnFUE9K0GazhVKWM7B+nARSkV8LuvJdJ5/g==} + /@jridgewell/trace-mapping@0.3.23: + resolution: {integrity: sha512-9/4foRoUKp8s96tSkh8DlAAc5A0Ty8vLXld+l9gjKKY6ckwI8G15f0hskGmuLZu78ZlGa1vtsfOa+lnB4vG6Jg==} dependencies: - '@jridgewell/resolve-uri': 3.1.1 + '@jridgewell/resolve-uri': 3.1.2 '@jridgewell/sourcemap-codec': 1.4.15 dev: true @@ -3761,44 +4026,34 @@ packages: resolution: {integrity: sha512-dfLbk+PwWvFzSxwk3n5ySL0hfBog779o8h68wK/7/APo/7cgyWp5jcXockbxdk5kFRkbeXWm4Fbi9FrdN381sA==} dev: true - /@mantine/form@6.0.21(react@18.2.0): - resolution: {integrity: sha512-d4tlxyZic7MSDnaPx/WliCX1sRFDkUd2nxx4MxxO2T4OSek0YDqTlSBCxeoveu60P+vrQQN5rbbsVsaOJBe4SQ==} - peerDependencies: - react: '>=16.8.0' - dependencies: - fast-deep-equal: 3.1.3 - klona: 2.0.6 - react: 18.2.0 - dev: false - /@mdx-js/react@2.3.0(react@18.2.0): resolution: {integrity: sha512-zQH//gdOmuu7nt2oJR29vFhDv88oGPmVw6BggmrHeMI+xgEkp1B2dX9/bMBSYtK0dyLX/aOmesKS09g222K1/g==} peerDependencies: react: '>=16' dependencies: - '@types/mdx': 2.0.10 - '@types/react': 18.2.48 + '@types/mdx': 2.0.11 + '@types/react': 18.2.59 react: 18.2.0 dev: true - /@microsoft/api-extractor-model@7.28.3(@types/node@20.11.5): + /@microsoft/api-extractor-model@7.28.3(@types/node@20.11.20): resolution: {integrity: sha512-wT/kB2oDbdZXITyDh2SQLzaWwTOFbV326fP0pUwNW00WeliARs0qjmXBWmGWardEzp2U3/axkO3Lboqun6vrig==} dependencies: '@microsoft/tsdoc': 0.14.2 '@microsoft/tsdoc-config': 0.16.2 - '@rushstack/node-core-library': 3.62.0(@types/node@20.11.5) + '@rushstack/node-core-library': 3.62.0(@types/node@20.11.20) transitivePeerDependencies: - '@types/node' dev: true - /@microsoft/api-extractor@7.39.0(@types/node@20.11.5): + /@microsoft/api-extractor@7.39.0(@types/node@20.11.20): resolution: {integrity: sha512-PuXxzadgnvp+wdeZFPonssRAj/EW4Gm4s75TXzPk09h3wJ8RS3x7typf95B4vwZRrPTQBGopdUl+/vHvlPdAcg==} hasBin: true dependencies: - '@microsoft/api-extractor-model': 7.28.3(@types/node@20.11.5) + '@microsoft/api-extractor-model': 7.28.3(@types/node@20.11.20) '@microsoft/tsdoc': 0.14.2 '@microsoft/tsdoc-config': 0.16.2 - '@rushstack/node-core-library': 3.62.0(@types/node@20.11.5) + '@rushstack/node-core-library': 3.62.0(@types/node@20.11.20) '@rushstack/rig-package': 0.5.1 '@rushstack/ts-command-line': 4.17.1 colors: 1.2.5 @@ -3824,11 +4079,22 @@ packages: resolution: {integrity: sha512-9b8mPpKrfeGRuhFH5iO1iwCLeIIsV6+H1sRfxbkoGXIyQE2BTsPd9zqSqQJ+pv5sJ/hT5M1zvOFL02MnEezFug==} dev: true - /@nanostores/react@0.7.1(nanostores@0.9.5)(react@18.2.0): - resolution: {integrity: sha512-EXQg9N4MdI4eJQz/AZLIx3hxQ6BuBmV4Q55bCd5YCSgEOAW7tGTsIZxpRXxvxLXzflNvHTBvfrDNY38TlSVBkQ==} - engines: {node: ^16.0.0 || ^18.0.0 || >=20.0.0} + /@nanostores/react@0.7.2(nanostores@0.10.0)(react@18.2.0): + resolution: {integrity: sha512-e3OhHJFv3NMSFYDgREdlAQqkyBTHJM91s31kOZ4OvZwJKdFk5BLk0MLbh51EOGUz9QGX2aCHfy1RvweSi7fgwA==} + engines: {node: ^18.0.0 || >=20.0.0} peerDependencies: - nanostores: ^0.9.0 + nanostores: ^0.9.0 || ^0.10.0 + react: '>=18.0.0' + dependencies: + nanostores: 0.10.0 + react: 18.2.0 + dev: false + + /@nanostores/react@0.7.2(nanostores@0.9.5)(react@18.2.0): + resolution: {integrity: sha512-e3OhHJFv3NMSFYDgREdlAQqkyBTHJM91s31kOZ4OvZwJKdFk5BLk0MLbh51EOGUz9QGX2aCHfy1RvweSi7fgwA==} + engines: {node: ^18.0.0 || >=20.0.0} + peerDependencies: + nanostores: ^0.9.0 || ^0.10.0 react: '>=18.0.0' dependencies: nanostores: 0.9.5 @@ -3851,17 +4117,91 @@ packages: run-parallel: 1.2.0 dev: true + /@nodelib/fs.scandir@3.0.0: + resolution: {integrity: 
sha512-ktI9+PxfHYtKjF3cLTUAh2N+b8MijCRPNwKJNqTVdL0gB0QxLU2rIRaZ1t71oEa3YBDE6bukH1sR0+CDnpp/Mg==} + engines: {node: '>=16.14.0'} + dependencies: + '@nodelib/fs.stat': 3.0.0 + run-parallel: 1.2.0 + dev: true + /@nodelib/fs.stat@2.0.5: resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} engines: {node: '>= 8'} dev: true + /@nodelib/fs.stat@3.0.0: + resolution: {integrity: sha512-2tQOI38s19P9i7X/Drt0v8iMA+KMsgdhB/dyPER+e+2Y8L1Z7QvnuRdW/uLuf5YRFUYmnj4bMA6qCuZHFI1GDQ==} + engines: {node: '>=16.14.0'} + dev: true + /@nodelib/fs.walk@1.2.8: resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} engines: {node: '>= 8'} dependencies: '@nodelib/fs.scandir': 2.1.5 - fastq: 1.16.0 + fastq: 1.17.1 + dev: true + + /@nodelib/fs.walk@2.0.0: + resolution: {integrity: sha512-54voNDBobGdMl3BUXSu7UaDh1P85PGHWlJ5e0XhPugo1JulOyCtp2I+5ri4wplGDJ8QGwPEQW7/x3yTLU7yF1A==} + engines: {node: '>=16.14.0'} + dependencies: + '@nodelib/fs.scandir': 3.0.0 + fastq: 1.17.1 + dev: true + + /@npmcli/git@5.0.4: + resolution: {integrity: sha512-nr6/WezNzuYUppzXRaYu/W4aT5rLxdXqEFupbh6e/ovlYFQ8hpu1UUPV3Ir/YTl+74iXl2ZOMlGzudh9ZPUchQ==} + engines: {node: ^16.14.0 || >=18.0.0} + dependencies: + '@npmcli/promise-spawn': 7.0.1 + lru-cache: 10.2.0 + npm-pick-manifest: 9.0.0 + proc-log: 3.0.0 + promise-inflight: 1.0.1 + promise-retry: 2.0.1 + semver: 7.6.0 + which: 4.0.0 + transitivePeerDependencies: + - bluebird + dev: true + + /@npmcli/map-workspaces@3.0.4: + resolution: {integrity: sha512-Z0TbvXkRbacjFFLpVpV0e2mheCh+WzQpcqL+4xp49uNJOxOnIAPZyXtUxZ5Qn3QBTGKA11Exjd9a5411rBrhDg==} + engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} + dependencies: + '@npmcli/name-from-folder': 2.0.0 + glob: 10.3.10 + minimatch: 9.0.3 + read-package-json-fast: 3.0.2 + dev: true + + /@npmcli/name-from-folder@2.0.0: + resolution: {integrity: sha512-pwK+BfEBZJbKdNYpHHRTNBwBoqrN/iIMO0AiGvYsp3Hoaq0WbgGSWQR6SCldZovoDpY3yje5lkFUe6gsDgJ2vg==} + engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} + dev: true + + /@npmcli/package-json@5.0.0: + resolution: {integrity: sha512-OI2zdYBLhQ7kpNPaJxiflofYIpkNLi+lnGdzqUOfRmCF3r2l1nadcjtCYMJKv/Utm/ZtlffaUuTiAktPHbc17g==} + engines: {node: ^16.14.0 || >=18.0.0} + dependencies: + '@npmcli/git': 5.0.4 + glob: 10.3.10 + hosted-git-info: 7.0.1 + json-parse-even-better-errors: 3.0.1 + normalize-package-data: 6.0.0 + proc-log: 3.0.0 + semver: 7.6.0 + transitivePeerDependencies: + - bluebird + dev: true + + /@npmcli/promise-spawn@7.0.1: + resolution: {integrity: sha512-P4KkF9jX3y+7yFUxgcUdDtLy+t4OlDGuEBLNs57AZsfSfg+uV6MLndqGpnl4831ggaEdXwR50XFoZP4VFtHolg==} + engines: {node: ^16.14.0 || >=18.0.0} + dependencies: + which: 4.0.0 dev: true /@pkgjs/parseargs@0.11.0: @@ -3871,6 +4211,130 @@ packages: dev: true optional: true + /@pnpm/constants@7.1.1: + resolution: {integrity: sha512-31pZqMtjwV+Vaq7MaPrT1EoDFSYwye3dp6BiHIGRJmVThCQwySRKM7hCvqqI94epNkqFAAYoWrNynWoRYosGdw==} + engines: {node: '>=16.14'} + dev: true + + /@pnpm/core-loggers@9.0.6(@pnpm/logger@5.0.0): + resolution: {integrity: sha512-iK67SGbp+06bA/elpg51wygPFjNA7JKHtKkpLxqXXHw+AjFFBC3f2OznJsCIuDK6HdGi5UhHLYqo5QxJ2gMqJQ==} + engines: {node: '>=16.14'} + peerDependencies: + '@pnpm/logger': ^5.0.0 + dependencies: + '@pnpm/logger': 5.0.0 + '@pnpm/types': 9.4.2 + dev: true + + /@pnpm/error@5.0.3: + resolution: {integrity: sha512-ONJU5cUeoeJSy50qOYsMZQHTA/9QKmGgh1ATfEpCLgtbdwqUiwD9MxHNeXUYYI/pocBCz6r1ZCFqiQvO+8SUKA==} + engines: {node: '>=16.14'} + 
dependencies: + '@pnpm/constants': 7.1.1 + dev: true + + /@pnpm/fetching-types@5.0.0: + resolution: {integrity: sha512-o9gdO1v8Uc5P2fBBuW6GSpfTqIivQmQlqjQJdFiQX0m+tgxlrMRneIg392jZuc6fk7kFqjLheInlslgJfwY+4Q==} + engines: {node: '>=16.14'} + dependencies: + '@zkochan/retry': 0.2.0 + node-fetch: 3.0.0-beta.9 + transitivePeerDependencies: + - domexception + dev: true + + /@pnpm/graceful-fs@3.2.0: + resolution: {integrity: sha512-vRoXJxscDpHak7YE9SqCkzfrayn+Lw+YueOeHIPEqkgokrHeYgYeONoc2kGh0ObHaRtNSsonozVfJ456kxLNvA==} + engines: {node: '>=16.14'} + dependencies: + graceful-fs: 4.2.11 + dev: true + + /@pnpm/logger@5.0.0: + resolution: {integrity: sha512-YfcB2QrX+Wx1o6LD1G2Y2fhDhOix/bAY/oAnMpHoNLsKkWIRbt1oKLkIFvxBMzLwAEPqnYWguJrYC+J6i4ywbw==} + engines: {node: '>=12.17'} + dependencies: + bole: 5.0.11 + ndjson: 2.0.0 + dev: true + + /@pnpm/npm-package-arg@1.0.0: + resolution: {integrity: sha512-oQYP08exi6mOPdAZZWcNIGS+KKPsnNwUBzSuAEGWuCcqwMAt3k/WVCqVIXzBxhO5sP2b43og69VHmPj6IroKqw==} + engines: {node: '>=14.6'} + dependencies: + hosted-git-info: 4.1.0 + semver: 7.6.0 + validate-npm-package-name: 4.0.0 + dev: true + + /@pnpm/npm-resolver@18.1.1(@pnpm/logger@5.0.0): + resolution: {integrity: sha512-NptzncmMD5ZMimbjWkGpMzuBRhlCY+sh7mzypPdBOTNlh5hmEQe/VaRKjNK4V9/b0C/llElkvIePL6acybu86w==} + engines: {node: '>=16.14'} + peerDependencies: + '@pnpm/logger': ^5.0.0 + dependencies: + '@pnpm/core-loggers': 9.0.6(@pnpm/logger@5.0.0) + '@pnpm/error': 5.0.3 + '@pnpm/fetching-types': 5.0.0 + '@pnpm/graceful-fs': 3.2.0 + '@pnpm/logger': 5.0.0 + '@pnpm/resolve-workspace-range': 5.0.1 + '@pnpm/resolver-base': 11.1.0 + '@pnpm/types': 9.4.2 + '@zkochan/retry': 0.2.0 + encode-registry: 3.0.1 + load-json-file: 6.2.0 + lru-cache: 10.2.0 + normalize-path: 3.0.0 + p-limit: 3.1.0 + p-memoize: 4.0.1 + parse-npm-tarball-url: 3.0.0 + path-temp: 2.1.0 + ramda: /@pnpm/ramda@0.28.1 + rename-overwrite: 5.0.0 + semver: 7.6.0 + ssri: 10.0.5 + version-selector-type: 3.0.0 + transitivePeerDependencies: + - domexception + dev: true + + /@pnpm/ramda@0.28.1: + resolution: {integrity: sha512-zcAG+lvU0fMziNeGXpPyCyCJYp5ZVrPElEE4t14jAmViaihohocZ+dDkcRIyAomox8pQsuZnv1EyHR+pOhmUWw==} + dev: true + + /@pnpm/resolve-workspace-range@5.0.1: + resolution: {integrity: sha512-yQ0pMthlw8rTgS/C9hrjne+NEnnSNevCjtdodd7i15I59jMBYciHifZ/vjg0NY+Jl+USTc3dBE+0h/4tdYjMKg==} + engines: {node: '>=16.14'} + dependencies: + semver: 7.6.0 + dev: true + + /@pnpm/resolver-base@11.1.0: + resolution: {integrity: sha512-y2qKaj18pwe1VWc3YXEitdYFo+WqOOt60aqTUuOVkJAirUzz0DzuYh3Ifct4znYWPdgUXHaN5DMphNF5iL85rA==} + engines: {node: '>=16.14'} + dependencies: + '@pnpm/types': 9.4.2 + dev: true + + /@pnpm/types@9.4.2: + resolution: {integrity: sha512-g1hcF8Nv4gd76POilz9gD4LITAPXOe5nX4ijgr8ixCbLQZfcpYiMfJ+C1RlMNRUDo8vhlNB4O3bUlxmT6EAQXA==} + engines: {node: '>=16.14'} + dev: true + + /@pnpm/workspace.pkgs-graph@2.0.15(@pnpm/logger@5.0.0): + resolution: {integrity: sha512-Txxd5FzzVfBfGCTngISaxFlJzZhzdS8BUrCEtAWJfZOFbQzpWy27rzkaS7TaWW2dHiFcCVYzPI/2vgxfeRansA==} + engines: {node: '>=16.14'} + dependencies: + '@pnpm/npm-package-arg': 1.0.0 + '@pnpm/npm-resolver': 18.1.1(@pnpm/logger@5.0.0) + '@pnpm/resolve-workspace-range': 5.0.1 + ramda: /@pnpm/ramda@0.28.1 + transitivePeerDependencies: + - '@pnpm/logger' + - domexception + dev: true + /@popperjs/core@2.11.8: resolution: {integrity: sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A==} dev: false @@ -3878,16 +4342,16 @@ packages: /@radix-ui/number@1.0.1: resolution: {integrity: 
sha512-T5gIdVO2mmPW3NNhjNgEP3cqMXjXL9UbO0BzWcXfvdBs+BohbQxvd/K5hSVKmn9/lbTdsQVKbUcP5WLCwvUbBg==} dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 dev: true /@radix-ui/primitive@1.0.1: resolution: {integrity: sha512-yQ8oGX2GVsEYMWGxcovu1uGWPCxV5BFfeeYxqPmuAzUyLT9qmaMXSAhXpb0WrspIeqYzdJpkh2vHModJPgRIaw==} dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 dev: true - /@radix-ui/react-arrow@1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /@radix-ui/react-arrow@1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-wSP+pHsB/jQRaL6voubsQ/ZlrGBHHrOjmBnr19hxYgtS0WvAFwZhK2WP/YY5yF9uKECCEEDGxuLxq1NBK51wFA==} peerDependencies: '@types/react': '*' @@ -3900,15 +4364,15 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@types/react': 18.2.48 - '@types/react-dom': 18.2.18 + '@babel/runtime': 7.23.9 + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@types/react': 18.2.59 + '@types/react-dom': 18.2.19 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true - /@radix-ui/react-collection@1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /@radix-ui/react-collection@1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-3SzW+0PW7yBBoQlT8wNcGtaxaD0XSu0uLUFgrtHY08Acx05TaHaOmVLR73c0j/cqpDy53KBMO7s0dx2wmOIDIA==} peerDependencies: '@types/react': '*' @@ -3921,18 +4385,18 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-context': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-slot': 1.0.2(@types/react@18.2.48)(react@18.2.0) - '@types/react': 18.2.48 - '@types/react-dom': 18.2.18 + '@babel/runtime': 7.23.9 + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-context': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-slot': 1.0.2(@types/react@18.2.59)(react@18.2.0) + '@types/react': 18.2.59 + '@types/react-dom': 18.2.19 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true - /@radix-ui/react-compose-refs@1.0.1(@types/react@18.2.48)(react@18.2.0): + /@radix-ui/react-compose-refs@1.0.1(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-fDSBgd44FKHa1FRMU59qBMPFcl2PZE+2nmqunj+BWFyYYjnhIDWL2ItDs3rrbJDQOtzt5nIebLCQc4QRfz6LJw==} peerDependencies: '@types/react': '*' @@ -3941,12 +4405,12 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@types/react': 18.2.48 + '@babel/runtime': 7.23.9 + '@types/react': 18.2.59 react: 18.2.0 dev: true - /@radix-ui/react-context@1.0.1(@types/react@18.2.48)(react@18.2.0): + /@radix-ui/react-context@1.0.1(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-ebbrdFoYTcuZ0v4wG5tedGnp9tzcV8awzsxYph7gXUyvnNLuTIcCk1q17JEbnVhXAKG9oX3KtchwiMIAYp9NLg==} peerDependencies: '@types/react': '*' @@ -3955,12 +4419,12 @@ packages: '@types/react': 
optional: true dependencies: - '@babel/runtime': 7.23.8 - '@types/react': 18.2.48 + '@babel/runtime': 7.23.9 + '@types/react': 18.2.59 react: 18.2.0 dev: true - /@radix-ui/react-direction@1.0.1(@types/react@18.2.48)(react@18.2.0): + /@radix-ui/react-direction@1.0.1(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-RXcvnXgyvYvBEOhCBuddKecVkoMiI10Jcm5cTI7abJRAHYfFxeu+FBQs/DvdxSYucxR5mna0dNsL6QFlds5TMA==} peerDependencies: '@types/react': '*' @@ -3969,12 +4433,12 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@types/react': 18.2.48 + '@babel/runtime': 7.23.9 + '@types/react': 18.2.59 react: 18.2.0 dev: true - /@radix-ui/react-dismissable-layer@1.0.4(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /@radix-ui/react-dismissable-layer@1.0.4(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-7UpBa/RKMoHJYjie1gkF1DlK8l1fdU/VKDpoS3rCCo8YBJR294GwcEHyxHw72yvphJ7ld0AXEcSLAzY2F/WyCg==} peerDependencies: '@types/react': '*' @@ -3987,19 +4451,19 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 '@radix-ui/primitive': 1.0.1 - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-use-escape-keydown': 1.0.3(@types/react@18.2.48)(react@18.2.0) - '@types/react': 18.2.48 - '@types/react-dom': 18.2.18 + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-use-escape-keydown': 1.0.3(@types/react@18.2.59)(react@18.2.0) + '@types/react': 18.2.59 + '@types/react-dom': 18.2.19 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true - /@radix-ui/react-focus-guards@1.0.1(@types/react@18.2.48)(react@18.2.0): + /@radix-ui/react-focus-guards@1.0.1(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-Rect2dWbQ8waGzhMavsIbmSVCgYxkXLxxR3ZvCX79JOglzdEy4JXMb98lq4hPxUbLr77nP0UOGf4rcMU+s1pUA==} peerDependencies: '@types/react': '*' @@ -4008,12 +4472,12 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@types/react': 18.2.48 + '@babel/runtime': 7.23.9 + '@types/react': 18.2.59 react: 18.2.0 dev: true - /@radix-ui/react-focus-scope@1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /@radix-ui/react-focus-scope@1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-upXdPfqI4islj2CslyfUBNlaJCPybbqRHAi1KER7Isel9Q2AtSJ0zRBZv8mWQiFXD2nyAJ4BhC3yXgZ6kMBSrQ==} peerDependencies: '@types/react': '*' @@ -4026,17 +4490,17 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@types/react': 18.2.48 - '@types/react-dom': 18.2.18 + '@babel/runtime': 7.23.9 + '@radix-ui/react-compose-refs': 
1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@types/react': 18.2.59 + '@types/react-dom': 18.2.19 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true - /@radix-ui/react-id@1.0.1(@types/react@18.2.48)(react@18.2.0): + /@radix-ui/react-id@1.0.1(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-tI7sT/kqYp8p96yGWY1OAnLHrqDgzHefRBKQ2YAkBS5ja7QLcZ9Z/uY7bEjPUatf8RomoXM8/1sMj1IJaE5UzQ==} peerDependencies: '@types/react': '*' @@ -4045,13 +4509,13 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@types/react': 18.2.48 + '@babel/runtime': 7.23.9 + '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@types/react': 18.2.59 react: 18.2.0 dev: true - /@radix-ui/react-popper@1.1.2(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /@radix-ui/react-popper@1.1.2(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-1CnGGfFi/bbqtJZZ0P/NQY20xdG3E0LALJaLUEoKwPLwl6PPPfbeiCqMVQnhoFRAxjJj4RpBRJzDmUgsex2tSg==} peerDependencies: '@types/react': '*' @@ -4064,24 +4528,24 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@floating-ui/react-dom': 2.0.6(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-arrow': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-context': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-use-rect': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-use-size': 1.0.1(@types/react@18.2.48)(react@18.2.0) + '@babel/runtime': 7.23.9 + '@floating-ui/react-dom': 2.0.8(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-arrow': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-context': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-use-rect': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-use-size': 1.0.1(@types/react@18.2.59)(react@18.2.0) '@radix-ui/rect': 1.0.1 - '@types/react': 18.2.48 - '@types/react-dom': 18.2.18 + '@types/react': 18.2.59 + '@types/react-dom': 18.2.19 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true - /@radix-ui/react-portal@1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /@radix-ui/react-portal@1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: 
sha512-xLYZeHrWoPmA5mEKEfZZevoVRK/Q43GfzRXkWV6qawIWWK8t6ifIiLQdd7rmQ4Vk1bmI21XhqF9BN3jWf+phpA==} peerDependencies: '@types/react': '*' @@ -4094,15 +4558,15 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@types/react': 18.2.48 - '@types/react-dom': 18.2.18 + '@babel/runtime': 7.23.9 + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@types/react': 18.2.59 + '@types/react-dom': 18.2.19 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true - /@radix-ui/react-primitive@1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /@radix-ui/react-primitive@1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-yi58uVyoAcK/Nq1inRY56ZSjKypBNKTa/1mcL8qdl6oJeEaDbOldlzrGn7P6Q3Id5d+SYNGc5AJgc4vGhjs5+g==} peerDependencies: '@types/react': '*' @@ -4115,15 +4579,15 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@radix-ui/react-slot': 1.0.2(@types/react@18.2.48)(react@18.2.0) - '@types/react': 18.2.48 - '@types/react-dom': 18.2.18 + '@babel/runtime': 7.23.9 + '@radix-ui/react-slot': 1.0.2(@types/react@18.2.59)(react@18.2.0) + '@types/react': 18.2.59 + '@types/react-dom': 18.2.19 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true - /@radix-ui/react-roving-focus@1.0.4(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /@radix-ui/react-roving-focus@1.0.4(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-2mUg5Mgcu001VkGy+FfzZyzbmuUWzgWkj3rvv4yu+mLw03+mTzbxZHvfcGyFp2b8EkQeMkpRQ5FiA2Vr2O6TeQ==} peerDependencies: '@types/react': '*' @@ -4136,23 +4600,23 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 '@radix-ui/primitive': 1.0.1 - '@radix-ui/react-collection': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-context': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-direction': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-id': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@types/react': 18.2.48 - '@types/react-dom': 18.2.18 + '@radix-ui/react-collection': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-context': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-direction': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-id': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@types/react': 18.2.59 + '@types/react-dom': 18.2.19 
react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true - /@radix-ui/react-select@1.2.2(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /@radix-ui/react-select@1.2.2(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-zI7McXr8fNaSrUY9mZe4x/HC0jTLY9fWNhO1oLWYMQGDXuV4UCivIGTxwioSzO0ZCYX9iSLyWmAh/1TOmX3Cnw==} peerDependencies: '@types/react': '*' @@ -4165,35 +4629,35 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 '@radix-ui/number': 1.0.1 '@radix-ui/primitive': 1.0.1 - '@radix-ui/react-collection': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-context': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-direction': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-dismissable-layer': 1.0.4(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-focus-guards': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-focus-scope': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-id': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-popper': 1.1.2(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-portal': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-slot': 1.0.2(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-use-previous': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-visually-hidden': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@types/react': 18.2.48 - '@types/react-dom': 18.2.18 + '@radix-ui/react-collection': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-context': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-direction': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-dismissable-layer': 1.0.4(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-focus-guards': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-focus-scope': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-id': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-popper': 1.1.2(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-portal': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-slot': 1.0.2(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-use-controllable-state': 
1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-use-previous': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-visually-hidden': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@types/react': 18.2.59 + '@types/react-dom': 18.2.19 aria-hidden: 1.2.3 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - react-remove-scroll: 2.5.5(@types/react@18.2.48)(react@18.2.0) + react-remove-scroll: 2.5.5(@types/react@18.2.59)(react@18.2.0) dev: true - /@radix-ui/react-separator@1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /@radix-ui/react-separator@1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-itYmTy/kokS21aiV5+Z56MZB54KrhPgn6eHDKkFeOLR34HMN2s8PaN47qZZAGnvupcjxHaFZnW4pQEh0BvvVuw==} peerDependencies: '@types/react': '*' @@ -4206,15 +4670,15 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@types/react': 18.2.48 - '@types/react-dom': 18.2.18 + '@babel/runtime': 7.23.9 + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@types/react': 18.2.59 + '@types/react-dom': 18.2.19 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true - /@radix-ui/react-slot@1.0.2(@types/react@18.2.48)(react@18.2.0): + /@radix-ui/react-slot@1.0.2(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-YeTpuq4deV+6DusvVUW4ivBgnkHwECUu0BiN43L5UCDFgdhsRUWAghhTF5MbvNTPzmiFOx90asDSUjWuCNapwg==} peerDependencies: '@types/react': '*' @@ -4223,13 +4687,13 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@types/react': 18.2.48 + '@babel/runtime': 7.23.9 + '@radix-ui/react-compose-refs': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@types/react': 18.2.59 react: 18.2.0 dev: true - /@radix-ui/react-toggle-group@1.0.4(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /@radix-ui/react-toggle-group@1.0.4(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-Uaj/M/cMyiyT9Bx6fOZO0SAG4Cls0GptBWiBmBxofmDbNVnYYoyRWj/2M/6VCi/7qcXFWnHhRUfdfZFvvkuu8A==} peerDependencies: '@types/react': '*' @@ -4242,21 +4706,21 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 '@radix-ui/primitive': 1.0.1 - '@radix-ui/react-context': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-direction': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-roving-focus': 1.0.4(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-toggle': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@types/react': 18.2.48 - '@types/react-dom': 18.2.18 + '@radix-ui/react-context': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-direction': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-primitive': 
1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-roving-focus': 1.0.4(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-toggle': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@types/react': 18.2.59 + '@types/react-dom': 18.2.19 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true - /@radix-ui/react-toggle@1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /@radix-ui/react-toggle@1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-Pkqg3+Bc98ftZGsl60CLANXQBBQ4W3mTFS9EJvNxKMZ7magklKV69/id1mlAlOFDDfHvlCms0fx8fA4CMKDJHg==} peerDependencies: '@types/react': '*' @@ -4269,17 +4733,17 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 '@radix-ui/primitive': 1.0.1 - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@types/react': 18.2.48 - '@types/react-dom': 18.2.18 + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-use-controllable-state': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@types/react': 18.2.59 + '@types/react-dom': 18.2.19 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true - /@radix-ui/react-toolbar@1.0.4(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /@radix-ui/react-toolbar@1.0.4(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-tBgmM/O7a07xbaEkYJWYTXkIdU/1pW4/KZORR43toC/4XWyBCURK0ei9kMUdp+gTPPKBgYLxXmRSH1EVcIDp8Q==} peerDependencies: '@types/react': '*' @@ -4292,21 +4756,21 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 '@radix-ui/primitive': 1.0.1 - '@radix-ui/react-context': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-direction': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-roving-focus': 1.0.4(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-separator': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-toggle-group': 1.0.4(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@types/react': 18.2.48 - '@types/react-dom': 18.2.18 + '@radix-ui/react-context': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-direction': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-roving-focus': 1.0.4(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-separator': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-toggle-group': 1.0.4(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@types/react': 18.2.59 + '@types/react-dom': 18.2.19 react: 
18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true - /@radix-ui/react-use-callback-ref@1.0.1(@types/react@18.2.48)(react@18.2.0): + /@radix-ui/react-use-callback-ref@1.0.1(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-D94LjX4Sp0xJFVaoQOd3OO9k7tpBYNOXdVhkltUbGv2Qb9OXdrg/CpsjlZv7ia14Sylv398LswWBVVu5nqKzAQ==} peerDependencies: '@types/react': '*' @@ -4315,12 +4779,12 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@types/react': 18.2.48 + '@babel/runtime': 7.23.9 + '@types/react': 18.2.59 react: 18.2.0 dev: true - /@radix-ui/react-use-controllable-state@1.0.1(@types/react@18.2.48)(react@18.2.0): + /@radix-ui/react-use-controllable-state@1.0.1(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-Svl5GY5FQeN758fWKrjM6Qb7asvXeiZltlT4U2gVfl8Gx5UAv2sMR0LWo8yhsIZh2oQ0eFdZ59aoOOMV7b47VA==} peerDependencies: '@types/react': '*' @@ -4329,13 +4793,13 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@types/react': 18.2.48 + '@babel/runtime': 7.23.9 + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@types/react': 18.2.59 react: 18.2.0 dev: true - /@radix-ui/react-use-escape-keydown@1.0.3(@types/react@18.2.48)(react@18.2.0): + /@radix-ui/react-use-escape-keydown@1.0.3(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-vyL82j40hcFicA+M4Ex7hVkB9vHgSse1ZWomAqV2Je3RleKGO5iM8KMOEtfoSB0PnIelMd2lATjTGMYqN5ylTg==} peerDependencies: '@types/react': '*' @@ -4344,13 +4808,13 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@types/react': 18.2.48 + '@babel/runtime': 7.23.9 + '@radix-ui/react-use-callback-ref': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@types/react': 18.2.59 react: 18.2.0 dev: true - /@radix-ui/react-use-layout-effect@1.0.1(@types/react@18.2.48)(react@18.2.0): + /@radix-ui/react-use-layout-effect@1.0.1(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-v/5RegiJWYdoCvMnITBkNNx6bCj20fiaJnWtRkU18yITptraXjffz5Qbn05uOiQnOvi+dbkznkoaMltz1GnszQ==} peerDependencies: '@types/react': '*' @@ -4359,12 +4823,12 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@types/react': 18.2.48 + '@babel/runtime': 7.23.9 + '@types/react': 18.2.59 react: 18.2.0 dev: true - /@radix-ui/react-use-previous@1.0.1(@types/react@18.2.48)(react@18.2.0): + /@radix-ui/react-use-previous@1.0.1(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-cV5La9DPwiQ7S0gf/0qiD6YgNqM5Fk97Kdrlc5yBcrF3jyEZQwm7vYFqMo4IfeHgJXsRaMvLABFtd0OVEmZhDw==} peerDependencies: '@types/react': '*' @@ -4373,12 +4837,12 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@types/react': 18.2.48 + '@babel/runtime': 7.23.9 + '@types/react': 18.2.59 react: 18.2.0 dev: true - /@radix-ui/react-use-rect@1.0.1(@types/react@18.2.48)(react@18.2.0): + /@radix-ui/react-use-rect@1.0.1(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-Cq5DLuSiuYVKNU8orzJMbl15TXilTnJKUCltMVQg53BQOF1/C5toAaGrowkgksdBQ9H+SRL23g0HDmg9tvmxXw==} peerDependencies: '@types/react': '*' @@ -4387,13 +4851,13 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 '@radix-ui/rect': 1.0.1 - '@types/react': 18.2.48 + '@types/react': 18.2.59 react: 18.2.0 dev: true - 
/@radix-ui/react-use-size@1.0.1(@types/react@18.2.48)(react@18.2.0): + /@radix-ui/react-use-size@1.0.1(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-ibay+VqrgcaI6veAojjofPATwledXiSmX+C0KrBk/xgpX9rBzPV3OsfwlhQdUOFbh+LKQorLYT+xTXW9V8yd0g==} peerDependencies: '@types/react': '*' @@ -4402,13 +4866,13 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.2.48)(react@18.2.0) - '@types/react': 18.2.48 + '@babel/runtime': 7.23.9 + '@radix-ui/react-use-layout-effect': 1.0.1(@types/react@18.2.59)(react@18.2.0) + '@types/react': 18.2.59 react: 18.2.0 dev: true - /@radix-ui/react-visually-hidden@1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /@radix-ui/react-visually-hidden@1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-D4w41yN5YRKtu464TLnByKzMDG/JlMPHtfZgQAu9v6mNakUqGUI9vUrfQKz8NK41VMm/xbZbh76NUTVtIYqOMA==} peerDependencies: '@types/react': '*' @@ -4421,10 +4885,10 @@ packages: '@types/react-dom': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@types/react': 18.2.48 - '@types/react-dom': 18.2.18 + '@babel/runtime': 7.23.9 + '@radix-ui/react-primitive': 1.0.3(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@types/react': 18.2.59 + '@types/react-dom': 18.2.19 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true @@ -4432,43 +4896,43 @@ packages: /@radix-ui/rect@1.0.1: resolution: {integrity: sha512-fyrgCaedtvMg9NK3en0pnOYJdtfwxUcNolezkNPUsoX57X8oQk+NkqcvzHXD2uKNij6GXmWU9NDru2IWjrO4BQ==} dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 dev: true - /@reactflow/background@11.3.7(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-PhkvoFtO/NXJgFtBvfbPwdR/6/dl25egQlFhKWS3T4aYa7rh80dvf6dF3t6+JXJS4q5ToYJizD2/n8/qylo1yQ==} + /@reactflow/background@11.3.9(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-byj/G9pEC8tN0wT/ptcl/LkEP/BBfa33/SvBkqE4XwyofckqF87lKp573qGlisfnsijwAbpDlf81PuFL41So4Q==} peerDependencies: react: '>=17' react-dom: '>=17' dependencies: - '@reactflow/core': 11.10.2(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + '@reactflow/core': 11.10.4(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) classcat: 5.0.4 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - zustand: 4.4.7(@types/react@18.2.48)(react@18.2.0) + zustand: 4.5.1(@types/react@18.2.59)(react@18.2.0) transitivePeerDependencies: - '@types/react' - immer dev: false - /@reactflow/controls@11.2.7(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-mugzVALH/SuKlVKk+JCRm1OXQ+p8e9+k8PCTIaqL+nBl+lPF8KA4uMm8ApsOvhuSAb2A80ezewpyvYHr0qSYVA==} + /@reactflow/controls@11.2.9(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-e8nWplbYfOn83KN1BrxTXS17+enLyFnjZPbyDgHSRLtI5ZGPKF/8iRXV+VXb2LFVzlu4Wh3la/pkxtfP/0aguA==} peerDependencies: react: '>=17' react-dom: '>=17' dependencies: - '@reactflow/core': 11.10.2(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + '@reactflow/core': 11.10.4(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) classcat: 5.0.4 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - zustand: 4.4.7(@types/react@18.2.48)(react@18.2.0) + zustand: 
4.5.1(@types/react@18.2.59)(react@18.2.0) transitivePeerDependencies: - '@types/react' - immer dev: false - /@reactflow/core@11.10.2(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-/cbTxtFpfkIGReSVkcnQhS4Jx4VFY2AhPlJ5n0sbPtnR7OWowF9zodh5Yyzr4j1NOUoBgJ9h+UqGEwwY2dbAlw==} + /@reactflow/core@11.10.4(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-j3i9b2fsTX/sBbOm+RmNzYEFWbNx4jGWGuGooh2r1jQaE2eV+TLJgiG/VNOp0q5mBl9f6g1IXs3Gm86S9JfcGw==} peerDependencies: react: '>=17' react-dom: '>=17' @@ -4483,19 +4947,19 @@ packages: d3-zoom: 3.0.0 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - zustand: 4.4.7(@types/react@18.2.48)(react@18.2.0) + zustand: 4.5.1(@types/react@18.2.59)(react@18.2.0) transitivePeerDependencies: - '@types/react' - immer dev: false - /@reactflow/minimap@11.7.7(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-Pwqw31tJ663cJur6ypqyJU33nPckvTepmz96erdQZoHsfOyLmFj4nXT7afC30DJ48lp0nfNsw+028mlf7f/h4g==} + /@reactflow/minimap@11.7.9(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-le95jyTtt3TEtJ1qa7tZ5hyM4S7gaEQkW43cixcMOZLu33VAdc2aCpJg/fXcRrrf7moN2Mbl9WIMNXUKsp5ILA==} peerDependencies: react: '>=17' react-dom: '>=17' dependencies: - '@reactflow/core': 11.10.2(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + '@reactflow/core': 11.10.4(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) '@types/d3-selection': 3.0.10 '@types/d3-zoom': 3.0.8 classcat: 5.0.4 @@ -4503,48 +4967,48 @@ packages: d3-zoom: 3.0.0 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - zustand: 4.4.7(@types/react@18.2.48)(react@18.2.0) + zustand: 4.5.1(@types/react@18.2.59)(react@18.2.0) transitivePeerDependencies: - '@types/react' - immer dev: false - /@reactflow/node-resizer@2.2.7(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-BMBstmWNiklHnnAjHu8irkiPQ8/k8nnjzqlTql4acbVhD6Tsdxx/t/saOkELmfQODqGZNiPw9+pHcAHgtE6oNQ==} + /@reactflow/node-resizer@2.2.9(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-HfickMm0hPDIHt9qH997nLdgLt0kayQyslKE0RS/GZvZ4UMQJlx/NRRyj5y47Qyg0NnC66KYOQWDM9LLzRTnUg==} peerDependencies: react: '>=17' react-dom: '>=17' dependencies: - '@reactflow/core': 11.10.2(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + '@reactflow/core': 11.10.4(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) classcat: 5.0.4 d3-drag: 3.0.0 d3-selection: 3.0.0 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - zustand: 4.4.7(@types/react@18.2.48)(react@18.2.0) + zustand: 4.5.1(@types/react@18.2.59)(react@18.2.0) transitivePeerDependencies: - '@types/react' - immer dev: false - /@reactflow/node-toolbar@1.3.7(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-75moEQKg23YKA3A2DNSFhq719ZPmby5mpwOD+NO7ZffJ88oMS/2eY8l8qpA3hvb1PTBHDxyKazhJirW+f4t0Wg==} + /@reactflow/node-toolbar@1.3.9(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-VmgxKmToax4sX1biZ9LXA7cj/TBJ+E5cklLGwquCCVVxh+lxpZGTBF3a5FJGVHiUNBBtFsC8ldcSZIK4cAlQww==} peerDependencies: react: '>=17' react-dom: '>=17' dependencies: - '@reactflow/core': 11.10.2(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + '@reactflow/core': 11.10.4(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) classcat: 5.0.4 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - zustand: 4.4.7(@types/react@18.2.48)(react@18.2.0) + zustand: 4.5.1(@types/react@18.2.59)(react@18.2.0) 
transitivePeerDependencies: - '@types/react' - immer dev: false - /@reduxjs/toolkit@2.0.1(react-redux@9.1.0)(react@18.2.0): - resolution: {integrity: sha512-fxIjrR9934cmS8YXIGd9e7s1XRsEU++aFc9DVNMFMRTM5Vtsg2DCRMj21eslGtDt43IUf9bJL3h5bwUlZleibA==} + /@reduxjs/toolkit@2.2.1(react-redux@9.1.0)(react@18.2.0): + resolution: {integrity: sha512-8CREoqJovQW/5I4yvvijm/emUiCCmcs4Ev4XPWd4mizSO+dD3g5G6w34QK5AGeNrSH7qM8Fl66j4vuV7dpOdkw==} peerDependencies: react: ^16.9.0 || ^17.0.0 || ^18 react-redux: ^7.2.1 || ^8.1.3 || ^9.0.0 @@ -4556,10 +5020,10 @@ packages: dependencies: immer: 10.0.3 react: 18.2.0 - react-redux: 9.1.0(@types/react@18.2.48)(react@18.2.0)(redux@5.0.1) + react-redux: 9.1.0(@types/react@18.2.59)(react@18.2.0)(redux@5.0.1) redux: 5.0.1 redux-thunk: 3.1.0(redux@5.0.1) - reselect: 5.0.1(patch_hash=kvbgwzjyy4x4fnh7znyocvb75q) + reselect: 5.1.0 dev: false /@roarr/browser-log-writer@1.3.0: @@ -4593,111 +5057,111 @@ packages: picomatch: 2.3.1 dev: true - /@rollup/rollup-android-arm-eabi@4.9.4: - resolution: {integrity: sha512-ub/SN3yWqIv5CWiAZPHVS1DloyZsJbtXmX4HxUTIpS0BHm9pW5iYBo2mIZi+hE3AeiTzHz33blwSnhdUo+9NpA==} + /@rollup/rollup-android-arm-eabi@4.12.0: + resolution: {integrity: sha512-+ac02NL/2TCKRrJu2wffk1kZ+RyqxVUlbjSagNgPm94frxtr+XDL12E5Ll1enWskLrtrZ2r8L3wED1orIibV/w==} cpu: [arm] os: [android] requiresBuild: true dev: true optional: true - /@rollup/rollup-android-arm64@4.9.4: - resolution: {integrity: sha512-ehcBrOR5XTl0W0t2WxfTyHCR/3Cq2jfb+I4W+Ch8Y9b5G+vbAecVv0Fx/J1QKktOrgUYsIKxWAKgIpvw56IFNA==} + /@rollup/rollup-android-arm64@4.12.0: + resolution: {integrity: sha512-OBqcX2BMe6nvjQ0Nyp7cC90cnumt8PXmO7Dp3gfAju/6YwG0Tj74z1vKrfRz7qAv23nBcYM8BCbhrsWqO7PzQQ==} cpu: [arm64] os: [android] requiresBuild: true dev: true optional: true - /@rollup/rollup-darwin-arm64@4.9.4: - resolution: {integrity: sha512-1fzh1lWExwSTWy8vJPnNbNM02WZDS8AW3McEOb7wW+nPChLKf3WG2aG7fhaUmfX5FKw9zhsF5+MBwArGyNM7NA==} + /@rollup/rollup-darwin-arm64@4.12.0: + resolution: {integrity: sha512-X64tZd8dRE/QTrBIEs63kaOBG0b5GVEd3ccoLtyf6IdXtHdh8h+I56C2yC3PtC9Ucnv0CpNFJLqKFVgCYe0lOQ==} cpu: [arm64] os: [darwin] requiresBuild: true dev: true optional: true - /@rollup/rollup-darwin-x64@4.9.4: - resolution: {integrity: sha512-Gc6cukkF38RcYQ6uPdiXi70JB0f29CwcQ7+r4QpfNpQFVHXRd0DfWFidoGxjSx1DwOETM97JPz1RXL5ISSB0pA==} + /@rollup/rollup-darwin-x64@4.12.0: + resolution: {integrity: sha512-cc71KUZoVbUJmGP2cOuiZ9HSOP14AzBAThn3OU+9LcA1+IUqswJyR1cAJj3Mg55HbjZP6OLAIscbQsQLrpgTOg==} cpu: [x64] os: [darwin] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm-gnueabihf@4.9.4: - resolution: {integrity: sha512-g21RTeFzoTl8GxosHbnQZ0/JkuFIB13C3T7Y0HtKzOXmoHhewLbVTFBQZu+z5m9STH6FZ7L/oPgU4Nm5ErN2fw==} + /@rollup/rollup-linux-arm-gnueabihf@4.12.0: + resolution: {integrity: sha512-a6w/Y3hyyO6GlpKL2xJ4IOh/7d+APaqLYdMf86xnczU3nurFTaVN9s9jOXQg97BE4nYm/7Ga51rjec5nfRdrvA==} cpu: [arm] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm64-gnu@4.9.4: - resolution: {integrity: sha512-TVYVWD/SYwWzGGnbfTkrNpdE4HON46orgMNHCivlXmlsSGQOx/OHHYiQcMIOx38/GWgwr/po2LBn7wypkWw/Mg==} + /@rollup/rollup-linux-arm64-gnu@4.12.0: + resolution: {integrity: sha512-0fZBq27b+D7Ar5CQMofVN8sggOVhEtzFUwOwPppQt0k+VR+7UHMZZY4y+64WJ06XOhBTKXtQB/Sv0NwQMXyNAA==} cpu: [arm64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-arm64-musl@4.9.4: - resolution: {integrity: sha512-XcKvuendwizYYhFxpvQ3xVpzje2HHImzg33wL9zvxtj77HvPStbSGI9czrdbfrf8DGMcNNReH9pVZv8qejAQ5A==} + 
/@rollup/rollup-linux-arm64-musl@4.12.0: + resolution: {integrity: sha512-eTvzUS3hhhlgeAv6bfigekzWZjaEX9xP9HhxB0Dvrdbkk5w/b+1Sxct2ZuDxNJKzsRStSq1EaEkVSEe7A7ipgQ==} cpu: [arm64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-riscv64-gnu@4.9.4: - resolution: {integrity: sha512-LFHS/8Q+I9YA0yVETyjonMJ3UA+DczeBd/MqNEzsGSTdNvSJa1OJZcSH8GiXLvcizgp9AlHs2walqRcqzjOi3A==} + /@rollup/rollup-linux-riscv64-gnu@4.12.0: + resolution: {integrity: sha512-ix+qAB9qmrCRiaO71VFfY8rkiAZJL8zQRXveS27HS+pKdjwUfEhqo2+YF2oI+H/22Xsiski+qqwIBxVewLK7sw==} cpu: [riscv64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-x64-gnu@4.9.4: - resolution: {integrity: sha512-dIYgo+j1+yfy81i0YVU5KnQrIJZE8ERomx17ReU4GREjGtDW4X+nvkBak2xAUpyqLs4eleDSj3RrV72fQos7zw==} + /@rollup/rollup-linux-x64-gnu@4.12.0: + resolution: {integrity: sha512-TenQhZVOtw/3qKOPa7d+QgkeM6xY0LtwzR8OplmyL5LrgTWIXpTQg2Q2ycBf8jm+SFW2Wt/DTn1gf7nFp3ssVA==} cpu: [x64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-linux-x64-musl@4.9.4: - resolution: {integrity: sha512-RoaYxjdHQ5TPjaPrLsfKqR3pakMr3JGqZ+jZM0zP2IkDtsGa4CqYaWSfQmZVgFUCgLrTnzX+cnHS3nfl+kB6ZQ==} + /@rollup/rollup-linux-x64-musl@4.12.0: + resolution: {integrity: sha512-LfFdRhNnW0zdMvdCb5FNuWlls2WbbSridJvxOvYWgSBOYZtgBfW9UGNJG//rwMqTX1xQE9BAodvMH9tAusKDUw==} cpu: [x64] os: [linux] requiresBuild: true dev: true optional: true - /@rollup/rollup-win32-arm64-msvc@4.9.4: - resolution: {integrity: sha512-T8Q3XHV+Jjf5e49B4EAaLKV74BbX7/qYBRQ8Wop/+TyyU0k+vSjiLVSHNWdVd1goMjZcbhDmYZUYW5RFqkBNHQ==} + /@rollup/rollup-win32-arm64-msvc@4.12.0: + resolution: {integrity: sha512-JPDxovheWNp6d7AHCgsUlkuCKvtu3RB55iNEkaQcf0ttsDU/JZF+iQnYcQJSk/7PtT4mjjVG8N1kpwnI9SLYaw==} cpu: [arm64] os: [win32] requiresBuild: true dev: true optional: true - /@rollup/rollup-win32-ia32-msvc@4.9.4: - resolution: {integrity: sha512-z+JQ7JirDUHAsMecVydnBPWLwJjbppU+7LZjffGf+Jvrxq+dVjIE7By163Sc9DKc3ADSU50qPVw0KonBS+a+HQ==} + /@rollup/rollup-win32-ia32-msvc@4.12.0: + resolution: {integrity: sha512-fjtuvMWRGJn1oZacG8IPnzIV6GF2/XG+h71FKn76OYFqySXInJtseAqdprVTDTyqPxQOG9Exak5/E9Z3+EJ8ZA==} cpu: [ia32] os: [win32] requiresBuild: true dev: true optional: true - /@rollup/rollup-win32-x64-msvc@4.9.4: - resolution: {integrity: sha512-LfdGXCV9rdEify1oxlN9eamvDSjv9md9ZVMAbNHA87xqIfFCxImxan9qZ8+Un54iK2nnqPlbnSi4R54ONtbWBw==} + /@rollup/rollup-win32-x64-msvc@4.12.0: + resolution: {integrity: sha512-ZYmr5mS2wd4Dew/JjT0Fqi2NPB/ZhZ2VvPp7SmvPZb4Y1CG/LRcS6tcRo2cYU7zLK5A7cdbhWnnWmUjoI4qapg==} cpu: [x64] os: [win32] requiresBuild: true dev: true optional: true - /@rushstack/node-core-library@3.62.0(@types/node@20.11.5): + /@rushstack/node-core-library@3.62.0(@types/node@20.11.20): resolution: {integrity: sha512-88aJn2h8UpSvdwuDXBv1/v1heM6GnBf3RjEy6ZPP7UnzHNCqOHA2Ut+ScYUbXcqIdfew9JlTAe3g+cnX9xQ/Aw==} peerDependencies: '@types/node': '*' @@ -4705,7 +5169,7 @@ packages: '@types/node': optional: true dependencies: - '@types/node': 20.11.5 + '@types/node': 20.11.20 colors: 1.2.5 fs-extra: 7.0.1 import-lazy: 4.0.0 @@ -4735,33 +5199,43 @@ packages: resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==} dev: true + /@snyk/github-codeowners@1.1.0: + resolution: {integrity: sha512-lGFf08pbkEac0NYgVf4hdANpAgApRjNByLXB+WBip3qj1iendOIyAwP2GKkKbQMNVy2r1xxDf0ssfWscoiC+Vw==} + engines: {node: '>=8.10'} + hasBin: true + dependencies: + commander: 4.1.1 + ignore: 5.3.1 + p-map: 4.0.0 + dev: true + 
/@socket.io/component-emitter@3.1.0: resolution: {integrity: sha512-+9jVqKhRSpsc591z5vX+X5Yyw+he/HCB4iQ/RYxw35CEPaY1gnsNE43nf9n9AaYjAQrTiI/mOwKUKdUs9vf7Xg==} dev: false - /@storybook/addon-actions@7.6.10: - resolution: {integrity: sha512-pcKmf0H/caGzKDy8cz1adNSjv+KOBWLJ11RzGExrWm+Ad5ACifwlsQPykJ3TQ/21sTd9IXVrE9uuq4LldEnPbg==} + /@storybook/addon-actions@7.6.17: + resolution: {integrity: sha512-TBphs4v6LRfyTpFo/WINF0TkMaE3rrNog7wW5mbz6n0j8o53kDN4o9ZEcygSL5zQX43CAaghQTeDCss7ueG7ZQ==} dependencies: - '@storybook/core-events': 7.6.10 + '@storybook/core-events': 7.6.17 '@storybook/global': 5.0.0 - '@types/uuid': 9.0.7 + '@types/uuid': 9.0.8 dequal: 2.0.3 - polished: 4.2.2 + polished: 4.3.1 uuid: 9.0.1 dev: true - /@storybook/addon-backgrounds@7.6.10: - resolution: {integrity: sha512-kGzsN1QkfyI8Cz7TErEx9OCB3PMzpCFGLd/iy7FreXwbMbeAQ3/9fYgKUsNOYgOhuTz7S09koZUWjS/WJuZGFA==} + /@storybook/addon-backgrounds@7.6.17: + resolution: {integrity: sha512-7dize7x8+37PH77kmt69b0xSaeDqOcZ4fpzW6+hk53hIaCVU26eGs4+j+743Xva31eOgZWNLupUhOpUDc6SqZw==} dependencies: '@storybook/global': 5.0.0 memoizerific: 1.11.3 ts-dedent: 2.2.0 dev: true - /@storybook/addon-controls@7.6.10(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-LjwCQRMWq1apLtFwDi6U8MI6ITUr+KhxJucZ60tfc58RgB2v8ayozyDAonFEONsx9YSR1dNIJ2Z/e2rWTBJeYA==} + /@storybook/addon-controls@7.6.17(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-zR0aLaUF7FtV/nMRyfniFbCls/e0DAAoXACuOAUAwNAv0lbIS8AyZZiHSmKucCvziUQ6WceeCC7+du3C+9y0rQ==} dependencies: - '@storybook/blocks': 7.6.10(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + '@storybook/blocks': 7.6.17(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) lodash: 4.17.21 ts-dedent: 2.2.0 transitivePeerDependencies: @@ -4773,27 +5247,27 @@ packages: - supports-color dev: true - /@storybook/addon-docs@7.6.10(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-GtyQ9bMx1AOOtl6ZS9vwK104HFRK+tqzxddRRxhXkpyeKu3olm9aMgXp35atE/3fJSqyyDm2vFtxxH8mzBA20A==} + /@storybook/addon-docs@7.6.17(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-FKa4Mdy7nhgvEVZJHpMkHriDzpVHbohn87zv9NCL+Ctjs1iAmzGwxEm0culszyDS1HN2ToVoY0h8CSi2RSSZqA==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 dependencies: '@jest/transform': 29.7.0 '@mdx-js/react': 2.3.0(react@18.2.0) - '@storybook/blocks': 7.6.10(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@storybook/client-logger': 7.6.10 - '@storybook/components': 7.6.10(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@storybook/csf-plugin': 7.6.10 - '@storybook/csf-tools': 7.6.10 + '@storybook/blocks': 7.6.17(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@storybook/client-logger': 7.6.17 + '@storybook/components': 7.6.17(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@storybook/csf-plugin': 7.6.17 + '@storybook/csf-tools': 7.6.17 '@storybook/global': 5.0.0 '@storybook/mdx2-csf': 1.1.0 - '@storybook/node-logger': 7.6.10 - '@storybook/postinstall': 7.6.10 - '@storybook/preview-api': 7.6.10 - '@storybook/react-dom-shim': 7.6.10(react-dom@18.2.0)(react@18.2.0) - '@storybook/theming': 
7.6.10(react-dom@18.2.0)(react@18.2.0) - '@storybook/types': 7.6.10 + '@storybook/node-logger': 7.6.17 + '@storybook/postinstall': 7.6.17 + '@storybook/preview-api': 7.6.17 + '@storybook/react-dom-shim': 7.6.17(react-dom@18.2.0)(react@18.2.0) + '@storybook/theming': 7.6.17(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.6.17 fs-extra: 11.2.0 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) @@ -4807,25 +5281,25 @@ packages: - supports-color dev: true - /@storybook/addon-essentials@7.6.10(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-cjbuCCK/3dtUity0Uqi5LwbkgfxqCCE5x5mXZIk9lTMeDz5vB9q6M5nzncVDy8F8przF3NbDLLgxKlt8wjiICg==} + /@storybook/addon-essentials@7.6.17(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-qlSpamxuYfT2taF953nC9QijGF2pSbg1ewMNpdwLTj16PTZvR/d8NCDMTJujI1bDwM2m18u8Yc43ibh5LEmxCw==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 dependencies: - '@storybook/addon-actions': 7.6.10 - '@storybook/addon-backgrounds': 7.6.10 - '@storybook/addon-controls': 7.6.10(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@storybook/addon-docs': 7.6.10(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@storybook/addon-highlight': 7.6.10 - '@storybook/addon-measure': 7.6.10 - '@storybook/addon-outline': 7.6.10 - '@storybook/addon-toolbars': 7.6.10 - '@storybook/addon-viewport': 7.6.10 - '@storybook/core-common': 7.6.10 - '@storybook/manager-api': 7.6.10(react-dom@18.2.0)(react@18.2.0) - '@storybook/node-logger': 7.6.10 - '@storybook/preview-api': 7.6.10 + '@storybook/addon-actions': 7.6.17 + '@storybook/addon-backgrounds': 7.6.17 + '@storybook/addon-controls': 7.6.17(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@storybook/addon-docs': 7.6.17(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@storybook/addon-highlight': 7.6.17 + '@storybook/addon-measure': 7.6.17 + '@storybook/addon-outline': 7.6.17 + '@storybook/addon-toolbars': 7.6.17 + '@storybook/addon-viewport': 7.6.17 + '@storybook/core-common': 7.6.17 + '@storybook/manager-api': 7.6.17(react-dom@18.2.0)(react@18.2.0) + '@storybook/node-logger': 7.6.17 + '@storybook/preview-api': 7.6.17 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) ts-dedent: 2.2.0 @@ -4836,24 +5310,24 @@ packages: - supports-color dev: true - /@storybook/addon-highlight@7.6.10: - resolution: {integrity: sha512-dIuS5QmoT1R+gFOcf6CoBa6D9UR5/wHCfPqPRH8dNNcCLtIGSHWQ4v964mS5OCq1Huj7CghmR15lOUk7SaYwUA==} + /@storybook/addon-highlight@7.6.17: + resolution: {integrity: sha512-R1yBPUUqGn+60aJakn8q+5Zt34E/gU3n3VmgPdryP0LJUdZ5q1/RZShoVDV+yYQ40htMH6oaCv3OyyPzFAGJ6A==} dependencies: '@storybook/global': 5.0.0 dev: true - /@storybook/addon-interactions@7.6.10: - resolution: {integrity: sha512-lEsAdP/PrOZK/KmRbZ/fU4RjEqDP+e/PBlVVVJT2QvHniWK/xxkjCD0axsHU/XuaeQRFhmg0/KR342PC/cIf9A==} + /@storybook/addon-interactions@7.6.17: + resolution: {integrity: sha512-6zlX+RDQ1PlA6fp7C+hun8t7h2RXfCGs5dGrhEenp2lqnR/rYuUJRC0tmKpkZBb8kZVcbSChzkB/JYkBjBCzpQ==} dependencies: '@storybook/global': 5.0.0 - '@storybook/types': 7.6.10 + '@storybook/types': 7.6.17 jest-mock: 27.5.1 - polished: 4.2.2 + polished: 4.3.1 ts-dedent: 2.2.0 dev: true - /@storybook/addon-links@7.6.10(react@18.2.0): - resolution: {integrity: 
sha512-s/WkSYHpr2pb9p57j6u/xDBg3TKJhBq55YMl0GB5gXgkRPIeuGbPhGJhm2yTGVFLvXgr/aHHnOxb/R/W8PiRhA==} + /@storybook/addon-links@7.6.17(react@18.2.0): + resolution: {integrity: sha512-iFUwKObRn0EKI0zMETsil2p9a/81rCuSMEWECsi+khkCAs1FUnD2cT6Ag5ydcNcBXsdtdfDJdtXQrkw+TSoStQ==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 peerDependenciesMeta: @@ -4866,62 +5340,62 @@ packages: ts-dedent: 2.2.0 dev: true - /@storybook/addon-measure@7.6.10: - resolution: {integrity: sha512-OVfTI56+kc4hLWfZ/YPV3WKj/aA9e4iKXYxZyPdhfX4Z8TgZdD1wv9Z6e8DKS0H5kuybYrHKHaID5ki6t7qz3w==} + /@storybook/addon-measure@7.6.17: + resolution: {integrity: sha512-O5vnHZNkduvZ95jf1UssbOl6ivIxzl5tv+4EpScPYId7w700bxWsJH+QX7ip6KlrCf2o3iUhmPe8bm05ghG2KA==} dependencies: '@storybook/global': 5.0.0 - tiny-invariant: 1.3.1 + tiny-invariant: 1.3.3 dev: true - /@storybook/addon-outline@7.6.10: - resolution: {integrity: sha512-RVJrEoPArhI6zAIMNl1Gz0zrj84BTfEWYYz0yDWOTVgvN411ugsoIk1hw0671MOneXJ2RcQ9MFIeV/v6AVDQYg==} + /@storybook/addon-outline@7.6.17: + resolution: {integrity: sha512-9o9JXDsYjNaDgz/cY5+jv694+aik/1aiRGGvsCv68e1p/ob0glkGKav4lnJe2VJqD+gCmaARoD8GOJlhoQl8JQ==} dependencies: '@storybook/global': 5.0.0 ts-dedent: 2.2.0 dev: true - /@storybook/addon-storysource@7.6.10: - resolution: {integrity: sha512-ZtMiO26Bqd2oEovEeJ5ulvIL/rsAuHHpjAgBRZd/Byw25DQKY3GTqGtV474Wjm5tzj7HWhfk69fqAv87HnveCw==} + /@storybook/addon-storysource@7.6.17: + resolution: {integrity: sha512-8SZiIuIkRU9NQM3Y2mmE0m+bqtXQefzW8Z9DkPKwTJSJxVBvMZVMHjRiQcPn8ll6zhqQIaQiBj0ahlR8ZqrnqA==} dependencies: - '@storybook/source-loader': 7.6.10 + '@storybook/source-loader': 7.6.17 estraverse: 5.3.0 - tiny-invariant: 1.3.1 + tiny-invariant: 1.3.3 dev: true - /@storybook/addon-toolbars@7.6.10: - resolution: {integrity: sha512-PaXY/oj9yxF7/H0CNdQKcioincyCkfeHpISZriZbZqhyqsjn3vca7RFEmsB88Q+ou6rMeqyA9st+6e2cx/Ct6A==} + /@storybook/addon-toolbars@7.6.17: + resolution: {integrity: sha512-UMrchbUHiyWrh6WuGnpy34Jqzkx/63B+MSgb3CW7YsQaXz64kE0Rol0TNSznnB+mYXplcqH+ndI4r4kFsmgwDg==} dev: true - /@storybook/addon-viewport@7.6.10: - resolution: {integrity: sha512-+bA6juC/lH4vEhk+w0rXakaG8JgLG4MOYrIudk5vJKQaC6X58LIM9N4kzIS2KSExRhkExXBPrWsnMfCo7uxmKg==} + /@storybook/addon-viewport@7.6.17: + resolution: {integrity: sha512-sA0QCcf4QAMixWvn8uvRYPfkKCSl6JajJaAspoPqXSxHEpK7uwOlpg3kqFU5XJJPXD0X957M+ONgNvBzYqSpEw==} dependencies: memoizerific: 1.11.3 dev: true - /@storybook/blocks@7.6.10(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-oSIukGC3yuF8pojABC/HLu5tv2axZvf60TaUs8eDg7+NiiKhzYSPoMQxs5uMrKngl+EJDB92ESgWT9vvsfvIPg==} + /@storybook/blocks@7.6.17(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-PsNVoe0bX1mMn4Kk3nbKZ0ItDZZ0YJnYAFJ6toAbsyBAbgzg1sce88sQinzvbn58/RT9MPKeWMPB45ZS7ggiNg==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 dependencies: - '@storybook/channels': 7.6.10 - '@storybook/client-logger': 7.6.10 - '@storybook/components': 7.6.10(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@storybook/core-events': 7.6.10 + '@storybook/channels': 7.6.17 + '@storybook/client-logger': 7.6.17 + '@storybook/components': 7.6.17(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@storybook/core-events': 7.6.17 '@storybook/csf': 0.1.2 - '@storybook/docs-tools': 7.6.10 + '@storybook/docs-tools': 7.6.17 '@storybook/global': 5.0.0 - '@storybook/manager-api': 
7.6.10(react-dom@18.2.0)(react@18.2.0) - '@storybook/preview-api': 7.6.10 - '@storybook/theming': 7.6.10(react-dom@18.2.0)(react@18.2.0) - '@storybook/types': 7.6.10 + '@storybook/manager-api': 7.6.17(react-dom@18.2.0)(react@18.2.0) + '@storybook/preview-api': 7.6.17 + '@storybook/theming': 7.6.17(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.6.17 '@types/lodash': 4.14.202 color-convert: 2.0.1 dequal: 2.0.3 lodash: 4.17.21 - markdown-to-jsx: 7.4.0(react@18.2.0) + markdown-to-jsx: 7.4.1(react@18.2.0) memoizerific: 1.11.3 - polished: 4.2.2 + polished: 4.3.1 react: 18.2.0 react-colorful: 5.6.1(react-dom@18.2.0)(react@18.2.0) react-dom: 18.2.0(react@18.2.0) @@ -4936,13 +5410,13 @@ packages: - supports-color dev: true - /@storybook/builder-manager@7.6.10: - resolution: {integrity: sha512-f+YrjZwohGzvfDtH8BHzqM3xW0p4vjjg9u7uzRorqUiNIAAKHpfNrZ/WvwPlPYmrpAHt4xX/nXRJae4rFSygPw==} + /@storybook/builder-manager@7.6.17: + resolution: {integrity: sha512-Sj8hcDYiPCCMfeLzus37czl0zdrAxAz4IyYam2jBjVymrIrcDAFyL1OCZvnq33ft179QYQWhUs9qwzVmlR/ZWg==} dependencies: '@fal-works/esbuild-plugin-global-externals': 2.1.2 - '@storybook/core-common': 7.6.10 - '@storybook/manager': 7.6.10 - '@storybook/node-logger': 7.6.10 + '@storybook/core-common': 7.6.17 + '@storybook/manager': 7.6.17 + '@storybook/node-logger': 7.6.17 '@types/ejs': 3.1.5 '@types/find-cache-dir': 3.2.1 '@yarnpkg/esbuild-plugin-pnp': 3.0.0-rc.15(esbuild@0.18.20) @@ -4960,8 +5434,8 @@ packages: - supports-color dev: true - /@storybook/builder-vite@7.6.10(typescript@5.3.3)(vite@5.0.12): - resolution: {integrity: sha512-qxe19axiNJVdIKj943e1ucAmADwU42fTGgMSdBzzrvfH3pSOmx2057aIxRzd8YtBRnj327eeqpgCHYIDTunMYQ==} + /@storybook/builder-vite@7.6.17(typescript@5.3.3)(vite@5.1.4): + resolution: {integrity: sha512-2Q32qalI401EsKKr9Hkk8TAOcHEerqwsjCpQgTNJnCu6GgCVKoVUcb99oRbR9Vyg0xh+jb19XiWqqQujFtLYlQ==} peerDependencies: '@preact/preset-vite': '*' typescript: '>= 4.3.x' @@ -4975,64 +5449,64 @@ packages: vite-plugin-glimmerx: optional: true dependencies: - '@storybook/channels': 7.6.10 - '@storybook/client-logger': 7.6.10 - '@storybook/core-common': 7.6.10 - '@storybook/csf-plugin': 7.6.10 - '@storybook/node-logger': 7.6.10 - '@storybook/preview': 7.6.10 - '@storybook/preview-api': 7.6.10 - '@storybook/types': 7.6.10 + '@storybook/channels': 7.6.17 + '@storybook/client-logger': 7.6.17 + '@storybook/core-common': 7.6.17 + '@storybook/csf-plugin': 7.6.17 + '@storybook/node-logger': 7.6.17 + '@storybook/preview': 7.6.17 + '@storybook/preview-api': 7.6.17 + '@storybook/types': 7.6.17 '@types/find-cache-dir': 3.2.1 browser-assert: 1.2.1 es-module-lexer: 0.9.3 express: 4.18.2 find-cache-dir: 3.3.2 fs-extra: 11.2.0 - magic-string: 0.30.5 + magic-string: 0.30.7 rollup: 3.29.4 typescript: 5.3.3 - vite: 5.0.12(@types/node@20.11.5) + vite: 5.1.4(@types/node@20.11.20) transitivePeerDependencies: - encoding - supports-color dev: true - /@storybook/channels@7.6.10: - resolution: {integrity: sha512-ITCLhFuDBKgxetuKnWwYqMUWlU7zsfH3gEKZltTb+9/2OAWR7ez0iqU7H6bXP1ridm0DCKkt2UMWj2mmr9iQqg==} + /@storybook/channels@7.6.17: + resolution: {integrity: sha512-GFG40pzaSxk1hUr/J/TMqW5AFDDPUSu+HkeE/oqSWJbOodBOLJzHN6CReJS6y1DjYSZLNFt1jftPWZZInG/XUA==} dependencies: - '@storybook/client-logger': 7.6.10 - '@storybook/core-events': 7.6.10 + '@storybook/client-logger': 7.6.17 + '@storybook/core-events': 7.6.17 '@storybook/global': 5.0.0 qs: 6.11.2 telejson: 7.2.0 - tiny-invariant: 1.3.1 + tiny-invariant: 1.3.3 dev: true - /@storybook/cli@7.6.10: - resolution: {integrity: 
sha512-pK1MEseMm73OMO2OVoSz79QWX8ymxgIGM8IeZTCo9gImiVRChMNDFYcv8yPWkjuyesY8c15CoO48aR7pdA1OjQ==} + /@storybook/cli@7.6.17: + resolution: {integrity: sha512-1sCo+nCqyR+nKfTcEidVu8XzNoECC7Y1l+uW38/r7s2f/TdDorXaIGAVrpjbSaXSoQpx5DxYJVaKCcQuOgqwcA==} hasBin: true dependencies: - '@babel/core': 7.23.7 - '@babel/preset-env': 7.23.8(@babel/core@7.23.7) - '@babel/types': 7.23.6 + '@babel/core': 7.23.9 + '@babel/preset-env': 7.23.9(@babel/core@7.23.9) + '@babel/types': 7.23.9 '@ndelangen/get-tarball': 3.0.9 - '@storybook/codemod': 7.6.10 - '@storybook/core-common': 7.6.10 - '@storybook/core-events': 7.6.10 - '@storybook/core-server': 7.6.10 - '@storybook/csf-tools': 7.6.10 - '@storybook/node-logger': 7.6.10 - '@storybook/telemetry': 7.6.10 - '@storybook/types': 7.6.10 - '@types/semver': 7.5.6 + '@storybook/codemod': 7.6.17 + '@storybook/core-common': 7.6.17 + '@storybook/core-events': 7.6.17 + '@storybook/core-server': 7.6.17 + '@storybook/csf-tools': 7.6.17 + '@storybook/node-logger': 7.6.17 + '@storybook/telemetry': 7.6.17 + '@storybook/types': 7.6.17 + '@types/semver': 7.5.8 '@yarnpkg/fslib': 2.10.3 '@yarnpkg/libzip': 2.3.0 chalk: 4.1.2 commander: 6.2.1 cross-spawn: 7.0.3 detect-indent: 6.1.0 - envinfo: 7.11.0 + envinfo: 7.11.1 execa: 5.1.1 express: 4.18.2 find-up: 5.0.0 @@ -5041,14 +5515,14 @@ packages: get-port: 5.1.1 giget: 1.2.1 globby: 11.1.0 - jscodeshift: 0.15.1(@babel/preset-env@7.23.8) + jscodeshift: 0.15.2(@babel/preset-env@7.23.9) leven: 3.1.0 ora: 5.4.1 prettier: 2.8.8 prompts: 2.4.2 puppeteer-core: 2.1.1 read-pkg-up: 7.0.1 - semver: 7.5.4 + semver: 7.6.0 strip-json-comments: 3.1.1 tempy: 1.0.1 ts-dedent: 2.2.0 @@ -5060,26 +5534,26 @@ packages: - utf-8-validate dev: true - /@storybook/client-logger@7.6.10: - resolution: {integrity: sha512-U7bbpu21ntgePMz/mKM18qvCSWCUGCUlYru8mgVlXLCKqFqfTeP887+CsPEQf29aoE3cLgDrxqbRJ1wxX9kL9A==} + /@storybook/client-logger@7.6.17: + resolution: {integrity: sha512-6WBYqixAXNAXlSaBWwgljWpAu10tPRBJrcFvx2gPUne58EeMM20Gi/iHYBz2kMCY+JLAgeIH7ZxInqwO8vDwiQ==} dependencies: '@storybook/global': 5.0.0 dev: true - /@storybook/codemod@7.6.10: - resolution: {integrity: sha512-pzFR0nocBb94vN9QCJLC3C3dP734ZigqyPmd0ZCDj9Xce2ytfHK3v1lKB6TZWzKAZT8zztauECYxrbo4LVuagw==} + /@storybook/codemod@7.6.17: + resolution: {integrity: sha512-JuTmf2u3C4fCnjO7o3dqRgrq3ozNYfWlrRP8xuIdvT7niMap7a396hJtSKqS10FxCgKFcMAOsRgrCalH1dWxUg==} dependencies: - '@babel/core': 7.23.7 - '@babel/preset-env': 7.23.8(@babel/core@7.23.7) - '@babel/types': 7.23.6 + '@babel/core': 7.23.9 + '@babel/preset-env': 7.23.9(@babel/core@7.23.9) + '@babel/types': 7.23.9 '@storybook/csf': 0.1.2 - '@storybook/csf-tools': 7.6.10 - '@storybook/node-logger': 7.6.10 - '@storybook/types': 7.6.10 + '@storybook/csf-tools': 7.6.17 + '@storybook/node-logger': 7.6.17 + '@storybook/types': 7.6.17 '@types/cross-spawn': 6.0.6 cross-spawn: 7.0.3 globby: 11.1.0 - jscodeshift: 0.15.1(@babel/preset-env@7.23.8) + jscodeshift: 0.15.2(@babel/preset-env@7.23.9) lodash: 4.17.21 prettier: 2.8.8 recast: 0.23.4 @@ -5087,19 +5561,19 @@ packages: - supports-color dev: true - /@storybook/components@7.6.10(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-H5hF8pxwtbt0LxV24KMMsPlbYG9Oiui3ObvAQkvGu6q62EYxRPeNSrq3GBI5XEbI33OJY9bT24cVaZx18dXqwQ==} + /@storybook/components@7.6.17(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-lbh7GynMidA+CZcJnstVku6Nhs+YkqjYaZ+mKPugvlVhGVWv0DaaeQFVuZ8cJtUGJ/5FFU4Y+n+gylYUHkGBMA==} 
peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 dependencies: - '@radix-ui/react-select': 1.2.2(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@radix-ui/react-toolbar': 1.0.4(@types/react-dom@18.2.18)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@storybook/client-logger': 7.6.10 + '@radix-ui/react-select': 1.2.2(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@radix-ui/react-toolbar': 1.0.4(@types/react-dom@18.2.19)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@storybook/client-logger': 7.6.17 '@storybook/csf': 0.1.2 '@storybook/global': 5.0.0 - '@storybook/theming': 7.6.10(react-dom@18.2.0)(react@18.2.0) - '@storybook/types': 7.6.10 + '@storybook/theming': 7.6.17(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.6.17 memoizerific: 1.11.3 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) @@ -5110,21 +5584,21 @@ packages: - '@types/react-dom' dev: true - /@storybook/core-client@7.6.10: - resolution: {integrity: sha512-DjnzSzSNDmZyxyg6TxugzWQwOsW+n/iWVv6sHNEvEd5STr0mjuJjIEELmv58LIr5Lsre5+LEddqHsyuLyt8ubg==} + /@storybook/core-client@7.6.17: + resolution: {integrity: sha512-LuDbADK+DPNAOOCXOlvY09hdGVueXlDetsdOJ/DgYnSa9QSWv9Uv+F8QcEgR3QckZJbPlztKJIVLgP2n/Xkijw==} dependencies: - '@storybook/client-logger': 7.6.10 - '@storybook/preview-api': 7.6.10 + '@storybook/client-logger': 7.6.17 + '@storybook/preview-api': 7.6.17 dev: true - /@storybook/core-common@7.6.10: - resolution: {integrity: sha512-K3YWqjCKMnpvYsWNjOciwTH6zWbuuZzmOiipziZaVJ+sB1XYmH52Y3WGEm07TZI8AYK9DRgwA13dR/7W0nw72Q==} + /@storybook/core-common@7.6.17: + resolution: {integrity: sha512-me2TP3Q9/qzqCLoDHUSsUF+VS1MHxfHbTVF6vAz0D/COTxzsxLpu9TxTbzJoBCxse6XRb6wWI1RgF1mIcjic7g==} dependencies: - '@storybook/core-events': 7.6.10 - '@storybook/node-logger': 7.6.10 - '@storybook/types': 7.6.10 + '@storybook/core-events': 7.6.17 + '@storybook/node-logger': 7.6.17 + '@storybook/types': 7.6.17 '@types/find-cache-dir': 3.2.1 - '@types/node': 18.19.8 + '@types/node': 18.19.18 '@types/node-fetch': 2.6.11 '@types/pretty-hrtime': 1.0.3 chalk: 4.1.2 @@ -5148,34 +5622,34 @@ packages: - supports-color dev: true - /@storybook/core-events@7.6.10: - resolution: {integrity: sha512-yccDH67KoROrdZbRKwxgTswFMAco5nlCyxszCDASCLygGSV2Q2e+YuywrhchQl3U6joiWi3Ps1qWu56NeNafag==} + /@storybook/core-events@7.6.17: + resolution: {integrity: sha512-AriWMCm/k1cxlv10f+jZ1wavThTRpLaN3kY019kHWbYT9XgaSuLU67G7GPr3cGnJ6HuA6uhbzu8qtqVCd6OfXA==} dependencies: ts-dedent: 2.2.0 dev: true - /@storybook/core-server@7.6.10: - resolution: {integrity: sha512-2icnqJkn3vwq0eJPP0rNaHd7IOvxYf5q4lSVl2AWTxo/Ae19KhokI6j/2vvS2XQJMGQszwshlIwrZUNsj5p0yw==} + /@storybook/core-server@7.6.17: + resolution: {integrity: sha512-KWGhTTaL1Q14FolcoKKZgytlPJUbH6sbJ1Ptj/84EYWFewcnEgVs0Zlnh1VStRZg+Rd1WC1V4yVd/bbDzxrvQA==} dependencies: '@aw-web-design/x-default-browser': 1.4.126 '@discoveryjs/json-ext': 0.5.7 - '@storybook/builder-manager': 7.6.10 - '@storybook/channels': 7.6.10 - '@storybook/core-common': 7.6.10 - '@storybook/core-events': 7.6.10 + '@storybook/builder-manager': 7.6.17 + '@storybook/channels': 7.6.17 + '@storybook/core-common': 7.6.17 + '@storybook/core-events': 7.6.17 '@storybook/csf': 0.1.2 - '@storybook/csf-tools': 7.6.10 + '@storybook/csf-tools': 7.6.17 '@storybook/docs-mdx': 0.1.0 '@storybook/global': 5.0.0 - '@storybook/manager': 7.6.10 - '@storybook/node-logger': 7.6.10 - '@storybook/preview-api': 7.6.10 - '@storybook/telemetry': 7.6.10 - 
'@storybook/types': 7.6.10 + '@storybook/manager': 7.6.17 + '@storybook/node-logger': 7.6.17 + '@storybook/preview-api': 7.6.17 + '@storybook/telemetry': 7.6.17 + '@storybook/types': 7.6.17 '@types/detect-port': 1.3.5 - '@types/node': 18.19.8 + '@types/node': 18.19.18 '@types/pretty-hrtime': 1.0.3 - '@types/semver': 7.5.6 + '@types/semver': 7.5.8 better-opn: 3.0.2 chalk: 4.1.2 cli-table3: 0.6.3 @@ -5184,15 +5658,15 @@ packages: express: 4.18.2 fs-extra: 11.2.0 globby: 11.1.0 - ip: 2.0.0 + ip: 2.0.1 lodash: 4.17.21 open: 8.4.2 pretty-hrtime: 1.0.3 prompts: 2.4.2 read-pkg-up: 7.0.1 - semver: 7.5.4 + semver: 7.6.0 telejson: 7.2.0 - tiny-invariant: 1.3.1 + tiny-invariant: 1.3.3 ts-dedent: 2.2.0 util: 0.12.5 util-deprecate: 1.0.2 @@ -5205,24 +5679,24 @@ packages: - utf-8-validate dev: true - /@storybook/csf-plugin@7.6.10: - resolution: {integrity: sha512-Sc+zZg/BnPH2X28tthNaQBnDiFfO0QmfjVoOx0fGYM9SvY3P5ehzWwp5hMRBim6a/twOTzePADtqYL+t6GMqqg==} + /@storybook/csf-plugin@7.6.17: + resolution: {integrity: sha512-xTHv9BUh3bkDVCvcbmdfVF0/e96BdrEgqPJ3G3RmKbSzWLOkQ2U9yiPfHzT0KJWPhVwj12fjfZp0zunu+pcS6Q==} dependencies: - '@storybook/csf-tools': 7.6.10 - unplugin: 1.6.0 + '@storybook/csf-tools': 7.6.17 + unplugin: 1.7.1 transitivePeerDependencies: - supports-color dev: true - /@storybook/csf-tools@7.6.10: - resolution: {integrity: sha512-TnDNAwIALcN6SA4l00Cb67G02XMOrYU38bIpFJk5VMDX2dvgPjUtJNBuLmEbybGcOt7nPyyFIHzKcY5FCVGoWA==} + /@storybook/csf-tools@7.6.17: + resolution: {integrity: sha512-dAQtam0EBPeTJYcQPLxXgz4L9JFqD+HWbLFG9CmNIhMMjticrB0mpk1EFIS6vPXk/VsVWpBgMLD7dZlD6YMKcQ==} dependencies: '@babel/generator': 7.23.6 - '@babel/parser': 7.23.6 - '@babel/traverse': 7.23.7 - '@babel/types': 7.23.6 + '@babel/parser': 7.23.9 + '@babel/traverse': 7.23.9 + '@babel/types': 7.23.9 '@storybook/csf': 0.1.2 - '@storybook/types': 7.6.10 + '@storybook/types': 7.6.17 fs-extra: 11.2.0 recast: 0.23.4 ts-dedent: 2.2.0 @@ -5246,12 +5720,12 @@ packages: resolution: {integrity: sha512-JDaBR9lwVY4eSH5W8EGHrhODjygPd6QImRbwjAuJNEnY0Vw4ie3bPkeGfnacB3OBW6u/agqPv2aRlR46JcAQLg==} dev: true - /@storybook/docs-tools@7.6.10: - resolution: {integrity: sha512-UgbikducoXzqQHf2TozO0f2rshaeBNnShVbL5Ai4oW7pDymBmrfzdjGbF/milO7yxNKcoIByeoNmu384eBamgQ==} + /@storybook/docs-tools@7.6.17: + resolution: {integrity: sha512-bYrLoj06adqklyLkEwD32C0Ww6t+9ZVvrJHiVT42bIhTRpFiFPAetl1a9KPHtFLnfduh4n2IxIr1jv32ThPDTA==} dependencies: - '@storybook/core-common': 7.6.10 - '@storybook/preview-api': 7.6.10 - '@storybook/types': 7.6.10 + '@storybook/core-common': 7.6.17 + '@storybook/preview-api': 7.6.17 + '@storybook/types': 7.6.17 '@types/doctrine': 0.0.3 assert: 2.1.0 doctrine: 3.0.0 @@ -5265,33 +5739,21 @@ packages: resolution: {integrity: sha512-FcOqPAXACP0I3oJ/ws6/rrPT9WGhu915Cg8D02a9YxLo0DE9zI+a9A5gRGvmQ09fiWPukqI8ZAEoQEdWUKMQdQ==} dev: true - /@storybook/instrumenter@7.6.10: - resolution: {integrity: sha512-9FYXW1CKXnZ7yYmy2A6U0seqJMe1F7g55J28Vslk3ZLoGATFJ2BR0eoQS+cgfBly6djehjaVeuV3IcUYGnQ/6Q==} + /@storybook/manager-api@7.6.17(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-IJIV1Yc6yw1dhCY4tReHCfBnUKDqEBnMyHp3mbXpsaHxnxJZrXO45WjRAZIKlQKhl/Ge1CrnznmHRCmYgqmrWg==} dependencies: - '@storybook/channels': 7.6.10 - '@storybook/client-logger': 7.6.10 - '@storybook/core-events': 7.6.10 - '@storybook/global': 5.0.0 - '@storybook/preview-api': 7.6.10 - '@vitest/utils': 0.34.7 - util: 0.12.5 - dev: true - - /@storybook/manager-api@7.6.10(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: 
sha512-8eGVpRlpunuFScDtc7nxpPJf/4kJBAAZlNdlhmX09j8M3voX6GpcxabBamSEX5pXZqhwxQCshD4IbqBmjvadlw==} - dependencies: - '@storybook/channels': 7.6.10 - '@storybook/client-logger': 7.6.10 - '@storybook/core-events': 7.6.10 + '@storybook/channels': 7.6.17 + '@storybook/client-logger': 7.6.17 + '@storybook/core-events': 7.6.17 '@storybook/csf': 0.1.2 '@storybook/global': 5.0.0 - '@storybook/router': 7.6.10 - '@storybook/theming': 7.6.10(react-dom@18.2.0)(react@18.2.0) - '@storybook/types': 7.6.10 + '@storybook/router': 7.6.17 + '@storybook/theming': 7.6.17(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.6.17 dequal: 2.0.3 lodash: 4.17.21 memoizerific: 1.11.3 - store2: 2.14.2 + store2: 2.14.3 telejson: 7.2.0 ts-dedent: 2.2.0 transitivePeerDependencies: @@ -5299,31 +5761,31 @@ packages: - react-dom dev: true - /@storybook/manager@7.6.10: - resolution: {integrity: sha512-Co3sLCbNYY6O4iH2ggmRDLCPWLj03JE5s/DOG8OVoXc6vBwTc/Qgiyrsxxp6BHQnPpM0mxL6aKAxE3UjsW/Nog==} + /@storybook/manager@7.6.17: + resolution: {integrity: sha512-A1LDDIqMpwRzq/dqkbbiza0QI04o4ZHCl2a3UMDZUV/+QLc2nsr2DAaLk4CVL4/cIc5zGqmIcaOTvprx2YKVBw==} dev: true /@storybook/mdx2-csf@1.1.0: resolution: {integrity: sha512-TXJJd5RAKakWx4BtpwvSNdgTDkKM6RkXU8GK34S/LhidQ5Pjz3wcnqb0TxEkfhK/ztbP8nKHqXFwLfa2CYkvQw==} dev: true - /@storybook/node-logger@7.6.10: - resolution: {integrity: sha512-ZBuqrv4bjJzKXyfRGFkVIi+z6ekn6rOPoQao4KmsfLNQAUUsEdR8Baw/zMnnU417zw5dSEaZdpuwx75SCQAeOA==} + /@storybook/node-logger@7.6.17: + resolution: {integrity: sha512-w59MQuXhhUNrUVmVkXhMwIg2nvFWjdDczLTwYLorhfsE36CWeUOY5QCZWQy0Qf/h+jz8Uo7Evy64qn18v9C4wA==} dev: true - /@storybook/postinstall@7.6.10: - resolution: {integrity: sha512-SMdXtednPCy3+SRJ7oN1OPN1oVFhj3ih+ChOEX8/kZ5J3nfmV3wLPtsZvFGUCf0KWQEP1xL+1Urv48mzMKcV/w==} + /@storybook/postinstall@7.6.17: + resolution: {integrity: sha512-WaWqB8o9vUc9aaVls+povQSVirf1Xd1LZcVhUKfAocAF3mzYUsnJsVqvnbjRj/F96UFVihOyDt9Zjl/9OvrCvQ==} dev: true - /@storybook/preview-api@7.6.10: - resolution: {integrity: sha512-5A3etoIwZCx05yuv3KSTv1wynN4SR4rrzaIs/CTBp3BC4q1RBL+Or/tClk0IJPXQMlx/4Y134GtNIBbkiDofpw==} + /@storybook/preview-api@7.6.17: + resolution: {integrity: sha512-wLfDdI9RWo1f2zzFe54yRhg+2YWyxLZvqdZnSQ45mTs4/7xXV5Wfbv3QNTtcdw8tT3U5KRTrN1mTfTCiRJc0Kw==} dependencies: - '@storybook/channels': 7.6.10 - '@storybook/client-logger': 7.6.10 - '@storybook/core-events': 7.6.10 + '@storybook/channels': 7.6.17 + '@storybook/client-logger': 7.6.17 + '@storybook/core-events': 7.6.17 '@storybook/csf': 0.1.2 '@storybook/global': 5.0.0 - '@storybook/types': 7.6.10 + '@storybook/types': 7.6.17 '@types/qs': 6.9.11 dequal: 2.0.3 lodash: 4.17.21 @@ -5334,12 +5796,12 @@ packages: util-deprecate: 1.0.2 dev: true - /@storybook/preview@7.6.10: - resolution: {integrity: sha512-F07BzVXTD3byq+KTWtvsw3pUu3fQbyiBNLFr2CnfU4XSdLKja5lDt8VqDQq70TayVQOf5qfUTzRd4M6pQkjw1w==} + /@storybook/preview@7.6.17: + resolution: {integrity: sha512-LvkMYK/y6alGjwRVNDIKL1lFlbyZ0H0c8iAbcQkiMoaFiujMQyVswMDKlWcj42Upfr/B1igydiruomc+eUt0mw==} dev: true - /@storybook/react-dom-shim@7.6.10(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-M+N/h6ximacaFdIDjMN2waNoWwApeVYTpFeoDppiFTvdBTXChyIuiPgYX9QSg7gDz92OaA52myGOot4wGvXVzg==} + /@storybook/react-dom-shim@7.6.17(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-32Sa/G+WnvaPiQ1Wvjjw5UM9rr2c4GDohwCcWVv3/LJuiFPqNS6zglAtmnsrlIBnUwRBMLMh/ekCTdqMiUmfDw==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 @@ -5348,24 +5810,24 @@ packages: react-dom: 
18.2.0(react@18.2.0) dev: true - /@storybook/react-vite@7.6.10(react-dom@18.2.0)(react@18.2.0)(typescript@5.3.3)(vite@5.0.12): - resolution: {integrity: sha512-YE2+J1wy8nO+c6Nv/hBMu91Edew3K184L1KSnfoZV8vtq2074k1Me/8pfe0QNuq631AncpfCYNb37yBAXQ/80w==} + /@storybook/react-vite@7.6.17(react-dom@18.2.0)(react@18.2.0)(typescript@5.3.3)(vite@5.1.4): + resolution: {integrity: sha512-4dIm3CuRl44X1TLzN3WoZh/bChzJF7Ud28li9atj9C8db0bb/y0zl8cahrsRFoR7/LyfqdOVLqaztrnA5SsWfg==} engines: {node: '>=16'} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 vite: ^3.0.0 || ^4.0.0 || ^5.0.0 dependencies: - '@joshwooding/vite-plugin-react-docgen-typescript': 0.3.0(typescript@5.3.3)(vite@5.0.12) + '@joshwooding/vite-plugin-react-docgen-typescript': 0.3.0(typescript@5.3.3)(vite@5.1.4) '@rollup/pluginutils': 5.1.0 - '@storybook/builder-vite': 7.6.10(typescript@5.3.3)(vite@5.0.12) - '@storybook/react': 7.6.10(react-dom@18.2.0)(react@18.2.0)(typescript@5.3.3) - '@vitejs/plugin-react': 3.1.0(vite@5.0.12) - magic-string: 0.30.5 + '@storybook/builder-vite': 7.6.17(typescript@5.3.3)(vite@5.1.4) + '@storybook/react': 7.6.17(react-dom@18.2.0)(react@18.2.0)(typescript@5.3.3) + '@vitejs/plugin-react': 3.1.0(vite@5.1.4) + magic-string: 0.30.7 react: 18.2.0 react-docgen: 7.0.3 react-dom: 18.2.0(react@18.2.0) - vite: 5.0.12(@types/node@20.11.5) + vite: 5.1.4(@types/node@20.11.20) transitivePeerDependencies: - '@preact/preset-vite' - encoding @@ -5375,8 +5837,8 @@ packages: - vite-plugin-glimmerx dev: true - /@storybook/react@7.6.10(react-dom@18.2.0)(react@18.2.0)(typescript@5.3.3): - resolution: {integrity: sha512-wwBn1cg2uZWW4peqqBjjU7XGmFq8HdkVUtWwh6dpfgmlY1Aopi+vPgZt7pY9KkWcTOq5+DerMdSfwxukpc3ajQ==} + /@storybook/react@7.6.17(react-dom@18.2.0)(react@18.2.0)(typescript@5.3.3): + resolution: {integrity: sha512-lVqzQSU03rRJWYW+gK2gq6mSo3/qtnVICY8B8oP7gc36jVu4ksDIu45bTfukM618ODkUZy0vZe6T4engK3azjA==} engines: {node: '>=16.0.0'} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 @@ -5386,16 +5848,16 @@ packages: typescript: optional: true dependencies: - '@storybook/client-logger': 7.6.10 - '@storybook/core-client': 7.6.10 - '@storybook/docs-tools': 7.6.10 + '@storybook/client-logger': 7.6.17 + '@storybook/core-client': 7.6.17 + '@storybook/docs-tools': 7.6.17 '@storybook/global': 5.0.0 - '@storybook/preview-api': 7.6.10 - '@storybook/react-dom-shim': 7.6.10(react-dom@18.2.0)(react@18.2.0) - '@storybook/types': 7.6.10 + '@storybook/preview-api': 7.6.17 + '@storybook/react-dom-shim': 7.6.17(react-dom@18.2.0)(react@18.2.0) + '@storybook/types': 7.6.17 '@types/escodegen': 0.0.6 '@types/estree': 0.0.51 - '@types/node': 18.19.8 + '@types/node': 18.19.18 acorn: 7.4.1 acorn-jsx: 5.3.2(acorn@7.4.1) acorn-walk: 7.2.0 @@ -5415,30 +5877,30 @@ packages: - supports-color dev: true - /@storybook/router@7.6.10: - resolution: {integrity: sha512-G/H4Jn2+y8PDe8Zbq4DVxF/TPn0/goSItdILts39JENucHiuGBCjKjSWGBe1rkwKi1tUbB3yhxJVrLagxFEPpQ==} + /@storybook/router@7.6.17: + resolution: {integrity: sha512-GnyC0j6Wi5hT4qRhSyT8NPtJfGmf82uZw97LQRWeyYu5gWEshUdM7aj40XlNiScd5cZDp0owO1idduVF2k2l2A==} dependencies: - '@storybook/client-logger': 7.6.10 + '@storybook/client-logger': 7.6.17 memoizerific: 1.11.3 qs: 6.11.2 dev: true - /@storybook/source-loader@7.6.10: - resolution: {integrity: sha512-S3nOWyj+sdpsqJqKGIN3DKE1q+Q0KYxEyPlPCawMFazozUH7tOodTIqmHBqJZCSNqdC4M1S/qcL8vpP4PfXhuA==} + /@storybook/source-loader@7.6.17: + resolution: {integrity: 
sha512-90v1es7dHmHgkGbflPlaRBYcn2+mqdC8OG4QtyYqOUq6xsLsyg+5CX2rupfHbuSLw9r0A3o1ViOII2J/kWtFow==} dependencies: '@storybook/csf': 0.1.2 - '@storybook/types': 7.6.10 + '@storybook/types': 7.6.17 estraverse: 5.3.0 lodash: 4.17.21 prettier: 2.8.8 dev: true - /@storybook/telemetry@7.6.10: - resolution: {integrity: sha512-p3mOSUtIyy2tF1z6pQXxNh1JzYFcAm97nUgkwLzF07GfEdVAPM+ftRSLFbD93zVvLEkmLTlsTiiKaDvOY/lQWg==} + /@storybook/telemetry@7.6.17: + resolution: {integrity: sha512-WOcOAmmengYnGInH98Px44F47DSpLyk20BM+Z/IIQDzfttGOLlxNqBBG1XTEhNRn+AYuk4aZ2JEed2lCjVIxcA==} dependencies: - '@storybook/client-logger': 7.6.10 - '@storybook/core-common': 7.6.10 - '@storybook/csf-tools': 7.6.10 + '@storybook/client-logger': 7.6.17 + '@storybook/core-common': 7.6.17 + '@storybook/csf-tools': 7.6.17 chalk: 4.1.2 detect-package-manager: 2.0.1 fetch-retry: 5.0.6 @@ -5449,53 +5911,31 @@ packages: - supports-color dev: true - /@storybook/test@7.6.10: - resolution: {integrity: sha512-dn/T+HcWOBlVh3c74BHurp++BaqBoQgNbSIaXlYDpJoZ+DzNIoEQVsWFYm5gCbtKK27iFd4n52RiQI3f6Vblqw==} - dependencies: - '@storybook/client-logger': 7.6.10 - '@storybook/core-events': 7.6.10 - '@storybook/instrumenter': 7.6.10 - '@storybook/preview-api': 7.6.10 - '@testing-library/dom': 9.3.4 - '@testing-library/jest-dom': 6.2.0 - '@testing-library/user-event': 14.3.0(@testing-library/dom@9.3.4) - '@types/chai': 4.3.11 - '@vitest/expect': 0.34.7 - '@vitest/spy': 0.34.7 - chai: 4.4.1 - util: 0.12.5 - transitivePeerDependencies: - - '@jest/globals' - - '@types/jest' - - jest - - vitest - dev: true - - /@storybook/theming@7.6.10(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-f5tuy7yV3TOP3fIboSqpgLHy0wKayAw/M8HxX0jVET4Z4fWlFK0BiHJabQ+XEdAfQM97XhPFHB2IPbwsqhCEcQ==} + /@storybook/theming@7.6.17(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-ZbaBt3KAbmBtfjNqgMY7wPMBshhSJlhodyMNQypv+95xLD/R+Az6aBYbpVAOygLaUQaQk4ar7H/Ww6lFIoiFbA==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 dependencies: '@emotion/use-insertion-effect-with-fallbacks': 1.0.1(react@18.2.0) - '@storybook/client-logger': 7.6.10 + '@storybook/client-logger': 7.6.17 '@storybook/global': 5.0.0 memoizerific: 1.11.3 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: true - /@storybook/types@7.6.10: - resolution: {integrity: sha512-hcS2HloJblaMpCAj2axgGV+53kgSRYPT0a1PG1IHsZaYQILfHSMmBqM8XzXXYTsgf9250kz3dqFX1l0n3EqMlQ==} + /@storybook/types@7.6.17: + resolution: {integrity: sha512-GRY0xEJQ0PrL7DY2qCNUdIfUOE0Gsue6N+GBJw9ku1IUDFLJRDOF+4Dx2BvYcVCPI5XPqdWKlEyZdMdKjiQN7Q==} dependencies: - '@storybook/channels': 7.6.10 + '@storybook/channels': 7.6.17 '@types/babel__core': 7.20.5 '@types/express': 4.17.21 file-system-cache: 2.3.0 dev: true - /@swc/core-darwin-arm64@1.3.101: - resolution: {integrity: sha512-mNFK+uHNPRXSnfTOG34zJOeMl2waM4hF4a2NY7dkMXrPqw9CoJn4MwTXJcyMiSz1/BnNjjTCHF3Yhj0jPxmkzQ==} + /@swc/core-darwin-arm64@1.4.2: + resolution: {integrity: sha512-1uSdAn1MRK5C1m/TvLZ2RDvr0zLvochgrZ2xL+lRzugLlCTlSA+Q4TWtrZaOz+vnnFVliCpw7c7qu0JouhgQIw==} engines: {node: '>=10'} cpu: [arm64] os: [darwin] @@ -5503,8 +5943,8 @@ packages: dev: true optional: true - /@swc/core-darwin-x64@1.3.101: - resolution: {integrity: sha512-B085j8XOx73Fg15KsHvzYWG262bRweGr3JooO1aW5ec5pYbz5Ew9VS5JKYS03w2UBSxf2maWdbPz2UFAxg0whw==} + /@swc/core-darwin-x64@1.4.2: + resolution: {integrity: sha512-TYD28+dCQKeuxxcy7gLJUCFLqrwDZnHtC2z7cdeGfZpbI2mbfppfTf2wUPzqZk3gEC96zHd4Yr37V3Tvzar+lQ==} engines: {node: '>=10'} cpu: [x64] os: [darwin] @@ -5512,8 
+5952,8 @@ packages: dev: true optional: true - /@swc/core-linux-arm-gnueabihf@1.3.101: - resolution: {integrity: sha512-9xLKRb6zSzRGPqdz52Hy5GuB1lSjmLqa0lST6MTFads3apmx4Vgs8Y5NuGhx/h2I8QM4jXdLbpqQlifpzTlSSw==} + /@swc/core-linux-arm-gnueabihf@1.4.2: + resolution: {integrity: sha512-Eyqipf7ZPGj0vplKHo8JUOoU1un2sg5PjJMpEesX0k+6HKE2T8pdyeyXODN0YTFqzndSa/J43EEPXm+rHAsLFQ==} engines: {node: '>=10'} cpu: [arm] os: [linux] @@ -5521,8 +5961,8 @@ packages: dev: true optional: true - /@swc/core-linux-arm64-gnu@1.3.101: - resolution: {integrity: sha512-oE+r1lo7g/vs96Weh2R5l971dt+ZLuhaUX+n3BfDdPxNHfObXgKMjO7E+QS5RbGjv/AwiPCxQmbdCp/xN5ICJA==} + /@swc/core-linux-arm64-gnu@1.4.2: + resolution: {integrity: sha512-wZn02DH8VYPv3FC0ub4my52Rttsus/rFw+UUfzdb3tHMHXB66LqN+rR0ssIOZrH6K+VLN6qpTw9VizjyoH0BxA==} engines: {node: '>=10'} cpu: [arm64] os: [linux] @@ -5530,8 +5970,8 @@ packages: dev: true optional: true - /@swc/core-linux-arm64-musl@1.3.101: - resolution: {integrity: sha512-OGjYG3H4BMOTnJWJyBIovCez6KiHF30zMIu4+lGJTCrxRI2fAjGLml3PEXj8tC3FMcud7U2WUn6TdG0/te2k6g==} + /@swc/core-linux-arm64-musl@1.4.2: + resolution: {integrity: sha512-3G0D5z9hUj9bXNcwmA1eGiFTwe5rWkuL3DsoviTj73TKLpk7u64ND0XjEfO0huVv4vVu9H1jodrKb7nvln/dlw==} engines: {node: '>=10'} cpu: [arm64] os: [linux] @@ -5539,8 +5979,8 @@ packages: dev: true optional: true - /@swc/core-linux-x64-gnu@1.3.101: - resolution: {integrity: sha512-/kBMcoF12PRO/lwa8Z7w4YyiKDcXQEiLvM+S3G9EvkoKYGgkkz4Q6PSNhF5rwg/E3+Hq5/9D2R+6nrkF287ihg==} + /@swc/core-linux-x64-gnu@1.4.2: + resolution: {integrity: sha512-LFxn9U8cjmYHw3jrdPNqPAkBGglKE3tCZ8rA7hYyp0BFxuo7L2ZcEnPm4RFpmSCCsExFH+LEJWuMGgWERoktvg==} engines: {node: '>=10'} cpu: [x64] os: [linux] @@ -5548,8 +5988,8 @@ packages: dev: true optional: true - /@swc/core-linux-x64-musl@1.3.101: - resolution: {integrity: sha512-kDN8lm4Eew0u1p+h1l3JzoeGgZPQ05qDE0czngnjmfpsH2sOZxVj1hdiCwS5lArpy7ktaLu5JdRnx70MkUzhXw==} + /@swc/core-linux-x64-musl@1.4.2: + resolution: {integrity: sha512-dp0fAmreeVVYTUcb4u9njTPrYzKnbIH0EhH2qvC9GOYNNREUu2GezSIDgonjOXkHiTCvopG4xU7y56XtXj4VrQ==} engines: {node: '>=10'} cpu: [x64] os: [linux] @@ -5557,8 +5997,8 @@ packages: dev: true optional: true - /@swc/core-win32-arm64-msvc@1.3.101: - resolution: {integrity: sha512-9Wn8TTLWwJKw63K/S+jjrZb9yoJfJwCE2RV5vPCCWmlMf3U1AXj5XuWOLUX+Rp2sGKau7wZKsvywhheWm+qndQ==} + /@swc/core-win32-arm64-msvc@1.4.2: + resolution: {integrity: sha512-HlVIiLMQkzthAdqMslQhDkoXJ5+AOLUSTV6fm6shFKZKqc/9cJvr4S8UveNERL9zUficA36yM3bbfo36McwnvQ==} engines: {node: '>=10'} cpu: [arm64] os: [win32] @@ -5566,8 +6006,8 @@ packages: dev: true optional: true - /@swc/core-win32-ia32-msvc@1.3.101: - resolution: {integrity: sha512-onO5KvICRVlu2xmr4//V2je9O2XgS1SGKpbX206KmmjcJhXN5EYLSxW9qgg+kgV5mip+sKTHTAu7IkzkAtElYA==} + /@swc/core-win32-ia32-msvc@1.4.2: + resolution: {integrity: sha512-WCF8faPGjCl4oIgugkp+kL9nl3nUATlzKXCEGFowMEmVVCFM0GsqlmGdPp1pjZoWc9tpYanoXQDnp5IvlDSLhA==} engines: {node: '>=10'} cpu: [ia32] os: [win32] @@ -5575,8 +6015,8 @@ packages: dev: true optional: true - /@swc/core-win32-x64-msvc@1.3.101: - resolution: {integrity: sha512-T3GeJtNQV00YmiVw/88/nxJ/H43CJvFnpvBHCVn17xbahiVUOPOduh3rc9LgAkKiNt/aV8vU3OJR+6PhfMR7UQ==} + /@swc/core-win32-x64-msvc@1.4.2: + resolution: {integrity: sha512-oV71rwiSpA5xre2C5570BhCsg1HF97SNLsZ/12xv7zayGzqr3yvFALFJN8tHKpqUdCB4FGPjoP3JFdV3i+1wUw==} engines: {node: '>=10'} cpu: [x64] os: [win32] @@ -5584,8 +6024,8 @@ packages: dev: true optional: true - /@swc/core@1.3.101: - resolution: {integrity: 
sha512-w5aQ9qYsd/IYmXADAnkXPGDMTqkQalIi+kfFf/MHRKTpaOL7DHjMXwPp/n8hJ0qNjRvchzmPtOqtPBiER50d8A==} + /@swc/core@1.4.2: + resolution: {integrity: sha512-vWgY07R/eqj1/a0vsRKLI9o9klGZfpLNOVEnrv4nrccxBgYPjcf22IWwAoaBJ+wpA7Q4fVjCUM8lP0m01dpxcg==} engines: {node: '>=10'} requiresBuild: true peerDependencies: @@ -5594,27 +6034,27 @@ packages: '@swc/helpers': optional: true dependencies: - '@swc/counter': 0.1.2 + '@swc/counter': 0.1.3 '@swc/types': 0.1.5 optionalDependencies: - '@swc/core-darwin-arm64': 1.3.101 - '@swc/core-darwin-x64': 1.3.101 - '@swc/core-linux-arm-gnueabihf': 1.3.101 - '@swc/core-linux-arm64-gnu': 1.3.101 - '@swc/core-linux-arm64-musl': 1.3.101 - '@swc/core-linux-x64-gnu': 1.3.101 - '@swc/core-linux-x64-musl': 1.3.101 - '@swc/core-win32-arm64-msvc': 1.3.101 - '@swc/core-win32-ia32-msvc': 1.3.101 - '@swc/core-win32-x64-msvc': 1.3.101 + '@swc/core-darwin-arm64': 1.4.2 + '@swc/core-darwin-x64': 1.4.2 + '@swc/core-linux-arm-gnueabihf': 1.4.2 + '@swc/core-linux-arm64-gnu': 1.4.2 + '@swc/core-linux-arm64-musl': 1.4.2 + '@swc/core-linux-x64-gnu': 1.4.2 + '@swc/core-linux-x64-musl': 1.4.2 + '@swc/core-win32-arm64-msvc': 1.4.2 + '@swc/core-win32-ia32-msvc': 1.4.2 + '@swc/core-win32-x64-msvc': 1.4.2 dev: true - /@swc/counter@0.1.2: - resolution: {integrity: sha512-9F4ys4C74eSTEUNndnER3VJ15oru2NumfQxS8geE+f3eB5xvfxpWyqE5XlVnxb/R14uoXi6SLbBwwiDSkv+XEw==} + /@swc/counter@0.1.3: + resolution: {integrity: sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==} dev: true - /@swc/helpers@0.5.3: - resolution: {integrity: sha512-FaruWX6KdudYloq1AHD/4nU+UsMTdNE8CKyrseXWEcgjDAbvkwJg2QGPAnfIJLIWsjZOSPLOAykK6fuYp4vp4A==} + /@swc/helpers@0.5.6: + resolution: {integrity: sha512-aYX01Ke9hunpoCexYAgQucEpARGQ5w/cqHFrIR+e9gdKb1QWTsVJuTJ2ozQzIAxLyRQe/m+2RqzkyOOGiMKRQA==} dependencies: tslib: 2.6.2 dev: false @@ -5623,70 +6063,15 @@ packages: resolution: {integrity: sha512-myfUej5naTBWnqOCc/MdVOLVjXUXtIA+NpDrDBKJtLLg2shUjBu3cZmB/85RyitKc55+lUUyl7oRfLOvkr2hsw==} dev: true - /@testing-library/dom@9.3.4: - resolution: {integrity: sha512-FlS4ZWlp97iiNWig0Muq8p+3rVDjRiYE+YKGbAqXOu9nwJFFOdL00kFpz42M+4huzYi86vAK1sOOfyOG45muIQ==} - engines: {node: '>=14'} - dependencies: - '@babel/code-frame': 7.23.5 - '@babel/runtime': 7.23.8 - '@types/aria-query': 5.0.4 - aria-query: 5.1.3 - chalk: 4.1.2 - dom-accessibility-api: 0.5.16 - lz-string: 1.5.0 - pretty-format: 27.5.1 - dev: true - - /@testing-library/jest-dom@6.2.0: - resolution: {integrity: sha512-+BVQlJ9cmEn5RDMUS8c2+TU6giLvzaHZ8sU/x0Jj7fk+6/46wPdwlgOPcpxS17CjcanBi/3VmGMqVr2rmbUmNw==} - engines: {node: '>=14', npm: '>=6', yarn: '>=1'} - peerDependencies: - '@jest/globals': '>= 28' - '@types/jest': '>= 28' - jest: '>= 28' - vitest: '>= 0.32' - peerDependenciesMeta: - '@jest/globals': - optional: true - '@types/jest': - optional: true - jest: - optional: true - vitest: - optional: true - dependencies: - '@adobe/css-tools': 4.3.2 - '@babel/runtime': 7.23.8 - aria-query: 5.3.0 - chalk: 3.0.0 - css.escape: 1.5.1 - dom-accessibility-api: 0.6.3 - lodash: 4.17.21 - redent: 3.0.0 - dev: true - - /@testing-library/user-event@14.3.0(@testing-library/dom@9.3.4): - resolution: {integrity: sha512-P02xtBBa8yMaLhK8CzJCIns8rqwnF6FxhR9zs810flHOBXUYCFjLd8Io1rQrAkQRWEmW2PGdZIEdMxf/KLsqFA==} - engines: {node: '>=12', npm: '>=6'} - peerDependencies: - '@testing-library/dom': '>=7.21.4' - dependencies: - '@testing-library/dom': 9.3.4 - dev: true - /@types/argparse@1.0.38: resolution: {integrity: 
sha512-ebDJ9b0e702Yr7pWgB0jzm+CX4Srzz8RcXtLJDJB+BSccqMa36uyH/zUsSYao5+BD1ytv3k3rPYCq4mAE1hsXA==} dev: true - /@types/aria-query@5.0.4: - resolution: {integrity: sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==} - dev: true - /@types/babel__core@7.20.5: resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==} dependencies: - '@babel/parser': 7.23.6 - '@babel/types': 7.23.6 + '@babel/parser': 7.23.9 + '@babel/types': 7.23.9 '@types/babel__generator': 7.6.8 '@types/babel__template': 7.4.4 '@types/babel__traverse': 7.20.5 @@ -5695,43 +6080,39 @@ packages: /@types/babel__generator@7.6.8: resolution: {integrity: sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==} dependencies: - '@babel/types': 7.23.6 + '@babel/types': 7.23.9 dev: true /@types/babel__template@7.4.4: resolution: {integrity: sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==} dependencies: - '@babel/parser': 7.23.6 - '@babel/types': 7.23.6 + '@babel/parser': 7.23.9 + '@babel/types': 7.23.9 dev: true /@types/babel__traverse@7.20.5: resolution: {integrity: sha512-WXCyOcRtH37HAUkpXhUduaxdm82b4GSlyTqajXviN4EfiuPgNYR109xMCKvpl6zPIpua0DGlMEDCq+g8EdoheQ==} dependencies: - '@babel/types': 7.23.6 + '@babel/types': 7.23.9 dev: true /@types/body-parser@1.19.5: resolution: {integrity: sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==} dependencies: '@types/connect': 3.4.38 - '@types/node': 20.11.5 - dev: true - - /@types/chai@4.3.11: - resolution: {integrity: sha512-qQR1dr2rGIHYlJulmr8Ioq3De0Le9E4MJ5AiaeAETJJpndT1uUNHsGFK3L/UIu+rbkQSdj8J/w2bCsBZc/Y5fQ==} + '@types/node': 20.11.20 dev: true /@types/connect@3.4.38: resolution: {integrity: sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==} dependencies: - '@types/node': 20.11.5 + '@types/node': 20.11.20 dev: true /@types/cross-spawn@6.0.6: resolution: {integrity: sha512-fXRhhUkG4H3TQk5dBhQ7m/JDdSNHKwR2BBia62lhwEIq9xGiQKLxd6LymNhn47SjXhsUEPmxi+PKw2OkW4LLjA==} dependencies: - '@types/node': 20.11.5 + '@types/node': 20.11.20 dev: true /@types/d3-array@3.2.1: @@ -5762,7 +6143,7 @@ packages: resolution: {integrity: sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==} dependencies: '@types/d3-array': 3.2.1 - '@types/geojson': 7946.0.13 + '@types/geojson': 7946.0.14 dev: false /@types/d3-delaunay@6.0.4: @@ -5804,7 +6185,7 @@ packages: /@types/d3-geo@3.1.0: resolution: {integrity: sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==} dependencies: - '@types/geojson': 7946.0.13 + '@types/geojson': 7946.0.14 dev: false /@types/d3-hierarchy@3.1.6: @@ -5817,8 +6198,8 @@ packages: '@types/d3-color': 3.1.3 dev: false - /@types/d3-path@3.0.2: - resolution: {integrity: sha512-WAIEVlOCdd/NKRYTsqCpOMHQHemKBEINf8YXMYOtXH0GA7SY0dqMB78P3Uhgfy+4X+/Mlw2wDtlETkN6kQUCMA==} + /@types/d3-path@3.1.0: + resolution: {integrity: sha512-P2dlU/q51fkOc/Gfl3Ul9kicV7l+ra934qBFXCFhrZMOL6du1TM0pm1ThYvENukyOn5h9v+yMJ9Fn5JK4QozrQ==} dev: false /@types/d3-polygon@3.0.2: @@ -5850,7 +6231,7 @@ packages: /@types/d3-shape@3.1.6: resolution: {integrity: sha512-5KKk5aKGu2I+O6SONMYSNflgiP0WfZIQvVUMan50wHsLG1G94JlxEVnCpQARfTtzytuY0p/9PXXZb3I7giofIA==} dependencies: - '@types/d3-path': 3.0.2 + '@types/d3-path': 3.1.0 dev: false /@types/d3-time-format@4.0.3: @@ -5898,7 
+6279,7 @@ packages: '@types/d3-geo': 3.1.0 '@types/d3-hierarchy': 3.1.6 '@types/d3-interpolate': 3.0.4 - '@types/d3-path': 3.0.2 + '@types/d3-path': 3.1.0 '@types/d3-polygon': 3.0.2 '@types/d3-quadtree': 3.0.6 '@types/d3-random': 3.0.3 @@ -5945,8 +6326,8 @@ packages: resolution: {integrity: sha512-AjwI4MvWx3HAOaZqYsjKWyEObT9lcVV0Y0V8nXo6cXzN8ZiMxVhf6F3d/UNvXVGKrEzL/Dluc5p+y9GkzlTWig==} dev: true - /@types/eslint@8.56.0: - resolution: {integrity: sha512-FlsN0p4FhuYRjIxpbdXovvHQhtlG05O1GG/RNWvdAxTboR438IOTwmrY/vLA+Xfgg06BTkP045M3vpFwTMv1dg==} + /@types/eslint@8.56.3: + resolution: {integrity: sha512-PvSf1wfv2wJpVIFUMSb+i4PvqNYkB9Rkp9ZDO3oaWzq4SKhsQk4mrMBr3ZH06I0hKrVGLBacmgl8JM4WVjb9dg==} dependencies: '@types/estree': 1.0.5 '@types/json-schema': 7.0.15 @@ -5960,10 +6341,10 @@ packages: resolution: {integrity: sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==} dev: true - /@types/express-serve-static-core@4.17.41: - resolution: {integrity: sha512-OaJ7XLaelTgrvlZD8/aa0vvvxZdUmlCn6MtWeB7TkiKW70BQLc9XEPpDLPdbo52ZhXUCrznlWdCHWxJWtdyajA==} + /@types/express-serve-static-core@4.17.43: + resolution: {integrity: sha512-oaYtiBirUOPQGSWNGPWnzyAFJ0BP3cwvN4oWZQY+zUBwpVIGsKUkpBpSztp74drYcjavs7SKFZ4DX1V2QeN8rg==} dependencies: - '@types/node': 20.11.5 + '@types/node': 20.11.20 '@types/qs': 6.9.11 '@types/range-parser': 1.2.7 '@types/send': 0.17.4 @@ -5973,7 +6354,7 @@ packages: resolution: {integrity: sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ==} dependencies: '@types/body-parser': 1.19.5 - '@types/express-serve-static-core': 4.17.41 + '@types/express-serve-static-core': 4.17.43 '@types/qs': 6.9.11 '@types/serve-static': 1.15.5 dev: true @@ -5982,21 +6363,21 @@ packages: resolution: {integrity: sha512-frsJrz2t/CeGifcu/6uRo4b+SzAwT4NYCVPu1GN8IB9XTzrpPkGuV0tmh9mN+/L0PklAlsC3u5Fxt0ju00LXIw==} dev: true - /@types/geojson@7946.0.13: - resolution: {integrity: sha512-bmrNrgKMOhM3WsafmbGmC+6dsF2Z308vLFsQ3a/bT8X8Sv5clVYpPars/UPq+sAaJP+5OoLAYgwbkS5QEJdLUQ==} + /@types/geojson@7946.0.14: + resolution: {integrity: sha512-WCfD5Ht3ZesJUsONdhvm84dmzWOiOzOAqOncN0++w0lBw1o8OuDNJF2McvvCef/yBqb/HYRahp1BYtODFQ8bRg==} dev: false /@types/glob@7.2.0: resolution: {integrity: sha512-ZUxbzKl0IfJILTS6t7ip5fQQM/J3TJYubDm3nMbgubNNYS62eXeUpoLUC8/7fJNiFYHTrGPQn7hspDUzIHX3UA==} dependencies: '@types/minimatch': 5.1.2 - '@types/node': 20.11.5 + '@types/node': 20.11.20 dev: true /@types/graceful-fs@4.1.9: resolution: {integrity: sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==} dependencies: - '@types/node': 20.11.5 + '@types/node': 20.11.20 dev: true /@types/http-errors@2.0.4: @@ -6046,8 +6427,8 @@ packages: /@types/lodash@4.14.202: resolution: {integrity: sha512-OvlIYQK9tNneDlS0VN54LLd5uiPCBOp7gS5Z0f1mjoJYBrtStzgmJBxONW3U6OZqdtNzZPmn9BS/7WI7BFFcFQ==} - /@types/mdx@2.0.10: - resolution: {integrity: sha512-Rllzc5KHk0Al5/WANwgSPl1/CwjqCy+AZrGd78zuK+jO9aDM6ffblZ+zIjgPNAaEBmlO0RYDvLNh7wD0zKVgEg==} + /@types/mdx@2.0.11: + resolution: {integrity: sha512-HM5bwOaIQJIQbAYfax35HCKxx7a3KrK3nBtIqJgSOitivTD1y3oW9P3rxY9RkXYPUk7y/AjAohfHKmFpGE79zw==} dev: true /@types/mime-types@2.1.4: @@ -6069,18 +6450,18 @@ packages: /@types/node-fetch@2.6.11: resolution: {integrity: sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==} dependencies: - '@types/node': 20.11.5 + '@types/node': 20.11.20 form-data: 4.0.0 dev: true - /@types/node@18.19.8: - resolution: {integrity: 
sha512-g1pZtPhsvGVTwmeVoexWZLTQaOvXwoSq//pTL0DHeNzUDrFnir4fgETdhjhIxjVnN+hKOuh98+E1eMLnUXstFg==} + /@types/node@18.19.18: + resolution: {integrity: sha512-80CP7B8y4PzZF0GWx15/gVWRrB5y/bIjNI84NK3cmQJu0WZwvmj2WMA5LcofQFVfLqqCSp545+U2LsrVzX36Zg==} dependencies: undici-types: 5.26.5 dev: true - /@types/node@20.11.5: - resolution: {integrity: sha512-g557vgQjUUfN76MZAN/dt1z3dzcUsimuysco0KeluHgrPdJXkP/XdAURgyO2W9fZWHRtRBiVKzKn8vyOAwlG+w==} + /@types/node@20.11.20: + resolution: {integrity: sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==} dependencies: undici-types: 5.26.5 dev: true @@ -6093,6 +6474,10 @@ packages: resolution: {integrity: sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==} dev: false + /@types/picomatch@2.3.3: + resolution: {integrity: sha512-Yll76ZHikRFCyz/pffKGjrCwe/le2CDwOP5F210KQo27kpRE46U2rDnzikNlVn6/ezH3Mhn46bJMTfeVTtcYMg==} + dev: true + /@types/pretty-hrtime@1.0.3: resolution: {integrity: sha512-nj39q0wAIdhwn7DGUyT9irmsKK1tV0bd5WFEhgpqNTMFZ8cE+jieuTphCW0tfdm47S2zVT5mr09B28b1chmQMA==} dev: true @@ -6108,26 +6493,34 @@ packages: resolution: {integrity: sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==} dev: true - /@types/react-dom@18.2.18: - resolution: {integrity: sha512-TJxDm6OfAX2KJWJdMEVTwWke5Sc/E/RlnPGvGfS0W7+6ocy2xhDVQVh/KvC2Uf7kACs+gDytdusDSdWfWkaNzw==} + /@types/react-dom@18.2.19: + resolution: {integrity: sha512-aZvQL6uUbIJpjZk4U8JZGbau9KDeAwMfmhyWorxgBkqDIEf6ROjRozcmPIicqsUwPUjbkDfHKgGee1Lq65APcA==} dependencies: - '@types/react': 18.2.48 + '@types/react': 18.2.59 dev: true /@types/react-reconciler@0.28.8: resolution: {integrity: sha512-SN9c4kxXZonFhbX4hJrZy37yw9e7EIxcpHCxQv5JUS18wDE5ovkQKlqQEkufdJCCMfuI9BnjUJvhYeJ9x5Ra7g==} dependencies: - '@types/react': 18.2.48 + '@types/react': 18.2.59 dev: false /@types/react-transition-group@4.4.10: resolution: {integrity: sha512-hT/+s0VQs2ojCX823m60m5f0sL5idt9SO6Tj6Dg+rdphGPIeJbJ6CxvBYkgkGKrYeDjvIpKTR38UzmtHJOGW3Q==} dependencies: - '@types/react': 18.2.48 + '@types/react': 18.2.59 dev: false - /@types/react@18.2.48: - resolution: {integrity: sha512-qboRCl6Ie70DQQG9hhNREz81jqC1cs9EVNcjQ1AU+jH6NFfSAhVVbrrY/+nSF+Bsk4AOwm9Qa61InvMCyV+H3w==} + /@types/react@18.2.57: + resolution: {integrity: sha512-ZvQsktJgSYrQiMirAN60y4O/LRevIV8hUzSOSNB6gfR3/o3wCBFQx3sPwIYtuDMeiVgsSS3UzCV26tEzgnfvQw==} + dependencies: + '@types/prop-types': 15.7.11 + '@types/scheduler': 0.16.8 + csstype: 3.1.3 + dev: false + + /@types/react@18.2.59: + resolution: {integrity: sha512-DE+F6BYEC8VtajY85Qr7mmhTd/79rJKIHCg99MU9SWPB4xvLb6D1za2vYflgZfmPqQVEr6UqJTnLXEwzpVPuOg==} dependencies: '@types/prop-types': 15.7.11 '@types/scheduler': 0.16.8 @@ -6140,15 +6533,15 @@ packages: /@types/scheduler@0.16.8: resolution: {integrity: sha512-WZLiwShhwLRmeV6zH+GkbOFT6Z6VklCItrDioxUnv+u4Ll+8vKeFySoFyK/0ctcRpOmwAicELfmys1sDc/Rw+A==} - /@types/semver@7.5.6: - resolution: {integrity: sha512-dn1l8LaMea/IjDoHNd9J52uBbInB796CDffS6VdIxvqYCPSG0V0DzHp76GpaWnlhg88uYyPbXCDIowa86ybd5A==} + /@types/semver@7.5.8: + resolution: {integrity: sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==} dev: true /@types/send@0.17.4: resolution: {integrity: sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==} dependencies: '@types/mime': 1.3.5 - '@types/node': 20.11.5 + '@types/node': 20.11.20 dev: true /@types/serve-static@1.15.5: @@ -6156,7 +6549,7 @@ packages: dependencies: 
'@types/http-errors': 2.0.4 '@types/mime': 3.0.4 - '@types/node': 20.11.5 + '@types/node': 20.11.20 dev: true /@types/unist@2.0.10: @@ -6167,8 +6560,8 @@ packages: resolution: {integrity: sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==} dev: false - /@types/uuid@9.0.7: - resolution: {integrity: sha512-WUtIVRUZ9i5dYXefDEAI7sh9/O7jGvHg7Df/5O/gtH3Yabe5odI3UWopVR1qbPXQtvOxWu3mM4XxlYeZtMWF4g==} + /@types/uuid@9.0.8: + resolution: {integrity: sha512-jg+97EGIcY9AGHJJRaaPVgetKDsrTgbRjQ5Msgjh/DQKEFl0DtyRr/VCOyD1T2R1MNeWPK/u7JoGhlDZnKBAfA==} dev: true /@types/yargs-parser@21.0.3: @@ -6187,51 +6580,51 @@ packages: '@types/yargs-parser': 21.0.3 dev: true - /@typescript-eslint/eslint-plugin@6.19.0(@typescript-eslint/parser@6.19.0)(eslint@8.56.0)(typescript@5.3.3): - resolution: {integrity: sha512-DUCUkQNklCQYnrBSSikjVChdc84/vMPDQSgJTHBZ64G9bA9w0Crc0rd2diujKbTdp6w2J47qkeHQLoi0rpLCdg==} + /@typescript-eslint/eslint-plugin@7.1.0(@typescript-eslint/parser@7.1.0)(eslint@8.57.0)(typescript@5.3.3): + resolution: {integrity: sha512-j6vT/kCulhG5wBmGtstKeiVr1rdXE4nk+DT1k6trYkwlrvW9eOF5ZbgKnd/YR6PcM4uTEXa0h6Fcvf6X7Dxl0w==} engines: {node: ^16.0.0 || >=18.0.0} peerDependencies: - '@typescript-eslint/parser': ^6.0.0 || ^6.0.0-alpha - eslint: ^7.0.0 || ^8.0.0 + '@typescript-eslint/parser': ^7.0.0 + eslint: ^8.56.0 typescript: '*' peerDependenciesMeta: typescript: optional: true dependencies: '@eslint-community/regexpp': 4.10.0 - '@typescript-eslint/parser': 6.19.0(eslint@8.56.0)(typescript@5.3.3) - '@typescript-eslint/scope-manager': 6.19.0 - '@typescript-eslint/type-utils': 6.19.0(eslint@8.56.0)(typescript@5.3.3) - '@typescript-eslint/utils': 6.19.0(eslint@8.56.0)(typescript@5.3.3) - '@typescript-eslint/visitor-keys': 6.19.0 + '@typescript-eslint/parser': 7.1.0(eslint@8.57.0)(typescript@5.3.3) + '@typescript-eslint/scope-manager': 7.1.0 + '@typescript-eslint/type-utils': 7.1.0(eslint@8.57.0)(typescript@5.3.3) + '@typescript-eslint/utils': 7.1.0(eslint@8.57.0)(typescript@5.3.3) + '@typescript-eslint/visitor-keys': 7.1.0 debug: 4.3.4 - eslint: 8.56.0 + eslint: 8.57.0 graphemer: 1.4.0 - ignore: 5.3.0 + ignore: 5.3.1 natural-compare: 1.4.0 - semver: 7.5.4 - ts-api-utils: 1.0.3(typescript@5.3.3) + semver: 7.6.0 + ts-api-utils: 1.2.1(typescript@5.3.3) typescript: 5.3.3 transitivePeerDependencies: - supports-color dev: true - /@typescript-eslint/parser@6.19.0(eslint@8.56.0)(typescript@5.3.3): - resolution: {integrity: sha512-1DyBLG5SH7PYCd00QlroiW60YJ4rWMuUGa/JBV0iZuqi4l4IK3twKPq5ZkEebmGqRjXWVgsUzfd3+nZveewgow==} + /@typescript-eslint/parser@7.1.0(eslint@8.57.0)(typescript@5.3.3): + resolution: {integrity: sha512-V1EknKUubZ1gWFjiOZhDSNToOjs63/9O0puCgGS8aDOgpZY326fzFu15QAUjwaXzRZjf/qdsdBrckYdv9YxB8w==} engines: {node: ^16.0.0 || >=18.0.0} peerDependencies: - eslint: ^7.0.0 || ^8.0.0 + eslint: ^8.56.0 typescript: '*' peerDependenciesMeta: typescript: optional: true dependencies: - '@typescript-eslint/scope-manager': 6.19.0 - '@typescript-eslint/types': 6.19.0 - '@typescript-eslint/typescript-estree': 6.19.0(typescript@5.3.3) - '@typescript-eslint/visitor-keys': 6.19.0 + '@typescript-eslint/scope-manager': 7.1.0 + '@typescript-eslint/types': 7.1.0 + '@typescript-eslint/typescript-estree': 7.1.0(typescript@5.3.3) + '@typescript-eslint/visitor-keys': 7.1.0 debug: 4.3.4 - eslint: 8.56.0 + eslint: 8.57.0 typescript: 5.3.3 transitivePeerDependencies: - supports-color @@ -6245,91 +6638,44 @@ packages: '@typescript-eslint/visitor-keys': 5.62.0 dev: true - 
/@typescript-eslint/scope-manager@6.19.0: - resolution: {integrity: sha512-dO1XMhV2ehBI6QN8Ufi7I10wmUovmLU0Oru3n5LVlM2JuzB4M+dVphCPLkVpKvGij2j/pHBWuJ9piuXx+BhzxQ==} + /@typescript-eslint/scope-manager@7.1.0: + resolution: {integrity: sha512-6TmN4OJiohHfoOdGZ3huuLhpiUgOGTpgXNUPJgeZOZR3DnIpdSgtt83RS35OYNNXxM4TScVlpVKC9jyQSETR1A==} engines: {node: ^16.0.0 || >=18.0.0} dependencies: - '@typescript-eslint/types': 6.19.0 - '@typescript-eslint/visitor-keys': 6.19.0 + '@typescript-eslint/types': 7.1.0 + '@typescript-eslint/visitor-keys': 7.1.0 dev: true - /@typescript-eslint/type-utils@6.19.0(eslint@8.56.0)(typescript@5.3.3): - resolution: {integrity: sha512-mcvS6WSWbjiSxKCwBcXtOM5pRkPQ6kcDds/juxcy/727IQr3xMEcwr/YLHW2A2+Fp5ql6khjbKBzOyjuPqGi/w==} + /@typescript-eslint/type-utils@7.1.0(eslint@8.57.0)(typescript@5.3.3): + resolution: {integrity: sha512-UZIhv8G+5b5skkcuhgvxYWHjk7FW7/JP5lPASMEUoliAPwIH/rxoUSQPia2cuOj9AmDZmwUl1usKm85t5VUMew==} engines: {node: ^16.0.0 || >=18.0.0} peerDependencies: - eslint: ^7.0.0 || ^8.0.0 + eslint: ^8.56.0 typescript: '*' peerDependenciesMeta: typescript: optional: true dependencies: - '@typescript-eslint/typescript-estree': 6.19.0(typescript@5.3.3) - '@typescript-eslint/utils': 6.19.0(eslint@8.56.0)(typescript@5.3.3) + '@typescript-eslint/typescript-estree': 7.1.0(typescript@5.3.3) + '@typescript-eslint/utils': 7.1.0(eslint@8.57.0)(typescript@5.3.3) debug: 4.3.4 - eslint: 8.56.0 - ts-api-utils: 1.0.3(typescript@5.3.3) + eslint: 8.57.0 + ts-api-utils: 1.2.1(typescript@5.3.3) typescript: 5.3.3 transitivePeerDependencies: - supports-color dev: true - /@typescript-eslint/types@4.33.0: - resolution: {integrity: sha512-zKp7CjQzLQImXEpLt2BUw1tvOMPfNoTAfb8l51evhYbOEEzdWyQNmHWWGPR6hwKJDAi+1VXSBmnhL9kyVTTOuQ==} - engines: {node: ^8.10.0 || ^10.13.0 || >=11.10.1} - dev: true - /@typescript-eslint/types@5.62.0: resolution: {integrity: sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dev: true - /@typescript-eslint/types@6.19.0: - resolution: {integrity: sha512-lFviGV/vYhOy3m8BJ/nAKoAyNhInTdXpftonhWle66XHAtT1ouBlkjL496b5H5hb8dWXHwtypTqgtb/DEa+j5A==} + /@typescript-eslint/types@7.1.0: + resolution: {integrity: sha512-qTWjWieJ1tRJkxgZYXx6WUYtWlBc48YRxgY2JN1aGeVpkhmnopq+SUC8UEVGNXIvWH7XyuTjwALfG6bFEgCkQA==} engines: {node: ^16.0.0 || >=18.0.0} dev: true - /@typescript-eslint/typescript-estree@4.33.0(typescript@3.9.10): - resolution: {integrity: sha512-rkWRY1MPFzjwnEVHsxGemDzqqddw2QbTJlICPD9p9I9LfsO8fdmfQPOX3uKfUaGRDFJbfrtm/sXhVXN4E+bzCA==} - engines: {node: ^10.12.0 || >=12.0.0} - peerDependencies: - typescript: '*' - peerDependenciesMeta: - typescript: - optional: true - dependencies: - '@typescript-eslint/types': 4.33.0 - '@typescript-eslint/visitor-keys': 4.33.0 - debug: 4.3.4 - globby: 11.1.0 - is-glob: 4.0.3 - semver: 7.5.4 - tsutils: 3.21.0(typescript@3.9.10) - typescript: 3.9.10 - transitivePeerDependencies: - - supports-color - dev: true - - /@typescript-eslint/typescript-estree@5.62.0(typescript@4.9.5): - resolution: {integrity: sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==} - engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} - peerDependencies: - typescript: '*' - peerDependenciesMeta: - typescript: - optional: true - dependencies: - '@typescript-eslint/types': 5.62.0 - '@typescript-eslint/visitor-keys': 5.62.0 - debug: 4.3.4 - globby: 11.1.0 - is-glob: 4.0.3 - semver: 7.5.4 - tsutils: 3.21.0(typescript@4.9.5) - typescript: 
4.9.5 - transitivePeerDependencies: - - supports-color - dev: true - /@typescript-eslint/typescript-estree@5.62.0(typescript@5.3.3): resolution: {integrity: sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} @@ -6344,15 +6690,15 @@ packages: debug: 4.3.4 globby: 11.1.0 is-glob: 4.0.3 - semver: 7.5.4 + semver: 7.6.0 tsutils: 3.21.0(typescript@5.3.3) typescript: 5.3.3 transitivePeerDependencies: - supports-color dev: true - /@typescript-eslint/typescript-estree@6.19.0(typescript@5.3.3): - resolution: {integrity: sha512-o/zefXIbbLBZ8YJ51NlkSAt2BamrK6XOmuxSR3hynMIzzyMY33KuJ9vuMdFSXW+H0tVvdF9qBPTHA91HDb4BIQ==} + /@typescript-eslint/typescript-estree@7.1.0(typescript@5.3.3): + resolution: {integrity: sha512-k7MyrbD6E463CBbSpcOnwa8oXRdHzH1WiVzOipK3L5KSML92ZKgUBrTlehdi7PEIMT8k0bQixHUGXggPAlKnOQ==} engines: {node: ^16.0.0 || >=18.0.0} peerDependencies: typescript: '*' @@ -6360,66 +6706,58 @@ packages: typescript: optional: true dependencies: - '@typescript-eslint/types': 6.19.0 - '@typescript-eslint/visitor-keys': 6.19.0 + '@typescript-eslint/types': 7.1.0 + '@typescript-eslint/visitor-keys': 7.1.0 debug: 4.3.4 globby: 11.1.0 is-glob: 4.0.3 minimatch: 9.0.3 - semver: 7.5.4 - ts-api-utils: 1.0.3(typescript@5.3.3) + semver: 7.6.0 + ts-api-utils: 1.2.1(typescript@5.3.3) typescript: 5.3.3 transitivePeerDependencies: - supports-color dev: true - /@typescript-eslint/utils@5.62.0(eslint@8.56.0)(typescript@5.3.3): + /@typescript-eslint/utils@5.62.0(eslint@8.57.0)(typescript@5.3.3): resolution: {integrity: sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: eslint: ^6.0.0 || ^7.0.0 || ^8.0.0 dependencies: - '@eslint-community/eslint-utils': 4.4.0(eslint@8.56.0) + '@eslint-community/eslint-utils': 4.4.0(eslint@8.57.0) '@types/json-schema': 7.0.15 - '@types/semver': 7.5.6 + '@types/semver': 7.5.8 '@typescript-eslint/scope-manager': 5.62.0 '@typescript-eslint/types': 5.62.0 '@typescript-eslint/typescript-estree': 5.62.0(typescript@5.3.3) - eslint: 8.56.0 + eslint: 8.57.0 eslint-scope: 5.1.1 - semver: 7.5.4 + semver: 7.6.0 transitivePeerDependencies: - supports-color - typescript dev: true - /@typescript-eslint/utils@6.19.0(eslint@8.56.0)(typescript@5.3.3): - resolution: {integrity: sha512-QR41YXySiuN++/dC9UArYOg4X86OAYP83OWTewpVx5ct1IZhjjgTLocj7QNxGhWoTqknsgpl7L+hGygCO+sdYw==} + /@typescript-eslint/utils@7.1.0(eslint@8.57.0)(typescript@5.3.3): + resolution: {integrity: sha512-WUFba6PZC5OCGEmbweGpnNJytJiLG7ZvDBJJoUcX4qZYf1mGZ97mO2Mps6O2efxJcJdRNpqweCistDbZMwIVHw==} engines: {node: ^16.0.0 || >=18.0.0} peerDependencies: - eslint: ^7.0.0 || ^8.0.0 + eslint: ^8.56.0 dependencies: - '@eslint-community/eslint-utils': 4.4.0(eslint@8.56.0) + '@eslint-community/eslint-utils': 4.4.0(eslint@8.57.0) '@types/json-schema': 7.0.15 - '@types/semver': 7.5.6 - '@typescript-eslint/scope-manager': 6.19.0 - '@typescript-eslint/types': 6.19.0 - '@typescript-eslint/typescript-estree': 6.19.0(typescript@5.3.3) - eslint: 8.56.0 - semver: 7.5.4 + '@types/semver': 7.5.8 + '@typescript-eslint/scope-manager': 7.1.0 + '@typescript-eslint/types': 7.1.0 + '@typescript-eslint/typescript-estree': 7.1.0(typescript@5.3.3) + eslint: 8.57.0 + semver: 7.6.0 transitivePeerDependencies: - supports-color - typescript dev: true - /@typescript-eslint/visitor-keys@4.33.0: - resolution: {integrity: 
sha512-uqi/2aSz9g2ftcHWf8uLPJA70rUv6yuMW5Bohw+bwcuzaxQIHaKFZCKGoGXIrc9vkTJ3+0txM73K0Hq3d5wgIg==} - engines: {node: ^8.10.0 || ^10.13.0 || >=11.10.1} - dependencies: - '@typescript-eslint/types': 4.33.0 - eslint-visitor-keys: 2.1.0 - dev: true - /@typescript-eslint/visitor-keys@5.62.0: resolution: {integrity: sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} @@ -6428,11 +6766,11 @@ packages: eslint-visitor-keys: 3.4.3 dev: true - /@typescript-eslint/visitor-keys@6.19.0: - resolution: {integrity: sha512-hZaUCORLgubBvtGpp1JEFEazcuEdfxta9j4iUwdSAr7mEsYYAp3EAUyCZk3VEEqGj6W+AV4uWyrDGtrlawAsgQ==} + /@typescript-eslint/visitor-keys@7.1.0: + resolution: {integrity: sha512-FhUqNWluiGNzlvnDZiXad4mZRhtghdoKW6e98GoEOYSu5cND+E39rG5KwJMUzeENwm1ztYBRqof8wMLP+wNPIA==} engines: {node: ^16.0.0 || >=18.0.0} dependencies: - '@typescript-eslint/types': 6.19.0 + '@typescript-eslint/types': 7.1.0 eslint-visitor-keys: 3.4.3 dev: true @@ -6440,51 +6778,68 @@ packages: resolution: {integrity: sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==} dev: true - /@vitejs/plugin-react-swc@3.5.0(vite@5.0.12): - resolution: {integrity: sha512-1PrOvAaDpqlCV+Up8RkAh9qaiUjoDUcjtttyhXDKw53XA6Ve16SOp6cCOpRs8Dj8DqUQs6eTW5YkLcLJjrXAig==} + /@vitejs/plugin-react-swc@3.6.0(vite@5.1.4): + resolution: {integrity: sha512-XFRbsGgpGxGzEV5i5+vRiro1bwcIaZDIdBRP16qwm+jP68ue/S8FJTBEgOeojtVDYrbSua3XFp71kC8VJE6v+g==} peerDependencies: vite: ^4 || ^5 dependencies: - '@swc/core': 1.3.101 - vite: 5.0.12(@types/node@20.11.5) + '@swc/core': 1.4.2 + vite: 5.1.4(@types/node@20.11.20) transitivePeerDependencies: - '@swc/helpers' dev: true - /@vitejs/plugin-react@3.1.0(vite@5.0.12): + /@vitejs/plugin-react@3.1.0(vite@5.1.4): resolution: {integrity: sha512-AfgcRL8ZBhAlc3BFdigClmTUMISmmzHn7sB2h9U1odvc5U/MjWXsAaz18b/WoppUTDBzxOJwo2VdClfUcItu9g==} engines: {node: ^14.18.0 || >=16.0.0} peerDependencies: vite: ^4.1.0-beta.0 dependencies: - '@babel/core': 7.23.7 - '@babel/plugin-transform-react-jsx-self': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-react-jsx-source': 7.23.3(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/plugin-transform-react-jsx-self': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-react-jsx-source': 7.23.3(@babel/core@7.23.9) magic-string: 0.27.0 react-refresh: 0.14.0 - vite: 5.0.12(@types/node@20.11.5) + vite: 5.1.4(@types/node@20.11.20) transitivePeerDependencies: - supports-color dev: true - /@vitest/expect@0.34.7: - resolution: {integrity: sha512-G9iEtwrD6ZQ4MVHZufif9Iqz3eLtuwBBNx971fNAGPaugM7ftAWjQN+ob2zWhtzURp8RK3zGXOxVb01mFo3zAQ==} + /@vitest/expect@1.3.1: + resolution: {integrity: sha512-xofQFwIzfdmLLlHa6ag0dPV8YsnKOCP1KdAeVVh34vSjN2dcUiXYCD9htu/9eM7t8Xln4v03U9HLxLpPlsXdZw==} dependencies: - '@vitest/spy': 0.34.7 - '@vitest/utils': 0.34.7 + '@vitest/spy': 1.3.1 + '@vitest/utils': 1.3.1 chai: 4.4.1 dev: true - /@vitest/spy@0.34.7: - resolution: {integrity: sha512-NMMSzOY2d8L0mcOt4XcliDOS1ISyGlAXuQtERWVOoVHnKwmG+kKhinAiGw3dTtMQWybfa89FG8Ucg9tiC/FhTQ==} + /@vitest/runner@1.3.1: + resolution: {integrity: sha512-5FzF9c3jG/z5bgCnjr8j9LNq/9OxV2uEBAITOXfoe3rdZJTdO7jzThth7FXv/6b+kdY65tpRQB7WaKhNZwX+Kg==} dependencies: - tinyspy: 2.2.0 + '@vitest/utils': 1.3.1 + p-limit: 5.0.0 + pathe: 1.1.2 dev: true - /@vitest/utils@0.34.7: - resolution: {integrity: sha512-ziAavQLpCYS9sLOorGrFFKmy2gnfiNU0ZJ15TsMz/K92NAPS/rp9K4z6AJQQk5Y8adCy4Iwpxy7pQumQ/psnRg==} + /@vitest/snapshot@1.3.1: + 
resolution: {integrity: sha512-EF++BZbt6RZmOlE3SuTPu/NfwBF6q4ABS37HHXzs2LUVPBLx2QoY/K0fKpRChSo8eLiuxcbCVfqKgx/dplCDuQ==} + dependencies: + magic-string: 0.30.7 + pathe: 1.1.2 + pretty-format: 29.7.0 + dev: true + + /@vitest/spy@1.3.1: + resolution: {integrity: sha512-xAcW+S099ylC9VLU7eZfdT9myV67Nor9w9zhf0mGCYJSO+zM2839tOeROTdikOi/8Qeusffvxb/MyBSOja1Uig==} + dependencies: + tinyspy: 2.2.1 + dev: true + + /@vitest/utils@1.3.1: + resolution: {integrity: sha512-d3Waie/299qqRyHTm2DjADeTaNdNSVsnwHPWrs20JMpjh6eiVq7ggggweO8rc4arhf6rRkWuHKwvxGvejUXZZQ==} dependencies: diff-sequences: 29.6.3 + estree-walker: 3.0.3 loupe: 2.3.7 pretty-format: 29.7.0 dev: true @@ -6508,21 +6863,21 @@ packages: path-browserify: 1.0.1 dev: true - /@vue/compiler-core@3.4.15: - resolution: {integrity: sha512-XcJQVOaxTKCnth1vCxEChteGuwG6wqnUHxAm1DO3gCz0+uXKaJNx8/digSz4dLALCy8n2lKq24jSUs8segoqIw==} + /@vue/compiler-core@3.4.20: + resolution: {integrity: sha512-l7M+xUuL8hrGtRLkrf+62d9zucAdgqNBTbJ/NufCOIuJQhauhfyAKH9ra/qUctCXcULwmclGAVpvmxjbBO30qg==} dependencies: - '@babel/parser': 7.23.6 - '@vue/shared': 3.4.15 + '@babel/parser': 7.23.9 + '@vue/shared': 3.4.20 entities: 4.5.0 estree-walker: 2.0.2 source-map-js: 1.0.2 dev: true - /@vue/compiler-dom@3.4.15: - resolution: {integrity: sha512-wox0aasVV74zoXyblarOM3AZQz/Z+OunYcIHe1OsGclCHt8RsRm04DObjefaI82u6XDzv+qGWZ24tIsRAIi5MQ==} + /@vue/compiler-dom@3.4.20: + resolution: {integrity: sha512-/cSBGL79HFBYgDnqCNKErOav3bPde3n0sJwJM2Z09rXlkiowV/2SG1tgDAiWS1CatS4Cvo0o74e1vNeCK1R3RA==} dependencies: - '@vue/compiler-core': 3.4.15 - '@vue/shared': 3.4.15 + '@vue/compiler-core': 3.4.20 + '@vue/shared': 3.4.20 dev: true /@vue/language-core@1.8.27(typescript@5.3.3): @@ -6535,8 +6890,8 @@ packages: dependencies: '@volar/language-core': 1.11.1 '@volar/source-map': 1.11.1 - '@vue/compiler-dom': 3.4.15 - '@vue/shared': 3.4.15 + '@vue/compiler-dom': 3.4.20 + '@vue/shared': 3.4.20 computeds: 0.0.1 minimatch: 9.0.3 muggle-string: 0.3.1 @@ -6545,8 +6900,8 @@ packages: vue-template-compiler: 2.7.16 dev: true - /@vue/shared@3.4.15: - resolution: {integrity: sha512-KzfPTxVaWfB+eGcGdbSf4CWdaXcGDqckoeXUh7SB3fZdEtzPCK2Vq9B/lRRL3yutax/LWITz+SwvgyOxz5V75g==} + /@vue/shared@3.4.20: + resolution: {integrity: sha512-KTEngal0aiUvNJ6I1Chk5Ew5XqChsFsxP4GKAYXWb99zKJWjNU72p2FWEOmZWHxHcqtniOJsgnpd3zizdpfEag==} dev: true /@xobotyi/scrollbar-width@1.9.5: @@ -6693,10 +7048,10 @@ packages: /@zag-js/date-picker@0.32.1: resolution: {integrity: sha512-n/hYmF+/R4+NuyfPRzCgeuLT6LJihKSuKzK29STPWy3sC/tBBHiqhNv1/4UKbatHUJXdBW2XF+N8Rw08RffcFQ==} dependencies: - '@internationalized/date': 3.5.1 + '@internationalized/date': 3.5.2 '@zag-js/anatomy': 0.32.1 '@zag-js/core': 0.32.1 - '@zag-js/date-utils': 0.32.1(@internationalized/date@3.5.1) + '@zag-js/date-utils': 0.32.1(@internationalized/date@3.5.2) '@zag-js/dismissable': 0.32.1 '@zag-js/dom-event': 0.32.1 '@zag-js/dom-query': 0.32.1 @@ -6708,12 +7063,12 @@ packages: '@zag-js/utils': 0.32.1 dev: false - /@zag-js/date-utils@0.32.1(@internationalized/date@3.5.1): + /@zag-js/date-utils@0.32.1(@internationalized/date@3.5.2): resolution: {integrity: sha512-dbBDRSVr5pRUw3rXndyGuSshZiWqQI5JQO4D2KIFGkXzorj6WzoOpcO910Z7AdM/9cCAMpCjUrka8d8o9BpJBg==} peerDependencies: '@internationalized/date': '>=3.0.0' dependencies: - '@internationalized/date': 3.5.1 + '@internationalized/date': 3.5.2 dev: false /@zag-js/dialog@0.32.1: @@ -6856,7 +7211,7 @@ packages: /@zag-js/number-input@0.32.1: resolution: {integrity: 
sha512-atyIOvoMITb4hZtQym7yD6I7grvPW83UeMFO8hCQg3HWwd2zR4+63mouWuyMoWb4QrzVFRVQBaU8OG5xGlknEw==} dependencies: - '@internationalized/number': 3.5.0 + '@internationalized/number': 3.5.1 '@zag-js/anatomy': 0.32.1 '@zag-js/core': 0.32.1 '@zag-js/dom-event': 0.32.1 @@ -7140,6 +7495,18 @@ packages: resolution: {integrity: sha512-Vzieo4vNulzY/0zqmVfeYW/LcFJp5xtEoyUgR1FBctH8uBPBRhTIEXxKtoMablW6/vccOVo7zcu0UrR5Vx+eYQ==} dev: false + /@zkochan/retry@0.2.0: + resolution: {integrity: sha512-WhB+2B/ZPlW2Xy/kMJBrMbqecWXcbDDgn0K0wKBAgO2OlBTz1iLJrRWduo+DGGn0Akvz1Lu4Xvls7dJojximWw==} + engines: {node: '>=10'} + dev: true + + /@zkochan/rimraf@2.1.3: + resolution: {integrity: sha512-mCfR3gylCzPC+iqdxEA6z5SxJeOgzgbwmyxanKriIne5qZLswDe/M43aD3p5MNzwzXRhbZg/OX+MpES6Zk1a6A==} + engines: {node: '>=12.10'} + dependencies: + rimraf: 3.0.2 + dev: true + /accepts@1.3.8: resolution: {integrity: sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==} engines: {node: '>= 0.6'} @@ -7156,12 +7523,12 @@ packages: acorn: 7.4.1 dev: true - /acorn-jsx@5.3.2(acorn@8.11.2): + /acorn-jsx@5.3.2(acorn@8.11.3): resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} peerDependencies: acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 dependencies: - acorn: 8.11.2 + acorn: 8.11.3 dev: true /acorn-walk@7.2.0: @@ -7169,14 +7536,13 @@ packages: engines: {node: '>=0.4.0'} dev: true - /acorn@7.4.1: - resolution: {integrity: sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==} + /acorn-walk@8.3.2: + resolution: {integrity: sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A==} engines: {node: '>=0.4.0'} - hasBin: true dev: true - /acorn@8.11.2: - resolution: {integrity: sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==} + /acorn@7.4.1: + resolution: {integrity: sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==} engines: {node: '>=0.4.0'} hasBin: true dev: true @@ -7252,10 +7618,6 @@ packages: engines: {node: '>=12'} dev: true - /any-promise@1.3.0: - resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==} - dev: true - /anymatch@3.1.3: resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} engines: {node: '>= 8'} @@ -7264,10 +7626,6 @@ packages: picomatch: 2.3.1 dev: true - /app-module-path@2.2.0: - resolution: {integrity: sha512-gkco+qxENJV+8vFcDiiFhuoSvRXb2a/QPqpSoWhVz829VNJfOTnELbBmPmNKFxf3xdNnw4DWCkzkDaavcX/1YQ==} - dev: true - /app-root-dir@1.0.2: resolution: {integrity: sha512-jlpIfsOoNoafl92Sz//64uQHGSyMrD2vYG5d8o2a4qGvyNCvXur7bzIsWtAC/6flI2RYAp3kv8rsfBtaLm7w0g==} dev: true @@ -7288,23 +7646,16 @@ packages: dependencies: tslib: 2.6.2 - /aria-query@5.1.3: - resolution: {integrity: sha512-R5iJ5lkuHybztUfuOAznmboyjWq8O6sqNqtK7CLOqdydi54VNbORp49mb14KbWgG1QD3JFO9hJdZ+y4KutfdOQ==} - dependencies: - deep-equal: 2.2.3 + /arity-n@1.0.4: + resolution: {integrity: sha512-fExL2kFDC1Q2DUOx3whE/9KoN66IzkY4b4zUHUBFM1ojEYjZZYDcUW3bek/ufGionX9giIKDC5redH2IlGqcQQ==} dev: true - /aria-query@5.3.0: - resolution: {integrity: sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==} + /array-buffer-byte-length@1.0.1: + resolution: {integrity: 
sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg==} + engines: {node: '>= 0.4'} dependencies: - dequal: 2.0.3 - dev: true - - /array-buffer-byte-length@1.0.0: - resolution: {integrity: sha512-LPuwb2P+NrQw3XhxGc36+XSvuBPopovXYTR9Ew++Du9Yb/bx5AzBfrIsBoj0EZUifjQU+sHL21sseZ3jerWO/A==} - dependencies: - call-bind: 1.0.5 - is-array-buffer: 3.0.2 + call-bind: 1.0.7 + is-array-buffer: 3.0.4 dev: true /array-flatten@1.1.1: @@ -7315,36 +7666,54 @@ packages: resolution: {integrity: sha512-dlcsNBIiWhPkHdOEEKnehA+RNUWDc4UqFtnIXU4uuYDPtA4LDkr7qip2p0VvFAEXNDr0yWZ9PJyIRiGjRLQzwQ==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 - get-intrinsic: 1.2.2 + es-abstract: 1.22.4 + get-intrinsic: 1.2.4 is-string: 1.0.7 dev: true + /array-last@1.3.0: + resolution: {integrity: sha512-eOCut5rXlI6aCOS7Z7kCplKRKyiFQ6dHFBem4PwlwKeNFk2/XxTrhRh5T9PyaEWGy/NHTZWbY+nsZlNFJu9rYg==} + engines: {node: '>=0.10.0'} + dependencies: + is-number: 4.0.0 + dev: true + /array-union@2.1.0: resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==} engines: {node: '>=8'} dev: true - /array.prototype.findlastindex@1.2.3: - resolution: {integrity: sha512-LzLoiOMAxvy+Gd3BAq3B7VeIgPdo+Q8hthvKtXybMvRV0jrXfJM/t8mw7nNlpEcVlVUnCnM2KSX4XU5HmpodOA==} + /array.prototype.filter@1.0.3: + resolution: {integrity: sha512-VizNcj/RGJiUyQBgzwxzE5oHdeuXY5hSbbmKMlphj1cy1Vl7Pn2asCGbSrru6hSQjmCzqTBPVWAF/whmEOVHbw==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 + es-abstract: 1.22.4 + es-array-method-boxes-properly: 1.0.0 + is-string: 1.0.7 + dev: true + + /array.prototype.findlastindex@1.2.4: + resolution: {integrity: sha512-hzvSHUshSpCflDR1QMUBLHGHP1VIEBegT4pix9H/Z92Xw3ySoy6c2qh7lJWTJnRJ8JCZ9bJNCgTyYaJGcJu6xQ==} + engines: {node: '>= 0.4'} + dependencies: + call-bind: 1.0.7 + define-properties: 1.2.1 + es-abstract: 1.22.4 + es-errors: 1.3.0 es-shim-unscopables: 1.0.2 - get-intrinsic: 1.2.2 dev: true /array.prototype.flat@1.3.2: resolution: {integrity: sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 + es-abstract: 1.22.4 es-shim-unscopables: 1.0.2 dev: true @@ -7352,39 +7721,40 @@ packages: resolution: {integrity: sha512-Ewyx0c9PmpcsByhSW4r+9zDU7sGjFc86qf/kKtuSCRdhfbk0SNLLkaT5qvcHnRGgc5NP/ly/y+qkXkqONX54CQ==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 + es-abstract: 1.22.4 es-shim-unscopables: 1.0.2 dev: true - /array.prototype.tosorted@1.1.2: - resolution: {integrity: sha512-HuQCHOlk1Weat5jzStICBCd83NxiIMwqDg/dHEsoefabn/hJRj5pVdWcPUSpRrwhwxZOsQassMpgN/xRYFBMIg==} + /array.prototype.tosorted@1.1.3: + resolution: {integrity: sha512-/DdH4TiTmOKzyQbp/eadcCVexiCb36xJg7HshYOYJnNZFDj33GEv0P7GxsynpShhq4OLYJzbGcBDkLsDt7MnNg==} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 + es-abstract: 1.22.4 + es-errors: 1.3.0 es-shim-unscopables: 1.0.2 - get-intrinsic: 1.2.2 dev: true - /arraybuffer.prototype.slice@1.0.2: - resolution: {integrity: sha512-yMBKppFur/fbHu9/6USUe03bZ4knMYiwFBcyiaXB8Go0qNehwX6inYPzK9U0NeQvGxKthcmHcaR8P5MStSRBAw==} + /arraybuffer.prototype.slice@1.0.3: + resolution: {integrity: 
sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A==} engines: {node: '>= 0.4'} dependencies: - array-buffer-byte-length: 1.0.0 - call-bind: 1.0.5 + array-buffer-byte-length: 1.0.1 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 - get-intrinsic: 1.2.2 - is-array-buffer: 3.0.2 - is-shared-array-buffer: 1.0.2 + es-abstract: 1.22.4 + es-errors: 1.3.0 + get-intrinsic: 1.2.4 + is-array-buffer: 3.0.4 + is-shared-array-buffer: 1.0.3 dev: true /assert@2.1.0: resolution: {integrity: sha512-eLHpSK/Y4nhMJ07gDaAzoX/XAKS8PSaojml3M0DM4JpV1LAi5JOJ/p6H/XWrl8L+DzVEvVCW1z3vWAaB9oTsQw==} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 is-nan: 1.3.2 object-is: 1.1.5 object.assign: 4.1.5 @@ -7395,20 +7765,6 @@ packages: resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==} dev: true - /ast-module-types@2.7.1: - resolution: {integrity: sha512-Rnnx/4Dus6fn7fTqdeLEAn5vUll5w7/vts0RN608yFa6si/rDOUonlIIiwugHBFWjylHjxm9owoSZn71KwG4gw==} - dev: true - - /ast-module-types@3.0.0: - resolution: {integrity: sha512-CMxMCOCS+4D+DkOQfuZf+vLrSEmY/7xtORwdxs4wtcC1wVgvk2MqFFTwQCFhvWsI4KPU9lcWXPI8DgRiz+xetQ==} - engines: {node: '>=6.0'} - dev: true - - /ast-module-types@4.0.0: - resolution: {integrity: sha512-Kd0o8r6CDazJGCRzs8Ivpn0xj19oNKrULhoJFzhGjRsLpekF2zyZs9Ukz+JvZhWD6smszfepakTFhAaYpsI12g==} - engines: {node: '>=12.0'} - dev: true - /ast-types@0.16.1: resolution: {integrity: sha512-6t10qk83GOG8p0vKmaCr8eiilZwO171AvbROMtvvNiwrTly62t+7XkA8RdIIVbpMhCASAsxgAzdRSwh6nw/5Dg==} engines: {node: '>=4'} @@ -7439,17 +7795,19 @@ packages: engines: {node: '>=4'} dev: false - /available-typed-arrays@1.0.5: - resolution: {integrity: sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==} + /available-typed-arrays@1.0.7: + resolution: {integrity: sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==} engines: {node: '>= 0.4'} + dependencies: + possible-typed-array-names: 1.0.0 dev: true - /babel-core@7.0.0-bridge.0(@babel/core@7.23.7): + /babel-core@7.0.0-bridge.0(@babel/core@7.23.9): resolution: {integrity: sha512-poPX9mZH/5CSanm50Q+1toVci6pv5KSRv/5TWCwtzQS5XEwn40BcCrgIeMFWP9CKKIniKXNxoIOnOq4VVlGXhg==} peerDependencies: '@babel/core': ^7.0.0-0 dependencies: - '@babel/core': 7.23.7 + '@babel/core': 7.23.9 dev: true /babel-plugin-istanbul@6.1.1: @@ -7469,47 +7827,52 @@ packages: resolution: {integrity: sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg==} engines: {node: '>=10', npm: '>=6'} dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 cosmiconfig: 7.1.0 resolve: 1.22.8 dev: false - /babel-plugin-polyfill-corejs2@0.4.8(@babel/core@7.23.7): + /babel-plugin-polyfill-corejs2@0.4.8(@babel/core@7.23.9): resolution: {integrity: sha512-OtIuQfafSzpo/LhnJaykc0R/MMnuLSSVjVYy9mHArIZ9qTCSZ6TpWCuEKZYVoN//t8HqBNScHrOtCrIK5IaGLg==} peerDependencies: '@babel/core': ^7.4.0 || ^8.0.0-0 <8.0.0 dependencies: '@babel/compat-data': 7.23.5 - '@babel/core': 7.23.7 - '@babel/helper-define-polyfill-provider': 0.5.0(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/helper-define-polyfill-provider': 0.5.0(@babel/core@7.23.9) semver: 6.3.1 transitivePeerDependencies: - supports-color dev: true - /babel-plugin-polyfill-corejs3@0.8.7(@babel/core@7.23.7): - resolution: {integrity: sha512-KyDvZYxAzkC0Aj2dAPyDzi2Ym15e5JKZSK+maI7NAwSqofvuFglbSsxE7wUOvTg9oFVnHMzVzBKcqEb4PJgtOA==} + 
/babel-plugin-polyfill-corejs3@0.9.0(@babel/core@7.23.9): + resolution: {integrity: sha512-7nZPG1uzK2Ymhy/NbaOWTg3uibM2BmGASS4vHS4szRZAIR8R6GwA/xAujpdrXU5iyklrimWnLWU+BLF9suPTqg==} peerDependencies: '@babel/core': ^7.4.0 || ^8.0.0-0 <8.0.0 dependencies: - '@babel/core': 7.23.7 - '@babel/helper-define-polyfill-provider': 0.4.4(@babel/core@7.23.7) - core-js-compat: 3.35.0 + '@babel/core': 7.23.9 + '@babel/helper-define-polyfill-provider': 0.5.0(@babel/core@7.23.9) + core-js-compat: 3.36.0 transitivePeerDependencies: - supports-color dev: true - /babel-plugin-polyfill-regenerator@0.5.5(@babel/core@7.23.7): + /babel-plugin-polyfill-regenerator@0.5.5(@babel/core@7.23.9): resolution: {integrity: sha512-OJGYZlhLqBh2DDHeqAxWB1XIvr49CxiJ2gIt61/PU55CQK4Z58OzMqjDe1zwQdQk+rBYsRc+1rJmdajM3gimHg==} peerDependencies: '@babel/core': ^7.4.0 || ^8.0.0-0 <8.0.0 dependencies: - '@babel/core': 7.23.7 - '@babel/helper-define-polyfill-provider': 0.5.0(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/helper-define-polyfill-provider': 0.5.0(@babel/core@7.23.9) transitivePeerDependencies: - supports-color dev: true + /babylon@6.18.0: + resolution: {integrity: sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==} + hasBin: true + dev: true + /balanced-match@1.0.2: resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} dev: true @@ -7563,6 +7926,13 @@ packages: - supports-color dev: true + /bole@5.0.11: + resolution: {integrity: sha512-KB0Ye0iMAW5BnNbnLfMSQcnI186hKUzE2fpkZWqcxsoTR7eqzlTidSOMYPHJOn/yR7VGH7uSZp37qH9q2Et0zQ==} + dependencies: + fast-safe-stringify: 2.1.1 + individual: 3.0.0 + dev: true + /boolean@3.2.0: resolution: {integrity: sha512-d0II/GO9uf9lfUHH2BQsjxzRJZBdsjgsBiW4BvhWk/3qoKwQFjIDVN19PfX8F2D/r9PCMTtLWjYVCFrpeYUzsw==} dev: false @@ -7604,15 +7974,15 @@ packages: pako: 0.2.9 dev: true - /browserslist@4.22.2: - resolution: {integrity: sha512-0UgcrvQmBDvZHFGdYUehrCNIazki7/lUP3kkoi/r3YB2amZbFM9J43ZRkJTXBUZK4gmx56+Sqk9+Vs9mwZx9+A==} + /browserslist@4.23.0: + resolution: {integrity: sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ==} engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} hasBin: true dependencies: - caniuse-lite: 1.0.30001579 - electron-to-chromium: 1.4.639 + caniuse-lite: 1.0.30001591 + electron-to-chromium: 1.4.682 node-releases: 2.0.14 - update-browserslist-db: 1.0.13(browserslist@4.22.2) + update-browserslist-db: 1.0.13(browserslist@4.23.0) dev: true /bser@2.1.1: @@ -7636,6 +8006,12 @@ packages: ieee754: 1.2.1 dev: true + /builtins@5.0.1: + resolution: {integrity: sha512-qwVpFEHNfhYJIzNRBvd2C1kyo6jz3ZSMPyyuR47OPdiKWlbYnZNyDWuyR175qDnAJLiCo5fBBqPb3RiXgWlkOQ==} + dependencies: + semver: 7.6.0 + dev: true + /bytes@3.0.0: resolution: {integrity: sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==} engines: {node: '>= 0.8'} @@ -7646,12 +8022,20 @@ packages: engines: {node: '>= 0.8'} dev: true - /call-bind@1.0.5: - resolution: {integrity: sha512-C3nQxfFZxFRVoJoGKKI8y3MOEo129NQ+FgQ08iye+Mk4zNZZGdjfs06bVTr+DBSlA66Q2VEcMki/cUCP4SercQ==} + /cac@6.7.14: + resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} + engines: {node: '>=8'} + dev: true + + /call-bind@1.0.7: + resolution: {integrity: sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==} + engines: {node: '>= 0.4'} 
dependencies: + es-define-property: 1.0.0 + es-errors: 1.3.0 function-bind: 1.1.2 - get-intrinsic: 1.2.2 - set-function-length: 1.1.1 + get-intrinsic: 1.2.4 + set-function-length: 1.2.1 dev: true /callsites@3.1.0: @@ -7663,8 +8047,8 @@ packages: engines: {node: '>=6'} dev: true - /caniuse-lite@1.0.30001579: - resolution: {integrity: sha512-u5AUVkixruKHJjw/pj9wISlcMpgFWzSrczLZbrqBSxukQixmg0SJ5sZTpvaFvxU0HoQKd4yoyAogyrAz9pzJnA==} + /caniuse-lite@1.0.30001591: + resolution: {integrity: sha512-PCzRMei/vXjJyL5mJtzNiUCKP59dm8Apqc3PH8gJkMnMXZGox93RbE76jHsmLwmIo6/3nsYIpJtx0O7u5PqFuQ==} dev: true /chai@4.4.1: @@ -7680,7 +8064,7 @@ packages: type-detect: 4.0.8 dev: true - /chakra-react-select@4.7.6(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.11.3)(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /chakra-react-select@4.7.6(@chakra-ui/form-control@2.2.0)(@chakra-ui/icon@3.2.0)(@chakra-ui/layout@2.3.1)(@chakra-ui/media-query@3.3.0)(@chakra-ui/menu@2.2.1)(@chakra-ui/spinner@2.1.0)(@chakra-ui/system@2.6.2)(@emotion/react@11.11.3)(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-ZL43hyXPnWf1g/HjsZDecbeJ4F2Q6tTPYJozlKWkrQ7lIX7ORP0aZYwmc5/Wly4UNzMimj2Vuosl6MmIXH+G2g==} peerDependencies: '@chakra-ui/form-control': ^2.0.0 @@ -7698,13 +8082,13 @@ packages: '@chakra-ui/icon': 3.2.0(@chakra-ui/system@2.6.2)(react@18.2.0) '@chakra-ui/layout': 2.3.1(@chakra-ui/system@2.6.2)(react@18.2.0) '@chakra-ui/media-query': 3.3.0(@chakra-ui/system@2.6.2)(react@18.2.0) - '@chakra-ui/menu': 2.2.1(@chakra-ui/system@2.6.2)(framer-motion@10.18.0)(react@18.2.0) + '@chakra-ui/menu': 2.2.1(@chakra-ui/system@2.6.2)(framer-motion@11.0.6)(react@18.2.0) '@chakra-ui/spinner': 2.1.0(@chakra-ui/system@2.6.2)(react@18.2.0) '@chakra-ui/system': 2.6.2(@emotion/react@11.11.3)(@emotion/styled@11.11.0)(react@18.2.0) - '@emotion/react': 11.11.3(@types/react@18.2.48)(react@18.2.0) + '@emotion/react': 11.11.3(@types/react@18.2.59)(react@18.2.0) react: 18.2.0 react-dom: 18.2.0(react@18.2.0) - react-select: 5.7.7(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + react-select: 5.7.7(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) transitivePeerDependencies: - '@types/react' dev: false @@ -7717,14 +8101,6 @@ packages: escape-string-regexp: 1.0.5 supports-color: 5.5.0 - /chalk@3.0.0: - resolution: {integrity: sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==} - engines: {node: '>=8'} - dependencies: - ansi-styles: 4.3.0 - supports-color: 7.2.0 - dev: true - /chalk@4.1.2: resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} engines: {node: '>=10'} @@ -7744,8 +8120,8 @@ packages: get-func-name: 2.0.2 dev: true - /chokidar@3.5.3: - resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==} + /chokidar@3.6.0: + resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} engines: {node: '>= 8.10.0'} dependencies: anymatch: 3.1.3 @@ -7773,8 +8149,8 @@ packages: engines: {node: '>=8'} dev: true - /citty@0.1.5: - resolution: {integrity: sha512-AS7n5NSc0OQVMV9v6wt3ByujNIrne0/cTjiC2MYqhvao57VNfiuVksTSr2p17nVOhEr2KtqiAkGwHcgMC/qUuQ==} + /citty@0.1.6: + resolution: {integrity: 
sha512-tskPPKEs8D2KPafUypv2gxwJP8h/OaJmC82QQGGDQcHvXX43xF2VDACcJVmZ0EuSxkpO9Kc4MlrA3q0+FG58AQ==} dependencies: consola: 3.2.3 dev: true @@ -7830,6 +8206,7 @@ packages: /clone@1.0.4: resolution: {integrity: sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==} engines: {node: '>=0.8'} + requiresBuild: true dev: true /color-convert@1.9.3: @@ -7869,21 +8246,24 @@ packages: /commander@2.20.3: resolution: {integrity: sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==} + dev: false + + /commander@4.1.1: + resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} + engines: {node: '>= 6'} + dev: true /commander@6.2.1: resolution: {integrity: sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA==} engines: {node: '>= 6'} dev: true - /commander@7.2.0: - resolution: {integrity: sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==} - engines: {node: '>= 10'} - dev: true - /commander@9.5.0: resolution: {integrity: sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==} engines: {node: ^12.20.0 || >=14} + requiresBuild: true dev: true + optional: true /commondir@1.0.1: resolution: {integrity: sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==} @@ -7893,6 +8273,12 @@ packages: resolution: {integrity: sha512-LNZQXhqUvqUTotpZ00qLSaify3b4VFD588aRr8MKFw4CMUr98ytzCW5wDH5qx/DEY5kCDXcbcRuCqL0szEf2tg==} dev: false + /compose-function@3.0.3: + resolution: {integrity: sha512-xzhzTJ5eC+gmIzvZq+C3kCJHsp9os6tJkrigDRZclyGtOKINbZtE8n1Tzmeh32jW+BUDPbvZpibwvJHBLGMVwg==} + dependencies: + arity-n: 1.0.4 + dev: true + /compressible@2.0.18: resolution: {integrity: sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==} engines: {node: '>= 0.6'} @@ -7993,10 +8379,10 @@ packages: toggle-selection: 1.0.6 dev: false - /core-js-compat@3.35.0: - resolution: {integrity: sha512-5blwFAddknKeNgsjBzilkdQ0+YK8L1PfqPYq40NOYMYFSS38qj+hpTcLLWwpIwA2A5bje/x5jmVn2tzUMg9IVw==} + /core-js-compat@3.36.0: + resolution: {integrity: sha512-iV9Pd/PsgjNWBXeq8XRtWVSgz2tKAfhfvBs7qxYty+RlRd+OCksaWmOnc4JKrTc1cToXL1N0s3l/vwlxPtdElw==} dependencies: - browserslist: 4.22.2 + browserslist: 4.23.0 dev: true /core-util-is@1.0.3: @@ -8039,7 +8425,7 @@ packages: /css-box-model@1.2.1: resolution: {integrity: sha512-a7Vr4Q/kd/aw96bnJG332W9V9LkJO69JRcaCYDUqjp6/z0w6VcZjgAcTbgFxEPfBgdnAwlh3iwu+hLopa+flJw==} dependencies: - tiny-invariant: 1.3.1 + tiny-invariant: 1.3.3 dev: false /css-in-js-utils@3.1.0: @@ -8056,10 +8442,6 @@ packages: source-map: 0.6.1 dev: false - /css.escape@1.5.1: - resolution: {integrity: sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==} - dev: true - /csstype@3.1.3: resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==} @@ -8128,11 +8510,16 @@ packages: d3-transition: 3.0.1(d3-selection@3.0.0) dev: false + /data-uri-to-buffer@3.0.1: + resolution: {integrity: sha512-WboRycPNsVw3B3TL559F7kuBUM4d8CgMEvk6xEJlOp7OBPjt6G7z8WMWlD2rOFZLk6OYfFIUGsCOWzcQH9K2og==} + engines: {node: '>= 6'} + dev: true + /date-fns@2.30.0: resolution: {integrity: sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==} engines: {node: '>=0.11'} dependencies: - 
'@babel/runtime': 7.23.6 + '@babel/runtime': 7.23.9 dev: true /dateformat@5.0.3: @@ -8189,33 +8576,8 @@ packages: type-detect: 4.0.8 dev: true - /deep-equal@2.2.3: - resolution: {integrity: sha512-ZIwpnevOurS8bpT4192sqAowWM76JDKSHYzMLty3BZGSswgq6pBaH3DhCSW5xVAZICZyKdOBPjwww5wfgT/6PA==} - engines: {node: '>= 0.4'} - dependencies: - array-buffer-byte-length: 1.0.0 - call-bind: 1.0.5 - es-get-iterator: 1.1.3 - get-intrinsic: 1.2.2 - is-arguments: 1.1.1 - is-array-buffer: 3.0.2 - is-date-object: 1.0.5 - is-regex: 1.1.4 - is-shared-array-buffer: 1.0.2 - isarray: 2.0.5 - object-is: 1.1.5 - object-keys: 1.1.1 - object.assign: 4.1.5 - regexp.prototype.flags: 1.5.1 - side-channel: 1.0.4 - which-boxed-primitive: 1.0.2 - which-collection: 1.0.1 - which-typed-array: 1.1.13 - dev: true - - /deep-extend@0.6.0: - resolution: {integrity: sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==} - engines: {node: '>=4.0.0'} + /deep-freeze@0.0.1: + resolution: {integrity: sha512-Z+z8HiAvsGwmjqlphnHW5oz6yWlOwu6EQfFTjmeTWlDeda3FS2yv3jhq35TX/ewmsnqB+RX2IdsIOyjJCQN5tg==} dev: true /deep-is@0.1.4: @@ -8232,17 +8594,18 @@ packages: /defaults@1.0.4: resolution: {integrity: sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==} + requiresBuild: true dependencies: clone: 1.0.4 dev: true - /define-data-property@1.1.1: - resolution: {integrity: sha512-E7uGkTzkk1d0ByLeSc6ZsFS79Axg+m1P/VsgYsxHgiuc3tFSj+MjMIwe90FC4lOAZzNBdY7kkO2P2wKdsQ1vgQ==} + /define-data-property@1.1.4: + resolution: {integrity: sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==} engines: {node: '>= 0.4'} dependencies: - get-intrinsic: 1.2.2 + es-define-property: 1.0.0 + es-errors: 1.3.0 gopd: 1.0.1 - has-property-descriptors: 1.0.1 /define-lazy-prop@2.0.0: resolution: {integrity: sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==} @@ -8253,8 +8616,8 @@ packages: resolution: {integrity: sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==} engines: {node: '>= 0.4'} dependencies: - define-data-property: 1.1.1 - has-property-descriptors: 1.0.1 + define-data-property: 1.1.4 + has-property-descriptors: 1.0.2 object-keys: 1.1.1 /defu@6.1.4: @@ -8285,20 +8648,6 @@ packages: engines: {node: '>= 0.8'} dev: true - /dependency-tree@9.0.0: - resolution: {integrity: sha512-osYHZJ1fBSon3lNLw70amAXsQ+RGzXsPvk9HbBgTLbp/bQBmpH5mOmsUvqXU+YEWVU0ZLewsmzOET/8jWswjDQ==} - engines: {node: ^10.13 || ^12 || >=14} - hasBin: true - dependencies: - commander: 2.20.3 - debug: 4.3.4 - filing-cabinet: 3.3.1 - precinct: 9.2.1 - typescript: 4.9.5 - transitivePeerDependencies: - - supports-color - dev: true - /dequal@2.0.3: resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} engines: {node: '>=6'} @@ -8334,160 +8683,6 @@ packages: - supports-color dev: true - /detective-amd@3.1.2: - resolution: {integrity: sha512-jffU26dyqJ37JHR/o44La6CxtrDf3Rt9tvd2IbImJYxWKTMdBjctp37qoZ6ZcY80RHg+kzWz4bXn39e4P7cctQ==} - engines: {node: '>=6.0'} - hasBin: true - dependencies: - ast-module-types: 3.0.0 - escodegen: 2.1.0 - get-amd-module-type: 3.0.2 - node-source-walk: 4.3.0 - dev: true - - /detective-amd@4.2.0: - resolution: {integrity: sha512-RbuEJHz78A8nW7CklkqTzd8lDCN42En53dgEIsya0DilpkwslamSZDasLg8dJyxbw46OxhSQeY+C2btdSkCvQQ==} - engines: {node: '>=12'} - hasBin: true - dependencies: - ast-module-types: 4.0.0 - 
escodegen: 2.1.0 - get-amd-module-type: 4.1.0 - node-source-walk: 5.0.2 - dev: true - - /detective-cjs@3.1.3: - resolution: {integrity: sha512-ljs7P0Yj9MK64B7G0eNl0ThWSYjhAaSYy+fQcpzaKalYl/UoQBOzOeLCSFEY1qEBhziZ3w7l46KG/nH+s+L7BQ==} - engines: {node: '>=6.0'} - dependencies: - ast-module-types: 3.0.0 - node-source-walk: 4.3.0 - dev: true - - /detective-cjs@4.1.0: - resolution: {integrity: sha512-QxzMwt5MfPLwS7mG30zvnmOvHLx5vyVvjsAV6gQOyuMoBR5G1DhS1eJZ4P10AlH+HSnk93mTcrg3l39+24XCtg==} - engines: {node: '>=12'} - dependencies: - ast-module-types: 4.0.0 - node-source-walk: 5.0.2 - dev: true - - /detective-es6@2.2.2: - resolution: {integrity: sha512-eZUKCUsbHm8xoeoCM0z6JFwvDfJ5Ww5HANo+jPR7AzkFpW9Mun3t/TqIF2jjeWa2TFbAiGaWESykf2OQp3oeMw==} - engines: {node: '>=6.0'} - dependencies: - node-source-walk: 4.3.0 - dev: true - - /detective-es6@3.0.1: - resolution: {integrity: sha512-evPeYIEdK1jK3Oji5p0hX4sPV/1vK+o4ihcWZkMQE6voypSW/cIBiynOLxQk5KOOQbdP8oOAsYqouMTYO5l1sw==} - engines: {node: '>=12'} - dependencies: - node-source-walk: 5.0.2 - dev: true - - /detective-less@1.0.2: - resolution: {integrity: sha512-Rps1xDkEEBSq3kLdsdnHZL1x2S4NGDcbrjmd4q+PykK5aJwDdP5MBgrJw1Xo+kyUHuv3JEzPqxr+Dj9ryeDRTA==} - engines: {node: '>= 6.0'} - dependencies: - debug: 4.3.4 - gonzales-pe: 4.3.0 - node-source-walk: 4.3.0 - transitivePeerDependencies: - - supports-color - dev: true - - /detective-postcss@4.0.0: - resolution: {integrity: sha512-Fwc/g9VcrowODIAeKRWZfVA/EufxYL7XfuqJQFroBKGikKX83d2G7NFw6kDlSYGG3LNQIyVa+eWv1mqre+v4+A==} - engines: {node: ^10 || ^12 || >=14} - dependencies: - debug: 4.3.4 - is-url: 1.2.4 - postcss: 8.4.33 - postcss-values-parser: 2.0.1 - transitivePeerDependencies: - - supports-color - dev: true - - /detective-postcss@6.1.3: - resolution: {integrity: sha512-7BRVvE5pPEvk2ukUWNQ+H2XOq43xENWbH0LcdCE14mwgTBEAMoAx+Fc1rdp76SmyZ4Sp48HlV7VedUnP6GA1Tw==} - engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} - dependencies: - is-url: 1.2.4 - postcss: 8.4.33 - postcss-values-parser: 6.0.2(postcss@8.4.33) - dev: true - - /detective-sass@3.0.2: - resolution: {integrity: sha512-DNVYbaSlmti/eztFGSfBw4nZvwsTaVXEQ4NsT/uFckxhJrNRFUh24d76KzoCC3aarvpZP9m8sC2L1XbLej4F7g==} - engines: {node: '>=6.0'} - dependencies: - gonzales-pe: 4.3.0 - node-source-walk: 4.3.0 - dev: true - - /detective-sass@4.1.3: - resolution: {integrity: sha512-xGRbwGaGte57gvEqM8B9GDiURY3El/H49vA6g9wFkxq9zalmTlTAuqWu+BsH0iwonGPruLt55tZZDEZqPc6lag==} - engines: {node: '>=12'} - dependencies: - gonzales-pe: 4.3.0 - node-source-walk: 5.0.2 - dev: true - - /detective-scss@2.0.2: - resolution: {integrity: sha512-hDWnWh/l0tht/7JQltumpVea/inmkBaanJUcXRB9kEEXVwVUMuZd6z7eusQ6GcBFrfifu3pX/XPyD7StjbAiBg==} - engines: {node: '>=6.0'} - dependencies: - gonzales-pe: 4.3.0 - node-source-walk: 4.3.0 - dev: true - - /detective-scss@3.1.1: - resolution: {integrity: sha512-FWkfru1jZBhUeuBsOeGKXKAVDrzYFSQFK2o2tuG/nCCFQ0U/EcXC157MNAcR5mmj+mCeneZzlkBOFJTesDjrww==} - engines: {node: '>=12'} - dependencies: - gonzales-pe: 4.3.0 - node-source-walk: 5.0.2 - dev: true - - /detective-stylus@1.0.3: - resolution: {integrity: sha512-4/bfIU5kqjwugymoxLXXLltzQNeQfxGoLm2eIaqtnkWxqbhap9puDVpJPVDx96hnptdERzS5Cy6p9N8/08A69Q==} - dev: true - - /detective-stylus@2.0.1: - resolution: {integrity: sha512-/Tvs1pWLg8eYwwV6kZQY5IslGaYqc/GACxjcaGudiNtN5nKCH6o2WnJK3j0gA3huCnoQcbv8X7oz/c1lnvE3zQ==} - engines: {node: '>=6.0'} - dev: true - - /detective-stylus@3.0.0: - resolution: {integrity: sha512-1xYTzbrduExqMYmte7Qk99IRA3Aa6oV7PYzd+3yDcQXkmENvyGF/arripri6lxRDdNYEb4fZFuHtNRAXbz3iAA==} - engines: 
{node: '>=12'} - dev: true - - /detective-typescript@7.0.2: - resolution: {integrity: sha512-unqovnhxzvkCz3m1/W4QW4qGsvXCU06aU2BAm8tkza+xLnp9SOFnob2QsTxUv5PdnQKfDvWcv9YeOeFckWejwA==} - engines: {node: ^10.13 || >=12.0.0} - dependencies: - '@typescript-eslint/typescript-estree': 4.33.0(typescript@3.9.10) - ast-module-types: 2.7.1 - node-source-walk: 4.3.0 - typescript: 3.9.10 - transitivePeerDependencies: - - supports-color - dev: true - - /detective-typescript@9.1.1: - resolution: {integrity: sha512-Uc1yVutTF0RRm1YJ3g//i1Cn2vx1kwHj15cnzQP6ff5koNzQ0idc1zAC73ryaWEulA0ElRXFTq6wOqe8vUQ3MA==} - engines: {node: ^12.20.0 || ^14.14.0 || >=16.0.0} - dependencies: - '@typescript-eslint/typescript-estree': 5.62.0(typescript@4.9.5) - ast-module-types: 4.0.0 - node-source-walk: 5.0.2 - typescript: 4.9.5 - transitivePeerDependencies: - - supports-color - dev: true - /diff-match-patch@1.0.5: resolution: {integrity: sha512-IayShXAgj/QMXgB0IWmKx+rOPuGMhqm5w6jvFxmVenXKIzRqTAAsbBPT3kWQeGANj3jGgvcvv4yK6SxqYmikgw==} dev: false @@ -8522,18 +8717,10 @@ packages: esutils: 2.0.3 dev: true - /dom-accessibility-api@0.5.16: - resolution: {integrity: sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==} - dev: true - - /dom-accessibility-api@0.6.3: - resolution: {integrity: sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==} - dev: true - /dom-helpers@5.2.1: resolution: {integrity: sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==} dependencies: - '@babel/runtime': 7.23.7 + '@babel/runtime': 7.23.9 csstype: 3.1.3 dev: false @@ -8542,11 +8729,24 @@ packages: engines: {node: '>=12'} dev: true - /dotenv@16.3.1: - resolution: {integrity: sha512-IPzF4w4/Rd94bA9imS68tZBaYyBWSCE47V1RGuMrB94iyTOIEwRmVL2x/4An+6mETpLrKJ5hQkB8W4kFAadeIQ==} + /dotenv@16.4.5: + resolution: {integrity: sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==} engines: {node: '>=12'} dev: true + /dpdm@3.14.0: + resolution: {integrity: sha512-YJzsFSyEtj88q5eTELg3UWU7TVZkG1dpbF4JDQ3t1b07xuzXmdoGeSz9TKOke1mUuOpWlk4q+pBh+aHzD6GBTg==} + hasBin: true + dependencies: + chalk: 4.1.2 + fs-extra: 11.2.0 + glob: 10.3.10 + ora: 5.4.1 + tslib: 2.6.2 + typescript: 5.3.3 + yargs: 17.7.2 + dev: true + /duplexify@3.7.1: resolution: {integrity: sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==} dependencies: @@ -8560,6 +8760,14 @@ packages: resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==} dev: true + /easy-table@1.2.0: + resolution: {integrity: sha512-OFzVOv03YpvtcWGe5AayU5G2hgybsg3iqA6drU8UaoZyB9jLGMTrz9+asnLp/E+6qPh88yEI1gvyZFZ41dmgww==} + dependencies: + ansi-regex: 5.0.1 + optionalDependencies: + wcwidth: 1.0.1 + dev: true + /ee-first@1.1.1: resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} dev: true @@ -8572,8 +8780,8 @@ packages: jake: 10.8.7 dev: true - /electron-to-chromium@1.4.639: - resolution: {integrity: sha512-CkKf3ZUVZchr+zDpAlNLEEy2NJJ9T64ULWaDgy3THXXlPVPkLu3VOs9Bac44nebVtdwl2geSj6AxTtGDOxoXhg==} + /electron-to-chromium@1.4.682: + resolution: {integrity: sha512-oCglfs8yYKs9RQjJFOHonSnhikPK3y+0SvSYc/YpYJV//6rqc0/hbwd0c7vgK4vrl6y2gJAwjkhkSGWK+z4KRA==} dev: true /emoji-regex@8.0.0: @@ -8584,6 +8792,13 @@ packages: resolution: {integrity: 
sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==} dev: true + /encode-registry@3.0.1: + resolution: {integrity: sha512-6qOwkl1g0fv0DN3Y3ggr2EaZXN71aoAqPp3p/pVaWSBSIo+YjLOWN61Fva43oVyQNPf7kgm8lkudzlzojwE2jw==} + engines: {node: '>=10'} + dependencies: + mem: 8.1.1 + dev: true + /encodeurl@1.0.2: resolution: {integrity: sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==} engines: {node: '>= 0.8'} @@ -8600,7 +8815,7 @@ packages: dependencies: '@socket.io/component-emitter': 3.1.0 debug: 4.3.4 - engine.io-parser: 5.2.1 + engine.io-parser: 5.2.2 ws: 8.11.0 xmlhttprequest-ssl: 2.0.0 transitivePeerDependencies: @@ -8609,30 +8824,26 @@ packages: - utf-8-validate dev: false - /engine.io-parser@5.2.1: - resolution: {integrity: sha512-9JktcM3u18nU9N2Lz3bWeBgxVgOKpw7yhRaoxQA3FUDZzzw+9WlA6p4G4u0RixNkg14fH7EfEc/RhpurtiROTQ==} + /engine.io-parser@5.2.2: + resolution: {integrity: sha512-RcyUFKA93/CXH20l4SoVvzZfrSDMOTUS3bWVpTt2FuFP+XYrL8i8oonHP7WInRyVHXh0n/ORtoeiE1os+8qkSw==} engines: {node: '>=10.0.0'} dev: false - /enhanced-resolve@5.15.0: - resolution: {integrity: sha512-LXYT42KJ7lpIKECr2mAXIaMldcNCh/7E0KBKOu4KSfkHmP+mZmSs+8V5gBAqisWBy0OO4W5Oyys0GO1Y8KtdKg==} - engines: {node: '>=10.13.0'} - dependencies: - graceful-fs: 4.2.11 - tapable: 2.2.1 - dev: true - /entities@4.5.0: resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} engines: {node: '>=0.12'} dev: true - /envinfo@7.11.0: - resolution: {integrity: sha512-G9/6xF1FPbIw0TtalAMaVPpiq2aDEuKLXM314jPVAO9r2fo2a4BLqMNkmRS7O/xPPZ+COAhGIz3ETvHEV3eUcg==} + /envinfo@7.11.1: + resolution: {integrity: sha512-8PiZgZNIB4q/Lw4AhOvAfB/ityHAd2bli3lESSWmWSzSsl5dKpy5N1d1Rfkd2teq/g9xN90lc6o98DOjMeYHpg==} engines: {node: '>=4'} hasBin: true dev: true + /err-code@2.0.3: + resolution: {integrity: sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==} + dev: true + /error-ex@1.3.2: resolution: {integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==} dependencies: @@ -8644,101 +8855,105 @@ packages: stackframe: 1.3.4 dev: false - /es-abstract@1.22.3: - resolution: {integrity: sha512-eiiY8HQeYfYH2Con2berK+To6GrK2RxbPawDkGq4UiCQQfZHb6wX9qQqkbpPqaxQFcl8d9QzZqo0tGE0VcrdwA==} + /es-abstract@1.22.4: + resolution: {integrity: sha512-vZYJlk2u6qHYxBOTjAeg7qUxHdNfih64Uu2J8QqWgXZ2cri0ZpJAkzDUK/q593+mvKwlxyaxr6F1Q+3LKoQRgg==} engines: {node: '>= 0.4'} dependencies: - array-buffer-byte-length: 1.0.0 - arraybuffer.prototype.slice: 1.0.2 - available-typed-arrays: 1.0.5 - call-bind: 1.0.5 - es-set-tostringtag: 2.0.2 + array-buffer-byte-length: 1.0.1 + arraybuffer.prototype.slice: 1.0.3 + available-typed-arrays: 1.0.7 + call-bind: 1.0.7 + es-define-property: 1.0.0 + es-errors: 1.3.0 + es-set-tostringtag: 2.0.3 es-to-primitive: 1.2.1 function.prototype.name: 1.1.6 - get-intrinsic: 1.2.2 - get-symbol-description: 1.0.0 + get-intrinsic: 1.2.4 + get-symbol-description: 1.0.2 globalthis: 1.0.3 gopd: 1.0.1 - has-property-descriptors: 1.0.1 - has-proto: 1.0.1 + has-property-descriptors: 1.0.2 + has-proto: 1.0.3 has-symbols: 1.0.3 - hasown: 2.0.0 - internal-slot: 1.0.6 - is-array-buffer: 3.0.2 + hasown: 2.0.1 + internal-slot: 1.0.7 + is-array-buffer: 3.0.4 is-callable: 1.2.7 - is-negative-zero: 2.0.2 + is-negative-zero: 2.0.3 is-regex: 1.1.4 - is-shared-array-buffer: 1.0.2 + is-shared-array-buffer: 1.0.3 is-string: 1.0.7 - is-typed-array: 1.1.12 + 
is-typed-array: 1.1.13 is-weakref: 1.0.2 object-inspect: 1.13.1 object-keys: 1.1.1 object.assign: 4.1.5 - regexp.prototype.flags: 1.5.1 - safe-array-concat: 1.0.1 - safe-regex-test: 1.0.0 + regexp.prototype.flags: 1.5.2 + safe-array-concat: 1.1.0 + safe-regex-test: 1.0.3 string.prototype.trim: 1.2.8 string.prototype.trimend: 1.0.7 string.prototype.trimstart: 1.0.7 - typed-array-buffer: 1.0.0 - typed-array-byte-length: 1.0.0 - typed-array-byte-offset: 1.0.0 - typed-array-length: 1.0.4 + typed-array-buffer: 1.0.2 + typed-array-byte-length: 1.0.1 + typed-array-byte-offset: 1.0.2 + typed-array-length: 1.0.5 unbox-primitive: 1.0.2 - which-typed-array: 1.1.13 + which-typed-array: 1.1.14 dev: true - /es-get-iterator@1.1.3: - resolution: {integrity: sha512-sPZmqHBe6JIiTfN5q2pEi//TwxmAFHwj/XEuYjTuse78i8KxaqMTTzxPoFKuzRpDpTJ+0NAbpfenkmH2rePtuw==} + /es-array-method-boxes-properly@1.0.0: + resolution: {integrity: sha512-wd6JXUmyHmt8T5a2xreUwKcGPq6f1f+WwIJkijUqiGcJz1qqnZgP6XIK+QyIWU5lT7imeNxUll48bziG+TSYcA==} + dev: true + + /es-define-property@1.0.0: + resolution: {integrity: sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==} + engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 - get-intrinsic: 1.2.2 - has-symbols: 1.0.3 - is-arguments: 1.1.1 - is-map: 2.0.2 - is-set: 2.0.2 - is-string: 1.0.7 - isarray: 2.0.5 - stop-iteration-iterator: 1.0.0 - dev: true + get-intrinsic: 1.2.4 - /es-iterator-helpers@1.0.15: - resolution: {integrity: sha512-GhoY8uYqd6iwUl2kgjTm4CZAf6oo5mHK7BPqx3rKgx893YSsy0LGHV6gfqqQvZt/8xM8xeOnfXBCfqclMKkJ5g==} + /es-errors@1.3.0: + resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} + engines: {node: '>= 0.4'} + + /es-iterator-helpers@1.0.17: + resolution: {integrity: sha512-lh7BsUqelv4KUbR5a/ZTaGGIMLCjPGPqJ6q+Oq24YP0RdyptX1uzm4vvaqzk7Zx3bpl/76YLTTDj9L7uYQ92oQ==} + engines: {node: '>= 0.4'} dependencies: asynciterator.prototype: 1.0.0 - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 - es-set-tostringtag: 2.0.2 + es-abstract: 1.22.4 + es-errors: 1.3.0 + es-set-tostringtag: 2.0.3 function-bind: 1.1.2 - get-intrinsic: 1.2.2 + get-intrinsic: 1.2.4 globalthis: 1.0.3 - has-property-descriptors: 1.0.1 - has-proto: 1.0.1 + has-property-descriptors: 1.0.2 + has-proto: 1.0.3 has-symbols: 1.0.3 - internal-slot: 1.0.6 + internal-slot: 1.0.7 iterator.prototype: 1.1.2 - safe-array-concat: 1.0.1 + safe-array-concat: 1.1.0 dev: true /es-module-lexer@0.9.3: resolution: {integrity: sha512-1HQ2M2sPtxwnvOvT1ZClHyQDiggdNjURWpY2we6aMKCQiUVxTmVs2UYPLIrD84sS+kMdUwfBSylbJPwNnBrnHQ==} dev: true - /es-set-tostringtag@2.0.2: - resolution: {integrity: sha512-BuDyupZt65P9D2D2vA/zqcI3G5xRsklm5N3xCwuiy+/vKy8i0ifdsQP1sLgO4tZDSCaQUSnmC48khknGMV3D2Q==} + /es-set-tostringtag@2.0.3: + resolution: {integrity: sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==} engines: {node: '>= 0.4'} dependencies: - get-intrinsic: 1.2.2 - has-tostringtag: 1.0.0 - hasown: 2.0.0 + get-intrinsic: 1.2.4 + has-tostringtag: 1.0.2 + hasown: 2.0.1 dev: true /es-shim-unscopables@1.0.2: resolution: {integrity: sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==} dependencies: - hasown: 2.0.0 + hasown: 2.0.1 dev: true /es-to-primitive@1.2.1: @@ -8795,39 +9010,39 @@ packages: '@esbuild/win32-x64': 0.18.20 dev: true - /esbuild@0.19.11: - resolution: {integrity: 
sha512-HJ96Hev2hX/6i5cDVwcqiJBBtuo9+FeIJOtZ9W1kA5M6AMJRHUZlpYZ1/SbEwtO0ioNAW8rUooVpC/WehY2SfA==} + /esbuild@0.19.12: + resolution: {integrity: sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg==} engines: {node: '>=12'} hasBin: true requiresBuild: true optionalDependencies: - '@esbuild/aix-ppc64': 0.19.11 - '@esbuild/android-arm': 0.19.11 - '@esbuild/android-arm64': 0.19.11 - '@esbuild/android-x64': 0.19.11 - '@esbuild/darwin-arm64': 0.19.11 - '@esbuild/darwin-x64': 0.19.11 - '@esbuild/freebsd-arm64': 0.19.11 - '@esbuild/freebsd-x64': 0.19.11 - '@esbuild/linux-arm': 0.19.11 - '@esbuild/linux-arm64': 0.19.11 - '@esbuild/linux-ia32': 0.19.11 - '@esbuild/linux-loong64': 0.19.11 - '@esbuild/linux-mips64el': 0.19.11 - '@esbuild/linux-ppc64': 0.19.11 - '@esbuild/linux-riscv64': 0.19.11 - '@esbuild/linux-s390x': 0.19.11 - '@esbuild/linux-x64': 0.19.11 - '@esbuild/netbsd-x64': 0.19.11 - '@esbuild/openbsd-x64': 0.19.11 - '@esbuild/sunos-x64': 0.19.11 - '@esbuild/win32-arm64': 0.19.11 - '@esbuild/win32-ia32': 0.19.11 - '@esbuild/win32-x64': 0.19.11 + '@esbuild/aix-ppc64': 0.19.12 + '@esbuild/android-arm': 0.19.12 + '@esbuild/android-arm64': 0.19.12 + '@esbuild/android-x64': 0.19.12 + '@esbuild/darwin-arm64': 0.19.12 + '@esbuild/darwin-x64': 0.19.12 + '@esbuild/freebsd-arm64': 0.19.12 + '@esbuild/freebsd-x64': 0.19.12 + '@esbuild/linux-arm': 0.19.12 + '@esbuild/linux-arm64': 0.19.12 + '@esbuild/linux-ia32': 0.19.12 + '@esbuild/linux-loong64': 0.19.12 + '@esbuild/linux-mips64el': 0.19.12 + '@esbuild/linux-ppc64': 0.19.12 + '@esbuild/linux-riscv64': 0.19.12 + '@esbuild/linux-s390x': 0.19.12 + '@esbuild/linux-x64': 0.19.12 + '@esbuild/netbsd-x64': 0.19.12 + '@esbuild/openbsd-x64': 0.19.12 + '@esbuild/sunos-x64': 0.19.12 + '@esbuild/win32-arm64': 0.19.12 + '@esbuild/win32-ia32': 0.19.12 + '@esbuild/win32-x64': 0.19.12 dev: true - /escalade@3.1.1: - resolution: {integrity: sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==} + /escalade@3.1.2: + resolution: {integrity: sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==} engines: {node: '>=6'} dev: true @@ -8855,13 +9070,13 @@ packages: source-map: 0.6.1 dev: true - /eslint-config-prettier@9.1.0(eslint@8.56.0): + /eslint-config-prettier@9.1.0(eslint@8.57.0): resolution: {integrity: sha512-NSWl5BFQWEPi1j4TjVNItzYV7dZXZ+wP6I6ZhrBGpChQhZRUaElihE9uRRkcbRnNb76UMKDF3r+WTmNcGPKsqw==} hasBin: true peerDependencies: eslint: '>=7.0.0' dependencies: - eslint: 8.56.0 + eslint: 8.57.0 dev: true /eslint-import-resolver-node@0.3.9: @@ -8874,8 +9089,8 @@ packages: - supports-color dev: true - /eslint-module-utils@2.8.0(@typescript-eslint/parser@6.19.0)(eslint-import-resolver-node@0.3.9)(eslint@8.56.0): - resolution: {integrity: sha512-aWajIYfsqCKRDgUfjEXNN/JlrzauMuSEy5sbd7WXbtW3EH6A6MpwEh42c7qD+MqQo9QMJ6fWLAeIJynx0g6OAw==} + /eslint-module-utils@2.8.1(@typescript-eslint/parser@7.1.0)(eslint-import-resolver-node@0.3.9)(eslint@8.57.0): + resolution: {integrity: sha512-rXDXR3h7cs7dy9RNpUlQf80nX31XWJEyGq1tRMo+6GsO5VmTe4UTwtmonAD4ZkAsrfMVDA2wlGJ3790Ys+D49Q==} engines: {node: '>=4'} peerDependencies: '@typescript-eslint/parser': '*' @@ -8895,9 +9110,9 @@ packages: eslint-import-resolver-webpack: optional: true dependencies: - '@typescript-eslint/parser': 6.19.0(eslint@8.56.0)(typescript@5.3.3) + '@typescript-eslint/parser': 7.1.0(eslint@8.57.0)(typescript@5.3.3) debug: 3.2.7 - eslint: 8.56.0 + eslint: 8.57.0 eslint-import-resolver-node: 0.3.9 
transitivePeerDependencies: - supports-color @@ -8911,7 +9126,7 @@ packages: requireindex: 1.1.0 dev: true - /eslint-plugin-import@2.29.1(@typescript-eslint/parser@6.19.0)(eslint@8.56.0): + /eslint-plugin-import@2.29.1(@typescript-eslint/parser@7.1.0)(eslint@8.57.0): resolution: {integrity: sha512-BbPC0cuExzhiMo4Ff1BTVwHpjjv28C5R+btTOGaCRC7UEz801up0JadwkeSk5Ued6TG34uaczuVuH6qyy5YUxw==} engines: {node: '>=4'} peerDependencies: @@ -8921,22 +9136,22 @@ packages: '@typescript-eslint/parser': optional: true dependencies: - '@typescript-eslint/parser': 6.19.0(eslint@8.56.0)(typescript@5.3.3) + '@typescript-eslint/parser': 7.1.0(eslint@8.57.0)(typescript@5.3.3) array-includes: 3.1.7 - array.prototype.findlastindex: 1.2.3 + array.prototype.findlastindex: 1.2.4 array.prototype.flat: 1.3.2 array.prototype.flatmap: 1.3.2 debug: 3.2.7 doctrine: 2.1.0 - eslint: 8.56.0 + eslint: 8.57.0 eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.8.0(@typescript-eslint/parser@6.19.0)(eslint-import-resolver-node@0.3.9)(eslint@8.56.0) - hasown: 2.0.0 + eslint-module-utils: 2.8.1(@typescript-eslint/parser@7.1.0)(eslint-import-resolver-node@0.3.9)(eslint@8.57.0) + hasown: 2.0.1 is-core-module: 2.13.1 is-glob: 4.0.3 minimatch: 3.1.2 object.fromentries: 2.0.7 - object.groupby: 1.0.1 + object.groupby: 1.0.2 object.values: 1.1.7 semver: 6.3.1 tsconfig-paths: 3.15.0 @@ -8946,34 +9161,34 @@ packages: - supports-color dev: true - /eslint-plugin-path@1.2.4(eslint@8.56.0): + /eslint-plugin-path@1.2.4(eslint@8.57.0): resolution: {integrity: sha512-CKEoNpXfqVDTIwSSrBv64a/kk6V1sTyyo5jS1UFZ/+z8+aFw4Bg2g4LmmLEAkP6LxMlCbg6kf3DmQ0Xkl/UpTA==} engines: {node: '>= 12.22.0'} peerDependencies: eslint: '>=6.0.0' dependencies: - eslint: 8.56.0 + eslint: 8.57.0 load-tsconfig: 0.2.5 dev: true - /eslint-plugin-react-hooks@4.6.0(eslint@8.56.0): + /eslint-plugin-react-hooks@4.6.0(eslint@8.57.0): resolution: {integrity: sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==} engines: {node: '>=10'} peerDependencies: eslint: ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 dependencies: - eslint: 8.56.0 + eslint: 8.57.0 dev: true - /eslint-plugin-react-refresh@0.4.5(eslint@8.56.0): + /eslint-plugin-react-refresh@0.4.5(eslint@8.57.0): resolution: {integrity: sha512-D53FYKJa+fDmZMtriODxvhwrO+IOqrxoEo21gMA0sjHdU6dPVH4OhyFip9ypl8HOF5RV5KdTo+rBQLvnY2cO8w==} peerDependencies: eslint: '>=7' dependencies: - eslint: 8.56.0 + eslint: 8.57.0 dev: true - /eslint-plugin-react@7.33.2(eslint@8.56.0): + /eslint-plugin-react@7.33.2(eslint@8.57.0): resolution: {integrity: sha512-73QQMKALArI8/7xGLNI/3LylrEYrlKZSb5C9+q3OtOewTnMQi5cT+aE9E41sLCmli3I9PGGmD1yiZydyo4FEPw==} engines: {node: '>=4'} peerDependencies: @@ -8981,10 +9196,10 @@ packages: dependencies: array-includes: 3.1.7 array.prototype.flatmap: 1.3.2 - array.prototype.tosorted: 1.1.2 + array.prototype.tosorted: 1.1.3 doctrine: 2.1.0 - es-iterator-helpers: 1.0.15 - eslint: 8.56.0 + es-iterator-helpers: 1.0.17 + eslint: 8.57.0 estraverse: 5.3.0 jsx-ast-utils: 3.3.5 minimatch: 3.1.2 @@ -8998,23 +9213,23 @@ packages: string.prototype.matchall: 4.0.10 dev: true - /eslint-plugin-simple-import-sort@10.0.0(eslint@8.56.0): - resolution: {integrity: sha512-AeTvO9UCMSNzIHRkg8S6c3RPy5YEwKWSQPx3DYghLedo2ZQxowPFLGDN1AZ2evfg6r6mjBSZSLxLFsWSu3acsw==} + /eslint-plugin-simple-import-sort@12.0.0(eslint@8.57.0): + resolution: {integrity: sha512-8o0dVEdAkYap0Cn5kNeklaKcT1nUsa3LITWEuFk3nJifOoD+5JQGoyDUW2W/iPWwBsNBJpyJS9y4je/BgxLcyQ==} peerDependencies: eslint: '>=5.0.0' 
dependencies: - eslint: 8.56.0 + eslint: 8.57.0 dev: true - /eslint-plugin-storybook@0.6.15(eslint@8.56.0)(typescript@5.3.3): - resolution: {integrity: sha512-lAGqVAJGob47Griu29KXYowI4G7KwMoJDOkEip8ujikuDLxU+oWJ1l0WL6F2oDO4QiyUFXvtDkEkISMOPzo+7w==} - engines: {node: 12.x || 14.x || >= 16} + /eslint-plugin-storybook@0.8.0(eslint@8.57.0)(typescript@5.3.3): + resolution: {integrity: sha512-CZeVO5EzmPY7qghO2t64oaFM+8FTaD4uzOEjHKp516exyTKo+skKAL9GI3QALS2BXhyALJjNtwbmr1XinGE8bA==} + engines: {node: '>= 18'} peerDependencies: eslint: '>=6' dependencies: '@storybook/csf': 0.0.1 - '@typescript-eslint/utils': 5.62.0(eslint@8.56.0)(typescript@5.3.3) - eslint: 8.56.0 + '@typescript-eslint/utils': 5.62.0(eslint@8.57.0)(typescript@5.3.3) + eslint: 8.57.0 requireindex: 1.2.0 ts-dedent: 2.2.0 transitivePeerDependencies: @@ -9022,18 +9237,18 @@ packages: - typescript dev: true - /eslint-plugin-unused-imports@3.0.0(@typescript-eslint/eslint-plugin@6.19.0)(eslint@8.56.0): - resolution: {integrity: sha512-sduiswLJfZHeeBJ+MQaG+xYzSWdRXoSw61DpU13mzWumCkR0ufD0HmO4kdNokjrkluMHpj/7PJeN35pgbhW3kw==} + /eslint-plugin-unused-imports@3.1.0(@typescript-eslint/eslint-plugin@7.1.0)(eslint@8.57.0): + resolution: {integrity: sha512-9l1YFCzXKkw1qtAru1RWUtG2EVDZY0a0eChKXcL+EZ5jitG7qxdctu4RnvhOJHv4xfmUf7h+JJPINlVpGhZMrw==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: - '@typescript-eslint/eslint-plugin': ^6.0.0 - eslint: ^8.0.0 + '@typescript-eslint/eslint-plugin': 6 - 7 + eslint: '8' peerDependenciesMeta: '@typescript-eslint/eslint-plugin': optional: true dependencies: - '@typescript-eslint/eslint-plugin': 6.19.0(@typescript-eslint/parser@6.19.0)(eslint@8.56.0)(typescript@5.3.3) - eslint: 8.56.0 + '@typescript-eslint/eslint-plugin': 7.1.0(@typescript-eslint/parser@7.1.0)(eslint@8.57.0)(typescript@5.3.3) + eslint: 8.57.0 eslint-rule-composer: 0.3.0 dev: true @@ -9058,26 +9273,21 @@ packages: estraverse: 5.3.0 dev: true - /eslint-visitor-keys@2.1.0: - resolution: {integrity: sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==} - engines: {node: '>=10'} - dev: true - /eslint-visitor-keys@3.4.3: resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dev: true - /eslint@8.56.0: - resolution: {integrity: sha512-Go19xM6T9puCOWntie1/P997aXxFsOi37JIHRWI514Hc6ZnaHGKY9xFhrU65RT6CcBEzZoGG1e6Nq+DT04ZtZQ==} + /eslint@8.57.0: + resolution: {integrity: sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} hasBin: true dependencies: - '@eslint-community/eslint-utils': 4.4.0(eslint@8.56.0) + '@eslint-community/eslint-utils': 4.4.0(eslint@8.57.0) '@eslint-community/regexpp': 4.10.0 '@eslint/eslintrc': 2.1.4 - '@eslint/js': 8.56.0 - '@humanwhocodes/config-array': 0.11.13 + '@eslint/js': 8.57.0 + '@humanwhocodes/config-array': 0.11.14 '@humanwhocodes/module-importer': 1.0.1 '@nodelib/fs.walk': 1.2.8 '@ungap/structured-clone': 1.2.0 @@ -9098,7 +9308,7 @@ packages: glob-parent: 6.0.2 globals: 13.24.0 graphemer: 1.4.0 - ignore: 5.3.0 + ignore: 5.3.1 imurmurhash: 0.1.4 is-glob: 4.0.3 is-path-inside: 3.0.3 @@ -9119,8 +9329,8 @@ packages: resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dependencies: - acorn: 8.11.2 - acorn-jsx: 5.3.2(acorn@8.11.2) + acorn: 
8.11.3 + acorn-jsx: 5.3.2(acorn@8.11.3) eslint-visitor-keys: 3.4.3 dev: true @@ -9158,6 +9368,12 @@ packages: resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} dev: true + /estree-walker@3.0.3: + resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} + dependencies: + '@types/estree': 1.0.5 + dev: true + /esutils@2.0.3: resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} engines: {node: '>=0.10.0'} @@ -9192,7 +9408,7 @@ packages: human-signals: 5.0.0 is-stream: 3.0.0 merge-stream: 2.0.0 - npm-run-path: 5.2.0 + npm-run-path: 5.3.0 onetime: 6.0.0 signal-exit: 4.1.0 strip-final-newline: 3.0.0 @@ -9286,6 +9502,10 @@ packages: boolean: 3.2.0 dev: false + /fast-safe-stringify@2.1.1: + resolution: {integrity: sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==} + dev: true + /fast-shallow-equal@1.0.0: resolution: {integrity: sha512-HPtaa38cPgWvaCFmRNhlc6NG7pv6NUHqjPgVAkWGoB9mQMwYB27/K0CvOM5Czy+qpT3e8XJ6Q4aPAnzpNpzNaw==} dev: false @@ -9294,8 +9514,8 @@ packages: resolution: {integrity: sha512-bijHueCGd0LqqNK9b5oCMHc0MluJAx0cwqASgbWMvkO01lCYgIhacVRLcaDz3QnyYIRNJRDwMb41VuT6pHJ91Q==} dev: false - /fastq@1.16.0: - resolution: {integrity: sha512-ifCoaXsDrsdkWTtiNJX5uzHDsrck5TzfKKDcuFFTIrrc/BS076qgEIfoIy1VeZqViznfKiysPYTh/QeHtnIsYA==} + /fastq@1.17.1: + resolution: {integrity: sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==} dependencies: reusify: 1.0.4 dev: true @@ -9312,6 +9532,16 @@ packages: pend: 1.2.0 dev: true + /fetch-blob@2.1.2: + resolution: {integrity: sha512-YKqtUDwqLyfyMnmbw8XD6Q8j9i/HggKtPEI+pZ1+8bvheBu78biSmNaXWusx1TauGqtUUGx/cBb1mKdq2rLYow==} + engines: {node: ^10.17.0 || >=12.3.0} + peerDependencies: + domexception: '*' + peerDependenciesMeta: + domexception: + optional: true + dev: true + /fetch-retry@5.0.6: resolution: {integrity: sha512-3yurQZ2hD9VISAhJJP9bpYFNQrHHBXE2JxxjY5aLEcDi46RmAzJE2OC9FAde0yis5ElW0jTTzs0zfg/Cca4XqQ==} dev: true @@ -9343,28 +9573,6 @@ packages: minimatch: 5.1.6 dev: true - /filing-cabinet@3.3.1: - resolution: {integrity: sha512-renEK4Hh6DUl9Vl22Y3cxBq1yh8oNvbAdXnhih0wVpmea+uyKjC9K4QeRjUaybIiIewdzfum+Fg15ZqJ/GyCaA==} - engines: {node: '>=10.13.0'} - hasBin: true - dependencies: - app-module-path: 2.2.0 - commander: 2.20.3 - debug: 4.3.4 - enhanced-resolve: 5.15.0 - is-relative-path: 1.0.2 - module-definition: 3.4.0 - module-lookup-amd: 7.0.1 - resolve: 1.22.8 - resolve-dependency-path: 2.0.0 - sass-lookup: 3.0.0 - stylus-lookup: 3.0.2 - tsconfig-paths: 3.15.0 - typescript: 3.9.10 - transitivePeerDependencies: - - supports-color - dev: true - /fill-range@7.0.1: resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==} engines: {node: '>=8'} @@ -9372,6 +9580,15 @@ packages: to-regex-range: 5.0.1 dev: true + /filter-iterator@0.0.1: + resolution: {integrity: sha512-v4lhL7Qa8XpbW3LN46CEnmhGk3eHZwxfNl5at20aEkreesht4YKb/Ba3BUIbnPhAC/r3dmu7ABaGk6MAvh2alA==} + dev: true + + /filter-obj@1.1.0: + resolution: {integrity: sha512-8rXg1ZnX7xzy2NGDVkBVaAy+lSlPNwad13BtgSlLuxfIslyt5Vg64U7tFcCt4WS1R0hvtnQybT/IyCkGZ3DpXQ==} + engines: {node: '>=0.10.0'} + dev: true + /filter-obj@5.1.0: resolution: {integrity: sha512-qWeTREPoT7I0bifpPUXtxkZJ1XJzxWtfoWWkdVGqa+eCr3SHW/Ocp89o8vLvbUuQnadybJpjOKu4V+RwO6sGng==} engines: {node: 
'>=14.16'} @@ -9441,27 +9658,22 @@ packages: resolution: {integrity: sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==} engines: {node: ^10.12.0 || >=12.0.0} dependencies: - flatted: 3.2.9 + flatted: 3.3.1 keyv: 4.5.4 rimraf: 3.0.2 dev: true - /flatted@3.2.9: - resolution: {integrity: sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==} + /flatted@3.3.1: + resolution: {integrity: sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==} dev: true - /flatten@1.0.3: - resolution: {integrity: sha512-dVsPA/UwQ8+2uoFe5GHtiBMu48dWLTdsuEd7CKGlZlD78r1TTWBvDuFaFGKCo/ZfEr95Uk56vZoX86OsHkUeIg==} - deprecated: flatten is deprecated in favor of utility frameworks such as lodash. - dev: true - - /flow-parser@0.227.0: - resolution: {integrity: sha512-nOygtGKcX/siZK/lFzpfdHEfOkfGcTW7rNroR1Zsz6T/JxSahPALXVt5qVHq/fgvMJuv096BTKbgxN3PzVBaDA==} + /flow-parser@0.229.2: + resolution: {integrity: sha512-T72XV2Izvl7yV6dhHhLaJ630Y6vOZJl6dnOS6dN0bPW9ExuREu7xGAf3omtcxX76POTuux9TJPu9ZpS48a/rdw==} engines: {node: '>=0.4.0'} dev: true - /focus-lock@1.0.0: - resolution: {integrity: sha512-a8Ge6cdKh9za/GZR/qtigTAk7SrGore56EFcoMshClsh7FLk1zwszc/ltuMfKhx56qeuyL/jWQ4J4axou0iJ9w==} + /focus-lock@1.3.3: + resolution: {integrity: sha512-hfXkZha7Xt4RQtrL1HBfspAuIj89Y0fb6GX0dfJilb8S2G/lvL4akPAcHq6xoD2NuZnDMCnZL/zQesMyeu6Psg==} engines: {node: '>=10'} dependencies: tslib: 2.6.2 @@ -9519,6 +9731,42 @@ packages: '@emotion/is-prop-valid': 0.8.8 dev: false + /framer-motion@11.0.5(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-Lb0EYbQcSK/pgyQUJm+KzsQrKrJRX9sFRyzl9hSr9gFG4Mk8yP7BjhuxvRXzblOM/+JxycrJdCDVmOQBsjpYlw==} + peerDependencies: + react: ^18.0.0 + react-dom: ^18.0.0 + peerDependenciesMeta: + react: + optional: true + react-dom: + optional: true + dependencies: + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + tslib: 2.6.2 + optionalDependencies: + '@emotion/is-prop-valid': 0.8.8 + dev: false + + /framer-motion@11.0.6(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-BpO3mWF8UwxzO3Ca5AmSkrg14QYTeJa9vKgoLOoBdBdTPj0e81i1dMwnX6EQJXRieUx20uiDBXq8bA6y7N6b8Q==} + peerDependencies: + react: ^18.0.0 + react-dom: ^18.0.0 + peerDependenciesMeta: + react: + optional: true + react-dom: + optional: true + dependencies: + react: 18.2.0 + react-dom: 18.2.0(react@18.2.0) + tslib: 2.6.2 + optionalDependencies: + '@emotion/is-prop-valid': 0.8.8 + dev: false + /framesync@6.1.2: resolution: {integrity: sha512-jBTqhX6KaQVDyus8muwZbBeGGP0XgujBRbQ7gM7BRdS3CadCZIHiawyzYLnafYcvZIh5j8WE7cxZKFn7dXhu9g==} dependencies: @@ -9534,6 +9782,15 @@ packages: resolution: {integrity: sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==} dev: true + /fs-extra@10.1.0: + resolution: {integrity: sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==} + engines: {node: '>=12'} + dependencies: + graceful-fs: 4.2.11 + jsonfile: 6.1.0 + universalify: 2.0.1 + dev: true + /fs-extra@11.1.1: resolution: {integrity: sha512-MGIE4HOvQCeUCzmlHs0vXpih4ysz4wg9qiSAu6cd42lVwPbTM1TjV7RusoyQqMmk/95gdQZX72u+YW+c3eEpFQ==} engines: {node: '>=14.14'} @@ -9587,9 +9844,9 @@ packages: resolution: {integrity: sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 + es-abstract: 1.22.4 
functions-have-names: 1.2.3 dev: true @@ -9602,22 +9859,6 @@ packages: engines: {node: '>=6.9.0'} dev: true - /get-amd-module-type@3.0.2: - resolution: {integrity: sha512-PcuKwB8ouJnKuAPn6Hk3UtdfKoUV3zXRqVEvj8XGIXqjWfgd1j7QGdXy5Z9OdQfzVt1Sk29HVe/P+X74ccOuqw==} - engines: {node: '>=6.0'} - dependencies: - ast-module-types: 3.0.0 - node-source-walk: 4.3.0 - dev: true - - /get-amd-module-type@4.1.0: - resolution: {integrity: sha512-0e/eK6vTGCnSfQ6eYs3wtH05KotJYIP7ZIZEueP/KlA+0dIAEs8bYFvOd/U56w1vfjhJqBagUxVMyy9Tr/cViQ==} - engines: {node: '>=12'} - dependencies: - ast-module-types: 4.0.0 - node-source-walk: 5.0.2 - dev: true - /get-caller-file@2.0.5: resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} engines: {node: 6.* || 8.* || >= 10.*} @@ -9627,13 +9868,15 @@ packages: resolution: {integrity: sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==} dev: true - /get-intrinsic@1.2.2: - resolution: {integrity: sha512-0gSo4ml/0j98Y3lngkFEot/zhiCeWsbYIlZ+uZOVgzLyLaUw7wxUL+nCTP0XJvJg1AXulJRI3UJi8GsbDuxdGA==} + /get-intrinsic@1.2.4: + resolution: {integrity: sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==} + engines: {node: '>= 0.4'} dependencies: + es-errors: 1.3.0 function-bind: 1.1.2 - has-proto: 1.0.1 + has-proto: 1.0.3 has-symbols: 1.0.3 - hasown: 2.0.0 + hasown: 2.0.1 /get-nonce@1.0.1: resolution: {integrity: sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==} @@ -9644,10 +9887,6 @@ packages: engines: {node: '>=12.17'} dev: true - /get-own-enumerable-property-symbols@3.0.2: - resolution: {integrity: sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==} - dev: true - /get-package-type@0.1.0: resolution: {integrity: sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==} engines: {node: '>=8.0.0'} @@ -9668,23 +9907,24 @@ packages: engines: {node: '>=16'} dev: true - /get-symbol-description@1.0.0: - resolution: {integrity: sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==} + /get-symbol-description@1.0.2: + resolution: {integrity: sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 - get-intrinsic: 1.2.2 + call-bind: 1.0.7 + es-errors: 1.3.0 + get-intrinsic: 1.2.4 dev: true /giget@1.2.1: resolution: {integrity: sha512-4VG22mopWtIeHwogGSy1FViXVo0YT+m6BrqZfz0JJFwbSsePsCdOzdLIIli5BtMp7Xe8f/o2OmBpQX2NBOC24g==} hasBin: true dependencies: - citty: 0.1.5 + citty: 0.1.6 consola: 3.2.3 defu: 6.1.4 - node-fetch-native: 1.6.1 - nypm: 0.3.4 + node-fetch-native: 1.6.2 + nypm: 0.3.6 ohash: 1.1.3 pathe: 1.1.2 tar: 6.2.0 @@ -9770,7 +10010,7 @@ packages: array-union: 2.1.0 dir-glob: 3.0.1 fast-glob: 3.3.2 - ignore: 5.3.0 + ignore: 5.3.1 merge2: 1.4.1 slash: 3.0.0 dev: true @@ -9779,18 +10019,10 @@ packages: resolution: {integrity: sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==} dev: true - /gonzales-pe@4.3.0: - resolution: {integrity: sha512-otgSPpUmdWJ43VXyiNgEYE4luzHCL2pz4wQ0OnDluC6Eg4Ko3Vexy/SrSynglw/eR+OhkzmqFCZa/OFa/RgAOQ==} - engines: {node: '>=0.6.0'} - hasBin: true - dependencies: - minimist: 1.2.8 - dev: true - /gopd@1.0.1: resolution: {integrity: 
sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==} dependencies: - get-intrinsic: 1.2.2 + get-intrinsic: 1.2.4 /graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} @@ -9838,28 +10070,32 @@ packages: engines: {node: '>=8'} dev: true - /has-property-descriptors@1.0.1: - resolution: {integrity: sha512-VsX8eaIewvas0xnvinAe9bw4WfIeODpGYikiWYLH+dma0Jw6KHYqWiWfhQlgOVK8D6PvjubK5Uc4P0iIhIcNVg==} - dependencies: - get-intrinsic: 1.2.2 + /has-own-property@0.1.0: + resolution: {integrity: sha512-14qdBKoonU99XDhWcFKZTShK+QV47qU97u8zzoVo9cL5TZ3BmBHXogItSt9qJjR0KUMFRhcCW8uGIGl8nkl7Aw==} + dev: true - /has-proto@1.0.1: - resolution: {integrity: sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==} + /has-property-descriptors@1.0.2: + resolution: {integrity: sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==} + dependencies: + es-define-property: 1.0.0 + + /has-proto@1.0.3: + resolution: {integrity: sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==} engines: {node: '>= 0.4'} /has-symbols@1.0.3: resolution: {integrity: sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==} engines: {node: '>= 0.4'} - /has-tostringtag@1.0.0: - resolution: {integrity: sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==} + /has-tostringtag@1.0.2: + resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} engines: {node: '>= 0.4'} dependencies: has-symbols: 1.0.3 dev: true - /hasown@2.0.0: - resolution: {integrity: sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==} + /hasown@2.0.1: + resolution: {integrity: sha512-1/th4MHjnwncwXsIW6QMzlvYL9kG5e/CpVvLRZe4XPa8TOUNbCELqmvhDmnkNsAjwaG4+I8gJJL0JBvTTLO9qA==} engines: {node: '>= 0.4'} dependencies: function-bind: 1.1.2 @@ -9879,6 +10115,20 @@ packages: resolution: {integrity: sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==} dev: true + /hosted-git-info@4.1.0: + resolution: {integrity: sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==} + engines: {node: '>=10'} + dependencies: + lru-cache: 6.0.0 + dev: true + + /hosted-git-info@7.0.1: + resolution: {integrity: sha512-+K84LB1DYwMHoHSgaOY/Jfhw3ucPmSET5v98Ke/HdNSw4a0UktWzyW1mjhjpuxxTqOOsfWT/7iVshHmVZ4IpOA==} + engines: {node: ^16.14.0 || >=18.0.0} + dependencies: + lru-cache: 10.2.0 + dev: true + /html-parse-stringify@3.0.1: resolution: {integrity: sha512-KknJ50kTInJ7qIScF3jeaFRpMpE8/lfiTdzf/twXyPBLAGrLRTmkz3AdTnKeh40X8k9L2fdYwEp/42WGXIRGcg==} dependencies: @@ -9925,18 +10175,18 @@ packages: resolution: {integrity: sha512-ygGZLjmXfPHj+ZWh6LwbC37l43MhfztxetbFCoYTM2VjkIUpeHgSNn7QIyVFj7YQ1Wl9Cbw5sholVJPzWvC2MQ==} dev: false - /i18next-http-backend@2.4.2: - resolution: {integrity: sha512-wKrgGcaFQ4EPjfzBTjzMU0rbFTYpa0S5gv9N/d8WBmWS64+IgJb7cHddMvV+tUkse7vUfco3eVs2lB+nJhPo3w==} + /i18next-http-backend@2.5.0: + resolution: {integrity: sha512-Z/aQsGZk1gSxt2/DztXk92DuDD20J+rNudT7ZCdTrNOiK8uQppfvdjq9+DFQfpAnFPn3VZS+KQIr1S/W1KxhpQ==} dependencies: cross-fetch: 4.0.0 transitivePeerDependencies: - encoding dev: false - /i18next@23.7.16: - resolution: {integrity: 
sha512-SrqFkMn9W6Wb43ZJ9qrO6U2U4S80RsFMA7VYFSqp7oc7RllQOYDCdRfsse6A7Cq/V8MnpxKvJCYgM8++27n4Fw==} + /i18next@23.10.0: + resolution: {integrity: sha512-/TgHOqsa7/9abUKJjdPeydoyDc0oTi/7u9F8lMSj6ufg4cbC1Oj3f/Jja7zj7WRIhEQKB7Q4eN6y68I9RDxxGQ==} dependencies: - '@babel/runtime': 7.23.7 + '@babel/runtime': 7.23.9 dev: false /iconv-lite@0.4.24: @@ -9950,12 +10200,16 @@ packages: resolution: {integrity: sha512-8Sb3veuYCyrZL+VBt9LJfZjLUPWVvqn8tG28VqYNFCo43KHcKuq+b4EiXGeuaLAQWL2YmyDgMp2aSpH9JHsEQg==} dev: false + /identity-function@1.0.0: + resolution: {integrity: sha512-kNrgUK0qI+9qLTBidsH85HjDLpZfrrS0ElquKKe/fJFdB3D7VeKdXXEvOPDUHSHOzdZKCAAaQIWWyp0l2yq6pw==} + dev: true + /ieee754@1.2.1: resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} dev: true - /ignore@5.3.0: - resolution: {integrity: sha512-g7dmpshy+gD7mh88OC9NwSGTKoc3kyLAZQRU1mt53Aw/vnvfXnbC+F/7F7QoYVKbV+KNvJx8wArewKy1vXMtlg==} + /ignore@5.3.1: + resolution: {integrity: sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==} engines: {node: '>= 4'} dev: true @@ -9985,8 +10239,8 @@ packages: engines: {node: '>=8'} dev: true - /indexes-of@1.0.1: - resolution: {integrity: sha512-bup+4tap3Hympa+JBJUG7XuOsdNQ6fxt0MHyXMKuLBKn0OqsTfvUxkUrroEX1+B2VsSHvCjiIcZVxRtYa4nllA==} + /individual@3.0.0: + resolution: {integrity: sha512-rUY5vtT748NMRbEMrTNiFfy29BgGZwGXUi2NFUVMWQrogSLzlJvQV9eeMWi+g1aVaQ53tpyLAQtd5x/JH0Nh1g==} dev: true /inflight@1.0.6: @@ -10000,10 +10254,6 @@ packages: resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} dev: true - /ini@1.3.8: - resolution: {integrity: sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==} - dev: true - /inline-style-prefixer@7.0.0: resolution: {integrity: sha512-I7GEdScunP1dQ6IM2mQWh6v0mOYdYmH3Bp31UecKdrcUgcURTcctSe1IECdUznSHKSmsHtjrT3CwCPI1pyxfUQ==} dependencies: @@ -10011,13 +10261,13 @@ packages: fast-loops: 1.1.3 dev: false - /internal-slot@1.0.6: - resolution: {integrity: sha512-Xj6dv+PsbtwyPpEflsejS+oIZxmMlV44zAhG479uYu89MsjcYOhCFnNyKrkJrihbsiasQyY0afoCl/9BLR65bg==} + /internal-slot@1.0.7: + resolution: {integrity: sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==} engines: {node: '>= 0.4'} dependencies: - get-intrinsic: 1.2.2 - hasown: 2.0.0 - side-channel: 1.0.4 + es-errors: 1.3.0 + hasown: 2.0.1 + side-channel: 1.0.5 dev: true /invariant@2.2.4: @@ -10025,8 +10275,8 @@ packages: dependencies: loose-envify: 1.4.0 - /ip@2.0.0: - resolution: {integrity: sha512-WKa+XuLG1A1R0UWhl2+1XQSi+fZWMsYKffMZTTYsiZaUD8k2yDAj5atimTUD2TZkyCkNEeYE5NhFZmupOGtjYQ==} + /ip@2.0.1: + resolution: {integrity: sha512-lJUL9imLTNi1ZfXT+DU6rBBdbiKGBuay9B6xGSPVjUeQwaH1RIGqef8RZkUtHioLmSNpPR5M4HVKJGm1j8FWVQ==} dev: true /ipaddr.js@1.9.1: @@ -10043,16 +10293,16 @@ packages: resolution: {integrity: sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 - has-tostringtag: 1.0.0 + call-bind: 1.0.7 + has-tostringtag: 1.0.2 dev: true - /is-array-buffer@3.0.2: - resolution: {integrity: sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==} + /is-array-buffer@3.0.4: + resolution: {integrity: sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==} + engines: {node: '>= 0.4'} dependencies: - 
call-bind: 1.0.5 - get-intrinsic: 1.2.2 - is-typed-array: 1.1.12 + call-bind: 1.0.7 + get-intrinsic: 1.2.4 dev: true /is-arrayish@0.2.1: @@ -10062,7 +10312,7 @@ packages: resolution: {integrity: sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA==} engines: {node: '>= 0.4'} dependencies: - has-tostringtag: 1.0.0 + has-tostringtag: 1.0.2 dev: true /is-bigint@1.0.4: @@ -10082,8 +10332,8 @@ packages: resolution: {integrity: sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 - has-tostringtag: 1.0.0 + call-bind: 1.0.7 + has-tostringtag: 1.0.2 dev: true /is-callable@1.2.7: @@ -10094,13 +10344,13 @@ packages: /is-core-module@2.13.1: resolution: {integrity: sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==} dependencies: - hasown: 2.0.0 + hasown: 2.0.1 /is-date-object@1.0.5: resolution: {integrity: sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==} engines: {node: '>= 0.4'} dependencies: - has-tostringtag: 1.0.0 + has-tostringtag: 1.0.2 dev: true /is-deflate@1.0.0: @@ -10121,7 +10371,7 @@ packages: /is-finalizationregistry@1.0.2: resolution: {integrity: sha512-0by5vtUJs8iFQb5TYUHHPudOR+qXYIMKtiUzvLIZITZUjknFmziyBJuLhVRc+Ds0dREFlskDNJKYIdIzu/9pfw==} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 dev: true /is-fullwidth-code-point@3.0.0: @@ -10133,7 +10383,7 @@ packages: resolution: {integrity: sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==} engines: {node: '>= 0.4'} dependencies: - has-tostringtag: 1.0.0 + has-tostringtag: 1.0.2 dev: true /is-glob@4.0.3: @@ -10153,6 +10403,11 @@ packages: engines: {node: '>=8'} dev: true + /is-iterable@1.1.1: + resolution: {integrity: sha512-EdOZCr0NsGE00Pot+x1ZFx9MJK3C6wy91geZpXwvwexDLJvA4nzYyZf7r+EIwSeVsOLDdBz7ATg9NqKTzuNYuQ==} + engines: {node: '>= 4'} + dev: true + /is-map@2.0.2: resolution: {integrity: sha512-cOZFQQozTha1f4MxLFzlgKYPTyj26picdZTx82hbc/Xf4K/tZOOXSCkMvU4pKioRXGDLJRn0GM7Upe7kR721yg==} dev: true @@ -10161,12 +10416,12 @@ packages: resolution: {integrity: sha512-E+zBKpQ2t6MEo1VsonYmluk9NxGrbzpeeLC2xIViuO2EjU2xsXsBPwTr3Ykv9l08UYEVEdWeRZNouaZqF6RN0w==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 dev: true - /is-negative-zero@2.0.2: - resolution: {integrity: sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==} + /is-negative-zero@2.0.3: + resolution: {integrity: sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==} engines: {node: '>= 0.4'} dev: true @@ -10174,7 +10429,12 @@ packages: resolution: {integrity: sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==} engines: {node: '>= 0.4'} dependencies: - has-tostringtag: 1.0.0 + has-tostringtag: 1.0.2 + dev: true + + /is-number@4.0.0: + resolution: {integrity: sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ==} + engines: {node: '>=0.10.0'} dev: true /is-number@7.0.0: @@ -10182,11 +10442,6 @@ packages: engines: {node: '>=0.12.0'} dev: true - /is-obj@1.0.1: - resolution: {integrity: sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==} - engines: {node: '>=0.10.0'} - dev: true - /is-path-cwd@2.2.0: resolution: {integrity: 
sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==} engines: {node: '>=6'} @@ -10213,27 +10468,19 @@ packages: resolution: {integrity: sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 - has-tostringtag: 1.0.0 - dev: true - - /is-regexp@1.0.0: - resolution: {integrity: sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==} - engines: {node: '>=0.10.0'} - dev: true - - /is-relative-path@1.0.2: - resolution: {integrity: sha512-i1h+y50g+0hRbBD+dbnInl3JlJ702aar58snAeX+MxBAPvzXGej7sYoPMhlnykabt0ZzCJNBEyzMlekuQZN7fA==} + call-bind: 1.0.7 + has-tostringtag: 1.0.2 dev: true /is-set@2.0.2: resolution: {integrity: sha512-+2cnTEZeY5z/iXGbLhPrOAaK/Mau5k5eXq9j14CpRTftq0pAJu2MwVRSZhyZWBzx3o6X795Lz6Bpb6R0GKf37g==} dev: true - /is-shared-array-buffer@1.0.2: - resolution: {integrity: sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==} + /is-shared-array-buffer@1.0.3: + resolution: {integrity: sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg==} + engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 dev: true /is-stream@2.0.1: @@ -10250,7 +10497,7 @@ packages: resolution: {integrity: sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==} engines: {node: '>= 0.4'} dependencies: - has-tostringtag: 1.0.0 + has-tostringtag: 1.0.2 dev: true /is-symbol@1.0.4: @@ -10260,11 +10507,11 @@ packages: has-symbols: 1.0.3 dev: true - /is-typed-array@1.1.12: - resolution: {integrity: sha512-Z14TF2JNG8Lss5/HMqt0//T9JeHXttXy5pH/DBU4vi98ozO2btxzq9MwYDZYnKwU8nRsz/+GVFVRDq3DkVuSPg==} + /is-typed-array@1.1.13: + resolution: {integrity: sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==} engines: {node: '>= 0.4'} dependencies: - which-typed-array: 1.1.13 + which-typed-array: 1.1.14 dev: true /is-unicode-supported@0.1.0: @@ -10272,15 +10519,6 @@ packages: engines: {node: '>=10'} dev: true - /is-url-superb@4.0.0: - resolution: {integrity: sha512-GI+WjezhPPcbM+tqE9LnmsY5qqjwHzTvjJ36wxYX5ujNXefSUJ/T17r5bqDV8yLhcgB59KTPNOc9O9cmHTPWsA==} - engines: {node: '>=10'} - dev: true - - /is-url@1.2.4: - resolution: {integrity: sha512-ITvGim8FhRiYe4IQ5uHSkj7pVaPDrCTkNd3yq3cV7iZAcJdHTUMPMEHcqSOy9xZ9qFenQCvi+2wjH9a1nXqHww==} - dev: true - /is-weakmap@2.0.1: resolution: {integrity: sha512-NSBR4kH5oVj1Uwvv970ruUkCV7O1mzgVFO4/rev2cLRda9Tm9HrL70ZPut4rOHgY0FNrUu9BCbXA2sdQ+x0chA==} dev: true @@ -10288,14 +10526,14 @@ packages: /is-weakref@1.0.2: resolution: {integrity: sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 dev: true /is-weakset@2.0.2: resolution: {integrity: sha512-t2yVvttHkQktwnNNmBQ98AhENLdPUTDTE21uPqAQ0ARwQfGeQKRVS0NNurH7bTf7RrvcVn1OOge45CnBeHCSmg==} dependencies: - call-bind: 1.0.5 - get-intrinsic: 1.2.2 + call-bind: 1.0.7 + get-intrinsic: 1.2.4 dev: true /is-wsl@2.2.0: @@ -10317,6 +10555,11 @@ packages: resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} dev: true + /isexe@3.1.1: + resolution: {integrity: sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==} + engines: {node: '>=16'} + dev: true + /isobject@3.0.1: resolution: {integrity: 
sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==} engines: {node: '>=0.10.0'} @@ -10331,8 +10574,8 @@ packages: resolution: {integrity: sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==} engines: {node: '>=8'} dependencies: - '@babel/core': 7.23.7 - '@babel/parser': 7.23.6 + '@babel/core': 7.23.9 + '@babel/parser': 7.23.9 '@istanbuljs/schema': 0.1.3 istanbul-lib-coverage: 3.2.2 semver: 6.3.1 @@ -10340,14 +10583,19 @@ packages: - supports-color dev: true + /iterable-lookahead@1.0.0: + resolution: {integrity: sha512-hJnEP2Xk4+44DDwJqUQGdXal5VbyeWLaPyDl2AQc242Zr7iqz4DgpQOrEzglWVMGHMDCkguLHEKxd1+rOsmgSQ==} + engines: {node: '>=4'} + dev: true + /iterator.prototype@1.1.2: resolution: {integrity: sha512-DR33HMMr8EzwuRL8Y9D3u2BMj8+RqSE850jfGu59kS7tbmPLzGkZmVSfyCFSDxuZiEY6Rzt3T2NA/qU+NwVj1w==} dependencies: define-properties: 1.2.1 - get-intrinsic: 1.2.2 + get-intrinsic: 1.2.4 has-symbols: 1.0.3 - reflect.getprototypeof: 1.0.4 - set-function-name: 2.0.1 + reflect.getprototypeof: 1.0.5 + set-function-name: 2.0.2 dev: true /its-fine@1.1.1(react@18.2.0): @@ -10385,7 +10633,7 @@ packages: dependencies: '@jest/types': 29.6.3 '@types/graceful-fs': 4.1.9 - '@types/node': 20.11.5 + '@types/node': 20.11.20 anymatch: 3.1.3 fb-watchman: 2.0.2 graceful-fs: 4.2.11 @@ -10403,7 +10651,7 @@ packages: engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} dependencies: '@jest/types': 27.5.1 - '@types/node': 20.11.5 + '@types/node': 20.11.20 dev: true /jest-regex-util@29.6.3: @@ -10416,7 +10664,7 @@ packages: engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} dependencies: '@jest/types': 29.6.3 - '@types/node': 20.11.5 + '@types/node': 20.11.20 chalk: 4.1.2 ci-info: 3.9.0 graceful-fs: 4.2.11 @@ -10427,12 +10675,17 @@ packages: resolution: {integrity: sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} dependencies: - '@types/node': 20.11.5 + '@types/node': 20.11.20 jest-util: 29.7.0 merge-stream: 2.0.0 supports-color: 8.1.1 dev: true + /jiti@1.21.0: + resolution: {integrity: sha512-gFqAIbuKyyso/3G2qhiO2OM6shY6EPP/R0+mkDbyspxKazh8BXDC5FiFsUjlczgdNz/vfra0da2y+aHrusLG/Q==} + hasBin: true + dev: true + /jju@1.4.0: resolution: {integrity: sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA==} dev: true @@ -10444,6 +10697,10 @@ packages: /js-tokens@4.0.0: resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + /js-tokens@8.0.3: + resolution: {integrity: sha512-UfJMcSJc+SEXEl9lH/VLHSZbThQyLpw1vLO1Lb+j4RWDvG3N2f7yj3PVQA3cmkTBNldJ9eFnM+xEXxHIXrYiJw==} + dev: true + /js-yaml@3.14.1: resolution: {integrity: sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==} hasBin: true @@ -10459,8 +10716,8 @@ packages: argparse: 2.0.1 dev: true - /jscodeshift@0.15.1(@babel/preset-env@7.23.8): - resolution: {integrity: sha512-hIJfxUy8Rt4HkJn/zZPU9ChKfKZM1342waJ1QC2e2YsPcWhM+3BJ4dcfQCzArTrk1jJeNLB341H+qOcEHRxJZg==} + /jscodeshift@0.15.2(@babel/preset-env@7.23.9): + resolution: {integrity: sha512-FquR7Okgmc4Sd0aEDwqho3rEiKR3BdvuG9jfdHjLJ6JQoWSMpavug3AoIfnfWhxFlf+5pzQh8qjqz0DWFrNQzA==} hasBin: true peerDependencies: '@babel/preset-env': ^7.1.6 @@ -10468,20 +10725,20 @@ packages: '@babel/preset-env': optional: true dependencies: - '@babel/core': 7.23.7 - '@babel/parser': 7.23.6 - 
'@babel/plugin-transform-class-properties': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-modules-commonjs': 7.23.3(@babel/core@7.23.7) - '@babel/plugin-transform-nullish-coalescing-operator': 7.23.4(@babel/core@7.23.7) - '@babel/plugin-transform-optional-chaining': 7.23.4(@babel/core@7.23.7) - '@babel/plugin-transform-private-methods': 7.23.3(@babel/core@7.23.7) - '@babel/preset-env': 7.23.8(@babel/core@7.23.7) - '@babel/preset-flow': 7.23.3(@babel/core@7.23.7) - '@babel/preset-typescript': 7.23.3(@babel/core@7.23.7) - '@babel/register': 7.23.7(@babel/core@7.23.7) - babel-core: 7.0.0-bridge.0(@babel/core@7.23.7) + '@babel/core': 7.23.9 + '@babel/parser': 7.23.9 + '@babel/plugin-transform-class-properties': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-modules-commonjs': 7.23.3(@babel/core@7.23.9) + '@babel/plugin-transform-nullish-coalescing-operator': 7.23.4(@babel/core@7.23.9) + '@babel/plugin-transform-optional-chaining': 7.23.4(@babel/core@7.23.9) + '@babel/plugin-transform-private-methods': 7.23.3(@babel/core@7.23.9) + '@babel/preset-env': 7.23.9(@babel/core@7.23.9) + '@babel/preset-flow': 7.23.3(@babel/core@7.23.9) + '@babel/preset-typescript': 7.23.3(@babel/core@7.23.9) + '@babel/register': 7.23.7(@babel/core@7.23.9) + babel-core: 7.0.0-bridge.0(@babel/core@7.23.9) chalk: 4.1.2 - flow-parser: 0.227.0 + flow-parser: 0.229.2 graceful-fs: 4.2.11 micromatch: 4.0.5 neo-async: 2.6.2 @@ -10511,6 +10768,11 @@ packages: /json-parse-even-better-errors@2.3.1: resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} + /json-parse-even-better-errors@3.0.1: + resolution: {integrity: sha512-aatBvbL26wVUCLmbWdCpeu9iF5wOyWpagiKkInA+kfws3sWdBrTnsvN2CKcyCYyUrc7rebNBlK6+kteg7ksecg==} + engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} + dev: true + /json-schema-traverse@0.4.1: resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} dev: true @@ -10519,6 +10781,10 @@ packages: resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} dev: true + /json-stringify-safe@5.0.1: + resolution: {integrity: sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==} + dev: true + /json5@1.0.2: resolution: {integrity: sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==} hasBin: true @@ -10532,6 +10798,10 @@ packages: hasBin: true dev: true + /jsonc-parser@3.2.1: + resolution: {integrity: sha512-AilxAyFOAcK5wA1+LeaySVBrHsGQvUFCDWXKpZjzaL0PqW+xfBOttn8GNtWKFWqneyMZj41MWF9Kl6iPWLwgOA==} + dev: true + /jsondiffpatch@0.6.0: resolution: {integrity: sha512-3QItJOXp2AP1uv7waBkao5nCvhEv+QmJAd38Ybq7wNI74Q+BBmnLn4EDKz6yI9xGAIQoUF87qHt+kc1IVxB4zQ==} engines: {node: ^18.0.0 || >=20.0.0} @@ -10587,12 +10857,50 @@ packages: engines: {node: '>= 8'} dev: false + /knip@5.0.2(@types/node@20.11.20)(typescript@5.3.3): + resolution: {integrity: sha512-ylmXi/CaR1rjOl8KqLsei7075cCsHttMNFwoYlBM7WMvd2Rd4oOfic9KqJFkDONXedsEsiiwnI3+u6GT6KOUuw==} + engines: {node: '>=18.6.0'} + hasBin: true + peerDependencies: + '@types/node': '>=18' + typescript: '>=5.0.4' + dependencies: + '@ericcornelissen/bash-parser': 0.5.2 + '@nodelib/fs.walk': 2.0.0 + '@npmcli/map-workspaces': 3.0.4 + '@npmcli/package-json': 5.0.0 + '@pnpm/logger': 5.0.0 + '@pnpm/workspace.pkgs-graph': 2.0.15(@pnpm/logger@5.0.0) + '@snyk/github-codeowners': 1.1.0 + '@types/node': 20.11.20 + 
'@types/picomatch': 2.3.3 + easy-table: 1.2.0 + fast-glob: 3.3.2 + jiti: 1.21.0 + js-yaml: 4.1.0 + micromatch: 4.0.5 + minimist: 1.2.8 + picocolors: 1.0.0 + picomatch: 4.0.1 + pretty-ms: 9.0.0 + semver: 7.6.0 + smol-toml: 1.1.4 + strip-json-comments: 5.0.1 + summary: 2.1.0 + typescript: 5.3.3 + zod: 3.22.4 + zod-validation-error: 3.0.2(zod@3.22.4) + transitivePeerDependencies: + - bluebird + - domexception + dev: true + /kolorist@1.8.0: resolution: {integrity: sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==} dev: true - /konva@9.3.1: - resolution: {integrity: sha512-KXHJVUrYVWFIJUbnlw8QUZDBGC1jx6wwRsGaByPm/2yk78xw7hKquCMNEd9EtVqGz/jUkKFJAWom77TLB+zVOA==} + /konva@9.3.3: + resolution: {integrity: sha512-cg/AHxnfawZ1rKxygCnzx0TZY7hQiQiAKgAHPinEwMn49MVrBkeKLj2d0EaleoFG/0y0XhEKTD0dFZiPPdWlCQ==} dev: false /lazy-universal-dotenv@4.0.0: @@ -10600,7 +10908,7 @@ packages: engines: {node: '>=14.0.0'} dependencies: app-root-dir: 1.0.2 - dotenv: 16.3.1 + dotenv: 16.4.5 dotenv-expand: 10.0.0 dev: true @@ -10628,11 +10936,29 @@ packages: ts-error: 1.0.6 dev: false + /load-json-file@6.2.0: + resolution: {integrity: sha512-gUD/epcRms75Cw8RT1pUdHugZYM5ce64ucs2GEISABwkRsOQr0q2wm/MV2TKThycIe5e0ytRweW2RZxclogCdQ==} + engines: {node: '>=8'} + dependencies: + graceful-fs: 4.2.11 + parse-json: 5.2.0 + strip-bom: 4.0.0 + type-fest: 0.6.0 + dev: true + /load-tsconfig@0.2.5: resolution: {integrity: sha512-IXO6OCs9yg8tMKzfPZ1YmheJbZCiEsnBdcB03l0OcfK9prKnJb96siuHCr5Fl37/yo9DnKU+TLpxzTUspw9shg==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} dev: true + /local-pkg@0.5.0: + resolution: {integrity: sha512-ok6z3qlYyCDS4ZEU27HaU6x/xZa9Whf8jD4ptH5UZTQYZVYeb9bnZ3ojVhiJNLiXK1Hfc0GNbLXcmZ5plLDDBg==} + engines: {node: '>=14'} + dependencies: + mlly: 1.6.1 + pkg-types: 1.0.3 + dev: true + /locate-path@3.0.0: resolution: {integrity: sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==} engines: {node: '>=6'} @@ -10659,6 +10985,10 @@ packages: resolution: {integrity: sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==} dev: false + /lodash.curry@4.1.1: + resolution: {integrity: sha512-/u14pXGviLaweY5JI0IUzgzF2J6Ne8INyzAZjImcryjgkZ+ebruBxy2/JaOOkTqScddcYtakjhSaeemV8lR0tA==} + dev: true + /lodash.debounce@4.0.8: resolution: {integrity: sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==} dev: true @@ -10703,8 +11033,8 @@ packages: get-func-name: 2.0.2 dev: true - /lru-cache@10.1.0: - resolution: {integrity: sha512-/1clY/ui8CzjKFyjdvwPWJUYKiFVXG2I2cY0ssG7h4+hwk+XOIX7ZSG9Q7TW8TW3Kp3BUSqgFWBLgL4PJ+Blag==} + /lru-cache@10.2.0: + resolution: {integrity: sha512-2bIM8x+VAf6JT4bKAljS1qUWgMsqZRPGJS6FSahIMPVvctcNhyVp7AJu7quxOW9jwkryBReKZY5tY5JYv2n/7Q==} engines: {node: 14 || >=16.14} dev: true @@ -10721,46 +11051,10 @@ packages: yallist: 4.0.0 dev: true - /lz-string@1.5.0: - resolution: {integrity: sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==} - hasBin: true - dev: true - - /madge@6.1.0(typescript@5.3.3): - resolution: {integrity: sha512-irWhT5RpFOc6lkzGHKLihonCVgM0YtfNUh4IrFeW3EqHpnt/JHUG3z26j8PeJEktCGB4tmGOOOJi1Rl/ACWucQ==} - engines: {node: '>=14'} - hasBin: true - peerDependencies: - typescript: ^3.9.5 || ^4.9.5 || ^5 - peerDependenciesMeta: - typescript: - optional: true + /magic-string@0.16.0: + resolution: {integrity: 
sha512-c4BEos3y6G2qO0B9X7K0FVLOPT9uGrjYwYRLFmDqyl5YMboUviyecnXWp94fJTSMwPw2/sf+CEYt5AGpmklkkQ==} dependencies: - chalk: 4.1.2 - commander: 7.2.0 - commondir: 1.0.1 - debug: 4.3.4 - dependency-tree: 9.0.0 - detective-amd: 4.2.0 - detective-cjs: 4.1.0 - detective-es6: 3.0.1 - detective-less: 1.0.2 - detective-postcss: 6.1.3 - detective-sass: 4.1.3 - detective-scss: 3.1.1 - detective-stylus: 2.0.1 - detective-typescript: 9.1.1 - ora: 5.4.1 - pluralize: 8.0.0 - precinct: 8.3.1 - pretty-ms: 7.0.1 - rc: 1.2.8 - stream-to-array: 2.3.0 - ts-graphviz: 1.8.1 - typescript: 5.3.3 - walkdir: 0.4.1 - transitivePeerDependencies: - - supports-color + vlq: 0.2.3 dev: true /magic-string@0.27.0: @@ -10770,8 +11064,8 @@ packages: '@jridgewell/sourcemap-codec': 1.4.15 dev: true - /magic-string@0.30.5: - resolution: {integrity: sha512-7xlpfBaQaP/T6Vh8MO/EqXSW5En6INHEvEXQiuff7Gku0PWjU3uf6w/j9o7O+SpB5fOAkrI5HeoNgwjEO0pFsA==} + /magic-string@0.30.7: + resolution: {integrity: sha512-8vBuFF/I/+OSLRmdf2wwFCJCz+nSn0m6DPvGH1fS/KiQoSaR+sETbov0eIk9KhEKy8CYqIkIAnbohxT/4H0kuA==} engines: {node: '>=12'} dependencies: '@jridgewell/sourcemap-codec': 1.4.15 @@ -10798,12 +11092,24 @@ packages: tmpl: 1.0.5 dev: true + /map-age-cleaner@0.1.3: + resolution: {integrity: sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w==} + engines: {node: '>=6'} + dependencies: + p-defer: 1.0.0 + dev: true + + /map-obj@2.0.0: + resolution: {integrity: sha512-TzQSV2DiMYgoF5RycneKVUzIa9bQsj/B3tTgsE3dOGqlzHnGIDaC7XBE7grnA+8kZPnfqSGFe95VHc2oc0VFUQ==} + engines: {node: '>=4'} + dev: true + /map-or-similar@1.5.0: resolution: {integrity: sha512-0aF7ZmVon1igznGI4VS30yugpduQW3y3GkcgGJOp7d8x8QrizhigUxjI/m2UojsXXto+jLAH3KSz+xOJTiORjg==} dev: true - /markdown-to-jsx@7.4.0(react@18.2.0): - resolution: {integrity: sha512-zilc+MIkVVXPyTb4iIUTIz9yyqfcWjszGXnwF9K/aiBWcHXFcmdEMTkG01/oQhwSCH7SY1BnG6+ev5BzWmbPrg==} + /markdown-to-jsx@7.4.1(react@18.2.0): + resolution: {integrity: sha512-GbrbkTnHp9u6+HqbPRFJbObi369AgJNXi/sGqq5HRsoZW063xR1XDCaConqq+whfEIAlzB1YPnOgsPc7B7bc/A==} engines: {node: '>= 10'} peerDependencies: react: '>= 0.14.0' @@ -10830,6 +11136,22 @@ packages: engines: {node: '>= 0.6'} dev: true + /mem@6.1.1: + resolution: {integrity: sha512-Ci6bIfq/UgcxPTYa8dQQ5FY3BzKkT894bwXWXxC/zqs0XgMO2cT20CGkOqda7gZNkmK5VP4x89IGZ6K7hfbn3Q==} + engines: {node: '>=8'} + dependencies: + map-age-cleaner: 0.1.3 + mimic-fn: 3.1.0 + dev: true + + /mem@8.1.1: + resolution: {integrity: sha512-qFCFUDs7U3b8mBDPyz5EToEKoAkgCzqquIgi9nkkR9bixxOVOre+09lbuH7+9Kn2NFpm56M3GUWVbU2hQgdACA==} + engines: {node: '>=10'} + dependencies: + map-age-cleaner: 0.1.3 + mimic-fn: 3.1.0 + dev: true + /memoize-one@6.0.0: resolution: {integrity: sha512-rkpe71W0N0c0Xz6QD0eJETuWAJGnJ9afsl1srmwPrI+yBCkge5EycXXbYRyvL29zZVUWQCY7InPRCv3GDXuZNw==} dev: false @@ -10895,6 +11217,11 @@ packages: engines: {node: '>=6'} dev: true + /mimic-fn@3.1.0: + resolution: {integrity: sha512-Ysbi9uYW9hFyfrThdDEQuykN4Ey6BuwPD2kpI5ES/nFTDn/98yxYNLZJcgUAKPT/mcrLLKaGzJR9YVxJrIdASQ==} + engines: {node: '>=8'} + dev: true + /mimic-fn@4.0.0: resolution: {integrity: sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==} engines: {node: '>=12'} @@ -10971,36 +11298,13 @@ packages: hasBin: true dev: true - /module-definition@3.4.0: - resolution: {integrity: sha512-XxJ88R1v458pifaSkPNLUTdSPNVGMP2SXVncVmApGO+gAfrLANiYe6JofymCzVceGOMwQE2xogxBSc8uB7XegA==} - engines: {node: '>=6.0'} - hasBin: true + /mlly@1.6.1: + resolution: {integrity: 
sha512-vLgaHvaeunuOXHSmEbZ9izxPx3USsk8KCQ8iC+aTlp5sKRSoZvwhHh5L9VbKSaVC6sJDqbyohIS76E2VmHIPAA==} dependencies: - ast-module-types: 3.0.0 - node-source-walk: 4.3.0 - dev: true - - /module-definition@4.1.0: - resolution: {integrity: sha512-rHXi/DpMcD2qcKbPCTklDbX9lBKJrUSl971TW5l6nMpqKCIlzJqmQ8cfEF5M923h2OOLHPDVlh5pJxNyV+AJlw==} - engines: {node: '>=12'} - hasBin: true - dependencies: - ast-module-types: 4.0.0 - node-source-walk: 5.0.2 - dev: true - - /module-lookup-amd@7.0.1: - resolution: {integrity: sha512-w9mCNlj0S8qviuHzpakaLVc+/7q50jl9a/kmJ/n8bmXQZgDPkQHnPBb8MUOYh3WpAYkXuNc2c+khsozhIp/amQ==} - engines: {node: '>=10.13.0'} - hasBin: true - dependencies: - commander: 2.20.3 - debug: 4.3.4 - glob: 7.2.3 - requirejs: 2.3.6 - requirejs-config-file: 4.0.0 - transitivePeerDependencies: - - supports-color + acorn: 8.11.3 + pathe: 1.1.2 + pkg-types: 1.0.3 + ufo: 1.4.0 dev: true /moo@0.5.2: @@ -11046,6 +11350,11 @@ packages: hasBin: true dev: true + /nanostores@0.10.0: + resolution: {integrity: sha512-Poy5+9wFXOD0jAstn4kv9n686U2BFw48z/W8lms8cS8lcbRz7BU20JxZ3e/kkKQVfRrkm4yLWCUA6GQINdvJCQ==} + engines: {node: ^18.0.0 || >=20.0.0} + dev: false + /nanostores@0.9.5: resolution: {integrity: sha512-Z+p+g8E7yzaWwOe5gEUB2Ox0rCEeXWYIZWmYvw/ajNYX8DlXdMvMDj8DWfM/subqPAcsf8l8Td4iAwO1DeIIRQ==} engines: {node: ^16.0.0 || ^18.0.0 || >=20.0.0} @@ -11055,6 +11364,18 @@ packages: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} dev: true + /ndjson@2.0.0: + resolution: {integrity: sha512-nGl7LRGrzugTtaFcJMhLbpzJM6XdivmbkdlaGcrk/LXg2KL/YBC6z1g70xh0/al+oFuVFP8N8kiWRucmeEH/qQ==} + engines: {node: '>=10'} + hasBin: true + dependencies: + json-stringify-safe: 5.0.1 + minimist: 1.2.8 + readable-stream: 3.6.2 + split2: 3.2.2 + through2: 4.0.2 + dev: true + /nearley@2.20.1: resolution: {integrity: sha512-+Mc8UaAebFzgV+KpI5n7DasuuQCHA89dmwm7JXw3TV43ukfNQ9DnBH3Mdb2g/I4Fdxc26pwimBWvjIw0UAILSQ==} hasBin: true @@ -11086,8 +11407,8 @@ packages: minimatch: 3.1.2 dev: true - /node-fetch-native@1.6.1: - resolution: {integrity: sha512-bW9T/uJDPAJB2YNYEpWzE54U5O3MQidXsOyTfnbKYtTtFexRvGzb1waphBN4ZwP6EcIvYYEOwW0b72BpAqydTw==} + /node-fetch-native@1.6.2: + resolution: {integrity: sha512-69mtXOFZ6hSkYiXAVB5SqaRvrbITC/NPyqv7yuu/qw0nmgPyYbIMYYNIDhNtwPrzk0ptrimrLz/hhjvm4w5Z+w==} dev: true /node-fetch@2.7.0: @@ -11101,6 +11422,16 @@ packages: dependencies: whatwg-url: 5.0.0 + /node-fetch@3.0.0-beta.9: + resolution: {integrity: sha512-RdbZCEynH2tH46+tj0ua9caUHVWrd/RHnRfvly2EVdqGmI3ndS1Vn/xjm5KuGejDt2RNDQsVRLPNd2QPwcewVg==} + engines: {node: ^10.17 || >=12.3} + dependencies: + data-uri-to-buffer: 3.0.1 + fetch-blob: 2.1.2 + transitivePeerDependencies: + - domexception + dev: true + /node-int64@0.4.0: resolution: {integrity: sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==} dev: true @@ -11109,20 +11440,6 @@ packages: resolution: {integrity: sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==} dev: true - /node-source-walk@4.3.0: - resolution: {integrity: sha512-8Q1hXew6ETzqKRAs3jjLioSxNfT1cx74ooiF8RlAONwVMcfq+UdzLC2eB5qcPldUxaE5w3ytLkrmV1TGddhZTA==} - engines: {node: '>=6.0'} - dependencies: - '@babel/parser': 7.23.6 - dev: true - - /node-source-walk@5.0.2: - resolution: {integrity: sha512-Y4jr/8SRS5hzEdZ7SGuvZGwfORvNsSsNRwDXx5WisiqzsVfeftDvRgfeqWNgZvWSJbgubTRVRYBzK6UO+ErqjA==} - engines: {node: '>=12'} - dependencies: - '@babel/parser': 7.23.6 - dev: true - 
/normalize-package-data@2.5.0: resolution: {integrity: sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==} dependencies: @@ -11132,11 +11449,53 @@ packages: validate-npm-package-license: 3.0.4 dev: true + /normalize-package-data@6.0.0: + resolution: {integrity: sha512-UL7ELRVxYBHBgYEtZCXjxuD5vPxnmvMGq0jp/dGPKKrN7tfsBh2IY7TlJ15WWwdjRWD3RJbnsygUurTK3xkPkg==} + engines: {node: ^16.14.0 || >=18.0.0} + dependencies: + hosted-git-info: 7.0.1 + is-core-module: 2.13.1 + semver: 7.6.0 + validate-npm-package-license: 3.0.4 + dev: true + /normalize-path@3.0.0: resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} engines: {node: '>=0.10.0'} dev: true + /npm-install-checks@6.3.0: + resolution: {integrity: sha512-W29RiK/xtpCGqn6f3ixfRYGk+zRyr+Ew9F2E20BfXxT5/euLdA/Nm7fO7OeTGuAmTs30cpgInyJ0cYe708YTZw==} + engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} + dependencies: + semver: 7.6.0 + dev: true + + /npm-normalize-package-bin@3.0.1: + resolution: {integrity: sha512-dMxCf+zZ+3zeQZXKxmyuCKlIDPGuv8EF940xbkC4kQVDTtqoh6rJFO+JTKSA6/Rwi0getWmtuy4Itup0AMcaDQ==} + engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} + dev: true + + /npm-package-arg@11.0.1: + resolution: {integrity: sha512-M7s1BD4NxdAvBKUPqqRW957Xwcl/4Zvo8Aj+ANrzvIPzGJZElrH7Z//rSaec2ORcND6FHHLnZeY8qgTpXDMFQQ==} + engines: {node: ^16.14.0 || >=18.0.0} + dependencies: + hosted-git-info: 7.0.1 + proc-log: 3.0.0 + semver: 7.6.0 + validate-npm-package-name: 5.0.0 + dev: true + + /npm-pick-manifest@9.0.0: + resolution: {integrity: sha512-VfvRSs/b6n9ol4Qb+bDwNGUXutpy76x6MARw/XssevE0TnctIKcmklJZM5Z7nqs5z5aW+0S63pgCNbpkUNNXBg==} + engines: {node: ^16.14.0 || >=18.0.0} + dependencies: + npm-install-checks: 6.3.0 + npm-normalize-package-bin: 3.0.1 + npm-package-arg: 11.0.1 + semver: 7.6.0 + dev: true + /npm-run-path@4.0.1: resolution: {integrity: sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==} engines: {node: '>=8'} @@ -11144,22 +11503,22 @@ packages: path-key: 3.1.1 dev: true - /npm-run-path@5.2.0: - resolution: {integrity: sha512-W4/tgAXFqFA0iL7fk0+uQ3g7wkL8xJmx3XdK0VGb4cHW//eZTtKGvFBBoRKVTpY7n6ze4NL9ly7rgXcHufqXKg==} + /npm-run-path@5.3.0: + resolution: {integrity: sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} dependencies: path-key: 4.0.0 dev: true - /nypm@0.3.4: - resolution: {integrity: sha512-1JLkp/zHBrkS3pZ692IqOaIKSYHmQXgqfELk6YTOfVBnwealAmPA1q2kKK7PHJAHSMBozerThEFZXP3G6o7Ukg==} + /nypm@0.3.6: + resolution: {integrity: sha512-2CATJh3pd6CyNfU5VZM7qSwFu0ieyabkEdnogE30Obn1czrmOYiZ8DOZLe1yBdLKWoyD3Mcy2maUs+0MR3yVjQ==} engines: {node: ^14.16.0 || >=16.10.0} hasBin: true dependencies: - citty: 0.1.5 + citty: 0.1.6 execa: 8.0.1 pathe: 1.1.2 - ufo: 1.3.2 + ufo: 1.4.0 dev: true /object-assign@4.1.1: @@ -11174,7 +11533,7 @@ packages: resolution: {integrity: sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 dev: true @@ -11182,11 +11541,20 @@ packages: resolution: {integrity: sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==} engines: {node: '>= 0.4'} + /object-pairs@0.1.0: + resolution: {integrity: sha512-3ECr6K831I4xX/Mduxr9UC+HPOz/d6WKKYj9p4cmC8Lg8p7g8gitzsxNX5IWlSIgFWN/a4JgrJaoAMKn20oKwA==} + dev: true + + 
/object-values@1.0.0: + resolution: {integrity: sha512-+8hwcz/JnQ9EpLIXzN0Rs7DLsBpJNT/xYehtB/jU93tHYr5BFEO8E+JGQNOSqE7opVzz5cGksKFHt7uUJVLSjQ==} + engines: {node: '>=0.10.0'} + dev: true + /object.assign@4.1.5: resolution: {integrity: sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 has-symbols: 1.0.3 object-keys: 1.1.1 @@ -11196,43 +11564,44 @@ packages: resolution: {integrity: sha512-jCBs/0plmPsOnrKAfFQXRG2NFjlhZgjjcBLSmTnEhU8U6vVTsVe8ANeQJCHTl3gSsI4J+0emOoCgoKlmQPMgmA==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 + es-abstract: 1.22.4 dev: true /object.fromentries@2.0.7: resolution: {integrity: sha512-UPbPHML6sL8PI/mOqPwsH4G6iyXcCGzLin8KvEPenOZN5lpCNBZZQ+V62vdjB1mQHrmqGQt5/OJzemUA+KJmEA==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 + es-abstract: 1.22.4 dev: true - /object.groupby@1.0.1: - resolution: {integrity: sha512-HqaQtqLnp/8Bn4GL16cj+CUYbnpe1bh0TtEaWvybszDG4tgxCJuRpV8VGuvNaI1fAnI4lUJzDG55MXcOH4JZcQ==} + /object.groupby@1.0.2: + resolution: {integrity: sha512-bzBq58S+x+uo0VjurFT0UktpKHOZmv4/xePiOA1nbB9pMqpGK7rUPNgf+1YC+7mE+0HzhTMqNUuCqvKhj6FnBw==} dependencies: - call-bind: 1.0.5 + array.prototype.filter: 1.0.3 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 - get-intrinsic: 1.2.2 + es-abstract: 1.22.4 + es-errors: 1.3.0 dev: true /object.hasown@1.1.3: resolution: {integrity: sha512-fFI4VcYpRHvSLXxP7yiZOMAd331cPfd2p7PFDVbgUsYOfCT3tICVqXWngbjr4m49OvsBwUBQ6O2uQoJvy3RexA==} dependencies: define-properties: 1.2.1 - es-abstract: 1.22.3 + es-abstract: 1.22.4 dev: true /object.values@1.1.7: resolution: {integrity: sha512-aU6xnDFYT3x17e/f0IiiwlGPTy2jzMySGfUB4fq6z7CV8l85CWHDk5ErhyhpfDHhrOMwGFhSQkhMGHaIotA6Ng==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 + es-abstract: 1.22.4 dev: true /ohash@1.1.3: @@ -11284,15 +11653,15 @@ packages: resolution: {integrity: sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw==} dev: true - /openapi-typescript@6.7.3: - resolution: {integrity: sha512-es3mGcDXV6TKPo6n3aohzHm0qxhLyR39MhF6mkD1FwFGjhxnqMqfSIgM0eCpInZvqatve4CxmXcMZw3jnnsaXw==} + /openapi-typescript@6.7.4: + resolution: {integrity: sha512-EZyeW9Wy7UDCKv0iYmKrq2pVZtquXiD/YHiUClAKqiMi42nodx/EQH11K6fLqjt1IZlJmVokrAsExsBMM2RROQ==} hasBin: true dependencies: ansi-colors: 4.1.3 fast-glob: 3.3.2 js-yaml: 4.1.0 supports-color: 9.4.0 - undici: 5.28.2 + undici: 5.28.3 yargs-parser: 21.1.1 dev: true @@ -11323,33 +11692,24 @@ packages: wcwidth: 1.0.1 dev: true - /overlayscrollbars-react@0.5.3(overlayscrollbars@2.4.6)(react@18.2.0): - resolution: {integrity: sha512-mq9D9tbfSeq0cti1kKMf3B3AzsEGwHcRIDX/K49CvYkHz/tKeU38GiahDkIPKTMEAp6lzKCo4x1eJZA6ZFYOxQ==} - peerDependencies: - overlayscrollbars: ^2.0.0 - react: '>=16.8.0' - dependencies: - overlayscrollbars: 2.4.6 - react: 18.2.0 - dev: false - - /overlayscrollbars-react@0.5.4(overlayscrollbars@2.4.7)(react@18.2.0): + /overlayscrollbars-react@0.5.4(overlayscrollbars@2.5.0)(react@18.2.0): resolution: {integrity: sha512-FPKx9XnXovTnI4+2JXig5uEaTLSEJ6svOwPzIfBBXTHBRNsz2+WhYUmfM0K/BNYxjgDEwuPm+NQhEoOA0RoG1g==} peerDependencies: overlayscrollbars: ^2.0.0 react: '>=16.8.0' dependencies: - overlayscrollbars: 2.4.7 + 
overlayscrollbars: 2.5.0 react: 18.2.0 dev: false - /overlayscrollbars@2.4.6: - resolution: {integrity: sha512-C7tmhetwMv9frEvIT/RfkAVEgbjRNz/Gh2zE8BVmN+jl35GRaAnz73rlGQCMRoC2arpACAXyMNnJkzHb7GBrcA==} + /overlayscrollbars@2.5.0: + resolution: {integrity: sha512-CWVC2dwS07XZfLHDm5GmZN1iYggiJ8Vufnvzwt0gwR9Yz1hVckKeTxg7VILZeYVGhDYJHZ1Xc8Xfys5dWZ1qiA==} dev: false - /overlayscrollbars@2.4.7: - resolution: {integrity: sha512-02X2/nHno35dzebCx+EO2tRDaKAOltZqUKdUqvq3Pt8htCuhJbYi+mjr0CYerVeGRRoZ2Uo6/8XrNg//DJJ+GA==} - dev: false + /p-defer@1.0.0: + resolution: {integrity: sha512-wB3wfAxZpk2AzOfUMJNL+d36xothRSyj8EXOa4f6GMqYDN9BJaaSISbsk+wS9abmnebVw95C2Kb5t85UmpCxuw==} + engines: {node: '>=4'} + dev: true /p-limit@2.3.0: resolution: {integrity: sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==} @@ -11365,6 +11725,13 @@ packages: yocto-queue: 0.1.0 dev: true + /p-limit@5.0.0: + resolution: {integrity: sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==} + engines: {node: '>=18'} + dependencies: + yocto-queue: 1.0.0 + dev: true + /p-locate@3.0.0: resolution: {integrity: sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==} engines: {node: '>=6'} @@ -11393,6 +11760,14 @@ packages: aggregate-error: 3.1.0 dev: true + /p-memoize@4.0.1: + resolution: {integrity: sha512-km0sP12uE0dOZ5qP+s7kGVf07QngxyG0gS8sYFvFWhqlgzOsSy+m71aUejf/0akxj5W7gE//2G74qTv6b4iMog==} + engines: {node: '>=10'} + dependencies: + mem: 6.1.1 + mimic-fn: 3.1.0 + dev: true + /p-try@2.2.0: resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==} engines: {node: '>=6'} @@ -11417,9 +11792,16 @@ packages: json-parse-even-better-errors: 2.3.1 lines-and-columns: 1.2.4 - /parse-ms@2.1.0: - resolution: {integrity: sha512-kHt7kzLoS9VBZfUsiKjv43mr91ea+U05EyKkEtqp7vNbHxmaVuEqN7XxeEVnGrMtYOAxGrDElSi96K7EgO1zCA==} - engines: {node: '>=6'} + /parse-ms@4.0.0: + resolution: {integrity: sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw==} + engines: {node: '>=18'} + dev: true + + /parse-npm-tarball-url@3.0.0: + resolution: {integrity: sha512-InpdgIdNe5xWMEUcrVQUniQKwnggBtJ7+SCwh7zQAZwbbIYZV9XdgJyhtmDSSvykFyQXoe4BINnzKTfCwWLs5g==} + engines: {node: '>=8.15'} + dependencies: + semver: 6.3.1 dev: true /parseurl@1.3.3: @@ -11463,10 +11845,17 @@ packages: resolution: {integrity: sha512-MkhCqzzBEpPvxxQ71Md0b1Kk51W01lrYvlMzSUaIzNsODdd7mqhiimSZlr+VegAz5Z6Vzt9Xg2ttE//XBhH3EQ==} engines: {node: '>=16 || 14 >=14.17'} dependencies: - lru-cache: 10.1.0 + lru-cache: 10.2.0 minipass: 7.0.4 dev: true + /path-temp@2.1.0: + resolution: {integrity: sha512-cMMJTAZlion/RWRRC48UbrDymEIt+/YSD/l8NqjneyDw2rDOBQcP5yRkMB4CYGn47KMhZvbblBP7Z79OsMw72w==} + engines: {node: '>=8.15'} + dependencies: + unique-string: 2.0.0 + dev: true + /path-to-regexp@0.1.7: resolution: {integrity: sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==} dev: true @@ -11504,6 +11893,11 @@ packages: engines: {node: '>=8.6'} dev: true + /picomatch@4.0.1: + resolution: {integrity: sha512-xUXwsxNjwTQ8K3GnT4pCJm+xq3RUPQbmkYJTP5aFIfNIvbcc/4MUxgBaaRSZJ6yGJZiGSyYlM6MzwTsRk8SYCg==} + engines: {node: '>=12'} + dev: true + /pify@4.0.1: resolution: {integrity: sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==} engines: {node: '>=6'} @@ -11535,41 +11929,28 @@ packages: find-up: 5.0.0 dev: true - 
/pluralize@8.0.0: - resolution: {integrity: sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==} - engines: {node: '>=4'} + /pkg-types@1.0.3: + resolution: {integrity: sha512-nN7pYi0AQqJnoLPC9eHFQ8AcyaixBUOwvqc5TDnIKCMEE6I0y8P7OKA7fPexsXGCGxQDl/cmrLAp26LhcwxZ4A==} + dependencies: + jsonc-parser: 3.2.1 + mlly: 1.6.1 + pathe: 1.1.2 dev: true - /polished@4.2.2: - resolution: {integrity: sha512-Sz2Lkdxz6F2Pgnpi9U5Ng/WdWAUZxmHrNPoVlm3aAemxoy2Qy7LGjQg4uf8qKelDAUW94F4np3iH2YPf2qefcQ==} + /polished@4.3.1: + resolution: {integrity: sha512-OBatVyC/N7SCW/FaDHrSd+vn0o5cS855TOmYi4OkdWUMSJCET/xip//ch8xGUvtr3i44X9LVyWwQlRMTN3pwSA==} engines: {node: '>=10'} dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 dev: true - /postcss-values-parser@2.0.1: - resolution: {integrity: sha512-2tLuBsA6P4rYTNKCXYG/71C7j1pU6pK503suYOmn4xYrQIzW+opD+7FAFNuGSdZC/3Qfy334QbeMu7MEb8gOxg==} - engines: {node: '>=6.14.4'} - dependencies: - flatten: 1.0.3 - indexes-of: 1.0.1 - uniq: 1.0.1 + /possible-typed-array-names@1.0.0: + resolution: {integrity: sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==} + engines: {node: '>= 0.4'} dev: true - /postcss-values-parser@6.0.2(postcss@8.4.33): - resolution: {integrity: sha512-YLJpK0N1brcNJrs9WatuJFtHaV9q5aAOj+S4DI5S7jgHlRfm0PIbDCAFRYMQD5SHq7Fy6xsDhyutgS0QOAs0qw==} - engines: {node: '>=10'} - peerDependencies: - postcss: ^8.2.9 - dependencies: - color-name: 1.1.4 - is-url-superb: 4.0.0 - postcss: 8.4.33 - quote-unquote: 1.0.0 - dev: true - - /postcss@8.4.33: - resolution: {integrity: sha512-Kkpbhhdjw2qQs2O2DGX+8m5OVqEcbB9HRBvuYM9pgrjEFUg30A9LmXNlTAUj4S9kgtGyrMbTzVjH7E+s5Re2yg==} + /postcss@8.4.35: + resolution: {integrity: sha512-u5U8qYpBCpN13BsiEB0CbR1Hhh4Gc0zLFuedrHJKMctHCHAGrMdG0PRM/KErzAL3CU6/eckEtmHNB3x6e3c0vA==} engines: {node: ^10 || ^12 || >=14} dependencies: nanoid: 3.3.7 @@ -11577,49 +11958,6 @@ packages: source-map-js: 1.0.2 dev: true - /precinct@8.3.1: - resolution: {integrity: sha512-pVppfMWLp2wF68rwHqBIpPBYY8Kd12lDhk8LVQzOwqllifVR15qNFyod43YLyFpurKRZQKnE7E4pofAagDOm2Q==} - engines: {node: ^10.13 || ^12 || >=14} - hasBin: true - dependencies: - commander: 2.20.3 - debug: 4.3.4 - detective-amd: 3.1.2 - detective-cjs: 3.1.3 - detective-es6: 2.2.2 - detective-less: 1.0.2 - detective-postcss: 4.0.0 - detective-sass: 3.0.2 - detective-scss: 2.0.2 - detective-stylus: 1.0.3 - detective-typescript: 7.0.2 - module-definition: 3.4.0 - node-source-walk: 4.3.0 - transitivePeerDependencies: - - supports-color - dev: true - - /precinct@9.2.1: - resolution: {integrity: sha512-uzKHaTyiVejWW7VJtHInb9KBUq9yl9ojxXGujhjhDmPon2wgZPBKQIKR+6csGqSlUeGXAA4MEFnU6DesxZib+A==} - engines: {node: ^12.20.0 || ^14.14.0 || >=16.0.0} - hasBin: true - dependencies: - '@dependents/detective-less': 3.0.2 - commander: 9.5.0 - detective-amd: 4.2.0 - detective-cjs: 4.1.0 - detective-es6: 3.0.1 - detective-postcss: 6.1.3 - detective-sass: 4.1.3 - detective-scss: 3.1.1 - detective-stylus: 3.0.0 - detective-typescript: 9.1.1 - module-definition: 4.1.0 - node-source-walk: 5.0.2 - transitivePeerDependencies: - - supports-color - dev: true - /prelude-ls@1.2.1: resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} engines: {node: '>= 0.8.0'} @@ -11631,21 +11969,12 @@ packages: hasBin: true dev: true - /prettier@3.2.4: - resolution: {integrity: sha512-FWu1oLHKCrtpO1ypU6J0SbK2d9Ckwysq6bHj/uaCP26DxrPpppCLQRGVuqAxSTvhF00AcvDRyYrLNW7ocBhFFQ==} + 
/prettier@3.2.5: + resolution: {integrity: sha512-3/GWa9aOC0YeD7LUfvOG2NiDyhOWRvt1k+rcKhOuYnMY24iiCphgneUfJDyFXd6rZCAnuLBv6UeAULtrhT/F4A==} engines: {node: '>=14'} hasBin: true dev: true - /pretty-format@27.5.1: - resolution: {integrity: sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - dependencies: - ansi-regex: 5.0.1 - ansi-styles: 5.2.0 - react-is: 17.0.2 - dev: true - /pretty-format@29.7.0: resolution: {integrity: sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -11660,11 +11989,16 @@ packages: engines: {node: '>= 0.8'} dev: true - /pretty-ms@7.0.1: - resolution: {integrity: sha512-973driJZvxiGOQ5ONsFhOF/DtzPMOMtgC11kCpUrPGMTgqp2q/1gwzCquocrN33is0VZ5GFHXZYMM9l6h67v2Q==} - engines: {node: '>=10'} + /pretty-ms@9.0.0: + resolution: {integrity: sha512-E9e9HJ9R9NasGOgPaPE8VMeiPKAyWR5jcFpNnwIejslIhWqdqOrb2wShBsncMPUb+BcCd2OPYfh7p2W6oemTng==} + engines: {node: '>=18'} dependencies: - parse-ms: 2.1.0 + parse-ms: 4.0.0 + dev: true + + /proc-log@3.0.0: + resolution: {integrity: sha512-++Vn7NS4Xf9NacaU9Xq3URUuqZETPsf8L4j5/ckhaRYsfPeRyzGw+iDjFhV/Jr3uNmTvvddEJFWh5R1gRgUH8A==} + engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} dev: true /process-nextick-args@2.0.1: @@ -11681,6 +12015,23 @@ packages: engines: {node: '>=0.4.0'} dev: true + /promise-inflight@1.0.1: + resolution: {integrity: sha512-6zWPyEOFaQBJYcGMHBKTKJ3u6TBsnMFOIZSa6ce1e/ZrrsOlnHRHbabMjLiBYKp+n44X9eUI6VUPaukCXHuG4g==} + peerDependencies: + bluebird: '*' + peerDependenciesMeta: + bluebird: + optional: true + dev: true + + /promise-retry@2.0.1: + resolution: {integrity: sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==} + engines: {node: '>=10'} + dependencies: + err-code: 2.0.3 + retry: 0.12.0 + dev: true + /prompts@2.4.2: resolution: {integrity: sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==} engines: {node: '>= 6'} @@ -11763,19 +12114,19 @@ packages: resolution: {integrity: sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==} engines: {node: '>=0.6'} dependencies: - side-channel: 1.0.4 + side-channel: 1.0.5 dev: true /qs@6.11.2: resolution: {integrity: sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA==} engines: {node: '>=0.6'} dependencies: - side-channel: 1.0.4 + side-channel: 1.0.5 dev: true - /query-string@8.1.0: - resolution: {integrity: sha512-BFQeWxJOZxZGix7y+SByG3F36dA0AbTy9o6pSmKFcFz7DAj0re9Frkty3saBn3nHo3D0oZJ/+rx3r8H8r8Jbpw==} - engines: {node: '>=14.16'} + /query-string@9.0.0: + resolution: {integrity: sha512-4EWwcRGsO2H+yzq6ddHcVqkCQ2EFUSfDMEjF8ryp8ReymyZhIuaFRGLomeOQLkrzacMHoyky2HW0Qe30UbzkKw==} + engines: {node: '>=18'} dependencies: decode-uri-component: 0.4.1 filter-obj: 5.1.0 @@ -11786,10 +12137,6 @@ packages: resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} dev: true - /quote-unquote@1.0.0: - resolution: {integrity: sha512-twwRO/ilhlG/FIgYeKGFqyHhoEhqgnKVkcmqMKi2r524gz3ZbDTcyFt38E9xjJI2vT+KbRNHVbnJ/e0I25Azwg==} - dev: true - /railroad-diagrams@1.0.0: resolution: {integrity: sha512-cz93DjNeLY0idrCNOH6PviZGRN9GJhsdm9hpn1YCS879fj4W+x5IFJhhkRZcwVgMmFF7R82UA/7Oh+R8lLZg6A==} dev: false @@ -11821,22 +12168,12 @@ packages: unpipe: 1.0.0 dev: true - /rc@1.2.8: 
- resolution: {integrity: sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==} - hasBin: true - dependencies: - deep-extend: 0.6.0 - ini: 1.3.8 - minimist: 1.2.8 - strip-json-comments: 2.0.1 - dev: true - /react-clientside-effect@1.2.6(react@18.2.0): resolution: {integrity: sha512-XGGGRQAKY+q25Lz9a/4EPqom7WRjz3z9R2k4jhVKA/puQFH/5Nt27vFZYql4m4NVNdUvX8PS3O7r/Zzm7cjUlg==} peerDependencies: react: ^15.3.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 react: 18.2.0 dev: false @@ -11861,9 +12198,9 @@ packages: resolution: {integrity: sha512-i8aF1nyKInZnANZ4uZrH49qn1paRgBZ7wZiCNBMnenlPzEv0mRl+ShpTVEI6wZNl8sSc79xZkivtgLKQArcanQ==} engines: {node: '>=16.14.0'} dependencies: - '@babel/core': 7.23.7 - '@babel/traverse': 7.23.7 - '@babel/types': 7.23.6 + '@babel/core': 7.23.9 + '@babel/traverse': 7.23.9 + '@babel/types': 7.23.9 '@types/babel__core': 7.20.5 '@types/babel__traverse': 7.20.5 '@types/doctrine': 0.0.9 @@ -11914,7 +12251,7 @@ packages: peerDependencies: react: '>=16.13.1' dependencies: - '@babel/runtime': 7.23.6 + '@babel/runtime': 7.23.9 react: 18.2.0 dev: false @@ -11922,8 +12259,8 @@ packages: resolution: {integrity: sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==} dev: false - /react-focus-lock@2.9.6(@types/react@18.2.48)(react@18.2.0): - resolution: {integrity: sha512-B7gYnCjHNrNYwY2juS71dHbf0+UpXXojt02svxybj8N5bxceAkzPChKEncHuratjUHkIFNCn06k2qj1DRlzTug==} + /react-focus-lock@2.11.1(@types/react@18.2.57)(react@18.2.0): + resolution: {integrity: sha512-IXLwnTBrLTlKTpASZXqqXJ8oymWrgAlOfuuDYN4XCuN1YJ72dwX198UCaF1QqGUk5C3QOnlMik//n3ufcfe8Ig==} peerDependencies: '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0 react: ^16.8.0 || ^17.0.0 || ^18.0.0 @@ -11931,27 +12268,46 @@ packages: '@types/react': optional: true dependencies: - '@babel/runtime': 7.23.8 - '@types/react': 18.2.48 - focus-lock: 1.0.0 + '@babel/runtime': 7.23.9 + '@types/react': 18.2.57 + focus-lock: 1.3.3 prop-types: 15.8.1 react: 18.2.0 react-clientside-effect: 1.2.6(react@18.2.0) - use-callback-ref: 1.3.1(@types/react@18.2.48)(react@18.2.0) - use-sidecar: 1.1.2(@types/react@18.2.48)(react@18.2.0) + use-callback-ref: 1.3.1(@types/react@18.2.57)(react@18.2.0) + use-sidecar: 1.1.2(@types/react@18.2.57)(react@18.2.0) dev: false - /react-hook-form@7.49.3(react@18.2.0): - resolution: {integrity: sha512-foD6r3juidAT1cOZzpmD/gOKt7fRsDhXXZ0y28+Al1CHgX+AY1qIN9VSIIItXRq1dN68QrRwl1ORFlwjBaAqeQ==} - engines: {node: '>=18', pnpm: '8'} + /react-focus-lock@2.11.1(@types/react@18.2.59)(react@18.2.0): + resolution: {integrity: sha512-IXLwnTBrLTlKTpASZXqqXJ8oymWrgAlOfuuDYN4XCuN1YJ72dwX198UCaF1QqGUk5C3QOnlMik//n3ufcfe8Ig==} + peerDependencies: + '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0 + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + '@types/react': + optional: true + dependencies: + '@babel/runtime': 7.23.9 + '@types/react': 18.2.59 + focus-lock: 1.3.3 + prop-types: 15.8.1 + react: 18.2.0 + react-clientside-effect: 1.2.6(react@18.2.0) + use-callback-ref: 1.3.1(@types/react@18.2.59)(react@18.2.0) + use-sidecar: 1.1.2(@types/react@18.2.59)(react@18.2.0) + dev: false + + /react-hook-form@7.50.1(react@18.2.0): + resolution: {integrity: sha512-3PCY82oE0WgeOgUtIr3nYNNtNvqtJ7BZjsbxh6TnYNbXButaD5WpjOmTjdxZfheuHKR68qfeFnEDVYoSSFPMTQ==} + engines: {node: '>=12.22.0'} peerDependencies: react: ^16.8.0 || ^17 || ^18 dependencies: react: 18.2.0 dev: false - 
/react-hotkeys-hook@4.4.4(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-wzZmqb/Obr0ds9Myc1sIFPJ52GA/Eeg/vXBWV0HA1LvHlVAW5Va3KB0q6EZNlNSHQWscWZ2K8+6w0GYSie2o7A==} + /react-hotkeys-hook@4.5.0(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-Samb85GSgAWFQNvVt3PS90LPPGSf9mkH/r4au81ZP1yOIFayLC3QAvqTgGtJ8YEDMXtPmaVBs6NgipHO6h4Mug==} peerDependencies: react: '>=16.8.1' react-dom: '>=16.8.1' @@ -11960,8 +12316,8 @@ packages: react-dom: 18.2.0(react@18.2.0) dev: false - /react-i18next@14.0.0(i18next@23.7.16)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-OCrS8rHNAmnr8ggGRDxjakzihrMW7HCbsplduTm3EuuQ6fyvWGT41ksZpqbduYoqJurBmEsEVZ1pILSUWkHZng==} + /react-i18next@14.0.5(i18next@23.10.0)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-5+bQSeEtgJrMBABBL5lO7jPdSNAbeAZ+MlFWDw//7FnVacuVu3l9EeWFzBQvZsKy+cihkbThWOAThEdH8YjGEw==} peerDependencies: i18next: '>= 23.2.3' react: '>= 16.8.0' @@ -11973,29 +12329,9 @@ packages: react-native: optional: true dependencies: - '@babel/runtime': 7.23.7 + '@babel/runtime': 7.23.9 html-parse-stringify: 3.0.1 - i18next: 23.7.16 - react: 18.2.0 - react-dom: 18.2.0(react@18.2.0) - dev: false - - /react-i18next@14.0.1(i18next@23.7.16)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-TMV8hFismBmpMdIehoFHin/okfvgjFhp723RYgIqB4XyhDobVMyukyM3Z8wtTRmajyFMZrBl/OaaXF2P6WjUAw==} - peerDependencies: - i18next: '>= 23.2.3' - react: '>= 16.8.0' - react-dom: '*' - react-native: '*' - peerDependenciesMeta: - react-dom: - optional: true - react-native: - optional: true - dependencies: - '@babel/runtime': 7.23.8 - html-parse-stringify: 3.0.1 - i18next: 23.7.16 + i18next: 23.10.0 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) dev: false @@ -12011,10 +12347,6 @@ packages: /react-is@16.13.1: resolution: {integrity: sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==} - /react-is@17.0.2: - resolution: {integrity: sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==} - dev: true - /react-is@18.1.0: resolution: {integrity: sha512-Fl7FuabXsJnV5Q1qIOQwx/sagGF18kogb4gpfcG4gjLBWO0WDiiz1ko/ExayuxE7InyQkBLkxRFG5oxY6Uu3Kg==} dev: true @@ -12023,7 +12355,7 @@ packages: resolution: {integrity: sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==} dev: true - /react-konva@18.2.10(konva@9.3.1)(react-dom@18.2.0)(react@18.2.0): + /react-konva@18.2.10(konva@9.3.3)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-ohcX1BJINL43m4ynjZ24MxFI1syjBdrXhqVxYVDw2rKgr3yuS0x/6m1Y2Z4sl4T/gKhfreBx8KHisd0XC6OT1g==} peerDependencies: konva: ^8.0.1 || ^7.2.5 || ^9.0.0 @@ -12032,7 +12364,7 @@ packages: dependencies: '@types/react-reconciler': 0.28.8 its-fine: 1.1.1(react@18.2.0) - konva: 9.3.1 + konva: 9.3.3 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) react-reconciler: 0.29.0(react@18.2.0) @@ -12050,7 +12382,7 @@ packages: scheduler: 0.23.0 dev: false - /react-redux@9.1.0(@types/react@18.2.48)(react@18.2.0)(redux@5.0.1): + /react-redux@9.1.0(@types/react@18.2.59)(react@18.2.0)(redux@5.0.1): resolution: {integrity: sha512-6qoDzIO+gbrza8h3hjMA9aq4nwVFCKFtY2iLxCtVT38Swyy2C/dJCGBXHeHLtx6qlg/8qzc2MrhOeduf5K32wQ==} peerDependencies: '@types/react': ^18.2.25 @@ -12065,7 +12397,7 @@ packages: redux: optional: true dependencies: - '@types/react': 18.2.48 + '@types/react': 18.2.59 '@types/use-sync-external-store': 0.0.3 react: 18.2.0 redux: 5.0.1 @@ -12077,8 +12409,8 @@ packages: engines: {node: '>=0.10.0'} 
dev: true - /react-remove-scroll-bar@2.3.4(@types/react@18.2.48)(react@18.2.0): - resolution: {integrity: sha512-63C4YQBUt0m6ALadE9XV56hV8BgJWDmmTPY758iIJjfQKt2nYwoUrPk0LXRXcB/yIj82T1/Ixfdpdk68LwIB0A==} + /react-remove-scroll-bar@2.3.5(@types/react@18.2.57)(react@18.2.0): + resolution: {integrity: sha512-3cqjOqg6s0XbOjWvmasmqHch+RLxIEk2r/70rzGXuz3iIGQsQheEQyqYCBb5EECoD01Vo2SIbDqW4paLeLTASw==} engines: {node: '>=10'} peerDependencies: '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0 @@ -12087,12 +12419,28 @@ packages: '@types/react': optional: true dependencies: - '@types/react': 18.2.48 + '@types/react': 18.2.57 react: 18.2.0 - react-style-singleton: 2.2.1(@types/react@18.2.48)(react@18.2.0) + react-style-singleton: 2.2.1(@types/react@18.2.57)(react@18.2.0) + tslib: 2.6.2 + dev: false + + /react-remove-scroll-bar@2.3.5(@types/react@18.2.59)(react@18.2.0): + resolution: {integrity: sha512-3cqjOqg6s0XbOjWvmasmqHch+RLxIEk2r/70rzGXuz3iIGQsQheEQyqYCBb5EECoD01Vo2SIbDqW4paLeLTASw==} + engines: {node: '>=10'} + peerDependencies: + '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0 + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + '@types/react': + optional: true + dependencies: + '@types/react': 18.2.59 + react: 18.2.0 + react-style-singleton: 2.2.1(@types/react@18.2.59)(react@18.2.0) tslib: 2.6.2 - /react-remove-scroll@2.5.5(@types/react@18.2.48)(react@18.2.0): + /react-remove-scroll@2.5.5(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-ImKhrzJJsyXJfBZ4bzu8Bwpka14c/fQt0k+cyFp/PBhTfyDnU5hjOtM4AG/0AMyy8oKzOTR0lDgJIM7pYXI0kw==} engines: {node: '>=10'} peerDependencies: @@ -12102,16 +12450,16 @@ packages: '@types/react': optional: true dependencies: - '@types/react': 18.2.48 + '@types/react': 18.2.59 react: 18.2.0 - react-remove-scroll-bar: 2.3.4(@types/react@18.2.48)(react@18.2.0) - react-style-singleton: 2.2.1(@types/react@18.2.48)(react@18.2.0) + react-remove-scroll-bar: 2.3.5(@types/react@18.2.59)(react@18.2.0) + react-style-singleton: 2.2.1(@types/react@18.2.59)(react@18.2.0) tslib: 2.6.2 - use-callback-ref: 1.3.1(@types/react@18.2.48)(react@18.2.0) - use-sidecar: 1.1.2(@types/react@18.2.48)(react@18.2.0) + use-callback-ref: 1.3.1(@types/react@18.2.59)(react@18.2.0) + use-sidecar: 1.1.2(@types/react@18.2.59)(react@18.2.0) dev: true - /react-remove-scroll@2.5.7(@types/react@18.2.48)(react@18.2.0): + /react-remove-scroll@2.5.7(@types/react@18.2.57)(react@18.2.0): resolution: {integrity: sha512-FnrTWO4L7/Bhhf3CYBNArEG/yROV0tKmTv7/3h9QCFvH6sndeFf1wPqOcbFVu5VAulS5dV1wGT3GZZ/1GawqiA==} engines: {node: '>=10'} peerDependencies: @@ -12121,17 +12469,36 @@ packages: '@types/react': optional: true dependencies: - '@types/react': 18.2.48 + '@types/react': 18.2.57 react: 18.2.0 - react-remove-scroll-bar: 2.3.4(@types/react@18.2.48)(react@18.2.0) - react-style-singleton: 2.2.1(@types/react@18.2.48)(react@18.2.0) + react-remove-scroll-bar: 2.3.5(@types/react@18.2.57)(react@18.2.0) + react-style-singleton: 2.2.1(@types/react@18.2.57)(react@18.2.0) tslib: 2.6.2 - use-callback-ref: 1.3.1(@types/react@18.2.48)(react@18.2.0) - use-sidecar: 1.1.2(@types/react@18.2.48)(react@18.2.0) + use-callback-ref: 1.3.1(@types/react@18.2.57)(react@18.2.0) + use-sidecar: 1.1.2(@types/react@18.2.57)(react@18.2.0) dev: false - /react-resizable-panels@1.0.9(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-QPfW3L7yetEC6z04G9AYYFz5kBklh8rTWcOsVFImYMNUVhr1Y1r9Qc/20Yks2tA+lXMBWCUz4fkGEvbS7tpBSg==} + /react-remove-scroll@2.5.7(@types/react@18.2.59)(react@18.2.0): + resolution: {integrity: 
sha512-FnrTWO4L7/Bhhf3CYBNArEG/yROV0tKmTv7/3h9QCFvH6sndeFf1wPqOcbFVu5VAulS5dV1wGT3GZZ/1GawqiA==} + engines: {node: '>=10'} + peerDependencies: + '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0 + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + '@types/react': + optional: true + dependencies: + '@types/react': 18.2.59 + react: 18.2.0 + react-remove-scroll-bar: 2.3.5(@types/react@18.2.59)(react@18.2.0) + react-style-singleton: 2.2.1(@types/react@18.2.59)(react@18.2.0) + tslib: 2.6.2 + use-callback-ref: 1.3.1(@types/react@18.2.59)(react@18.2.0) + use-sidecar: 1.1.2(@types/react@18.2.59)(react@18.2.0) + dev: false + + /react-resizable-panels@2.0.11(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-tA3OvGFEK/U9rKuEg6TpXcr+i+cN5X8B4UIvs7jqr5lby629pDTGvqRjo1EJLhBpRZfkg0Zz1INJlSYigaS99g==} peerDependencies: react: ^16.14.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.14.0 || ^17.0.0 || ^18.0.0 @@ -12140,49 +12507,49 @@ packages: react-dom: 18.2.0(react@18.2.0) dev: false - /react-select@5.7.7(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /react-select@5.7.7(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-HhashZZJDRlfF/AKj0a0Lnfs3sRdw/46VJIRd8IbB9/Ovr74+ZIwkAdSBjSPXsFMG+u72c5xShqwLSKIJllzqw==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 dependencies: - '@babel/runtime': 7.23.6 + '@babel/runtime': 7.23.9 '@emotion/cache': 11.11.0 - '@emotion/react': 11.11.3(@types/react@18.2.48)(react@18.2.0) - '@floating-ui/dom': 1.5.3 + '@emotion/react': 11.11.3(@types/react@18.2.59)(react@18.2.0) + '@floating-ui/dom': 1.6.3 '@types/react-transition-group': 4.4.10 memoize-one: 6.0.0 prop-types: 15.8.1 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) react-transition-group: 4.4.5(react-dom@18.2.0)(react@18.2.0) - use-isomorphic-layout-effect: 1.1.2(@types/react@18.2.48)(react@18.2.0) + use-isomorphic-layout-effect: 1.1.2(@types/react@18.2.59)(react@18.2.0) transitivePeerDependencies: - '@types/react' dev: false - /react-select@5.8.0(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): + /react-select@5.8.0(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-TfjLDo58XrhP6VG5M/Mi56Us0Yt8X7xD6cDybC7yoRMUNm7BGO7qk8J0TLQOua/prb8vUOtsfnXZwfm30HGsAA==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 dependencies: - '@babel/runtime': 7.23.7 + '@babel/runtime': 7.23.9 '@emotion/cache': 11.11.0 - '@emotion/react': 11.11.3(@types/react@18.2.48)(react@18.2.0) - '@floating-ui/dom': 1.5.3 + '@emotion/react': 11.11.3(@types/react@18.2.59)(react@18.2.0) + '@floating-ui/dom': 1.6.3 '@types/react-transition-group': 4.4.10 memoize-one: 6.0.0 prop-types: 15.8.1 react: 18.2.0 react-dom: 18.2.0(react@18.2.0) react-transition-group: 4.4.5(react-dom@18.2.0)(react@18.2.0) - use-isomorphic-layout-effect: 1.1.2(@types/react@18.2.48)(react@18.2.0) + use-isomorphic-layout-effect: 1.1.2(@types/react@18.2.59)(react@18.2.0) transitivePeerDependencies: - '@types/react' dev: false - /react-style-singleton@2.2.1(@types/react@18.2.48)(react@18.2.0): + /react-style-singleton@2.2.1(@types/react@18.2.57)(react@18.2.0): resolution: {integrity: sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==} engines: {node: '>=10'} peerDependencies: @@ -12192,25 +12559,28 @@ packages: '@types/react': optional: true dependencies: - '@types/react': 18.2.48 + '@types/react': 18.2.57 get-nonce: 1.0.1 invariant: 2.2.4 react: 
18.2.0 tslib: 2.6.2 + dev: false - /react-textarea-autosize@8.5.3(@types/react@18.2.48)(react@18.2.0): - resolution: {integrity: sha512-XT1024o2pqCuZSuBt9FwHlaDeNtVrtCXu0Rnz88t1jUGheCLa3PhjE1GH8Ctm2axEtvdCl5SUHYschyQ0L5QHQ==} + /react-style-singleton@2.2.1(@types/react@18.2.59)(react@18.2.0): + resolution: {integrity: sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==} engines: {node: '>=10'} peerDependencies: + '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0 react: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + '@types/react': + optional: true dependencies: - '@babel/runtime': 7.23.6 + '@types/react': 18.2.59 + get-nonce: 1.0.1 + invariant: 2.2.4 react: 18.2.0 - use-composed-ref: 1.3.0(react@18.2.0) - use-latest: 1.2.1(@types/react@18.2.48)(react@18.2.0) - transitivePeerDependencies: - - '@types/react' - dev: false + tslib: 2.6.2 /react-transition-group@4.4.5(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g==} @@ -12218,7 +12588,7 @@ packages: react: '>=16.6.0' react-dom: '>=16.6.0' dependencies: - '@babel/runtime': 7.23.7 + '@babel/runtime': 7.23.9 dom-helpers: 5.2.1 loose-envify: 1.4.0 prop-types: 15.8.1 @@ -12236,8 +12606,8 @@ packages: tslib: 2.6.2 dev: false - /react-use@17.4.3(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-05Oyuwn4ZccdzLD4ttLbMe8TkobdKpOj7YCFE9VhVpbXrTWZpvCcMyroRw/Banh1RIcQRcM06tfzPpY5D9sTsQ==} + /react-use@17.5.0(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-PbfwSPMwp/hoL847rLnm/qkjg3sTRCvn6YhUZiHaUa3FA6/aNoFX79ul5Xt70O1rK+9GxSVqkY0eTwMdsR/bWg==} peerDependencies: react: '*' react-dom: '*' @@ -12260,8 +12630,8 @@ packages: tslib: 2.6.2 dev: false - /react-virtuoso@4.6.2(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-vvlqvzPif+MvBrJ09+hJJrVY0xJK9yran+A+/1iwY78k0YCVKsyoNPqoLxOxzYPggspNBNXqUXEcvckN29OxyQ==} + /react-virtuoso@4.7.1(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-V1JIZLEwgX7R+YNkbY8dq6NcnIGKGWXe4mnMJJPsA2L4qeFKst0LY3mDk6sBCJyKRbMzYFxTZWyTT4QsA1JvVQ==} engines: {node: '>=10'} peerDependencies: react: '>=16 || >=17 || >= 18' @@ -12277,18 +12647,18 @@ packages: dependencies: loose-envify: 1.4.0 - /reactflow@11.10.2(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0): - resolution: {integrity: sha512-tqQJfPEiIkXonT3piVYf+F9CvABI5e28t5I6rpaLTnO8YVCAOh1h0f+ziDKz0Bx9Y2B/mFgyz+H7LZeUp/+lhQ==} + /reactflow@11.10.4(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0): + resolution: {integrity: sha512-0CApYhtYicXEDg/x2kvUHiUk26Qur8lAtTtiSlptNKuyEuGti6P1y5cS32YGaUoDMoCqkm/m+jcKkfMOvSCVRA==} peerDependencies: react: '>=17' react-dom: '>=17' dependencies: - '@reactflow/background': 11.3.7(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@reactflow/controls': 11.2.7(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@reactflow/core': 11.10.2(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@reactflow/minimap': 11.7.7(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@reactflow/node-resizer': 2.2.7(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) - '@reactflow/node-toolbar': 1.3.7(@types/react@18.2.48)(react-dom@18.2.0)(react@18.2.0) + '@reactflow/background': 11.3.9(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@reactflow/controls': 11.2.9(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@reactflow/core': 11.10.4(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + 
'@reactflow/minimap': 11.7.9(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@reactflow/node-resizer': 2.2.9(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) + '@reactflow/node-toolbar': 1.3.9(@types/react@18.2.59)(react-dom@18.2.0)(react@18.2.0) react: 18.2.0 react-dom: 18.2.0(react@18.2.0) transitivePeerDependencies: @@ -12296,6 +12666,14 @@ packages: - immer dev: false + /read-package-json-fast@3.0.2: + resolution: {integrity: sha512-0J+Msgym3vrLOUB3hzQCuZHII0xkNGCtz/HJH9xZshwv9DbDwkw1KaE3gx/e2J5rpEY5rtOy6cyhKOPrkP7FZw==} + engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} + dependencies: + json-parse-even-better-errors: 3.0.1 + npm-normalize-package-bin: 3.0.1 + dev: true + /read-pkg-up@7.0.1: resolution: {integrity: sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==} engines: {node: '>=8'} @@ -12354,14 +12732,6 @@ packages: tslib: 2.6.2 dev: true - /redent@3.0.0: - resolution: {integrity: sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==} - engines: {node: '>=8'} - dependencies: - indent-string: 4.0.0 - strip-indent: 3.0.0 - dev: true - /redux-dynamic-middlewares@2.2.0: resolution: {integrity: sha512-GHESQC+Y0PV98ZBoaC6br6cDOsNiM1Cu4UleGMqMWCXX03jIr3BoozYVrRkLVVAl4sC216chakMnZOu6SwNdGA==} dev: false @@ -12386,14 +12756,15 @@ packages: resolution: {integrity: sha512-M9/ELqF6fy8FwmkpnF0S3YKOqMyoWJ4+CS5Efg2ct3oY9daQvd/Pc71FpGZsVsbl3Cpb+IIcjBDUnnyBdQbq4w==} dev: false - /reflect.getprototypeof@1.0.4: - resolution: {integrity: sha512-ECkTw8TmJwW60lOTR+ZkODISW6RQ8+2CL3COqtiJKLd6MmB45hN51HprHFziKLGkAuTGQhBb91V8cy+KHlaCjw==} + /reflect.getprototypeof@1.0.5: + resolution: {integrity: sha512-62wgfC8dJWrmxv44CA36pLDnP6KKl3Vhxb7PL+8+qrrFMMoJij4vgiMP8zV4O8+CBMXY1mHxI5fITGHXFHVmQQ==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 - get-intrinsic: 1.2.2 + es-abstract: 1.22.4 + es-errors: 1.3.0 + get-intrinsic: 1.2.4 globalthis: 1.0.3 which-builtin-type: 1.1.3 dev: true @@ -12415,16 +12786,17 @@ packages: /regenerator-transform@0.15.2: resolution: {integrity: sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==} dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 dev: true - /regexp.prototype.flags@1.5.1: - resolution: {integrity: sha512-sy6TXMN+hnP/wMy+ISxg3krXx7BAtWVO4UouuCN/ziM9UEne0euamVNafDfvC83bRNr95y0V5iijeDQFUNpvrg==} + /regexp.prototype.flags@1.5.2: + resolution: {integrity: sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - set-function-name: 2.0.1 + es-errors: 1.3.0 + set-function-name: 2.0.2 dev: true /regexpu-core@5.3.2: @@ -12464,6 +12836,14 @@ packages: unist-util-visit: 2.0.3 dev: true + /rename-overwrite@5.0.0: + resolution: {integrity: sha512-vSxE5Ww7Jnyotvaxi3Dj0vOMoojH8KMkBfs9xYeW/qNfJiLTcC1fmwTjrbGUq3mQSOCxkG0DbdcvwTUrpvBN4w==} + engines: {node: '>=12.10'} + dependencies: + '@zkochan/rimraf': 2.1.3 + fs-extra: 10.1.0 + dev: true + /require-directory@2.1.1: resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} engines: {node: '>=0.10.0'} @@ -12479,34 +12859,14 @@ packages: engines: {node: '>=0.10.5'} dev: true - /requirejs-config-file@4.0.0: - resolution: {integrity: 
sha512-jnIre8cbWOyvr8a5F2KuqBnY+SDA4NXr/hzEZJG79Mxm2WiFQz2dzhC8ibtPJS7zkmBEl1mxSwp5HhC1W4qpxw==} - engines: {node: '>=10.13.0'} - dependencies: - esprima: 4.0.1 - stringify-object: 3.3.0 - dev: true - - /requirejs@2.3.6: - resolution: {integrity: sha512-ipEzlWQe6RK3jkzikgCupiTbTvm4S0/CAU5GlgptkN5SO6F3u0UD0K18wy6ErDqiCyP4J4YYe1HuAShvsxePLg==} - engines: {node: '>=0.4.0'} - hasBin: true - dev: true - - /reselect@5.0.1(patch_hash=kvbgwzjyy4x4fnh7znyocvb75q): - resolution: {integrity: sha512-D72j2ubjgHpvuCiORWkOUxndHJrxDaSolheiz5CO+roz8ka97/4msh2E8F5qay4GawR5vzBt5MkbDHT+Rdy/Wg==} + /reselect@5.1.0: + resolution: {integrity: sha512-aw7jcGLDpSgNDyWBQLv2cedml85qd95/iszJjN988zX1t7AVRJi19d9kto5+W7oCfQ94gyo40dVbT6g2k4/kXg==} dev: false - patched: true /resize-observer-polyfill@1.5.1: resolution: {integrity: sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg==} dev: false - /resolve-dependency-path@2.0.0: - resolution: {integrity: sha512-DIgu+0Dv+6v2XwRaNWnumKu7GPufBBOr5I1gRPJHkvghrfCGOooJODFvgFimX/KRxk9j0whD2MnKHzM1jYvk9w==} - engines: {node: '>=6.0.0'} - dev: true - /resolve-from@4.0.0: resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} engines: {node: '>=4'} @@ -12553,11 +12913,20 @@ packages: engines: {node: '>=0.12'} dev: false + /retry@0.12.0: + resolution: {integrity: sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==} + engines: {node: '>= 4'} + dev: true + /reusify@1.0.4: resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} engines: {iojs: '>=1.0.0', node: '>=0.10.0'} dev: true + /reverse-arguments@1.0.0: + resolution: {integrity: sha512-/x8uIPdTafBqakK0TmPNJzgkLP+3H+yxpUJhCQHsLBg1rYEVNR2D8BRYNWQhVBjyOd7oo1dZRVzIkwMY2oqfYQ==} + dev: true + /rimraf@2.6.3: resolution: {integrity: sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==} hasBin: true @@ -12620,33 +12989,33 @@ packages: fsevents: 2.3.3 dev: true - /rollup@4.9.4: - resolution: {integrity: sha512-2ztU7pY/lrQyXSCnnoU4ICjT/tCG9cdH3/G25ERqE3Lst6vl2BCM5hL2Nw+sslAvAf+ccKsAq1SkKQALyqhR7g==} + /rollup@4.12.0: + resolution: {integrity: sha512-wz66wn4t1OHIJw3+XU7mJJQV/2NAfw5OAk6G6Hoo3zcvz/XOfQ52Vgi+AN4Uxoxi0KBBwk2g8zPrTDA4btSB/Q==} engines: {node: '>=18.0.0', npm: '>=8.0.0'} hasBin: true dependencies: '@types/estree': 1.0.5 optionalDependencies: - '@rollup/rollup-android-arm-eabi': 4.9.4 - '@rollup/rollup-android-arm64': 4.9.4 - '@rollup/rollup-darwin-arm64': 4.9.4 - '@rollup/rollup-darwin-x64': 4.9.4 - '@rollup/rollup-linux-arm-gnueabihf': 4.9.4 - '@rollup/rollup-linux-arm64-gnu': 4.9.4 - '@rollup/rollup-linux-arm64-musl': 4.9.4 - '@rollup/rollup-linux-riscv64-gnu': 4.9.4 - '@rollup/rollup-linux-x64-gnu': 4.9.4 - '@rollup/rollup-linux-x64-musl': 4.9.4 - '@rollup/rollup-win32-arm64-msvc': 4.9.4 - '@rollup/rollup-win32-ia32-msvc': 4.9.4 - '@rollup/rollup-win32-x64-msvc': 4.9.4 + '@rollup/rollup-android-arm-eabi': 4.12.0 + '@rollup/rollup-android-arm64': 4.12.0 + '@rollup/rollup-darwin-arm64': 4.12.0 + '@rollup/rollup-darwin-x64': 4.12.0 + '@rollup/rollup-linux-arm-gnueabihf': 4.12.0 + '@rollup/rollup-linux-arm64-gnu': 4.12.0 + '@rollup/rollup-linux-arm64-musl': 4.12.0 + '@rollup/rollup-linux-riscv64-gnu': 4.12.0 + '@rollup/rollup-linux-x64-gnu': 4.12.0 + '@rollup/rollup-linux-x64-musl': 4.12.0 + '@rollup/rollup-win32-arm64-msvc': 4.12.0 + '@rollup/rollup-win32-ia32-msvc': 4.12.0 + 
'@rollup/rollup-win32-x64-msvc': 4.12.0 fsevents: 2.3.3 dev: true /rtl-css-js@1.16.1: resolution: {integrity: sha512-lRQgou1mu19e+Ya0LsTvKrVJ5TYUbqCVPAiImX3UfLTenarvPUl1QFdvu5Z3PYmHT9RCcwIfbjRQBntExyj3Zg==} dependencies: - '@babel/runtime': 7.23.8 + '@babel/runtime': 7.23.9 dev: false /run-parallel@1.2.0: @@ -12661,12 +13030,12 @@ packages: tslib: 2.6.2 dev: true - /safe-array-concat@1.0.1: - resolution: {integrity: sha512-6XbUAseYE2KtOuGueyeobCySj9L4+66Tn6KQMOPQJrAJEowYKW/YR/MGJZl7FdydUdaFu4LYyDZjxf4/Nmo23Q==} + /safe-array-concat@1.1.0: + resolution: {integrity: sha512-ZdQ0Jeb9Ofti4hbt5lX3T2JcAamT9hfzYU1MNB+z/jaEbB6wfFfPIR/zEORmZqobkCCJhSjodobH6WHNmJ97dg==} engines: {node: '>=0.4'} dependencies: - call-bind: 1.0.5 - get-intrinsic: 1.2.2 + call-bind: 1.0.7 + get-intrinsic: 1.2.4 has-symbols: 1.0.3 isarray: 2.0.5 dev: true @@ -12679,11 +13048,12 @@ packages: resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} dev: true - /safe-regex-test@1.0.0: - resolution: {integrity: sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==} + /safe-regex-test@1.0.3: + resolution: {integrity: sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==} + engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 - get-intrinsic: 1.2.2 + call-bind: 1.0.7 + es-errors: 1.3.0 is-regex: 1.1.4 dev: true @@ -12696,14 +13066,6 @@ packages: resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} dev: true - /sass-lookup@3.0.0: - resolution: {integrity: sha512-TTsus8CfFRn1N44bvdEai1no6PqdmDiQUiqW5DlpmtT+tYnIt1tXtDIph5KA1efC+LmioJXSnCtUVpcK9gaKIg==} - engines: {node: '>=6.0.0'} - hasBin: true - dependencies: - commander: 2.20.3 - dev: true - /scheduler@0.23.0: resolution: {integrity: sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==} dependencies: @@ -12736,6 +13098,14 @@ packages: lru-cache: 6.0.0 dev: true + /semver@7.6.0: + resolution: {integrity: sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==} + engines: {node: '>=10'} + hasBin: true + dependencies: + lru-cache: 6.0.0 + dev: true + /send@0.18.0: resolution: {integrity: sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==} engines: {node: '>= 0.8.0'} @@ -12776,23 +13146,26 @@ packages: - supports-color dev: true - /set-function-length@1.1.1: - resolution: {integrity: sha512-VoaqjbBJKiWtg4yRcKBQ7g7wnGnLV3M8oLvVWwOk2PdYY6PEFegR1vezXR0tw6fZGF9csVakIRjrJiy2veSBFQ==} + /set-function-length@1.2.1: + resolution: {integrity: sha512-j4t6ccc+VsKwYHso+kElc5neZpjtq9EnRICFZtWyBsLojhmeF/ZBd/elqm22WJh/BziDe/SBiOeAt0m2mfLD0g==} engines: {node: '>= 0.4'} dependencies: - define-data-property: 1.1.1 - get-intrinsic: 1.2.2 + define-data-property: 1.1.4 + es-errors: 1.3.0 + function-bind: 1.1.2 + get-intrinsic: 1.2.4 gopd: 1.0.1 - has-property-descriptors: 1.0.1 + has-property-descriptors: 1.0.2 dev: true - /set-function-name@2.0.1: - resolution: {integrity: sha512-tMNCiqYVkXIZgc2Hnoy2IvC/f8ezc5koaRFkCjrpWzGpCd3qbZXPzVy9MAZzK1ch/X0jvSkojys3oqJN0qCmdA==} + /set-function-name@2.0.2: + resolution: {integrity: sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==} engines: {node: '>= 0.4'} dependencies: - define-data-property: 1.1.1 + define-data-property: 1.1.4 + es-errors: 1.3.0 functions-have-names: 
1.2.3 - has-property-descriptors: 1.0.1 + has-property-descriptors: 1.0.2 dev: true /set-harmonic-interval@1.0.1: @@ -12823,18 +13196,28 @@ packages: engines: {node: '>=8'} dev: true + /shell-quote-word@1.0.1: + resolution: {integrity: sha512-lT297f1WLAdq0A4O+AknIFRP6kkiI3s8C913eJ0XqBxJbZPGWUNkRQk2u8zk4bEAjUJ5i+fSLwB6z1HzeT+DEg==} + dev: true + /shell-quote@1.8.1: resolution: {integrity: sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==} dev: true - /side-channel@1.0.4: - resolution: {integrity: sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==} + /side-channel@1.0.5: + resolution: {integrity: sha512-QcgiIWV4WV7qWExbN5llt6frQB/lBven9pqliLXfGPB+K9ZYXxDozp0wLkHS24kWCm+6YXH/f0HhnObZnZOBnQ==} + engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 - get-intrinsic: 1.2.2 + call-bind: 1.0.7 + es-errors: 1.3.0 + get-intrinsic: 1.2.4 object-inspect: 1.13.1 dev: true + /siginfo@2.0.0: + resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} + dev: true + /signal-exit@3.0.7: resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} dev: true @@ -12853,6 +13236,11 @@ packages: engines: {node: '>=8'} dev: true + /smol-toml@1.1.4: + resolution: {integrity: sha512-Y0OT8HezWsTNeEOSVxDnKOW/AyNXHQ4BwJNbAXlLTF5wWsBvrcHhIkE5Rf8kQMLmgf7nDX3PVOlgC6/Aiggu3Q==} + engines: {node: '>= 18', pnpm: '>= 8'} + dev: true + /socket.io-client@4.7.4: resolution: {integrity: sha512-wh+OkeF0rAVCrABWQBaEjLfb7DVPotMbu0cgWgyR0v6eA4EoVnAwcIeIbcdTE3GT/H3kbdLl7OoH2+asoDRIIg==} engines: {node: '>=10.0.0'} @@ -12920,22 +13308,22 @@ packages: resolution: {integrity: sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==} dependencies: spdx-expression-parse: 3.0.1 - spdx-license-ids: 3.0.16 + spdx-license-ids: 3.0.17 dev: true - /spdx-exceptions@2.3.0: - resolution: {integrity: sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==} + /spdx-exceptions@2.5.0: + resolution: {integrity: sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==} dev: true /spdx-expression-parse@3.0.1: resolution: {integrity: sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==} dependencies: - spdx-exceptions: 2.3.0 - spdx-license-ids: 3.0.16 + spdx-exceptions: 2.5.0 + spdx-license-ids: 3.0.17 dev: true - /spdx-license-ids@3.0.16: - resolution: {integrity: sha512-eWN+LnM3GR6gPu35WxNgbGl8rmY1AEmoMDvL/QD6zYmPWgywxWqJWNdLGT+ke8dKNWrcYgYjPpG5gbTfghP8rw==} + /spdx-license-ids@3.0.17: + resolution: {integrity: sha512-sh8PWc/ftMqAAdFiBu6Fy6JUOYjqDJBJvIhpfDMyHrr0Rbp5liZqd4TjtQ/RgfLjKFZb+LMx5hpml5qOWy0qvg==} dev: true /split-on-first@3.0.0: @@ -12943,16 +13331,33 @@ packages: engines: {node: '>=12'} dev: false + /split2@3.2.2: + resolution: {integrity: sha512-9NThjpgZnifTkJpzTZ7Eue85S49QwpNhZTq6GRJwObb6jnLFNGB7Qm73V5HewTROPyxD0C29xqmaI68bQtV+hg==} + dependencies: + readable-stream: 3.6.2 + dev: true + /sprintf-js@1.0.3: resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} dev: true + /ssri@10.0.5: + resolution: {integrity: sha512-bSf16tAFkGeRlUNDjXu8FzaMQt6g2HZJrun7mtMbIPOddxt3GLMSz5VWUWcqTJUPfLEaDIepGxv+bYQW49596A==} + engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} + dependencies: + minipass: 7.0.4 + dev: true + 
/stack-generator@2.0.10: resolution: {integrity: sha512-mwnua/hkqM6pF4k8SnmZ2zfETsRUpWXREfA/goT8SLCV4iOFa4bzOX2nDipWAZFPTjLvQB82f5yaodMVhK0yJQ==} dependencies: stackframe: 1.3.4 dev: false + /stackback@0.0.2: + resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} + dev: true + /stackframe@1.3.4: resolution: {integrity: sha512-oeVtt7eWQS+Na6F//S4kJ2K2VbRlS9D43mAlMyVpVWovy9o+jfgH8O9agzANzaiLjclA0oYzUXEM4PurhSUChw==} dev: false @@ -12977,22 +13382,19 @@ packages: engines: {node: '>= 0.8'} dev: true - /stop-iteration-iterator@1.0.0: - resolution: {integrity: sha512-iCGQj+0l0HOdZ2AEeBADlsRC+vsnDsZsbdSiH1yNSjcfKM7fdpCMfqAL/dwF5BLiw/XhRft/Wax6zQbhq2BcjQ==} - engines: {node: '>= 0.4'} - dependencies: - internal-slot: 1.0.6 + /std-env@3.7.0: + resolution: {integrity: sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==} dev: true - /store2@2.14.2: - resolution: {integrity: sha512-siT1RiqlfQnGqgT/YzXVUNsom9S0H1OX+dpdGN1xkyYATo4I6sep5NmsRD/40s3IIOvlCq6akxkqG82urIZW1w==} + /store2@2.14.3: + resolution: {integrity: sha512-4QcZ+yx7nzEFiV4BMLnr/pRa5HYzNITX2ri0Zh6sT9EyQHbBHacC6YigllUPU9X3D0f/22QCgfokpKs52YRrUg==} dev: true - /storybook@7.6.10: - resolution: {integrity: sha512-ypFeGhQTUBBfqSUVZYh7wS5ghn3O2wILCiQc4459SeUpvUn+skcqw/TlrwGSoF5EWjDA7gtRrWDxO3mnlPt5Cw==} + /storybook@7.6.17: + resolution: {integrity: sha512-8+EIo91bwmeFWPg1eysrxXlhIYv3OsXrznTr4+4Eq0NikqAoq6oBhtlN5K2RGS2lBVF537eN+9jTCNbR+WrzDA==} hasBin: true dependencies: - '@storybook/cli': 7.6.10 + '@storybook/cli': 7.6.17 transitivePeerDependencies: - bufferutil - encoding @@ -13004,12 +13406,6 @@ packages: resolution: {integrity: sha512-76ORR0DO1o1hlKwTbi/DM3EXWGf3ZJYO8cXX5RJwnul2DEg2oyoZyjLNoQM8WsvZiFKCRfC1O0J7iCvie3RZmQ==} dev: true - /stream-to-array@2.3.0: - resolution: {integrity: sha512-UsZtOYEn4tWU2RGLOXr/o/xjRBftZRlG3dEWoaHr8j4GuypJ3isitGbVyjQKAuMu+xbiop8q224TjiZWc4XTZA==} - dependencies: - any-promise: 1.3.0 - dev: true - /string-argv@0.3.2: resolution: {integrity: sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==} engines: {node: '>=0.6.19'} @@ -13033,43 +13429,47 @@ packages: strip-ansi: 7.1.0 dev: true + /string.fromcodepoint@0.2.1: + resolution: {integrity: sha512-n69H31OnxSGSZyZbgBlvYIXlrMhJQ0dQAX1js1QDhpaUH6zmU3QYlj07bCwCNlPOu3oRXIubGPl2gDGnHsiCqg==} + dev: true + /string.prototype.matchall@4.0.10: resolution: {integrity: sha512-rGXbGmOEosIQi6Qva94HUjgPs9vKW+dkG7Y8Q5O2OYkWL6wFaTRZO8zM4mhP94uX55wgyrXzfS2aGtGzUL7EJQ==} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 - get-intrinsic: 1.2.2 + es-abstract: 1.22.4 + get-intrinsic: 1.2.4 has-symbols: 1.0.3 - internal-slot: 1.0.6 - regexp.prototype.flags: 1.5.1 - set-function-name: 2.0.1 - side-channel: 1.0.4 + internal-slot: 1.0.7 + regexp.prototype.flags: 1.5.2 + set-function-name: 2.0.2 + side-channel: 1.0.5 dev: true /string.prototype.trim@1.2.8: resolution: {integrity: sha512-lfjY4HcixfQXOfaqCvcBuOIapyaroTXhbkfJN3gcB1OtyupngWK4sEET9Knd0cXd28kTUqu/kHoV4HKSJdnjiQ==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 + es-abstract: 1.22.4 dev: true /string.prototype.trimend@1.0.7: resolution: {integrity: sha512-Ni79DqeB72ZFq1uH/L6zJ+DKZTkOtPIHovb3YZHQViE+HDouuU4mBrLOLDn5Dde3RF8qw5qVETEjhu9locMLvA==} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 + 
es-abstract: 1.22.4 dev: true /string.prototype.trimstart@1.0.7: resolution: {integrity: sha512-NGhtDFu3jCEm7B4Fy0DpLewdJQOZcQ0rGbwQ/+stjnrp2i+rlKeCvos9hOIeCmqwratM47OBxY7uFZzjxHXmrg==} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 define-properties: 1.2.1 - es-abstract: 1.22.3 + es-abstract: 1.22.4 dev: true /string_decoder@1.1.1: @@ -13084,15 +13484,6 @@ packages: safe-buffer: 5.2.1 dev: true - /stringify-object@3.3.0: - resolution: {integrity: sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==} - engines: {node: '>=4'} - dependencies: - get-own-enumerable-property-symbols: 3.0.2 - is-obj: 1.0.1 - is-regexp: 1.0.0 - dev: true - /strip-ansi@6.0.1: resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} engines: {node: '>=8'} @@ -13112,6 +13503,11 @@ packages: engines: {node: '>=4'} dev: true + /strip-bom@4.0.0: + resolution: {integrity: sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==} + engines: {node: '>=8'} + dev: true + /strip-final-newline@2.0.0: resolution: {integrity: sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==} engines: {node: '>=6'} @@ -13122,13 +13518,6 @@ packages: engines: {node: '>=12'} dev: true - /strip-indent@3.0.0: - resolution: {integrity: sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==} - engines: {node: '>=8'} - dependencies: - min-indent: 1.0.1 - dev: true - /strip-indent@4.0.0: resolution: {integrity: sha512-mnVSV2l+Zv6BLpSD/8V87CW/y9EmmbYzGCIavsnsI6/nwn26DwffM/yztm30Z/I2DY9wdS3vXVCMnHDgZaVNoA==} engines: {node: '>=12'} @@ -13136,16 +13525,22 @@ packages: min-indent: 1.0.1 dev: true - /strip-json-comments@2.0.1: - resolution: {integrity: sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==} - engines: {node: '>=0.10.0'} - dev: true - /strip-json-comments@3.1.1: resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} engines: {node: '>=8'} dev: true + /strip-json-comments@5.0.1: + resolution: {integrity: sha512-0fk9zBqO67Nq5M/m45qHCJxylV/DhBlIOVExqgOMiCCrzrhU6tCibRXNqE3jwJLftzE9SNuZtYbpzcO+i9FiKw==} + engines: {node: '>=14.16'} + dev: true + + /strip-literal@2.0.0: + resolution: {integrity: sha512-f9vHgsCWBq2ugHAkGMiiYY+AYG0D/cbloKKg0nhaaaSNsujdGIpVXCNsrJpCKr5M0f4aI31mr13UjY6GAuXCKA==} + dependencies: + js-tokens: 8.0.3 + dev: true + /stylis@4.2.0: resolution: {integrity: sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==} dev: false @@ -13154,15 +13549,8 @@ packages: resolution: {integrity: sha512-EQepAV+wMsIaGVGX1RECzgrcqRRU/0sYOHkeLsZ3fzHaHXZy4DaOOX0vOlGQdlsjkh3mFHAIlVimpwAs4dslyQ==} dev: false - /stylus-lookup@3.0.2: - resolution: {integrity: sha512-oEQGHSjg/AMaWlKe7gqsnYzan8DLcGIHe0dUaFkucZZ14z4zjENRlQMCHT4FNsiWnJf17YN9OvrCfCoi7VvOyg==} - engines: {node: '>=6.0.0'} - hasBin: true - dependencies: - commander: 2.20.3 - debug: 4.3.4 - transitivePeerDependencies: - - supports-color + /summary@2.1.0: + resolution: {integrity: sha512-nMIjMrd5Z2nuB2RZCKJfFMjgS3fygbeyGk9PxPPaJR1RIcyN9yn4A63Isovzm3ZtQuEkLBVgMdPup8UeLH7aQw==} dev: true /supports-color@5.5.0: @@ -13202,11 +13590,6 @@ packages: resolution: {integrity: sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==} dev: false - /tapable@2.2.1: - resolution: {integrity: 
sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==} - engines: {node: '>=6'} - dev: true - /tar-fs@2.1.1: resolution: {integrity: sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==} dependencies: @@ -13293,11 +13676,26 @@ packages: xtend: 4.0.2 dev: true - /tiny-invariant@1.3.1: - resolution: {integrity: sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw==} + /through2@4.0.2: + resolution: {integrity: sha512-iOqSav00cVxEEICeD7TjLB1sueEL+81Wpzp2bY17uZjZN0pWZPuo4suZ/61VujxmqSGFfgOcNuTZ85QJwNZQpw==} + dependencies: + readable-stream: 3.6.2 + dev: true - /tinyspy@2.2.0: - resolution: {integrity: sha512-d2eda04AN/cPOR89F7Xv5bK/jrQEhmcLFe6HFldoeO9AJtps+fqEnh486vnT/8y4bw38pSyxDcTCAq+Ks2aJTg==} + /tiny-invariant@1.3.3: + resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==} + + /tinybench@2.6.0: + resolution: {integrity: sha512-N8hW3PG/3aOoZAN5V/NSAEDz0ZixDSSt5b/a05iqtpgfLWMSVuCo7w0k2vVvEjdrIoeGqZzweX2WlyioNIHchA==} + dev: true + + /tinypool@0.8.2: + resolution: {integrity: sha512-SUszKYe5wgsxnNOVlBYO6IC+8VGWdVGZWAqUxp3UErNBtptZvWbwyUOyzNL59zigz2rCA92QiL3wvG+JDSdJdQ==} + engines: {node: '>=14.0.0'} + dev: true + + /tinyspy@2.2.1: + resolution: {integrity: sha512-KYad6Vy5VDWV4GH3fjpseMQ/XU2BhIYP7Vzd0LG44qRWm/Yt2WCOTicFdvmgo6gWaqooMQCawTtILVQJupKu7A==} engines: {node: '>=14.0.0'} dev: true @@ -13309,6 +13707,16 @@ packages: resolution: {integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==} engines: {node: '>=4'} + /to-no-case@1.0.2: + resolution: {integrity: sha512-Z3g735FxuZY8rodxV4gH7LxClE4H0hTIyHNIHdk+vpQxjLm0cwnKXq/OFVZ76SOQmto7txVcwSCwkU5kqp+FKg==} + dev: true + + /to-pascal-case@1.0.0: + resolution: {integrity: sha512-QGMWHqM6xPrcQW57S23c5/3BbYb0Tbe9p+ur98ckRnGDwD4wbbtDiYI38CfmMKNB5Iv0REjs5SNDntTwvDxzZA==} + dependencies: + to-space-case: 1.0.0 + dev: true + /to-regex-range@5.0.1: resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} engines: {node: '>=8.0'} @@ -13316,6 +13724,12 @@ packages: is-number: 7.0.0 dev: true + /to-space-case@1.0.0: + resolution: {integrity: sha512-rLdvwXZ39VOn1IxGL3V6ZstoTbwLRckQmn/U8ZDLuWwIXNpuZDhQ3AiRUlhTbOXFVE9C+dR51wM0CBDhk31VcA==} + dependencies: + to-no-case: 1.0.2 + dev: true + /tocbot@4.25.0: resolution: {integrity: sha512-kE5wyCQJ40hqUaRVkyQ4z5+4juzYsv/eK+aqD97N62YH0TxFhzJvo22RUQQZdO3YnXAk42ZOfOpjVdy+Z0YokA==} dev: true @@ -13337,9 +13751,9 @@ packages: hasBin: true dev: true - /ts-api-utils@1.0.3(typescript@5.3.3): - resolution: {integrity: sha512-wNMeqtMz5NtwpT/UZGY5alT+VoKdSsOOP/kqHFcUW1P/VRhH2wJ48+DN2WwUliNbQ976ETwDL0Ifd2VVvgonvg==} - engines: {node: '>=16.13.0'} + /ts-api-utils@1.2.1(typescript@5.3.3): + resolution: {integrity: sha512-RIYA36cJn2WiH9Hy77hdF9r7oEwxAtB/TS9/S4Qd90Ap4z5FSiin5zEiTL44OII1Y3IIlEvxwxFUVgrHSZ/UpA==} + engines: {node: '>=16'} peerDependencies: typescript: '>=4.2.0' dependencies: @@ -13359,17 +13773,16 @@ packages: resolution: {integrity: sha512-tLJxacIQUM82IR7JO1UUkKlYuUTmoY9HBJAmNWFzheSlDS5SPMcNIepejHJa4BpPQLAcbRhRf3GDJzyj6rbKvA==} dev: false - /ts-graphviz@1.8.1: - resolution: {integrity: sha512-54/fe5iu0Jb6X0pmDmzsA2UHLfyHjUEUwfHtZcEOR0fZ6Myf+dFoO6eNsyL8CBDMJ9u7WWEewduVaiaXlvjSVw==} - engines: {node: '>=14.16'} - dev: true - /ts-toolbelt@9.6.0: resolution: {integrity: 
sha512-nsZd8ZeNUzukXPlJmTBwUAuABDe/9qtVDelJeT/qW0ow3ZS3BsQJtNkan1802aM9Uf68/Y8ljw86Hu0h5IUW3w==} dev: true - /tsconfck@3.0.1(typescript@5.3.3): - resolution: {integrity: sha512-7ppiBlF3UEddCLeI1JRx5m2Ryq+xk4JrZuq4EuYXykipebaq1dV0Fhgr1hb7CkmHt32QSgOZlcqVLEtHBG4/mg==} + /tsafe@1.6.6: + resolution: {integrity: sha512-gzkapsdbMNwBnTIjgO758GujLCj031IgHK/PKr2mrmkCSJMhSOR5FeOuSxKLMUoYc0vAA4RGEYYbjt/v6afD3g==} + dev: true + + /tsconfck@3.0.2(typescript@5.3.3): + resolution: {integrity: sha512-6lWtFjwuhS3XI4HsX4Zg0izOI3FU/AI9EGVlPEUMDIhvLPMD4wkiof0WCoDgW7qY+Dy198g4d9miAqUHWHFH6Q==} engines: {node: ^18 || >=20} hasBin: true peerDependencies: @@ -13401,26 +13814,6 @@ packages: /tslib@2.6.2: resolution: {integrity: sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==} - /tsutils@3.21.0(typescript@3.9.10): - resolution: {integrity: sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==} - engines: {node: '>= 6'} - peerDependencies: - typescript: '>=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta' - dependencies: - tslib: 1.14.1 - typescript: 3.9.10 - dev: true - - /tsutils@3.21.0(typescript@4.9.5): - resolution: {integrity: sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==} - engines: {node: '>= 6'} - peerDependencies: - typescript: '>=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta' - dependencies: - tslib: 1.14.1 - typescript: 4.9.5 - dev: true - /tsutils@3.21.0(typescript@5.3.3): resolution: {integrity: sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==} engines: {node: '>= 6'} @@ -13467,11 +13860,6 @@ packages: resolution: {integrity: sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==} engines: {node: '>=12.20'} - /type-fest@4.9.0: - resolution: {integrity: sha512-KS/6lh/ynPGiHD/LnAobrEFq3Ad4pBzOlJ1wAnJx9N4EYoqFhMfLIBjUT2UEx4wg5ZE+cC1ob6DCSpppVo+rtg==} - engines: {node: '>=16'} - dev: false - /type-is@1.6.18: resolution: {integrity: sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==} engines: {node: '>= 0.6'} @@ -13480,68 +13868,62 @@ packages: mime-types: 2.1.35 dev: true - /typed-array-buffer@1.0.0: - resolution: {integrity: sha512-Y8KTSIglk9OZEr8zywiIHG/kmQ7KWyjseXs1CbSo8vC42w7hg2HgYTxSWwP0+is7bWDc1H+Fo026CpHFwm8tkw==} + /typed-array-buffer@1.0.2: + resolution: {integrity: sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 - get-intrinsic: 1.2.2 - is-typed-array: 1.1.12 + call-bind: 1.0.7 + es-errors: 1.3.0 + is-typed-array: 1.1.13 dev: true - /typed-array-byte-length@1.0.0: - resolution: {integrity: sha512-Or/+kvLxNpeQ9DtSydonMxCx+9ZXOswtwJn17SNLvhptaXYDJvkFFP5zbfU/uLmvnBJlI4yrnXRxpdWH/M5tNA==} + /typed-array-byte-length@1.0.1: + resolution: {integrity: sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==} engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 for-each: 0.3.3 - has-proto: 1.0.1 - is-typed-array: 1.1.12 + gopd: 1.0.1 + has-proto: 1.0.3 + is-typed-array: 1.1.13 dev: true - /typed-array-byte-offset@1.0.0: - resolution: {integrity: 
sha512-RD97prjEt9EL8YgAgpOkf3O4IF9lhJFr9g0htQkm0rchFp/Vx7LW5Q8fSXXub7BXAODyUQohRMyOc3faCPd0hg==} + /typed-array-byte-offset@1.0.2: + resolution: {integrity: sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==} engines: {node: '>= 0.4'} dependencies: - available-typed-arrays: 1.0.5 - call-bind: 1.0.5 + available-typed-arrays: 1.0.7 + call-bind: 1.0.7 for-each: 0.3.3 - has-proto: 1.0.1 - is-typed-array: 1.1.12 + gopd: 1.0.1 + has-proto: 1.0.3 + is-typed-array: 1.1.13 dev: true - /typed-array-length@1.0.4: - resolution: {integrity: sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng==} + /typed-array-length@1.0.5: + resolution: {integrity: sha512-yMi0PlwuznKHxKmcpoOdeLwxBoVPkqZxd7q2FgMkmD3bNwvF5VW0+UlUQ1k1vmktTu4Yu13Q0RIxEP8+B+wloA==} + engines: {node: '>= 0.4'} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 for-each: 0.3.3 - is-typed-array: 1.1.12 + gopd: 1.0.1 + has-proto: 1.0.3 + is-typed-array: 1.1.13 + possible-typed-array-names: 1.0.0 dev: true /typedarray@0.0.6: resolution: {integrity: sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==} dev: true - /typescript@3.9.10: - resolution: {integrity: sha512-w6fIxVE/H1PkLKcCPsFqKE7Kv7QUwhU8qQY2MueZXWx5cPZdwFupLgKK3vntcK98BtNHZtAF4LA/yl2a7k8R6Q==} - engines: {node: '>=4.2.0'} - hasBin: true - dev: true - - /typescript@4.9.5: - resolution: {integrity: sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==} - engines: {node: '>=4.2.0'} - hasBin: true - dev: true - /typescript@5.3.3: resolution: {integrity: sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==} engines: {node: '>=14.17'} hasBin: true dev: true - /ufo@1.3.2: - resolution: {integrity: sha512-o+ORpgGwaYQXgqGDwd+hkS4PuZ3QnmqMMxRuajK/a38L6fTpcE5GPIfrf+L/KemFzfUpeUQc1rRS1iDBozvnFA==} + /ufo@1.4.0: + resolution: {integrity: sha512-Hhy+BhRBleFjpJ2vchUNN40qgkh0366FWJGqVLYBHev0vpHTrXSA0ryT+74UiW6KWsldNurQMKGqCm1M2zBciQ==} dev: true /uglify-js@3.17.4: @@ -13555,7 +13937,7 @@ packages: /unbox-primitive@1.0.2: resolution: {integrity: sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==} dependencies: - call-bind: 1.0.5 + call-bind: 1.0.7 has-bigints: 1.0.2 has-symbols: 1.0.3 which-boxed-primitive: 1.0.2 @@ -13565,13 +13947,19 @@ packages: resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} dev: true - /undici@5.28.2: - resolution: {integrity: sha512-wh1pHJHnUeQV5Xa8/kyQhO7WFa8M34l026L5P/+2TYiakvGy5Rdc8jWZVyG7ieht/0WgJLEd3kcU5gKx+6GC8w==} + /undici@5.28.3: + resolution: {integrity: sha512-3ItfzbrhDlINjaP0duwnNsKpDQk3acHI3gVJ1z4fmwMK31k5G9OVIAMLSIaP6w4FaGkaAkN6zaQO9LUvZ1t7VA==} engines: {node: '>=14.0'} dependencies: '@fastify/busboy': 2.1.0 dev: true + /unescape-js@1.1.4: + resolution: {integrity: sha512-42SD8NOQEhdYntEiUQdYq/1V/YHwr1HLwlHuTJB5InVVdOSbgI6xu8jK5q65yIzuFCfczzyDF/7hbGzVbyCw0g==} + dependencies: + string.fromcodepoint: 0.2.1 + dev: true + /unicode-canonical-property-names-ecmascript@2.0.0: resolution: {integrity: sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==} engines: {node: '>=4'} @@ -13595,10 +13983,6 @@ packages: engines: {node: '>=4'} dev: true - /uniq@1.0.1: - resolution: {integrity: sha512-Gw+zz50YNKPDKXs+9d+aKAjVwpjNwqzvNpLigIruT4HA9lMZNdMqs9x07kKHB/L9WRzqp4+DlTU5s4wG2esdoA==} - dev: true - 
/unique-string@2.0.0: resolution: {integrity: sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==} engines: {node: '>=8'} @@ -13640,11 +14024,11 @@ packages: engines: {node: '>= 0.8'} dev: true - /unplugin@1.6.0: - resolution: {integrity: sha512-BfJEpWBu3aE/AyHx8VaNE/WgouoQxgH9baAiH82JjX8cqVyi3uJQstqwD5J+SZxIK326SZIhsSZlALXVBCknTQ==} + /unplugin@1.7.1: + resolution: {integrity: sha512-JqzORDAPxxs8ErLV4x+LL7bk5pk3YlcWqpSNsIkAZj972KzFZLClc/ekppahKkOczGkwIG6ElFgdOgOlK4tXZw==} dependencies: acorn: 8.11.3 - chokidar: 3.5.3 + chokidar: 3.6.0 webpack-sources: 3.2.3 webpack-virtual-modules: 0.6.1 dev: true @@ -13654,14 +14038,14 @@ packages: engines: {node: '>=8'} dev: true - /update-browserslist-db@1.0.13(browserslist@4.22.2): + /update-browserslist-db@1.0.13(browserslist@4.23.0): resolution: {integrity: sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==} hasBin: true peerDependencies: browserslist: '>= 4.21.0' dependencies: - browserslist: 4.22.2 - escalade: 3.1.1 + browserslist: 4.23.0 + escalade: 3.1.2 picocolors: 1.0.0 dev: true @@ -13671,7 +14055,7 @@ packages: punycode: 2.3.1 dev: true - /use-callback-ref@1.3.1(@types/react@18.2.48)(react@18.2.0): + /use-callback-ref@1.3.1(@types/react@18.2.57)(react@18.2.0): resolution: {integrity: sha512-Lg4Vx1XZQauB42Hw3kK7JM6yjVjgFmFC5/Ab797s79aARomD2nEErc4mCgM8EZrARLmmbWpi5DGCadmK50DcAQ==} engines: {node: '>=10'} peerDependencies: @@ -13681,18 +14065,25 @@ packages: '@types/react': optional: true dependencies: - '@types/react': 18.2.48 + '@types/react': 18.2.57 react: 18.2.0 tslib: 2.6.2 - - /use-composed-ref@1.3.0(react@18.2.0): - resolution: {integrity: sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==} - peerDependencies: - react: ^16.8.0 || ^17.0.0 || ^18.0.0 - dependencies: - react: 18.2.0 dev: false + /use-callback-ref@1.3.1(@types/react@18.2.59)(react@18.2.0): + resolution: {integrity: sha512-Lg4Vx1XZQauB42Hw3kK7JM6yjVjgFmFC5/Ab797s79aARomD2nEErc4mCgM8EZrARLmmbWpi5DGCadmK50DcAQ==} + engines: {node: '>=10'} + peerDependencies: + '@types/react': ^16.8.0 || ^17.0.0 || ^18.0.0 + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + '@types/react': + optional: true + dependencies: + '@types/react': 18.2.59 + react: 18.2.0 + tslib: 2.6.2 + /use-debounce@10.0.0(react@18.2.0): resolution: {integrity: sha512-XRjvlvCB46bah9IBXVnq/ACP2lxqXyZj0D9hj4K5OzNroMDpTEBg8Anuh1/UfRTRs7pLhQ+RiNxxwZu9+MVl1A==} engines: {node: '>= 16.0.0'} @@ -13712,7 +14103,7 @@ packages: react-dom: 18.2.0(react@18.2.0) dev: false - /use-isomorphic-layout-effect@1.1.2(@types/react@18.2.48)(react@18.2.0): + /use-isomorphic-layout-effect@1.1.2(@types/react@18.2.59)(react@18.2.0): resolution: {integrity: sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==} peerDependencies: '@types/react': '*' @@ -13721,24 +14112,10 @@ packages: '@types/react': optional: true dependencies: - '@types/react': 18.2.48 + '@types/react': 18.2.59 react: 18.2.0 dev: false - /use-latest@1.2.1(@types/react@18.2.48)(react@18.2.0): - resolution: {integrity: sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==} - peerDependencies: - '@types/react': '*' - react: ^16.8.0 || ^17.0.0 || ^18.0.0 - peerDependenciesMeta: - '@types/react': - optional: true - dependencies: - '@types/react': 18.2.48 - react: 18.2.0 - use-isomorphic-layout-effect: 1.1.2(@types/react@18.2.48)(react@18.2.0) - dev: 
false - /use-resize-observer@9.1.0(react-dom@18.2.0)(react@18.2.0): resolution: {integrity: sha512-R25VqO9Wb3asSD4eqtcxk8sJalvIOYBqS8MNZlpDSQ4l4xMQxC/J7Id9HoTqPq8FwULIn0PVW+OAqF2dyYbjow==} peerDependencies: @@ -13750,7 +14127,7 @@ packages: react-dom: 18.2.0(react@18.2.0) dev: true - /use-sidecar@1.1.2(@types/react@18.2.48)(react@18.2.0): + /use-sidecar@1.1.2(@types/react@18.2.57)(react@18.2.0): resolution: {integrity: sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==} engines: {node: '>=10'} peerDependencies: @@ -13760,7 +14137,23 @@ packages: '@types/react': optional: true dependencies: - '@types/react': 18.2.48 + '@types/react': 18.2.57 + detect-node-es: 1.1.0 + react: 18.2.0 + tslib: 2.6.2 + dev: false + + /use-sidecar@1.1.2(@types/react@18.2.59)(react@18.2.0): + resolution: {integrity: sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==} + engines: {node: '>=10'} + peerDependencies: + '@types/react': ^16.9.0 || ^17.0.0 || ^18.0.0 + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + peerDependenciesMeta: + '@types/react': + optional: true + dependencies: + '@types/react': 18.2.59 detect-node-es: 1.1.0 react: 18.2.0 tslib: 2.6.2 @@ -13783,8 +14176,8 @@ packages: inherits: 2.0.4 is-arguments: 1.1.1 is-generator-function: 1.0.10 - is-typed-array: 1.1.12 - which-typed-array: 1.1.13 + is-typed-array: 1.1.13 + which-typed-array: 1.1.14 dev: true /utils-merge@1.0.1: @@ -13803,6 +14196,20 @@ packages: spdx-expression-parse: 3.0.1 dev: true + /validate-npm-package-name@4.0.0: + resolution: {integrity: sha512-mzR0L8ZDktZjpX4OB46KT+56MAhl4EIazWP/+G/HPGuvfdaqg4YsCdtOm6U9+LOFyYDoh4dpnpxZRB9MQQns5Q==} + engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} + dependencies: + builtins: 5.0.1 + dev: true + + /validate-npm-package-name@5.0.0: + resolution: {integrity: sha512-YuKoXDAhBYxY7SfOKxHBDoSyENFeW5VvIIQp2TGQuit8gpK6MnWaQelBKxso72DoxTZfZdcP3W90LqpSkgPzLQ==} + engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} + dependencies: + builtins: 5.0.1 + dev: true + /validator@13.11.0: resolution: {integrity: sha512-Ii+sehpSfZy+At5nPdnyMhx78fEoPDkR2XW/zimHEL3MyGJQOCQ7WeP20jPYRz7ZCpcKLB21NxuXHF3bxjStBQ==} engines: {node: '>= 0.10'} @@ -13813,16 +14220,44 @@ packages: engines: {node: '>= 0.8'} dev: true - /vite-plugin-css-injected-by-js@3.3.1(vite@5.0.12): - resolution: {integrity: sha512-PjM/X45DR3/V1K1fTRs8HtZHEQ55kIfdrn+dzaqNBFrOYO073SeSNCxp4j7gSYhV9NffVHaEnOL4myoko0ePAg==} + /version-selector-type@3.0.0: + resolution: {integrity: sha512-PSvMIZS7C1MuVNBXl/CDG2pZq8EXy/NW2dHIdm3bVP5N0PC8utDK8ttXLXj44Gn3J0lQE3U7Mpm1estAOd+eiA==} + engines: {node: '>=10.13'} + dependencies: + semver: 7.6.0 + dev: true + + /vite-node@1.3.1(@types/node@20.11.20): + resolution: {integrity: sha512-azbRrqRxlWTJEVbzInZCTchx0X69M/XPTCz4H+TLvlTcR/xH/3hkRqhOakT41fMJCMzXTu4UvegkZiEoJAWvng==} + engines: {node: ^18.0.0 || >=20.0.0} + hasBin: true + dependencies: + cac: 6.7.14 + debug: 4.3.4 + pathe: 1.1.2 + picocolors: 1.0.0 + vite: 5.1.4(@types/node@20.11.20) + transitivePeerDependencies: + - '@types/node' + - less + - lightningcss + - sass + - stylus + - sugarss + - supports-color + - terser + dev: true + + /vite-plugin-css-injected-by-js@3.4.0(vite@5.1.4): + resolution: {integrity: sha512-wS5+UYtJXQ/vNornsqTQxOLBVO/UjXU54ZsYMeX0mj2OrbStMQ4GLgvneVDQGPwyGJcm/ntBPawc2lA7xx+Lpg==} peerDependencies: vite: '>2.0.0-0' dependencies: - vite: 5.0.12(@types/node@20.11.5) + vite: 5.1.4(@types/node@20.11.20) dev: true - 
/vite-plugin-dts@3.7.1(@types/node@20.11.5)(typescript@5.3.3)(vite@5.0.12): - resolution: {integrity: sha512-VZJckNFpVfRAkmOxhGT5OgTUVWVXxkNQqLpBUuiNGAr9HbtvmvsPLo2JB3Xhn+o/Z9+CT6YZfYa4bX9SGR5hNw==} + /vite-plugin-dts@3.7.3(@types/node@20.11.20)(typescript@5.3.3)(vite@5.1.4): + resolution: {integrity: sha512-26eTlBYdpjRLWCsTJebM8vkCieE+p9gP3raf+ecDnzzK5E3FG6VE1wcy55OkRpfWWVlVvKkYFe6uvRHYWx7Nog==} engines: {node: ^14.18.0 || >=16.0.0} peerDependencies: typescript: '*' @@ -13831,13 +14266,13 @@ packages: vite: optional: true dependencies: - '@microsoft/api-extractor': 7.39.0(@types/node@20.11.5) + '@microsoft/api-extractor': 7.39.0(@types/node@20.11.20) '@rollup/pluginutils': 5.1.0 '@vue/language-core': 1.8.27(typescript@5.3.3) debug: 4.3.4 kolorist: 1.8.0 typescript: 5.3.3 - vite: 5.0.12(@types/node@20.11.5) + vite: 5.1.4(@types/node@20.11.20) vue-tsc: 1.8.27(typescript@5.3.3) transitivePeerDependencies: - '@types/node' @@ -13845,20 +14280,20 @@ packages: - supports-color dev: true - /vite-plugin-eslint@1.8.1(eslint@8.56.0)(vite@5.0.12): + /vite-plugin-eslint@1.8.1(eslint@8.57.0)(vite@5.1.4): resolution: {integrity: sha512-PqdMf3Y2fLO9FsNPmMX+//2BF5SF8nEWspZdgl4kSt7UvHDRHVVfHvxsD7ULYzZrJDGRxR81Nq7TOFgwMnUang==} peerDependencies: eslint: '>=7' vite: '>=2' dependencies: '@rollup/pluginutils': 4.2.1 - '@types/eslint': 8.56.0 - eslint: 8.56.0 + '@types/eslint': 8.56.3 + eslint: 8.57.0 rollup: 2.79.1 - vite: 5.0.12(@types/node@20.11.5) + vite: 5.1.4(@types/node@20.11.20) dev: true - /vite-tsconfig-paths@4.3.1(typescript@5.3.3)(vite@5.0.12): + /vite-tsconfig-paths@4.3.1(typescript@5.3.3)(vite@5.1.4): resolution: {integrity: sha512-cfgJwcGOsIxXOLU/nELPny2/LUD/lcf1IbfyeKTv2bsupVbTH/xpFtdQlBmIP1GEK2CjjLxYhFfB+QODFAx5aw==} peerDependencies: vite: '*' @@ -13868,15 +14303,15 @@ packages: dependencies: debug: 4.3.4 globrex: 0.1.2 - tsconfck: 3.0.1(typescript@5.3.3) - vite: 5.0.12(@types/node@20.11.5) + tsconfck: 3.0.2(typescript@5.3.3) + vite: 5.1.4(@types/node@20.11.20) transitivePeerDependencies: - supports-color - typescript dev: true - /vite@5.0.12(@types/node@20.11.5): - resolution: {integrity: sha512-4hsnEkG3q0N4Tzf1+t6NdN9dg/L3BM+q8SWgbSPnJvrgH2kgdyzfVJwbR1ic69/4uMJJ/3dqDZZE5/WwqW8U1w==} + /vite@5.1.4(@types/node@20.11.20): + resolution: {integrity: sha512-n+MPqzq+d9nMVTKyewqw6kSt+R3CkvF9QAKY8obiQn8g1fwTscKxyfaYnC632HtBXAQGc1Yjomphwn1dtwGAHg==} engines: {node: ^18.0.0 || >=20.0.0} hasBin: true peerDependencies: @@ -13903,14 +14338,74 @@ packages: terser: optional: true dependencies: - '@types/node': 20.11.5 - esbuild: 0.19.11 - postcss: 8.4.33 - rollup: 4.9.4 + '@types/node': 20.11.20 + esbuild: 0.19.12 + postcss: 8.4.35 + rollup: 4.12.0 optionalDependencies: fsevents: 2.3.3 dev: true + /vitest@1.3.1(@types/node@20.11.20): + resolution: {integrity: sha512-/1QJqXs8YbCrfv/GPQ05wAZf2eakUPLPa18vkJAKE7RXOKfVHqMZZ1WlTjiwl6Gcn65M5vpNUB6EFLnEdRdEXQ==} + engines: {node: ^18.0.0 || >=20.0.0} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@types/node': ^18.0.0 || >=20.0.0 + '@vitest/browser': 1.3.1 + '@vitest/ui': 1.3.1 + happy-dom: '*' + jsdom: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@types/node': + optional: true + '@vitest/browser': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + dependencies: + '@types/node': 20.11.20 + '@vitest/expect': 1.3.1 + '@vitest/runner': 1.3.1 + '@vitest/snapshot': 1.3.1 + '@vitest/spy': 1.3.1 + '@vitest/utils': 1.3.1 + acorn-walk: 8.3.2 + chai: 4.4.1 + debug: 4.3.4 + 
execa: 8.0.1 + local-pkg: 0.5.0 + magic-string: 0.30.7 + pathe: 1.1.2 + picocolors: 1.0.0 + std-env: 3.7.0 + strip-literal: 2.0.0 + tinybench: 2.6.0 + tinypool: 0.8.2 + vite: 5.1.4(@types/node@20.11.20) + vite-node: 1.3.1(@types/node@20.11.20) + why-is-node-running: 2.2.2 + transitivePeerDependencies: + - less + - lightningcss + - sass + - stylus + - sugarss + - supports-color + - terser + dev: true + + /vlq@0.2.3: + resolution: {integrity: sha512-DRibZL6DsNhIgYQ+wNdWDL2SL3bKPlVrRiBqV5yuMm++op8W4kGFtaQfCs4KEJn0wBZcHVHJ3eoywX8983k1ow==} + dev: true + /void-elements@3.1.0: resolution: {integrity: sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==} engines: {node: '>=0.10.0'} @@ -13931,15 +14426,10 @@ packages: dependencies: '@volar/typescript': 1.11.1 '@vue/language-core': 1.8.27(typescript@5.3.3) - semver: 7.5.4 + semver: 7.6.0 typescript: 5.3.3 dev: true - /walkdir@0.4.1: - resolution: {integrity: sha512-3eBwRyEln6E1MSzcxcVpQIhRG8Q1jLvEqRmCZqS3dsfXEDR/AhOF4d+jHg1qvDCpYaVRZjENPQyrVxAkQqxPgQ==} - engines: {node: '>=6.0.0'} - dev: true - /walker@1.0.8: resolution: {integrity: sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==} dependencies: @@ -13993,7 +14483,7 @@ packages: engines: {node: '>= 0.4'} dependencies: function.prototype.name: 1.1.6 - has-tostringtag: 1.0.0 + has-tostringtag: 1.0.2 is-async-function: 2.0.0 is-date-object: 1.0.5 is-finalizationregistry: 1.0.2 @@ -14003,7 +14493,7 @@ packages: isarray: 2.0.5 which-boxed-primitive: 1.0.2 which-collection: 1.0.1 - which-typed-array: 1.1.13 + which-typed-array: 1.1.14 dev: true /which-collection@1.0.1: @@ -14015,15 +14505,15 @@ packages: is-weakset: 2.0.2 dev: true - /which-typed-array@1.1.13: - resolution: {integrity: sha512-P5Nra0qjSncduVPEAr7xhoF5guty49ArDTwzJ/yNuPIbZppyRxFQsRCWrocxIY+CnMVG+qfbU2FmDKyvSGClow==} + /which-typed-array@1.1.14: + resolution: {integrity: sha512-VnXFiIW8yNn9kIHN88xvZ4yOWchftKDsRJ8fEPacX/wl1lOvBrhsJ/OeJCXq7B0AaijRuqgzSKalJoPk+D8MPg==} engines: {node: '>= 0.4'} dependencies: - available-typed-arrays: 1.0.5 - call-bind: 1.0.5 + available-typed-arrays: 1.0.7 + call-bind: 1.0.7 for-each: 0.3.3 gopd: 1.0.1 - has-tostringtag: 1.0.0 + has-tostringtag: 1.0.2 dev: true /which@2.0.2: @@ -14034,6 +14524,23 @@ packages: isexe: 2.0.0 dev: true + /which@4.0.0: + resolution: {integrity: sha512-GlaYyEb07DPxYCKhKzplCWBJtvxZcZMrL+4UkrTSJHHPyZU4mYYTv3qaOe77H7EODLSSopAUFAc6W8U4yqvscg==} + engines: {node: ^16.13.0 || >=18.0.0} + hasBin: true + dependencies: + isexe: 3.1.1 + dev: true + + /why-is-node-running@2.2.2: + resolution: {integrity: sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==} + engines: {node: '>=8'} + hasBin: true + dependencies: + siginfo: 2.0.0 + stackback: 0.0.2 + dev: true + /wordwrap@1.0.0: resolution: {integrity: sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==} dev: true @@ -14154,7 +14661,7 @@ packages: engines: {node: '>=12'} dependencies: cliui: 8.0.1 - escalade: 3.1.1 + escalade: 3.1.2 get-caller-file: 2.0.5 require-directory: 2.1.1 string-width: 4.2.3 @@ -14174,6 +14681,11 @@ packages: engines: {node: '>=10'} dev: true + /yocto-queue@1.0.0: + resolution: {integrity: sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==} + engines: {node: '>=12.20'} + dev: true + /z-schema@5.0.5: resolution: {integrity: 
sha512-D7eujBWkLa3p2sIpJA0d1pr7es+a7m0vFAnZLlCEKq/Ij2k0MLi9Br2UPxoxdYystm5K1yeBGzub0FlYUEWj2Q==} engines: {node: '>=8.0.0'} @@ -14186,25 +14698,23 @@ packages: commander: 9.5.0 dev: true - /zod-validation-error@3.0.0(zod@3.22.4): - resolution: {integrity: sha512-x+agsJJG9rvC7axF0xqTEdZhJkLHyIZkdOAWDJSmwGPzxNHMHwtU6w2yDOAAP6yuSfTAUhAMJRBfhVGY64ySEQ==} + /zod-validation-error@3.0.2(zod@3.22.4): + resolution: {integrity: sha512-21xGaDmnU7lJZ4J63n5GXWqi+rTzGy3gDHbuZ1jP6xrK/DEQGyOqs/xW7eH96tIfCOYm+ecCuT0bfajBRKEVUw==} engines: {node: '>=18.0.0'} peerDependencies: zod: ^3.18.0 dependencies: zod: 3.22.4 - dev: false /zod@3.22.4: resolution: {integrity: sha512-iC+8Io04lddc+mVqQ9AZ7OQ2MrUKGN+oIQyq1vemgt46jwCwLfhq7/pwnBnNXXXZb8VTVLKwp9EDkx+ryxIWmg==} - dev: false - /zustand@4.4.7(@types/react@18.2.48)(react@18.2.0): - resolution: {integrity: sha512-QFJWJMdlETcI69paJwhSMJz7PPWjVP8Sjhclxmxmxv/RYI7ZOvR5BHX+ktH0we9gTWQMxcne8q1OY8xxz604gw==} + /zustand@4.5.1(@types/react@18.2.59)(react@18.2.0): + resolution: {integrity: sha512-XlauQmH64xXSC1qGYNv00ODaQ3B+tNPoy22jv2diYiP4eoDKr9LA+Bh5Bc3gplTrFdb6JVI+N4kc1DZ/tbtfPg==} engines: {node: '>=12.7.0'} peerDependencies: '@types/react': '>=16.8' - immer: '>=9.0' + immer: '>=9.0.6' react: '>=16.8' peerDependenciesMeta: '@types/react': @@ -14214,7 +14724,7 @@ packages: react: optional: true dependencies: - '@types/react': 18.2.48 + '@types/react': 18.2.59 react: 18.2.0 use-sync-external-store: 1.2.0(react@18.2.0) dev: false diff --git a/invokeai/frontend/web/public/locales/de.json b/invokeai/frontend/web/public/locales/de.json index 3dca1267dc..65aa7b2a7a 100644 --- a/invokeai/frontend/web/public/locales/de.json +++ b/invokeai/frontend/web/public/locales/de.json @@ -81,7 +81,7 @@ "outputs": "Ausgabe", "data": "Daten", "safetensors": "Safe-Tensors", - "outpaint": "Ausmalen", + "outpaint": "Outpaint (Außen ausmalen)", "details": "Details", "format": "Format", "unknown": "Unbekannt", @@ -110,17 +110,18 @@ "nextPage": "Nächste Seite", "unknownError": "Unbekannter Fehler", "unsaved": "Nicht gespeichert", - "aboutDesc": "Verwenden Sie Invoke für die Arbeit? Dann siehe hier:", + "aboutDesc": "Verwenden Sie Invoke für die Arbeit? 
Siehe hier:", "localSystem": "Lokales System", "orderBy": "Ordnen nach", - "saveAs": "Speicher als", + "saveAs": "Speichern als", "updated": "Aktualisiert", "copy": "Kopieren", - "aboutHeading": "Nutzen Sie Ihre kreative Energie" + "aboutHeading": "Nutzen Sie Ihre kreative Energie", + "toResolve": "Lösen" }, "gallery": { "generations": "Erzeugungen", - "showGenerations": "Zeige Erzeugnisse", + "showGenerations": "Zeige Ergebnisse", "uploads": "Uploads", "showUploads": "Zeige Uploads", "galleryImageSize": "Bildgröße", @@ -150,9 +151,9 @@ "problemDeletingImagesDesc": "Ein oder mehrere Bilder konnten nicht gelöscht werden", "starImage": "Bild markieren", "assets": "Ressourcen", - "unstarImage": "Markierung Entfernen", + "unstarImage": "Markierung entfernen", "image": "Bild", - "deleteSelection": "Lösche markierte", + "deleteSelection": "Lösche Auswahl", "dropToUpload": "$t(gallery.drop) zum hochladen", "dropOrUpload": "$t(gallery.drop) oder hochladen", "drop": "Ablegen", @@ -590,10 +591,21 @@ "general": "Allgemein", "hiresStrength": "High Res Stärke", "hidePreview": "Verstecke Vorschau", - "showPreview": "Zeige Vorschau" + "showPreview": "Zeige Vorschau", + "aspect": "Seitenverhältnis", + "aspectRatio": "Seitenverhältnis", + "scheduler": "Planer", + "aspectRatioFree": "Frei", + "setToOptimalSizeTooLarge": "$t(parameters.setToOptimalSize) (kann zu groß sein)", + "lockAspectRatio": "Seitenverhältnis sperren", + "swapDimensions": "Seitenverhältnis umkehren", + "setToOptimalSize": "Optimiere Größe für Modell", + "useSize": "Maße übernehmen", + "remixImage": "Remix des Bilds erstellen", + "imageActions": "Weitere Bildaktionen" }, "settings": { - "displayInProgress": "Bilder in Bearbeitung anzeigen", + "displayInProgress": "Zwischenbilder anzeigen", "saveSteps": "Speichern der Bilder alle n Schritte", "confirmOnDelete": "Bestätigen beim Löschen", "displayHelpIcons": "Hilfesymbole anzeigen", @@ -606,7 +618,34 @@ "useSlidersForAll": "Schieberegler für alle Optionen verwenden", "showAdvancedOptions": "Erweiterte Optionen anzeigen", "alternateCanvasLayout": "Alternatives Leinwand-Layout", - "clearIntermediatesDesc1": "Das Löschen der Zwischenprodukte setzt Leinwand und ControlNet zurück." 
+ "clearIntermediatesDesc1": "Das Löschen der Zwischenbilder setzt Leinwand und ControlNet zurück.", + "favoriteSchedulers": "Lieblings-Planer", + "favoriteSchedulersPlaceholder": "Keine Planer favorisiert", + "generation": "Erzeugung", + "enableInformationalPopovers": "Info-Popouts anzeigen", + "shouldLogToConsole": "Konsole loggen", + "showProgressInViewer": "Zwischenbilder im Viewer anzeigen", + "clearIntermediatesDesc3": "Ihre Bilder werden nicht gelöscht.", + "clearIntermediatesWithCount_one": "Lösche {{count}} Zwischenbilder", + "clearIntermediatesWithCount_other": "Lösche {{count}} Zwischenbilder", + "reloadingIn": "Neuladen in", + "enableNodesEditor": "Nodes Editor aktivieren", + "autoChangeDimensions": "Breite/Höhe auf Modellstandard setzen", + "experimental": "Experimentell", + "intermediatesCleared_one": "{{count}} Zwischenbilder gelöscht", + "intermediatesCleared_other": "{{count}} Zwischenbilder gelöscht", + "enableInvisibleWatermark": "Unsichtbares Wasserzeichen aktivieren", + "general": "Allgemein", + "consoleLogLevel": "Protokollierungsstufe", + "clearIntermediatesDisabled": "Warteschlange muss leer sein, um Zwischenbilder zu löschen", + "developer": "Entwickler", + "antialiasProgressImages": "Zwischenbilder mit Anti-Alias", + "beta": "Beta", + "ui": "Benutzeroberfläche", + "clearIntermediatesDesc2": "Zwischenbilder sind Nebenprodukte der Erstellung. Sie zu löschen macht Festplattenspeicher frei.", + "clearIntermediates": "Zwischenbilder löschen", + "intermediatesClearedFailed": "Problem beim Löschen der Zwischenbilder", + "enableNSFWChecker": "Auf unangemessene Inhalte prüfen" }, "toast": { "tempFoldersEmptied": "Temp-Ordner geleert", @@ -651,7 +690,9 @@ "problemCopyingCanvas": "Problem beim Kopieren der Leinwand", "problemCopyingCanvasDesc": "Kann Basis-Layer nicht exportieren", "problemDownloadingCanvas": "Problem beim Herunterladen der Leinwand", - "setAsCanvasInitialImage": "Als Ausgangsbild gesetzt" + "setAsCanvasInitialImage": "Als Ausgangsbild gesetzt", + "addedToBoard": "Dem Board hinzugefügt", + "loadedWithWarnings": "Workflow mit Warnungen geladen" }, "tooltip": { "feature": { @@ -733,23 +774,23 @@ "accessibility": { "modelSelect": "Modell-Auswahl", "uploadImage": "Bild hochladen", - "previousImage": "Voriges Bild", + "previousImage": "Vorheriges Bild", "useThisParameter": "Benutze diesen Parameter", - "copyMetadataJson": "Kopiere Metadaten JSON", + "copyMetadataJson": "Kopiere JSON-Metadaten", "zoomIn": "Vergrößern", "rotateClockwise": "Im Uhrzeigersinn drehen", "flipHorizontally": "Horizontal drehen", "flipVertically": "Vertikal drehen", "modifyConfig": "Optionen einstellen", "toggleAutoscroll": "Auroscroll ein/ausschalten", - "toggleLogViewer": "Log Betrachter ein/ausschalten", + "toggleLogViewer": "Log-Betrachter ein/ausschalten", "showOptionsPanel": "Seitenpanel anzeigen", "reset": "Zurücksetzten", "nextImage": "Nächstes Bild", "zoomOut": "Verkleinern", "rotateCounterClockwise": "Gegen den Uhrzeigersinn drehen", - "showGalleryPanel": "Galeriefenster anzeigen", - "exitViewer": "Betrachten beenden", + "showGalleryPanel": "Galerie-Panel anzeigen", + "exitViewer": "Betrachter beenden", "menu": "Menü", "loadMore": "Mehr laden", "invokeProgressBar": "Invoke Fortschrittsanzeige", @@ -759,7 +800,7 @@ "about": "Über" }, "boards": { - "autoAddBoard": "Automatisches Hinzufügen zum Ordner", + "autoAddBoard": "Automatisches Hinzufügen zum Board", "topMessage": "Dieser Ordner enthält Bilder die in den folgenden Funktionen verwendet werden:", "move": "Bewegen", 
"menuItemAutoAdd": "Auto-Hinzufügen zu diesem Ordner", @@ -768,13 +809,13 @@ "noMatching": "Keine passenden Ordner", "selectBoard": "Ordner aussuchen", "cancel": "Abbrechen", - "addBoard": "Ordner hinzufügen", + "addBoard": "Board hinzufügen", "uncategorized": "Ohne Kategorie", "downloadBoard": "Ordner runterladen", "changeBoard": "Ordner wechseln", "loading": "Laden...", "clearSearch": "Suche leeren", - "bottomMessage": "Durch das Löschen dieses Ordners und seiner Bilder werden alle Funktionen zurückgesetzt, die sie derzeit verwenden.", + "bottomMessage": "Löschen des Boards und seiner Bilder setzt alle Funktionen zurück, die sie gerade verwenden.", "deleteBoardOnly": "Nur Ordner löschen", "deleteBoard": "Löschen Ordner", "deleteBoardAndImages": "Löschen Ordner und Bilder", @@ -820,7 +861,7 @@ "colorMap": "Farbe", "lowThreshold": "Niedrige Schwelle", "highThreshold": "Hohe Schwelle", - "toggleControlNet": "Schalten ControlNet um", + "toggleControlNet": "Dieses ControlNet ein- oder ausschalten", "delete": "Löschen", "controlAdapter_one": "Control Adapter", "controlAdapter_other": "Control Adapter", @@ -865,18 +906,23 @@ "maxFaces": "Maximale Anzahl Gesichter", "resizeSimple": "Größe ändern (einfach)", "large": "Groß", - "modelSize": "Modell Größe", + "modelSize": "Modellgröße", "small": "Klein", "base": "Basis", - "depthAnything": "Depth Anything / \"Tiefe irgendwas\"", - "depthAnythingDescription": "Erstellung einer Tiefenkarte mit der Depth Anything-Technik" + "depthAnything": "Depth Anything", + "depthAnythingDescription": "Erstellung einer Tiefenkarte mit der Depth-Anything-Technik", + "face": "Gesicht", + "body": "Körper", + "hands": "Hände", + "dwOpenpose": "DW Openpose", + "dwOpenposeDescription": "Posenschätzung mit DW Openpose" }, "queue": { "status": "Status", "cancelTooltip": "Aktuellen Aufgabe abbrechen", "queueEmpty": "Warteschlange leer", "in_progress": "In Arbeit", - "queueFront": "An den Anfang der Warteschlange tun", + "queueFront": "Am Anfang der Warteschlange einreihen", "completed": "Fertig", "queueBack": "In die Warteschlange", "clearFailed": "Probleme beim leeren der Warteschlange", @@ -904,7 +950,7 @@ "batchValues": "Stapel Werte", "queueCountPrediction": "{{promptsCount}} Prompts × {{iterations}} Iterationen -> {{count}} Generationen", "queuedCount": "{{pending}} wartenden Elemente", - "clearQueueAlertDialog": "Die Warteschlange leeren, stoppt den aktuellen Prozess und leert die Warteschlange komplett.", + "clearQueueAlertDialog": "\"Die Warteschlange leeren\" stoppt den aktuellen Prozess und leert die Warteschlange komplett.", "completedIn": "Fertig in", "cancelBatchSucceeded": "Stapel abgebrochen", "cancelBatch": "Stapel stoppen", @@ -913,20 +959,20 @@ "cancelBatchFailed": "Problem beim Abbruch vom Stapel", "clearQueueAlertDialog2": "Warteschlange wirklich leeren?", "pruneSucceeded": "{{item_count}} abgeschlossene Elemente aus der Warteschlange entfernt", - "pauseSucceeded": "Prozessor angehalten", + "pauseSucceeded": "Prozess angehalten", "cancelFailed": "Problem beim Stornieren des Auftrags", - "pauseFailed": "Problem beim Anhalten des Prozessors", + "pauseFailed": "Problem beim Anhalten des Prozesses", "front": "Vorne", "pruneTooltip": "Bereinigen Sie {{item_count}} abgeschlossene Aufträge", - "resumeFailed": "Problem beim wieder aufnehmen von Prozessor", + "resumeFailed": "Problem beim Fortsetzen des Prozesses", "pruneFailed": "Problem beim leeren der Warteschlange", - "pauseTooltip": "Pause von Prozessor", + "pauseTooltip": "Prozess anhalten", "back": 
"Hinten", - "resumeSucceeded": "Prozessor wieder aufgenommen", - "resumeTooltip": "Prozessor wieder aufnehmen", + "resumeSucceeded": "Prozess wird fortgesetzt", + "resumeTooltip": "Prozess wieder aufnehmen", "time": "Zeit", - "batchQueuedDesc_one": "{{count}} Eintrag ans {{direction}} der Wartschlange hinzugefügt", - "batchQueuedDesc_other": "{{count}} Einträge ans {{direction}} der Wartschlange hinzugefügt", + "batchQueuedDesc_one": "{{count}} Eintrag an {{direction}} der Wartschlange hinzugefügt", + "batchQueuedDesc_other": "{{count}} Einträge an {{direction}} der Wartschlange hinzugefügt", "openQueue": "Warteschlange öffnen", "batchFailedToQueue": "Fehler beim Einreihen in die Stapelverarbeitung", "batchFieldValues": "Stapelverarbeitungswerte", @@ -961,11 +1007,12 @@ "workflow": "Workflow", "scheduler": "Planer", "noRecallParameters": "Es wurden keine Parameter zum Abrufen gefunden", - "recallParameters": "Parameter wiederherstellen" + "recallParameters": "Parameter wiederherstellen", + "cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)" }, "popovers": { "noiseUseCPU": { - "heading": "Nutze Prozessor rauschen", + "heading": "Nutze CPU-Rauschen", "paragraphs": [ "Entscheidet, ob auf der CPU oder GPU Rauschen erzeugt wird.", "Mit aktiviertem CPU-Rauschen wird ein bestimmter Seedwert das gleiche Bild auf jeder Maschine erzeugen.", @@ -975,8 +1022,7 @@ "paramModel": { "heading": "Modell", "paragraphs": [ - "Modell für die Entrauschungsschritte.", - "Verschiedene Modelle werden in der Regel so trainiert, dass sie sich auf die Erzeugung bestimmter Ästhetik und/oder Inhalte spezialisiert." + "Modell für die Entrauschungsschritte." ] }, "paramIterations": { @@ -1084,12 +1130,23 @@ "Wie stark wird das ControlNet das generierte Bild beeinflussen wird." ], "heading": "Einfluss" + }, + "paramScheduler": { + "paragraphs": [ + "\"Planer\" definiert, wie iterativ Rauschen zu einem Bild hinzugefügt wird, oder wie ein Sample bei der Ausgabe eines Modells aktualisiert wird." + ], + "heading": "Planer" + }, + "imageFit": { + "paragraphs": [ + "Reduziert das Ausgangsbild auf die Breite und Höhe des Ausgangsbildes. Empfohlen zu aktivieren." 
+ ] } }, "ui": { "lockRatio": "Verhältnis sperren", - "hideProgressImages": "Verstecke Prozess Bild", - "showProgressImages": "Zeige Prozess Bild", + "hideProgressImages": "Fortschrittsbilder verbergen", + "showProgressImages": "Fortschrittsbilder anzeigen", "swapSizes": "Tausche Größen" }, "invocationCache": { @@ -1287,7 +1344,19 @@ "vaeFieldDescription": "VAE Submodell.", "unknownInput": "Unbekannte Eingabe: {{name}}", "unknownNodeType": "Unbekannter Knotentyp", - "float": "Kommazahlen" + "float": "Kommazahlen", + "latentsPolymorphic": "Latents Polymorph", + "integerPolymorphicDescription": "Eine Sammlung von ganzen Zahlen.", + "integerPolymorphic": "Ganze Zahl Polymorph", + "ipAdapterPolymorphic": "IP-Adapter Polymorph", + "floatPolymorphic": "Fließkommazahl Polymorph", + "enumDescription": "Aufzählungen sind Werte, die eine von mehreren Optionen sein können.", + "floatCollection": "Fließkommazahl Sammlung", + "enum": "Aufzählung", + "floatPolymorphicDescription": "Eine Sammlung von Fließkommazahlen", + "fullyContainNodes": "Vollständig ausgewählte Nodes auswählen", + "editMode": "Im Workflow-Editor bearbeiten", + "floatCollectionDescription": "Eine Sammlung von Fließkommazahlen" }, "hrf": { "enableHrf": "Korrektur für hohe Auflösungen", @@ -1336,12 +1405,12 @@ }, "control": { "title": "Kontrolle", - "controlAdaptersTab": "Kontroll Adapter", - "ipTab": "Bild Beschreibung" + "controlAdaptersTab": "Kontroll-Adapter", + "ipTab": "Bild-Prompts" }, "compositing": { "coherenceTab": "Kohärenzpass", - "infillTab": "Füllung", + "infillTab": "Füllung / Infill", "title": "Compositing" } }, @@ -1379,5 +1448,15 @@ }, "app": { "storeNotInitialized": "App-Store ist nicht initialisiert" + }, + "sdxl": { + "concatPromptStyle": "Verknüpfen von Prompt & Stil", + "scheduler": "Planer", + "steps": "Schritte", + "useRefiner": "Refiner verwenden", + "selectAModel": "Modell auswählen" + }, + "dynamicPrompts": { + "showDynamicPrompts": "Dynamische Prompts anzeigen" } } diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json index 0fe833db39..4065b0db86 100644 --- a/invokeai/frontend/web/public/locales/en.json +++ b/invokeai/frontend/web/public/locales/en.json @@ -175,6 +175,7 @@ "statusUpscaling": "Upscaling", "statusUpscalingESRGAN": "Upscaling (ESRGAN)", "template": "Template", + "toResolve": "To resolve", "training": "Training", "trainingDesc1": "A dedicated workflow for training your own embeddings and checkpoints using Textual Inversion and Dreambooth from the web interface.", "trainingDesc2": "InvokeAI already supports training custom embeddourings using Textual Inversion using the main script.", @@ -423,8 +424,11 @@ "uploads": "Uploads", "deleteSelection": "Delete Selection", "downloadSelection": "Download Selection", - "preparingDownload": "Preparing Download", - "preparingDownloadFailed": "Problem Preparing Download", + "bulkDownloadRequested": "Preparing Download", + "bulkDownloadRequestedDesc": "Your download request is being prepared. 
This may take a few moments.", + "bulkDownloadRequestFailed": "Problem Preparing Download", + "bulkDownloadStarting": "Download Starting", + "bulkDownloadFailed": "Download Failed", "problemDeletingImages": "Problem Deleting Images", "problemDeletingImagesDesc": "One or more images could not be deleted" }, @@ -652,6 +656,7 @@ } }, "metadata": { + "allPrompts": "All Prompts", "cfgScale": "CFG scale", "cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)", "createdBy": "Created By", @@ -660,6 +665,7 @@ "height": "Height", "hiresFix": "High Resolution Optimization", "imageDetails": "Image Details", + "imageDimensions": "Image Dimensions", "initImage": "Initial image", "metadata": "Metadata", "model": "Model", @@ -667,9 +673,12 @@ "noImageDetails": "No image details found", "noMetaData": "No metadata found", "noRecallParameters": "No parameters to recall found", + "parameterSet": "Parameter {{parameter}} set", + "parsingFailed": "Parsing Failed", "perlin": "Perlin Noise", "positivePrompt": "Positive Prompt", "recallParameters": "Recall Parameters", + "recallParameter": "Recall {{label}}", "scheduler": "Scheduler", "seamless": "Seamless", "seed": "Seed", @@ -683,20 +692,24 @@ }, "modelManager": { "active": "active", + "addAll": "Add All", "addCheckpointModel": "Add Checkpoint / Safetensor Model", "addDifference": "Add Difference", "addDiffuserModel": "Add Diffusers", "addManually": "Add Manually", "addModel": "Add Model", + "addModels": "Add Models", "addNew": "Add New", "addNewModel": "Add New Model", "addSelected": "Add Selected", "advanced": "Advanced", + "advancedImportInfo": "The advanced tab allows for manual configuration of core model settings. Only use this tab if you are confident that you know the correct model type and configuration for the selected model.", "allModels": "All Models", "alpha": "Alpha", "availableModels": "Available Models", "baseModel": "Base Model", "cached": "cached", + "cancel": "Cancel", "cannotUseSpaces": "Cannot Use Spaces", "checkpointFolder": "Checkpoint Folder", "checkpointModels": "Checkpoints", @@ -730,6 +743,7 @@ "descriptionValidationMsg": "Add a description for your model", "deselectAll": "Deselect All", "diffusersModels": "Diffusers", + "edit": "Edit", "findModels": "Find Models", "formMessageDiffusersModelLocation": "Diffusers Model Location", "formMessageDiffusersModelLocationDesc": "Please enter at least one.", @@ -738,7 +752,9 @@ "height": "Height", "heightValidationMsg": "Default height of your model.", "ignoreMismatch": "Ignore Mismatches Between Selected Models", + "imageEncoderModelId": "Image Encoder Model ID", "importModels": "Import Models", + "importQueue": "Import Queue", "inpainting": "v1 Inpainting", "interpolationType": "Interpolation Type", "inverseSigmoid": "Inverse Sigmoid", @@ -767,8 +783,11 @@ "modelMergeHeaderHelp1": "You can merge up to three different models to create a blend that suits your needs.", "modelMergeHeaderHelp2": "Only Diffusers are available for merging. If you want to merge a checkpoint model, please convert it to Diffusers first.", "modelMergeInterpAddDifferenceHelp": "In this mode, Model 3 is first subtracted from Model 2. 
The resulting version is blended with Model 1 with the alpha rate set above.", + "modelMetadata": "Model Metadata", + "modelName": "Model Name", "modelOne": "Model 1", "modelsFound": "Models Found", + "modelSettings": "Model Settings", "modelsMerged": "Models Merged", "modelsMergeFailed": "Model Merge Failed", "modelsSynced": "Models Synced", @@ -788,16 +807,24 @@ "notLoaded": "not loaded", "oliveModels": "Olives", "onnxModels": "Onnx", + "path": "Path", "pathToCustomConfig": "Path To Custom Config", "pickModelType": "Pick Model Type", - "predictionType": "Prediction Type (for Stable Diffusion 2.x Models and occasional Stable Diffusion 1.x Models)", + "predictionType": "Prediction Type", + "prune": "Prune", + "pruneTooltip": "Prune finished imports from queue", "quickAdd": "Quick Add", + "removeFromQueue": "Remove From Queue", "repo_id": "Repo ID", "repoIDValidationMsg": "Online repository of your model", + "repoVariant": "Repo Variant", "safetensorModels": "SafeTensors", "sameFolder": "Same folder", + "scan": "Scan", + "scanFolder": "Scan folder", "scanAgain": "Scan Again", "scanForModels": "Scan For Models", + "scanResults": "Scan Results", "search": "Search", "selectAll": "Select All", "selectAndAdd": "Select and Add Models Listed Below", @@ -808,9 +835,11 @@ "showExisting": "Show Existing", "sigmoid": "Sigmoid", "simpleModelDesc": "Provide a path to a local Diffusers model, local checkpoint / safetensors model, a HuggingFace Repo ID, or a checkpoint/diffusers model URL.", + "source": "Source", "statusConverting": "Converting", "syncModels": "Sync Models", - "syncModelsDesc": "If your models are out of sync with the backend, you can refresh them up using this option. This is generally handy in cases where you manually update your models.yaml file or add models to the InvokeAI root folder after the application has booted.", + "syncModelsDesc": "If your models are out of sync with the backend, you can refresh them using this option. This is generally handy in cases where you add models to the InvokeAI root folder or autoimport directory after the application has booted.", + "upcastAttention": "Upcast Attention", "updateModel": "Update Model", "useCustomConfig": "Use Custom Config", "v1": "v1", @@ -825,7 +854,8 @@ "variant": "Variant", "weightedSum": "Weighted Sum", "width": "Width", - "widthValidationMsg": "Default width of your model."
+ "widthValidationMsg": "Default width of your model.", + "ztsnrTraining": "ZTSNR Training" }, "models": { "addLora": "Add LoRA", @@ -900,6 +930,7 @@ "doesNotExist": "does not exist", "downloadWorkflow": "Download Workflow JSON", "edge": "Edge", + "editMode": "Edit in Workflow Editor", "enum": "Enum", "enumDescription": "Enums are values that may be one of a number of options.", "executionStateCompleted": "Completed", @@ -995,8 +1026,10 @@ "problemReadingMetadata": "Problem reading metadata from image", "problemReadingWorkflow": "Problem reading workflow from image", "problemSettingTitle": "Problem Setting Title", + "resetToDefaultValue": "Reset to default value", "reloadNodeTemplates": "Reload Node Templates", "removeLinearView": "Remove from Linear View", + "reorderLinearView": "Reorder Linear View", "newWorkflow": "New Workflow", "newWorkflowDesc": "Create a new workflow?", "newWorkflowDesc2": "Your current workflow has unsaved changes.", @@ -1067,6 +1100,7 @@ "vaeModelFieldDescription": "TODO", "validateConnections": "Validate Connections and Graph", "validateConnectionsHelp": "Prevent invalid connections from being made, and invalid graphs from being invoked", + "viewMode": "Use in Linear View", "unableToGetWorkflowVersion": "Unable to get workflow schema version", "unrecognizedWorkflowVersion": "Unrecognized workflow schema version {{version}}", "version": "Version", @@ -1115,8 +1149,8 @@ "codeformerFidelity": "Fidelity", "coherenceMode": "Mode", "coherencePassHeader": "Coherence Pass", - "coherenceSteps": "Steps", - "coherenceStrength": "Strength", + "coherenceEdgeSize": "Edge Size", + "coherenceMinDenoise": "Min Denoise", "compositingSettingsHeader": "Compositing Settings", "controlNetControlMode": "Control Mode", "copyImage": "Copy Image", @@ -1160,8 +1194,8 @@ "unableToInvoke": "Unable to Invoke" }, "maskAdjustmentsHeader": "Mask Adjustments", - "maskBlur": "Blur", - "maskBlurMethod": "Blur Method", + "maskBlur": "Mask Blur", + "maskBlurMethod": "Mask Blur Method", "maskEdge": "Mask Edge", "negativePromptPlaceholder": "Negative Prompt", "noiseSettings": "Noise", @@ -1343,6 +1377,8 @@ "modelAdded": "Model Added: {{modelName}}", "modelAddedSimple": "Model Added", "modelAddFailed": "Model Add Failed", + "modelImportCanceled": "Model Import Canceled", + "modelImportRemoved": "Model Import Removed", "nodesBrokenConnections": "Cannot load. Some connections are broken.", "nodesCorruptedGraph": "Cannot load. Graph seems to be corrupted.", "nodesLoaded": "Nodes Loaded", @@ -1351,8 +1387,8 @@ "nodesNotValidJSON": "Not a valid JSON", "nodesSaved": "Nodes Saved", "nodesUnrecognizedTypes": "Cannot load. Graph has unrecognized types", - "parameterNotSet": "Parameter not set", - "parameterSet": "Parameter set", + "parameterNotSet": "{{parameter}} not set", + "parameterSet": "{{parameter}} set", "parametersFailed": "Problem loading parameters", "parametersFailedDesc": "Unable to load init image.", "parametersNotSet": "Parameters Not Set", @@ -1376,6 +1412,7 @@ "promptNotSet": "Prompt Not Set", "promptNotSetDesc": "Could not find prompt for this image.", "promptSet": "Prompt Set", + "prunedQueue": "Pruned Queue", "resetInitialImage": "Reset Initial Image", "seedNotSet": "Seed Not Set", "seedNotSetDesc": "Could not find seed for this image.", @@ -1419,9 +1456,8 @@ "clipSkip": { "heading": "CLIP Skip", "paragraphs": [ - "Choose how many layers of the CLIP model to skip.", - "Some models work better with certain CLIP Skip settings.", - "A higher value typically results in a less detailed image." 
+ "How many layers of the CLIP model to skip.", + "Certain models are better suited to be used with CLIP Skip." ] }, "paramNegativeConditioning": { @@ -1441,11 +1477,12 @@ "paramScheduler": { "heading": "Scheduler", "paragraphs": [ - "Scheduler defines how to iteratively add noise to an image or how to update a sample based on a model's output." + "Scheduler used during the generation process.", + "Each scheduler defines how to iteratively add noise to an image or how to update a sample based on a model's output." ] }, - "compositingBlur": { - "heading": "Blur", + "compositingMaskBlur": { + "heading": "Mask Blur", "paragraphs": ["The blur radius of the mask."] }, "compositingBlurMethod": { @@ -1458,47 +1495,55 @@ }, "compositingCoherenceMode": { "heading": "Mode", - "paragraphs": ["The mode of the Coherence Pass."] + "paragraphs": ["Method used to create a coherent image with the newly generated masked area."] }, - "compositingCoherenceSteps": { - "heading": "Steps", - "paragraphs": ["Number of denoising steps used in the Coherence Pass.", "Same as the main Steps parameter."] + "compositingCoherenceEdgeSize": { + "heading": "Edge Size", + "paragraphs": ["The edge size of the coherence pass."] }, - "compositingStrength": { - "heading": "Strength", + "compositingCoherenceMinDenoise": { + "heading": "Minimum Denoise", "paragraphs": [ - "Denoising strength for the Coherence Pass.", - "Same as the Image to Image Denoising Strength parameter." + "Minimum denoise strength for the Coherence mode", + "The minimum denoise strength for the coherence region when inpainting or outpainting" ] }, "compositingMaskAdjustments": { "heading": "Mask Adjustments", "paragraphs": ["Adjust the mask."] }, - "controlNetBeginEnd": { - "heading": "Begin / End Step Percentage", - "paragraphs": [ - "Which steps of the denoising process will have the ControlNet applied.", - "ControlNets applied at the beginning of the process guide composition, and ControlNets applied at the end guide details." - ] - }, - "controlNetControlMode": { - "heading": "Control Mode", - "paragraphs": ["Lends more weight to either the prompt or ControlNet."] - }, - "controlNetResizeMode": { - "heading": "Resize Mode", - "paragraphs": ["How the ControlNet image will be fit to the image output size."] - }, "controlNet": { "heading": "ControlNet", "paragraphs": [ "ControlNets provide guidance to the generation process, helping create images with controlled composition, structure, or style, depending on the model selected." ] }, + "controlNetBeginEnd": { + "heading": "Begin / End Step Percentage", + "paragraphs": [ + "The part of the of the denoising process that will have the Control Adapter applied.", + "Generally, Control Adapters applied at the start of the process guide composition, and Control Adapters applied at the end guide details." + ] + }, + "controlNetControlMode": { + "heading": "Control Mode", + "paragraphs": ["Lend more weight to either the prompt or ControlNet."] + }, + "controlNetProcessor": { + "heading": "Processor", + "paragraphs": [ + "Method of processing the input image to guide the generation process. Different processors will providedifferent effects or styles in your generated images." 
+ ] + }, + "controlNetResizeMode": { + "heading": "Resize Mode", + "paragraphs": ["Method to fit Control Adapter's input image size to the output generation size."] + }, "controlNetWeight": { "heading": "Weight", - "paragraphs": ["How strongly the ControlNet will impact the generated image."] + "paragraphs": [ + "Weight of the Control Adapter. Higher weight will lead to larger impacts on the final image." + ] }, "dynamicPrompts": { "heading": "Dynamic Prompts", @@ -1521,13 +1566,23 @@ "Per Image will use a unique seed for each image. This provides more variation." ] }, + "imageFit": { + "heading": "Fit Initial Image to Output Size", + "paragraphs": [ + "Resizes the initial image to the width and height of the output image. Recommended to enable." + ] + }, "infillMethod": { "heading": "Infill Method", - "paragraphs": ["Method to infill the selected area."] + "paragraphs": ["Method of infilling during the Outpainting or Inpainting process."] }, "lora": { - "heading": "LoRA Weight", - "paragraphs": ["Higher LoRA weight will lead to larger impacts on the final image."] + "heading": "LoRA", + "paragraphs": ["Lightweight models that are used in conjunction with base models."] + }, + "loraWeight": { + "heading": "Weight", + "paragraphs": ["Weight of the LoRA. Higher weight will lead to larger impacts on the final image."] }, "noiseUseCPU": { "heading": "Use CPU Noise", @@ -1537,14 +1592,25 @@ "There is no performance impact to enabling CPU Noise." ] }, + "paramAspect": { + "heading": "Aspect", + "paragraphs": [ + "Aspect ratio of the generated image. Changing the ratio will update the Width and Height accordingly.", + "“Optimize” will set the Width and Height to optimal dimensions for the chosen model." + ] + }, "paramCFGScale": { "heading": "CFG Scale", - "paragraphs": ["Controls how much your prompt influences the generation process."] + "paragraphs": [ + "Controls how much the prompt influences the generation process.", + "High CFG Scale values can result in over-saturation and distorted generation results. " + ] }, "paramCFGRescaleMultiplier": { "heading": "CFG Rescale Multiplier", "paragraphs": [ - "Rescale multiplier for CFG guidance, used for models trained using zero-terminal SNR (ztsnr). Suggested value 0.7." + "Rescale multiplier for CFG guidance, used for models trained using zero-terminal SNR (ztsnr).", + "Suggested value of 0.7 for these models." ] }, "paramDenoisingStrength": { @@ -1554,6 +1620,16 @@ "0 will result in an identical image, while 1 will result in a completely new image." ] }, + "paramHeight": { + "heading": "Height", + "paragraphs": ["Height of the generated image. Must be a multiple of 8."] + }, + "paramHrf": { + "heading": "Enable High Resolution Fix", + "paragraphs": [ + "Generate high quality images at a larger resolution than optimal for the model. Generally used to prevent duplication in the generated image." + ] + }, "paramIterations": { "heading": "Iterations", "paragraphs": [ @@ -1564,8 +1640,7 @@ "paramModel": { "heading": "Model", "paragraphs": [ - "Model used for the denoising steps.", - "Different models are typically trained to specialize in producing particular aesthetic results and content." + "Model used for generation. Different models are trained to specialize in producing different aesthetic results and content." ] }, "paramRatio": { @@ -1579,7 +1654,7 @@ "heading": "Seed", "paragraphs": [ "Controls the starting noise used for generation.", - "Disable “Random Seed” to produce identical results with the same generation settings." 
+ "Disable the “Random” option to produce identical results with the same generation settings." ] }, "paramSteps": { @@ -1589,6 +1664,10 @@ "Higher step counts will typically create better images but will require more generation time." ] }, + "paramUpscaleMethod": { + "heading": "Upscale Method", + "paragraphs": ["Method used to upscale the image for High Resolution Fix."] + }, "paramVAE": { "heading": "VAE", "paragraphs": ["Model used for translating AI output into the final image."] @@ -1596,14 +1675,82 @@ "paramVAEPrecision": { "heading": "VAE Precision", "paragraphs": [ - "The precision used during VAE encoding and decoding. FP16/half precision is more efficient, at the expense of minor image variations." + "The precision used during VAE encoding and decoding.", + "Fp16/Half precision is more efficient, at the expense of minor image variations." + ] + }, + "paramWidth": { + "heading": "Width", + "paragraphs": ["Width of the generated image. Must be a multiple of 8."] + }, + "patchmatchDownScaleSize": { + "heading": "Downscale", + "paragraphs": [ + "How much downscaling occurs before infilling.", + "Higher downscaling will improve performance and reduce quality." + ] + }, + "refinerModel": { + "heading": "Refiner Model", + "paragraphs": [ + "Model used during the refiner portion of the generation process.", + "Similar to the Generation Model." + ] + }, + "refinerPositiveAestheticScore": { + "heading": "Positive Aesthetic Score", + "paragraphs": [ + "Weight generations to be more similar to images with a high aesthetic score, based on the training data." + ] + }, + "refinerNegativeAestheticScore": { + "heading": "Negative Aesthetic Score", + "paragraphs": [ + "Weight generations to be more similar to images with a low aesthetic score, based on the training data." + ] + }, + "refinerScheduler": { + "heading": "Scheduler", + "paragraphs": [ + "Scheduler used during the refiner portion of the generation process.", + "Similar to the Generation Scheduler." + ] + }, + "refinerStart": { + "heading": "Refiner Start", + "paragraphs": [ + "Where in the generation process the refiner will start to be used.", + "0 means the refiner will be used for the entire generation process, 0.8 means the refiner will be used for the last 20% of the generation process." + ] + }, + "refinerSteps": { + "heading": "Steps", + "paragraphs": [ + "Number of steps that will be performed during the refiner portion of the generation process.", + "Similar to the Generation Steps." + ] + }, + "refinerCfgScale": { + "heading": "CFG Scale", + "paragraphs": [ + "Controls how much the prompt influences the generation process.", + "Similar to the Generation CFG Scale." ] }, "scaleBeforeProcessing": { "heading": "Scale Before Processing", "paragraphs": [ - "Scales the selected area to the size best suited for the model before the image generation process." + "“Auto” scales the selected area to the size best suited for the model before the image generation process.", + "“Manual” allows you to choose the width and height the selected area will be scaled to before the image generation process." 
] + }, + "seamlessTilingXAxis": { + "heading": "Seamless Tiling X Axis", + "paragraphs": ["Seamlessly tile an image along the horizontal axis."] + }, + "seamlessTilingYAxis": { + "heading": "Seamless Tiling Y Axis", + "paragraphs": ["Seamlessly tile an image along the vertical axis."] } }, "ui": { @@ -1637,6 +1784,9 @@ "clearCanvasHistoryMessage": "Clearing the canvas history leaves your current canvas intact, but irreversibly clears the undo and redo history.", "clearHistory": "Clear History", "clearMask": "Clear Mask (Shift+C)", + "coherenceModeGaussianBlur": "Gaussian Blur", + "coherenceModeBoxBlur": "Box Blur", + "coherenceModeStaged": "Staged", "colorPicker": "Color Picker", "copyToClipboard": "Copy to Clipboard", "cursorPosition": "Cursor Position", diff --git a/invokeai/frontend/web/public/locales/it.json b/invokeai/frontend/web/public/locales/it.json index 590e9ee28f..1a55f967f7 100644 --- a/invokeai/frontend/web/public/locales/it.json +++ b/invokeai/frontend/web/public/locales/it.json @@ -47,7 +47,7 @@ "statusModelConverted": "Modello Convertito", "statusConvertingModel": "Conversione Modello", "loading": "Caricamento in corso", - "loadingInvokeAI": "Caricamento Invoke AI", + "loadingInvokeAI": "Caricamento di Invoke AI", "postprocessing": "Post Elaborazione", "txt2img": "Testo a Immagine", "accept": "Accetta", @@ -61,7 +61,7 @@ "imagePrompt": "Prompt Immagine", "darkMode": "Modalità scura", "batch": "Gestione Lotto", - "modelManager": "Gestore modello", + "modelManager": "Gestore Modelli", "communityLabel": "Comunità", "nodeEditor": "Editor dei nodi", "statusProcessing": "Elaborazione in corso", @@ -81,7 +81,7 @@ "error": "Errore", "installed": "Installato", "template": "Schema", - "outputs": "Uscite", + "outputs": "Risultati", "data": "Dati", "somethingWentWrong": "Qualcosa è andato storto", "copyError": "$t(gallery.copy) Errore", @@ -93,7 +93,7 @@ "created": "Creato", "prevPage": "Pagina precedente", "delete": "Elimina", - "orderBy": "Ordinato per", + "orderBy": "Ordina per", "nextPage": "Pagina successiva", "saveAs": "Salva come", "unsaved": "Non salvato", @@ -109,7 +109,12 @@ "green": "Verde", "blue": "Blu", "alpha": "Alfa", - "copy": "Copia" + "copy": "Copia", + "on": "Attivato", + "checkpoint": "Checkpoint", + "safetensors": "Safetensors", + "ai": "ia", + "file": "File" }, "gallery": { "generations": "Generazioni", @@ -934,7 +939,7 @@ "executionStateCompleted": "Completato", "boardFieldDescription": "Una bacheca della galleria", "addNodeToolTip": "Aggiungi nodo (Shift+A, Space)", - "sDXLRefinerModelField": "Modello Refiner", + "sDXLRefinerModelField": "Modello Affinatore", "problemReadingMetadata": "Problema durante la lettura dei metadati dall'immagine", "colorCodeEdgesHelp": "Bordi con codice colore in base ai campi collegati", "animatedEdges": "Bordi animati", @@ -1138,7 +1143,11 @@ "unsupportedAnyOfLength": "unione di troppi elementi ({{count}})", "clearWorkflowDesc": "Cancellare questo flusso di lavoro e avviarne uno nuovo?", "clearWorkflow": "Cancella il flusso di lavoro", - "clearWorkflowDesc2": "Il tuo flusso di lavoro attuale presenta modifiche non salvate." 
+ "clearWorkflowDesc2": "Il tuo flusso di lavoro attuale presenta modifiche non salvate.", + "viewMode": "Utilizzare nella vista lineare", + "reorderLinearView": "Riordina la vista lineare", + "editMode": "Modifica nell'editor del flusso di lavoro", + "resetToDefaultValue": "Ripristina il valore predefinito" }, "boards": { "autoAddBoard": "Aggiungi automaticamente bacheca", @@ -1241,7 +1250,16 @@ "large": "Grande", "small": "Piccolo", "depthAnythingDescription": "Generazione di mappe di profondità utilizzando la tecnica Depth Anything", - "modelSize": "Dimensioni del modello" + "modelSize": "Dimensioni del modello", + "dwOpenposeDescription": "Stima della posa umana utilizzando DW Openpose", + "face": "Viso", + "body": "Corpo", + "hands": "Mani", + "lineartAnime": "Linea Anime", + "base": "Base", + "lineart": "Linea", + "controlnet": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.controlNet))", + "mediapipeFace": "Mediapipe Volto" }, "queue": { "queueFront": "Aggiungi all'inizio della coda", @@ -1321,7 +1339,7 @@ "noModelsAvailable": "Nessun modello disponibile", "selectModel": "Seleziona un modello", "selectLoRA": "Seleziona un LoRA", - "noRefinerModelsInstalled": "Nessun modello SDXL Refiner installato", + "noRefinerModelsInstalled": "Nessun modello affinatore SDXL installato", "noLoRAsInstalled": "Nessun LoRA installato", "esrganModel": "Modello ESRGAN", "addLora": "Aggiungi LoRA", @@ -1371,7 +1389,8 @@ "popovers": { "paramScheduler": { "paragraphs": [ - "Il campionatore definisce come aggiungere in modo iterativo il rumore a un'immagine o come aggiornare un campione in base all'output di un modello." + "Il campionatore utilizzato durante il processo di generazione.", + "Ciascun campionatore definisce come aggiungere in modo iterativo il rumore a un'immagine o come aggiornare un campione in base all'output di un modello." ], "heading": "Campionatore" }, @@ -1384,8 +1403,8 @@ "compositingCoherenceSteps": { "heading": "Passi", "paragraphs": [ - "Numero di passi di riduzione del rumore utilizzati nel Passaggio di Coerenza.", - "Uguale al parametro principale Passi." + "Numero di passi utilizzati nel Passaggio di Coerenza.", + "Simile ai passi di generazione." ] }, "compositingBlur": { @@ -1397,14 +1416,13 @@ "compositingCoherenceMode": { "heading": "Modalità", "paragraphs": [ - "La modalità del Passaggio di Coerenza." + "Metodo utilizzato per creare un'immagine coerente con l'area mascherata appena generata." ] }, "clipSkip": { "paragraphs": [ "Scegli quanti livelli del modello CLIP saltare.", - "Alcuni modelli funzionano meglio con determinate impostazioni di CLIP Skip.", - "Un valore più alto in genere produce un'immagine meno dettagliata." + "Alcuni modelli funzionano meglio con determinate impostazioni di CLIP Skip." ] }, "compositingCoherencePass": { @@ -1416,8 +1434,8 @@ "compositingStrength": { "heading": "Forza", "paragraphs": [ - "Intensità di riduzione del rumore per il passaggio di coerenza.", - "Uguale al parametro intensità di riduzione del rumore da immagine a immagine." + "Quantità di rumore aggiunta per il Passaggio di Coerenza.", + "Simile alla forza di riduzione del rumore." ] }, "paramNegativeConditioning": { @@ -1443,8 +1461,8 @@ "controlNetBeginEnd": { "heading": "Percentuale passi Inizio / Fine", "paragraphs": [ - "A quali passi del processo di rimozione del rumore verrà applicato ControlNet.", - "I ControlNet applicati all'inizio del processo guidano la composizione, mentre i ControlNet applicati alla fine guidano i dettagli." 
+ "La parte del processo di rimozione del rumore in cui verrà applicato l'adattatore di controllo.", + "In genere, gli adattatori di controllo applicati all'inizio del processo guidano la composizione, mentre quelli applicati alla fine guidano i dettagli." ] }, "noiseUseCPU": { @@ -1457,7 +1475,8 @@ }, "scaleBeforeProcessing": { "paragraphs": [ - "Ridimensiona l'area selezionata alla dimensione più adatta al modello prima del processo di generazione dell'immagine." + "\"Auto\" ridimensiona l'area selezionata alla dimensione più adatta al modello prima del processo di generazione dell'immagine.", + "\"Manuale\" consente di scegliere la larghezza e l'altezza a cui verrà ridimensionata l'area selezionata prima del processo di generazione dell'immagine." ], "heading": "Scala prima dell'elaborazione" }, @@ -1492,20 +1511,21 @@ "paramVAEPrecision": { "heading": "Precisione VAE", "paragraphs": [ - "La precisione utilizzata durante la codifica e decodifica VAE. FP16/mezza precisione è più efficiente, a scapito di minori variazioni dell'immagine." + "La precisione utilizzata durante la codifica e decodifica VAE.", + "Fp16/Mezza precisione è più efficiente, a scapito di minori variazioni dell'immagine." ] }, "paramSeed": { "paragraphs": [ "Controlla il rumore iniziale utilizzato per la generazione.", - "Disabilita seme \"Casuale\" per produrre risultati identici con le stesse impostazioni di generazione." + "Disabilita l'opzione \"Casuale\" per produrre risultati identici con le stesse impostazioni di generazione." ], "heading": "Seme" }, "controlNetResizeMode": { "heading": "Modalità ridimensionamento", "paragraphs": [ - "Come l'immagine ControlNet verrà adattata alle dimensioni di output dell'immagine." + "Metodo per adattare le dimensioni dell'immagine in ingresso dell'adattatore di controllo alle dimensioni della generazione di output." ] }, "dynamicPromptsSeedBehaviour": { @@ -1520,8 +1540,7 @@ "paramModel": { "heading": "Modello", "paragraphs": [ - "Modello utilizzato per i passaggi di riduzione del rumore.", - "Diversi modelli sono generalmente addestrati per specializzarsi nella produzione di particolari risultati e contenuti estetici." + "Modello utilizzato per la generazione. Diversi modelli vengono addestrati per specializzarsi nella produzione di risultati e contenuti estetici diversi." ] }, "paramDenoisingStrength": { @@ -1539,25 +1558,26 @@ }, "infillMethod": { "paragraphs": [ - "Metodo per riempire l'area selezionata." + "Metodo di riempimento durante il processo di Outpainting o Inpainting." ], "heading": "Metodo di riempimento" }, "controlNetWeight": { "heading": "Peso", "paragraphs": [ - "Quanto forte sarà l'impatto di ControlNet sull'immagine generata." + "Peso dell'adattatore di controllo. Un peso maggiore porterà a impatti maggiori sull'immagine finale." ] }, "paramCFGScale": { "heading": "Scala CFG", "paragraphs": [ - "Controlla quanto il tuo prompt influenza il processo di generazione." + "Controlla quanto il prompt influenza il processo di generazione.", + "Valori elevati della scala CFG possono provocare una saturazione eccessiva e distorsioni nei risultati della generazione. " ] }, "controlNetControlMode": { "paragraphs": [ - "Attribuisce più peso al prompt o a ControlNet." + "Attribuisce più peso al prompt oppure a ControlNet." ], "heading": "Modalità di controllo" }, @@ -1569,9 +1589,9 @@ ] }, "lora": { - "heading": "Peso LoRA", + "heading": "LoRA", "paragraphs": [ - "Un peso LoRA più elevato porterà a impatti maggiori sull'immagine finale." 
+ "Modelli leggeri utilizzati insieme ai modelli base." ] }, "controlNet": { @@ -1583,7 +1603,123 @@ "paramCFGRescaleMultiplier": { "heading": "Moltiplicatore di riscala CFG", "paragraphs": [ - "Moltiplicatore di riscala per la guida CFG, utilizzato per modelli addestrati utilizzando SNR a terminale zero (ztsnr). Valore suggerito 0.7." + "Moltiplicatore di riscala per la guida CFG, utilizzato per modelli addestrati utilizzando SNR a terminale zero (ztsnr).", + "Valore suggerito di 0.7 per questi modelli." + ] + }, + "controlNetProcessor": { + "heading": "Processore", + "paragraphs": [ + "Metodo di elaborazione dell'immagine di input per guidare il processo di generazione. Processori diversi forniranno effetti o stili diversi nelle immagini generate." + ] + }, + "imageFit": { + "heading": "Adatta l'immagine iniziale alle dimensioni di output", + "paragraphs": [ + "Ridimensiona l'immagine iniziale in base alla larghezza e all'altezza dell'immagine di output. Si consiglia di abilitarlo." + ] + }, + "loraWeight": { + "heading": "Peso", + "paragraphs": [ + "Peso del LoRA. Un peso maggiore comporterà un impatto maggiore sull'immagine finale." + ] + }, + "paramAspect": { + "heading": "Aspetto", + "paragraphs": [ + "Proporzioni dell'immagine generata. La modifica del rapporto aggiornerà di conseguenza la larghezza e l'altezza.", + "\"Ottimizza\" imposterà la larghezza e l'altezza alle dimensioni ottimali per il modello scelto." + ] + }, + "paramHeight": { + "heading": "Altezza", + "paragraphs": [ + "Altezza dell'immagine generata. Deve essere un multiplo di 8." + ] + }, + "paramHrf": { + "heading": "Abilita correzione alta risoluzione", + "paragraphs": [ + "Genera immagini di alta qualità con una risoluzione maggiore di quella ottimale per il modello. Generalmente utilizzato per impedire la duplicazione nell'immagine generata." + ] + }, + "paramUpscaleMethod": { + "heading": "Metodo di ampliamento", + "paragraphs": [ + "Metodo utilizzato per eseguire l'ampliamento dell'immagine per la correzione ad alta risoluzione." + ] + }, + "patchmatchDownScaleSize": { + "heading": "Ridimensiona", + "paragraphs": [ + "Quanto ridimensionamento avviene prima del riempimento.", + "Un ridimensionamento più elevato migliorerà le prestazioni e ridurrà la qualità." + ] + }, + "paramWidth": { + "paragraphs": [ + "Larghezza dell'immagine generata. Deve essere un multiplo di 8." + ], + "heading": "Larghezza" + }, + "refinerModel": { + "heading": "Modello Affinatore", + "paragraphs": [ + "Modello utilizzato durante la parte di affinamento del processo di generazione.", + "Simile al modello di generazione." + ] + }, + "refinerNegativeAestheticScore": { + "paragraphs": [ + "Valuta le generazioni in modo che siano più simili alle immagini con un punteggio estetico basso, in base ai dati di addestramento." + ], + "heading": "Punteggio estetico negativo" + }, + "refinerScheduler": { + "paragraphs": [ + "Campionatore utilizzato durante la parte di affinamento del processo di generazione.", + "Simile al campionatore di generazione." + ], + "heading": "Campionatore" + }, + "refinerStart": { + "heading": "Inizio affinamento", + "paragraphs": [ + "A che punto nel processo di generazione inizierà ad essere utilizzato l'affinatore.", + "0 significa che l'affinatore verrà utilizzato per l'intero processo di generazione, 0.8 significa che l'affinatore verrà utilizzato per l'ultimo 20% del processo di generazione." 
+ ] + }, + "refinerSteps": { + "heading": "Passi", + "paragraphs": [ + "Numero di passi che verranno eseguiti durante la parte di affinamento del processo di generazione.", + "Simile ai passi di generazione." + ] + }, + "refinerCfgScale": { + "heading": "Scala CFG", + "paragraphs": [ + "Controlla quanto il prompt influenza il processo di generazione.", + "Simile alla scala CFG di generazione." + ] + }, + "seamlessTilingXAxis": { + "heading": "Asse X di piastrellatura senza cuciture", + "paragraphs": [ + "Affianca senza soluzione di continuità un'immagine lungo l'asse orizzontale." + ] + }, + "seamlessTilingYAxis": { + "heading": "Asse Y di piastrellatura senza cuciture", + "paragraphs": [ + "Affianca senza soluzione di continuità un'immagine lungo l'asse verticale." + ] + }, + "refinerPositiveAestheticScore": { + "heading": "Punteggio estetico positivo", + "paragraphs": [ + "Valuta le generazioni in modo che siano più simili alle immagini con un punteggio estetico elevato, in base ai dati di addestramento." ] } }, @@ -1632,7 +1768,8 @@ "steps": "Passi", "scheduler": "Campionatore", "recallParameters": "Richiama i parametri", - "noRecallParameters": "Nessun parametro da richiamare trovato" + "noRecallParameters": "Nessun parametro da richiamare trovato", + "cfgRescaleMultiplier": "$t(parameters.cfgRescaleMultiplier)" }, "hrf": { "enableHrf": "Abilita Correzione Alta Risoluzione", diff --git a/invokeai/frontend/web/public/locales/nl.json b/invokeai/frontend/web/public/locales/nl.json index aaf836604f..c23030bf54 100644 --- a/invokeai/frontend/web/public/locales/nl.json +++ b/invokeai/frontend/web/public/locales/nl.json @@ -1217,16 +1217,14 @@ "clipSkip": { "paragraphs": [ "Kies hoeveel CLIP-modellagen je wilt overslaan.", - "Bepaalde modellen werken beter met bepaalde Overslaan CLIP-instellingen.", - "Een hogere waarde geeft meestal een minder gedetailleerde afbeelding." + "Bepaalde modellen werken beter met bepaalde Overslaan CLIP-instellingen." ], "heading": "Overslaan CLIP" }, "paramModel": { "heading": "Model", "paragraphs": [ - "Model gebruikt voor de ontruisingsstappen.", - "Verschillende modellen zijn meestal getraind om zich te specialiseren in het maken van bepaalde esthetische resultaten en materiaal." + "Model gebruikt voor de ontruisingsstappen." ] }, "compositingCoherencePass": { diff --git a/invokeai/frontend/web/public/locales/ru.json b/invokeai/frontend/web/public/locales/ru.json index 18bfad7f02..8468554bab 100644 --- a/invokeai/frontend/web/public/locales/ru.json +++ b/invokeai/frontend/web/public/locales/ru.json @@ -108,7 +108,16 @@ "preferencesLabel": "Предпочтения", "or": "или", "advancedOptions": "Расширенные настройки", - "free": "Свободно" + "free": "Свободно", + "aboutHeading": "Владей своей творческой силой", + "red": "Красный", + "green": "Зеленый", + "blue": "Синий", + "alpha": "Альфа", + "toResolve": "Чтоб решить", + "copy": "Копировать", + "localSystem": "Локальная система", + "aboutDesc": "Используя Invoke для работы? 
Проверьте это:" }, "gallery": { "generations": "Генерации", @@ -152,17 +161,17 @@ }, "hotkeys": { "keyboardShortcuts": "Горячие клавиши", - "appHotkeys": "Горячие клавиши приложения", - "generalHotkeys": "Общие горячие клавиши", - "galleryHotkeys": "Горячие клавиши галереи", - "unifiedCanvasHotkeys": "Горячие клавиши Единого холста", + "appHotkeys": "Приложение", + "generalHotkeys": "Общее", + "galleryHotkeys": "Галлерея", + "unifiedCanvasHotkeys": "Единый холст", "invoke": { "title": "Invoke", "desc": "Сгенерировать изображение" }, "cancel": { "title": "Отменить", - "desc": "Отменить генерацию изображения" + "desc": "Отменить текущий элемент" }, "focusPrompt": { "title": "Переключиться на ввод запроса", @@ -352,7 +361,7 @@ "desc": "Открывает меню добавления узла", "title": "Добавление узлов" }, - "nodesHotkeys": "Горячие клавиши узлов", + "nodesHotkeys": "Узлы", "cancelAndClear": { "desc": "Отмена текущего элемента очереди и очистка всех ожидающих элементов", "title": "Отменить и очистить" @@ -367,7 +376,11 @@ "desc": "Открытие и закрытие панели опций и галереи", "title": "Переключить опции и галерею" }, - "clearSearch": "Очистить поиск" + "clearSearch": "Очистить поиск", + "remixImage": { + "desc": "Используйте все параметры, кроме сида из текущего изображения", + "title": "Ремикс изображения" + } }, "modelManager": { "modelManager": "Менеджер моделей", @@ -512,7 +525,8 @@ "modelType": "Тип модели", "customConfigFileLocation": "Расположение пользовательского файла конфигурации", "vaePrecision": "Точность VAE", - "noModelSelected": "Модель не выбрана" + "noModelSelected": "Модель не выбрана", + "configFile": "Файл конфигурации" }, "parameters": { "images": "Изображения", @@ -583,8 +597,8 @@ "copyImage": "Скопировать изображение", "showPreview": "Показать предпросмотр", "noiseSettings": "Шум", - "seamlessXAxis": "Горизонтальная", - "seamlessYAxis": "Вертикальная", + "seamlessXAxis": "Бесшовность по оси X", + "seamlessYAxis": "Бесшовность по оси Y", "scheduler": "Планировщик", "boundingBoxWidth": "Ширина ограничивающей рамки", "boundingBoxHeight": "Высота ограничивающей рамки", @@ -612,7 +626,7 @@ "noControlImageForControlAdapter": "Адаптер контроля #{{number}} не имеет изображения", "noModelForControlAdapter": "Не выбрана модель адаптера контроля #{{number}}.", "unableToInvoke": "Невозможно вызвать", - "incompatibleBaseModelForControlAdapter": "Модель контрольного адаптера №{{number}} недействительна для основной модели.", + "incompatibleBaseModelForControlAdapter": "Адаптер контроля №{{number}} несовместим с основной моделью.", "systemDisconnected": "Система отключена", "missingNodeTemplate": "Отсутствует шаблон узла", "readyToInvoke": "Готово к вызову", @@ -653,7 +667,10 @@ "setToOptimalSize": "Установить оптимальный для модели размер", "setToOptimalSizeTooSmall": "$t(parameters.setToOptimalSize) (может быть слишком маленьким)", "setToOptimalSizeTooLarge": "$t(parameters.setToOptimalSize) (может быть слишком большим)", - "lockAspectRatio": "Заблокировать соотношение" + "lockAspectRatio": "Заблокировать соотношение", + "boxBlur": "Размытие прямоугольника", + "gaussianBlur": "Размытие по Гауссу", + "remixImage": "Ремикс изображения" }, "settings": { "models": "Модели", @@ -787,7 +804,10 @@ "canvasSavedGallery": "Холст сохранен в галерею", "imageUploadFailed": "Не удалось загрузить изображение", "modelAdded": "Добавлена модель: {{modelName}}", - "problemImportingMask": "Проблема с импортом маски" + "problemImportingMask": "Проблема с импортом маски", + "problemDownloadingImage": "Не удается 
скачать изображение", + "uploadInitialImage": "Загрузить начальное изображение", + "resetInitialImage": "Сбросить начальное изображение" }, "tooltip": { "feature": { @@ -892,7 +912,8 @@ "mode": "Режим", "loadMore": "Загрузить больше", "resetUI": "$t(accessibility.reset) интерфейс", - "createIssue": "Сообщить о проблеме" + "createIssue": "Сообщить о проблеме", + "about": "Об этом" }, "ui": { "showProgressImages": "Показывать промежуточный итог", @@ -1117,7 +1138,18 @@ "unableToParseEdge": "Невозможно разобрать край", "unknownInput": "Неизвестный вход: {{name}}", "oNNXModelFieldDescription": "Поле модели ONNX.", - "imageCollection": "Коллекция изображений" + "imageCollection": "Коллекция изображений", + "newWorkflow": "Новый рабочий процесс", + "newWorkflowDesc": "Создать новый рабочий процесс?", + "clearWorkflow": "Очистить рабочий процесс", + "newWorkflowDesc2": "Текущий рабочий процесс имеет несохраненные изменения.", + "latentsCollection": "Коллекция латентов", + "clearWorkflowDesc": "Очистить этот рабочий процесс и создать новый?", + "clearWorkflowDesc2": "Текущий рабочий процесс имеет несохраненные измерения.", + "reorderLinearView": "Изменить порядок линейного просмотра", + "viewMode": "Использовать в линейном представлении", + "editMode": "Открыть в редакторе узлов", + "resetToDefaultValue": "Сбросить к стандартному значкнию" }, "controlnet": { "amult": "a_mult", @@ -1198,7 +1230,18 @@ "enableIPAdapter": "Включить IP Adapter", "maxFaces": "Макс Лица", "mlsdDescription": "Минималистичный детектор отрезков линии", - "resizeSimple": "Изменить размер (простой)" + "resizeSimple": "Изменить размер (простой)", + "megaControl": "Mega контроль", + "base": "Базовый", + "depthAnything": "Глубина всего", + "depthAnythingDescription": "Создание карты глубины с использованием метода Depth Anything", + "face": "Лицо", + "dwOpenposeDescription": "Оценка позы человека с помощью DW Openpose", + "large": "Большой", + "modelSize": "Размер модели", + "small": "Маленький", + "body": "Тело", + "hands": "Руки" }, "boards": { "autoAddBoard": "Авто добавление Доски", @@ -1281,7 +1324,7 @@ "compositingCoherenceSteps": { "heading": "Шаги", "paragraphs": [ - null, + "Количество шагов снижения шума, используемых при прохождении когерентности.", "То же, что и основной параметр «Шаги»." ] }, @@ -1319,7 +1362,10 @@ ] }, "compositingCoherenceMode": { - "heading": "Режим" + "heading": "Режим", + "paragraphs": [ + "Режим прохождения когерентности." + ] }, "paramSeed": { "paragraphs": [ @@ -1353,16 +1399,14 @@ "clipSkip": { "paragraphs": [ "Выберите, сколько слоев модели CLIP нужно пропустить.", - "Некоторые модели работают лучше с определенными настройками пропуска CLIP.", - "Более высокое значение обычно приводит к менее детализированному изображению." + "Некоторые модели работают лучше с определенными настройками пропуска CLIP." ], "heading": "CLIP пропуск" }, "paramModel": { "heading": "Модель", "paragraphs": [ - "Модель, используемая для шагов шумоподавления.", - "Различные модели обычно обучаются, чтобы специализироваться на достижении определенных эстетических результатов и содержания." + "Модель, используемая для шагов шумоподавления." 
] }, "compositingCoherencePass": { @@ -1601,7 +1645,7 @@ "openWorkflow": "Открытый рабочий процесс", "clearWorkflowSearchFilter": "Очистить фильтр поиска рабочих процессов", "workflowLibrary": "Библиотека", - "downloadWorkflow": "Скачать рабочий процесс", + "downloadWorkflow": "Сохранить в файл", "noRecentWorkflows": "Нет недавних рабочих процессов", "workflowSaved": "Рабочий процесс сохранен", "workflowIsOpen": "Рабочий процесс открыт", @@ -1614,9 +1658,12 @@ "deleteWorkflow": "Удалить рабочий процесс", "workflows": "Рабочие процессы", "noDescription": "Без описания", - "uploadWorkflow": "Загрузить рабочий процесс", + "uploadWorkflow": "Загрузить из файла", "userWorkflows": "Мои рабочие процессы", - "newWorkflowCreated": "Создан новый рабочий процесс" + "newWorkflowCreated": "Создан новый рабочий процесс", + "saveWorkflowToProject": "Сохранить рабочий процесс в проект", + "workflowCleared": "Рабочий процесс очищен", + "noWorkflows": "Нет рабочих процессов" }, "embedding": { "noEmbeddingsLoaded": "встраивания не загружены", diff --git a/invokeai/frontend/web/public/locales/zh_CN.json b/invokeai/frontend/web/public/locales/zh_CN.json index 65ebc7f01a..3e4319fef8 100644 --- a/invokeai/frontend/web/public/locales/zh_CN.json +++ b/invokeai/frontend/web/public/locales/zh_CN.json @@ -1444,16 +1444,14 @@ "clipSkip": { "paragraphs": [ "选择要跳过 CLIP 模型多少层。", - "部分模型跳过特定数值的层时效果会更好。", - "较高的数值通常会导致图像细节更少。" + "部分模型跳过特定数值的层时效果会更好。" ], "heading": "CLIP 跳过层" }, "paramModel": { "heading": "模型", "paragraphs": [ - "用于去噪过程的模型。", - "不同的模型一般会通过接受训练来专门产生特定的美学内容和结果。" + "用于去噪过程的模型。" ] }, "paramIterations": { diff --git a/invokeai/frontend/web/scripts/colors.js b/invokeai/frontend/web/scripts/colors.js deleted file mode 100644 index 3fc8f8d751..0000000000 --- a/invokeai/frontend/web/scripts/colors.js +++ /dev/null @@ -1,34 +0,0 @@ -export const COLORS = { - reset: '\x1b[0m', - bright: '\x1b[1m', - dim: '\x1b[2m', - underscore: '\x1b[4m', - blink: '\x1b[5m', - reverse: '\x1b[7m', - hidden: '\x1b[8m', - - fg: { - black: '\x1b[30m', - red: '\x1b[31m', - green: '\x1b[32m', - yellow: '\x1b[33m', - blue: '\x1b[34m', - magenta: '\x1b[35m', - cyan: '\x1b[36m', - white: '\x1b[37m', - gray: '\x1b[90m', - crimson: '\x1b[38m', - }, - bg: { - black: '\x1b[40m', - red: '\x1b[41m', - green: '\x1b[42m', - yellow: '\x1b[43m', - blue: '\x1b[44m', - magenta: '\x1b[45m', - cyan: '\x1b[46m', - white: '\x1b[47m', - gray: '\x1b[100m', - crimson: '\x1b[48m', - }, -}; diff --git a/invokeai/frontend/web/src/app/hooks/useSocketIO.ts b/invokeai/frontend/web/src/app/hooks/useSocketIO.ts index 058a607261..e1c4cebdb9 100644 --- a/invokeai/frontend/web/src/app/hooks/useSocketIO.ts +++ b/invokeai/frontend/web/src/app/hooks/useSocketIO.ts @@ -19,7 +19,7 @@ declare global { } export const $socketOptions = map>({}); -export const $isSocketInitialized = atom(false); +const $isSocketInitialized = atom(false); /** * Initializes the socket.io connection and sets up event listeners. 
diff --git a/invokeai/frontend/web/src/app/logging/logger.ts b/invokeai/frontend/web/src/app/logging/logger.ts index 491fc27688..d0e6340625 100644 --- a/invokeai/frontend/web/src/app/logging/logger.ts +++ b/invokeai/frontend/web/src/app/logging/logger.ts @@ -12,7 +12,6 @@ ROARR.serializeMessage = serializeMessage; ROARR.write = createLogWriter(); export const BASE_CONTEXT = {}; -export const log = Roarr.child(BASE_CONTEXT); export const $logger = atom(Roarr.child(BASE_CONTEXT)); diff --git a/invokeai/frontend/web/src/app/store/actions.ts b/invokeai/frontend/web/src/app/store/actions.ts index 0800d1a63b..85debfc607 100644 --- a/invokeai/frontend/web/src/app/store/actions.ts +++ b/invokeai/frontend/web/src/app/store/actions.ts @@ -1,10 +1,7 @@ import { createAction } from '@reduxjs/toolkit'; import type { InvokeTabName } from 'features/ui/store/tabMap'; -import type { BatchConfig } from 'services/api/types'; export const enqueueRequested = createAction<{ tabName: InvokeTabName; prepend: boolean; }>('app/enqueueRequested'); - -export const batchEnqueued = createAction('app/batchEnqueued'); diff --git a/invokeai/frontend/web/src/app/store/constants.ts b/invokeai/frontend/web/src/app/store/constants.ts index c2f3a5e10b..14a2c0b77f 100644 --- a/invokeai/frontend/web/src/app/store/constants.ts +++ b/invokeai/frontend/web/src/app/store/constants.ts @@ -1 +1,2 @@ export const STORAGE_PREFIX = '@@invokeai-'; +export const EMPTY_ARRAY = []; diff --git a/invokeai/frontend/web/src/app/store/createMemoizedSelector.ts b/invokeai/frontend/web/src/app/store/createMemoizedSelector.ts index 8e6142ce1d..8e2559927a 100644 --- a/invokeai/frontend/web/src/app/store/createMemoizedSelector.ts +++ b/invokeai/frontend/web/src/app/store/createMemoizedSelector.ts @@ -13,19 +13,9 @@ export const createMemoizedSelector = createSelectorCreator({ argsMemoize: lruMemoize, }); -/** - * A memoized selector creator that uses LRU cache default shallow equality check. 
- */ -export const createLruSelector = createSelectorCreator({ - memoize: lruMemoize, - argsMemoize: lruMemoize, -}); - -export const createLruDraftSafeSelector = createDraftSafeSelectorCreator({ - memoize: lruMemoize, - argsMemoize: lruMemoize, -}); - export const getSelectorsOptions: GetSelectorsOptions = { - createSelector: createLruDraftSafeSelector, + createSelector: createDraftSafeSelectorCreator({ + memoize: lruMemoize, + argsMemoize: lruMemoize, + }), }; diff --git a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/driver.ts b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/driver.ts index 61fbd015f8..7196e1fcea 100644 --- a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/driver.ts +++ b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/driver.ts @@ -2,15 +2,15 @@ import { StorageError } from 'app/store/enhancers/reduxRemember/errors'; import { $projectId } from 'app/store/nanostores/projectId'; import type { UseStore } from 'idb-keyval'; import { clear, createStore as createIDBKeyValStore, get, set } from 'idb-keyval'; -import { action, atom } from 'nanostores'; +import { atom } from 'nanostores'; import type { Driver } from 'redux-remember'; // Create a custom idb-keyval store (just needed to customize the name) -export const $idbKeyValStore = atom(createIDBKeyValStore('invoke', 'invoke-store')); +const $idbKeyValStore = atom(createIDBKeyValStore('invoke', 'invoke-store')); -export const clearIdbKeyValStore = action($idbKeyValStore, 'clear', (store) => { - clear(store.get()); -}); +export const clearIdbKeyValStore = () => { + clear($idbKeyValStore.get()); +}; // Create redux-remember driver, wrapping idb-keyval export const idbKeyValDriver: Driver = { diff --git a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/errors.ts b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/errors.ts index 0ce113fc39..9704c49cf2 100644 --- a/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/errors.ts +++ b/invokeai/frontend/web/src/app/store/enhancers/reduxRemember/errors.ts @@ -3,7 +3,7 @@ import { parseify } from 'common/util/serialize'; import { PersistError, RehydrateError } from 'redux-remember'; import { serializeError } from 'serialize-error'; -export type StorageErrorArgs = { +type StorageErrorArgs = { key: string; /* eslint-disable-next-line @typescript-eslint/no-explicit-any */ // any is correct value?: any; diff --git a/invokeai/frontend/web/src/app/store/middleware/devtools/actionSanitizer.ts b/invokeai/frontend/web/src/app/store/middleware/devtools/actionSanitizer.ts index 2e2d2014b2..ed8c82d91c 100644 --- a/invokeai/frontend/web/src/app/store/middleware/devtools/actionSanitizer.ts +++ b/invokeai/frontend/web/src/app/store/middleware/devtools/actionSanitizer.ts @@ -1,6 +1,6 @@ import type { UnknownAction } from '@reduxjs/toolkit'; import { isAnyGraphBuilt } from 'features/nodes/store/actions'; -import { nodeTemplatesBuilt } from 'features/nodes/store/nodeTemplatesSlice'; +import { nodeTemplatesBuilt } from 'features/nodes/store/nodesSlice'; import { cloneDeep } from 'lodash-es'; import { appInfoApi } from 'services/api/endpoints/appInfo'; import type { Graph } from 'services/api/types'; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts index 322c4eb1ec..c5d86a127f 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts +++ 
b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/index.ts @@ -1,80 +1,65 @@ -import type { ListenerEffect, TypedAddListener, TypedStartListening, UnknownAction } from '@reduxjs/toolkit'; -import { addListener, createListenerMiddleware } from '@reduxjs/toolkit'; +import type { TypedStartListening } from '@reduxjs/toolkit'; +import { createListenerMiddleware } from '@reduxjs/toolkit'; +import { addCommitStagingAreaImageListener } from 'app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener'; +import { addFirstListImagesListener } from 'app/store/middleware/listenerMiddleware/listeners/addFirstListImagesListener.ts'; +import { addAnyEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/anyEnqueued'; +import { addAppConfigReceivedListener } from 'app/store/middleware/listenerMiddleware/listeners/appConfigReceived'; +import { addAppStartedListener } from 'app/store/middleware/listenerMiddleware/listeners/appStarted'; +import { addBatchEnqueuedListener } from 'app/store/middleware/listenerMiddleware/listeners/batchEnqueued'; +import { addDeleteBoardAndImagesFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted'; +import { addBoardIdSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/boardIdSelected'; +import { addBulkDownloadListeners } from 'app/store/middleware/listenerMiddleware/listeners/bulkDownload'; +import { addCanvasCopiedToClipboardListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasCopiedToClipboard'; +import { addCanvasDownloadedAsImageListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasDownloadedAsImage'; +import { addCanvasImageToControlNetListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet'; +import { addCanvasMaskSavedToGalleryListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMaskSavedToGallery'; +import { addCanvasMaskToControlNetListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet'; +import { addCanvasMergedListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasMerged'; +import { addCanvasSavedToGalleryListener } from 'app/store/middleware/listenerMiddleware/listeners/canvasSavedToGallery'; +import { addControlNetAutoProcessListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess'; +import { addControlNetImageProcessedListener } from 'app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed'; +import { addEnqueueRequestedCanvasListener } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas'; +import { addEnqueueRequestedLinear } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear'; +import { addEnqueueRequestedNodes } from 'app/store/middleware/listenerMiddleware/listeners/enqueueRequestedNodes'; import { addGalleryImageClickedListener } from 'app/store/middleware/listenerMiddleware/listeners/galleryImageClicked'; +import { addGetOpenAPISchemaListener } from 'app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema'; +import { addImageAddedToBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard'; +import { addRequestedSingleImageDeletionListener } from 'app/store/middleware/listenerMiddleware/listeners/imageDeleted'; +import { addImageDroppedListener } from 'app/store/middleware/listenerMiddleware/listeners/imageDropped'; +import { 
addImageRemovedFromBoardFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard'; +import { addImagesStarredListener } from 'app/store/middleware/listenerMiddleware/listeners/imagesStarred'; +import { addImagesUnstarredListener } from 'app/store/middleware/listenerMiddleware/listeners/imagesUnstarred'; +import { addImageToDeleteSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/imageToDeleteSelected'; +import { addImageUploadedFulfilledListener } from 'app/store/middleware/listenerMiddleware/listeners/imageUploaded'; +import { addInitialImageSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/initialImageSelected'; +import { addModelSelectedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelSelected'; +import { addModelsLoadedListener } from 'app/store/middleware/listenerMiddleware/listeners/modelsLoaded'; +import { addDynamicPromptsListener } from 'app/store/middleware/listenerMiddleware/listeners/promptChanged'; +import { addSocketConnectedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketConnected'; +import { addSocketDisconnectedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketDisconnected'; +import { addGeneratorProgressEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketGeneratorProgress'; +import { addGraphExecutionStateCompleteEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketGraphExecutionStateComplete'; +import { addInvocationCompleteEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete'; +import { addInvocationErrorEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationError'; +import { addInvocationRetrievalErrorEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationRetrievalError'; +import { addInvocationStartedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationStarted'; +import { addModelInstallEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketModelInstall'; +import { addModelLoadEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketModelLoad'; +import { addSocketQueueItemStatusChangedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketQueueItemStatusChanged'; +import { addSessionRetrievalErrorEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketSessionRetrievalError'; +import { addSocketSubscribedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketSubscribed'; +import { addSocketUnsubscribedEventListener } from 'app/store/middleware/listenerMiddleware/listeners/socketio/socketUnsubscribed'; +import { addStagingAreaImageSavedListener } from 'app/store/middleware/listenerMiddleware/listeners/stagingAreaImageSaved'; +import { addUpdateAllNodesRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/updateAllNodesRequested'; +import { addUpscaleRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/upscaleRequested'; +import { addWorkflowLoadRequestedListener } from 'app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested'; import type { AppDispatch, RootState } from 'app/store/store'; -import { 
addCommitStagingAreaImageListener } from './listeners/addCommitStagingAreaImageListener'; -import { addFirstListImagesListener } from './listeners/addFirstListImagesListener.ts'; -import { addAnyEnqueuedListener } from './listeners/anyEnqueued'; -import { addAppConfigReceivedListener } from './listeners/appConfigReceived'; -import { addAppStartedListener } from './listeners/appStarted'; -import { addBatchEnqueuedListener } from './listeners/batchEnqueued'; -import { addDeleteBoardAndImagesFulfilledListener } from './listeners/boardAndImagesDeleted'; -import { addBoardIdSelectedListener } from './listeners/boardIdSelected'; -import { addCanvasCopiedToClipboardListener } from './listeners/canvasCopiedToClipboard'; -import { addCanvasDownloadedAsImageListener } from './listeners/canvasDownloadedAsImage'; -import { addCanvasImageToControlNetListener } from './listeners/canvasImageToControlNet'; -import { addCanvasMaskSavedToGalleryListener } from './listeners/canvasMaskSavedToGallery'; -import { addCanvasMaskToControlNetListener } from './listeners/canvasMaskToControlNet'; -import { addCanvasMergedListener } from './listeners/canvasMerged'; -import { addCanvasSavedToGalleryListener } from './listeners/canvasSavedToGallery'; -import { addControlNetAutoProcessListener } from './listeners/controlNetAutoProcess'; -import { addControlNetImageProcessedListener } from './listeners/controlNetImageProcessed'; -import { addEnqueueRequestedCanvasListener } from './listeners/enqueueRequestedCanvas'; -import { addEnqueueRequestedLinear } from './listeners/enqueueRequestedLinear'; -import { addEnqueueRequestedNodes } from './listeners/enqueueRequestedNodes'; -import { addGetOpenAPISchemaListener } from './listeners/getOpenAPISchema'; -import { - addImageAddedToBoardFulfilledListener, - addImageAddedToBoardRejectedListener, -} from './listeners/imageAddedToBoard'; -import { - addImageDeletedFulfilledListener, - addImageDeletedPendingListener, - addImageDeletedRejectedListener, - addRequestedMultipleImageDeletionListener, - addRequestedSingleImageDeletionListener, -} from './listeners/imageDeleted'; -import { addImageDroppedListener } from './listeners/imageDropped'; -import { - addImageRemovedFromBoardFulfilledListener, - addImageRemovedFromBoardRejectedListener, -} from './listeners/imageRemovedFromBoard'; -import { addImagesStarredListener } from './listeners/imagesStarred'; -import { addImagesUnstarredListener } from './listeners/imagesUnstarred'; -import { addImageToDeleteSelectedListener } from './listeners/imageToDeleteSelected'; -import { addImageUploadedFulfilledListener, addImageUploadedRejectedListener } from './listeners/imageUploaded'; -import { addInitialImageSelectedListener } from './listeners/initialImageSelected'; -import { addModelSelectedListener } from './listeners/modelSelected'; -import { addModelsLoadedListener } from './listeners/modelsLoaded'; -import { addDynamicPromptsListener } from './listeners/promptChanged'; -import { addSocketConnectedEventListener as addSocketConnectedListener } from './listeners/socketio/socketConnected'; -import { addSocketDisconnectedEventListener as addSocketDisconnectedListener } from './listeners/socketio/socketDisconnected'; -import { addGeneratorProgressEventListener as addGeneratorProgressListener } from './listeners/socketio/socketGeneratorProgress'; -import { addGraphExecutionStateCompleteEventListener as addGraphExecutionStateCompleteListener } from './listeners/socketio/socketGraphExecutionStateComplete'; -import { 
addInvocationCompleteEventListener as addInvocationCompleteListener } from './listeners/socketio/socketInvocationComplete'; -import { addInvocationErrorEventListener as addInvocationErrorListener } from './listeners/socketio/socketInvocationError'; -import { addInvocationRetrievalErrorEventListener } from './listeners/socketio/socketInvocationRetrievalError'; -import { addInvocationStartedEventListener as addInvocationStartedListener } from './listeners/socketio/socketInvocationStarted'; -import { addModelLoadEventListener } from './listeners/socketio/socketModelLoad'; -import { addSocketQueueItemStatusChangedEventListener } from './listeners/socketio/socketQueueItemStatusChanged'; -import { addSessionRetrievalErrorEventListener } from './listeners/socketio/socketSessionRetrievalError'; -import { addSocketSubscribedEventListener as addSocketSubscribedListener } from './listeners/socketio/socketSubscribed'; -import { addSocketUnsubscribedEventListener as addSocketUnsubscribedListener } from './listeners/socketio/socketUnsubscribed'; -import { addStagingAreaImageSavedListener } from './listeners/stagingAreaImageSaved'; -import { addUpdateAllNodesRequestedListener } from './listeners/updateAllNodesRequested'; -import { addUpscaleRequestedListener } from './listeners/upscaleRequested'; -import { addWorkflowLoadRequestedListener } from './listeners/workflowLoadRequested'; - export const listenerMiddleware = createListenerMiddleware(); export type AppStartListening = TypedStartListening<RootState, AppDispatch>; -export const startAppListening = listenerMiddleware.startListening as AppStartListening; - -export const addAppListener = addListener as TypedAddListener<RootState, AppDispatch>; - -export type AppListenerEffect = ListenerEffect<UnknownAction, RootState, AppDispatch>; +const startAppListening = listenerMiddleware.startListening as AppStartListening; /** * The RTK listener middleware is a lightweight alternative to sagas/observables.
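The hunks above and below replace the exported startAppListening singleton (and drop the unused addAppListener and AppListenerEffect exports) with dependency injection: each listener module now exports a registration function that receives startAppListening, and index.ts invokes them all in one place, so the dependency is explicit and modules cannot create import cycles through '..'. A minimal sketch of the pattern, using a hypothetical somethingHappened action for illustration:

import type { TypedStartListening } from '@reduxjs/toolkit';
import { createAction, createListenerMiddleware } from '@reduxjs/toolkit';
import type { AppDispatch, RootState } from 'app/store/store';

export const listenerMiddleware = createListenerMiddleware();
export type AppStartListening = TypedStartListening<RootState, AppDispatch>;
const startAppListening = listenerMiddleware.startListening as AppStartListening;

// Hypothetical action, for illustration only.
const somethingHappened = createAction<{ id: string }>('app/somethingHappened');

// A listener module receives the typed `startAppListening` as a parameter
// instead of importing a shared singleton from the middleware module.
const addSomethingHappenedListener = (startAppListening: AppStartListening) => {
  startAppListening({
    actionCreator: somethingHappened,
    effect: async (action, { dispatch }) => {
      // Side-effect logic goes here; the payload is fully typed.
      dispatch({ type: 'app/somethingHandled', payload: action.payload.id });
    },
  });
};

// All registrations happen in one place:
addSomethingHappenedListener(startAppListening);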
@@ -83,93 +68,88 @@ export type AppListenerEffect diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addCommitStagingAreaImageListener.ts -export const addCommitStagingAreaImageListener = () => { +export const addCommitStagingAreaImageListener = (startAppListening: AppStartListening) => { startAppListening({ matcher, effect: async (_, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addFirstListImagesListener.ts.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addFirstListImagesListener.ts.ts index 3c9b245b11..3f831de5c6 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addFirstListImagesListener.ts.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/addFirstListImagesListener.ts.ts @@ -1,15 +1,11 @@ -import { createAction } from '@reduxjs/toolkit'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { imageSelected } from 'features/gallery/store/gallerySlice'; import { IMAGE_CATEGORIES } from 'features/gallery/store/types'; import { imagesApi } from 'services/api/endpoints/images'; import type { ImageCache } from 'services/api/types'; import { getListImagesUrl, imagesSelectors } from 'services/api/util'; -import { startAppListening } from '..'; - -export const appStarted = createAction('app/appStarted'); - -export const addFirstListImagesListener = () => { +export const addFirstListImagesListener = (startAppListening: AppStartListening) => { startAppListening({ matcher: imagesApi.endpoints.listImages.matchFulfilled, effect: async (action, { dispatch, unsubscribe, cancelActiveListeners }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/anyEnqueued.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/anyEnqueued.ts index e2114769f4..373fa3dd28 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/anyEnqueued.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/anyEnqueued.ts @@ -1,8 +1,7 @@ +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { queueApi, selectQueueStatus } from 'services/api/endpoints/queue'; -import { startAppListening } from '..'; - -export const addAnyEnqueuedListener = () => { +export const addAnyEnqueuedListener = (startAppListening: AppStartListening) => { startAppListening({ matcher: queueApi.endpoints.enqueueBatch.matchFulfilled, effect: async (_, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appConfigReceived.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appConfigReceived.ts index 3fc9cdda44..4ee73af642 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appConfigReceived.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appConfigReceived.ts @@ -1,10 +1,9 @@ +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { setInfillMethod } from 'features/parameters/store/generationSlice'; import { shouldUseNSFWCheckerChanged, shouldUseWatermarkerChanged } from 'features/system/store/systemSlice'; import { appInfoApi } from 'services/api/endpoints/appInfo'; -import { startAppListening } from '..'; - -export const addAppConfigReceivedListener = () => { +export const addAppConfigReceivedListener = (startAppListening: AppStartListening) => { startAppListening({ matcher:
appInfoApi.endpoints.getAppConfig.matchFulfilled, effect: async (action, { getState, dispatch }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts index 9cbd1c4aca..729067ee82 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/appStarted.ts @@ -1,10 +1,9 @@ import { createAction } from '@reduxjs/toolkit'; - -import { startAppListening } from '..'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; export const appStarted = createAction('app/appStarted'); -export const addAppStartedListener = () => { +export const addAppStartedListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: appStarted, effect: async (action, { unsubscribe, cancelActiveListeners }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/batchEnqueued.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/batchEnqueued.ts index 6419f840ec..68eda997b7 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/batchEnqueued.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/batchEnqueued.ts @@ -1,19 +1,13 @@ -import { createStandaloneToast, theme, TOAST_OPTIONS } from '@invoke-ai/ui-library'; import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { parseify } from 'common/util/serialize'; +import { toast } from 'common/util/toast'; import { zPydanticValidationError } from 'features/system/store/zodSchemas'; import { t } from 'i18next'; import { truncate, upperFirst } from 'lodash-es'; import { queueApi } from 'services/api/endpoints/queue'; -import { startAppListening } from '..'; - -const { toast } = createStandaloneToast({ - theme: theme, - defaultOptions: TOAST_OPTIONS.defaultOptions, -}); - -export const addBatchEnqueuedListener = () => { +export const addBatchEnqueuedListener = (startAppListening: AppStartListening) => { // success startAppListening({ matcher: queueApi.endpoints.enqueueBatch.matchFulfilled, diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted.ts index 2e77896ad3..8e8d3f4b99 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardAndImagesDeleted.ts @@ -1,3 +1,4 @@ +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { resetCanvas } from 'features/canvas/store/canvasSlice'; import { controlAdaptersReset } from 'features/controlAdapters/store/controlAdaptersSlice'; import { getImageUsage } from 'features/deleteImageModal/store/selectors'; @@ -5,9 +6,7 @@ import { nodeEditorReset } from 'features/nodes/store/nodesSlice'; import { clearInitialImage } from 'features/parameters/store/generationSlice'; import { imagesApi } from 'services/api/endpoints/images'; -import { startAppListening } from '..'; - -export const addDeleteBoardAndImagesFulfilledListener = () => { +export const addDeleteBoardAndImagesFulfilledListener = 
(startAppListening: AppStartListening) => { startAppListening({ matcher: imagesApi.endpoints.deleteBoardAndImages.matchFulfilled, effect: async (action, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts index ebcb96d116..2c1aa6ec8b 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/boardIdSelected.ts @@ -1,12 +1,11 @@ import { isAnyOf } from '@reduxjs/toolkit'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice'; import { ASSETS_CATEGORIES, IMAGE_CATEGORIES } from 'features/gallery/store/types'; import { imagesApi } from 'services/api/endpoints/images'; import { imagesSelectors } from 'services/api/util'; -import { startAppListening } from '..'; - -export const addBoardIdSelectedListener = () => { +export const addBoardIdSelectedListener = (startAppListening: AppStartListening) => { startAppListening({ matcher: isAnyOf(boardIdSelected, galleryViewChanged), effect: async (action, { getState, dispatch, condition, cancelActiveListeners }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/bulkDownload.tsx b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/bulkDownload.tsx new file mode 100644 index 0000000000..38a0fd7911 --- /dev/null +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/bulkDownload.tsx @@ -0,0 +1,114 @@ +import type { UseToastOptions } from '@invoke-ai/ui-library'; +import { ExternalLink } from '@invoke-ai/ui-library'; +import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; +import { toast } from 'common/util/toast'; +import { t } from 'i18next'; +import { imagesApi } from 'services/api/endpoints/images'; +import { + socketBulkDownloadCompleted, + socketBulkDownloadFailed, + socketBulkDownloadStarted, +} from 'services/events/actions'; + +const log = logger('images'); + +export const addBulkDownloadListeners = (startAppListening: AppStartListening) => { + startAppListening({ + matcher: imagesApi.endpoints.bulkDownloadImages.matchFulfilled, + effect: async (action) => { + log.debug(action.payload, 'Bulk download requested'); + + // If we have an item name, we are processing the bulk download locally and should use it as the toast id to + // prevent multiple toasts for the same item. + toast({ + id: action.payload.bulk_download_item_name ?? undefined, + title: t('gallery.bulkDownloadRequested'), + status: 'success', + // Show the response message if it exists, otherwise show the default message + description: action.payload.response || t('gallery.bulkDownloadRequestedDesc'), + duration: null, + isClosable: true, + }); + }, + }); + + startAppListening({ + matcher: imagesApi.endpoints.bulkDownloadImages.matchRejected, + effect: async () => { + log.debug('Bulk download request failed'); + + // There isn't any toast to update if we get this event.
+ toast({ + title: t('gallery.bulkDownloadRequestFailed'), + status: 'error', + isClosable: true, + }); + }, + }); + + startAppListening({ + actionCreator: socketBulkDownloadStarted, + effect: async (action) => { + // This should always happen immediately after the bulk download request, so we don't need to show a toast here. + log.debug(action.payload.data, 'Bulk download preparation started'); + }, + }); + + startAppListening({ + actionCreator: socketBulkDownloadCompleted, + effect: async (action) => { + log.debug(action.payload.data, 'Bulk download preparation completed'); + + const { bulk_download_item_name } = action.payload.data; + + // TODO(psyche): This URL may break in some environments (e.g. Nvidia workbench) but we need to test it first + const url = `/api/v1/images/download/${bulk_download_item_name}`; + + const toastOptions: UseToastOptions = { + id: bulk_download_item_name, + title: t('gallery.bulkDownloadReady', 'Download ready'), + status: 'success', + description: ( + + ), + duration: null, + isClosable: true, + }; + + if (toast.isActive(bulk_download_item_name)) { + toast.update(bulk_download_item_name, toastOptions); + } else { + toast(toastOptions); + } + }, + }); + + startAppListening({ + actionCreator: socketBulkDownloadFailed, + effect: async (action) => { + log.debug(action.payload.data, 'Bulk download preparation failed'); + + const { bulk_download_item_name } = action.payload.data; + + const toastOptions: UseToastOptions = { + id: bulk_download_item_name, + title: t('gallery.bulkDownloadFailed'), + status: 'error', + description: action.payload.data.error, + duration: null, + isClosable: true, + }; + + if (toast.isActive(bulk_download_item_name)) { + toast.update(bulk_download_item_name, toastOptions); + } else { + toast(toastOptions); + } + }, + }); +}; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasCopiedToClipboard.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasCopiedToClipboard.ts index a4ae936ada..e1f4804d56 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasCopiedToClipboard.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasCopiedToClipboard.ts @@ -1,13 +1,12 @@ import { $logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { canvasCopiedToClipboard } from 'features/canvas/store/actions'; import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob'; import { addToast } from 'features/system/store/systemSlice'; import { copyBlobToClipboard } from 'features/system/util/copyBlobToClipboard'; import { t } from 'i18next'; -import { startAppListening } from '..'; - -export const addCanvasCopiedToClipboardListener = () => { +export const addCanvasCopiedToClipboardListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: canvasCopiedToClipboard, effect: async (action, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasDownloadedAsImage.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasDownloadedAsImage.ts index af37afa8d5..5b8150bd20 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasDownloadedAsImage.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasDownloadedAsImage.ts @@ -1,13 +1,12
@@ import { $logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { canvasDownloadedAsImage } from 'features/canvas/store/actions'; import { downloadBlob } from 'features/canvas/util/downloadBlob'; import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob'; import { addToast } from 'features/system/store/systemSlice'; import { t } from 'i18next'; -import { startAppListening } from '..'; - -export const addCanvasDownloadedAsImageListener = () => { +export const addCanvasDownloadedAsImageListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: canvasDownloadedAsImage, effect: async (action, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts index b9b08d2b4e..775a20965d 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasImageToControlNet.ts @@ -1,4 +1,5 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { canvasImageToControlAdapter } from 'features/canvas/store/actions'; import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob'; import { controlAdapterImageChanged } from 'features/controlAdapters/store/controlAdaptersSlice'; @@ -6,9 +7,7 @@ import { addToast } from 'features/system/store/systemSlice'; import { t } from 'i18next'; import { imagesApi } from 'services/api/endpoints/images'; -import { startAppListening } from '..'; - -export const addCanvasImageToControlNetListener = () => { +export const addCanvasImageToControlNetListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: canvasImageToControlAdapter, effect: async (action, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskSavedToGallery.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskSavedToGallery.ts index d8a3c3827d..af0c3878fc 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskSavedToGallery.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskSavedToGallery.ts @@ -1,13 +1,12 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { canvasMaskSavedToGallery } from 'features/canvas/store/actions'; import { getCanvasData } from 'features/canvas/util/getCanvasData'; import { addToast } from 'features/system/store/systemSlice'; import { t } from 'i18next'; import { imagesApi } from 'services/api/endpoints/images'; -import { startAppListening } from '..'; - -export const addCanvasMaskSavedToGalleryListener = () => { +export const addCanvasMaskSavedToGalleryListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: canvasMaskSavedToGallery, effect: async (action, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet.ts index 
cf1658cb41..4024488566 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMaskToControlNet.ts @@ -1,4 +1,5 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { canvasMaskToControlAdapter } from 'features/canvas/store/actions'; import { getCanvasData } from 'features/canvas/util/getCanvasData'; import { controlAdapterImageChanged } from 'features/controlAdapters/store/controlAdaptersSlice'; @@ -6,9 +7,7 @@ import { addToast } from 'features/system/store/systemSlice'; import { t } from 'i18next'; import { imagesApi } from 'services/api/endpoints/images'; -import { startAppListening } from '..'; - -export const addCanvasMaskToControlNetListener = () => { +export const addCanvasMaskToControlNetListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: canvasMaskToControlAdapter, effect: async (action, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMerged.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMerged.ts index f29a095c1f..71b0e62b44 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMerged.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasMerged.ts @@ -1,4 +1,5 @@ import { $logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { canvasMerged } from 'features/canvas/store/actions'; import { $canvasBaseLayer } from 'features/canvas/store/canvasNanostore'; import { setMergedCanvas } from 'features/canvas/store/canvasSlice'; @@ -7,9 +8,7 @@ import { addToast } from 'features/system/store/systemSlice'; import { t } from 'i18next'; import { imagesApi } from 'services/api/endpoints/images'; -import { startAppListening } from '..'; - -export const addCanvasMergedListener = () => { +export const addCanvasMergedListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: canvasMerged, effect: async (action, { dispatch }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasSavedToGallery.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasSavedToGallery.ts index f09cbe12d1..e3ba988886 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasSavedToGallery.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/canvasSavedToGallery.ts @@ -1,13 +1,12 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { canvasSavedToGallery } from 'features/canvas/store/actions'; import { getBaseLayerBlob } from 'features/canvas/util/getBaseLayerBlob'; import { addToast } from 'features/system/store/systemSlice'; import { t } from 'i18next'; import { imagesApi } from 'services/api/endpoints/images'; -import { startAppListening } from '..'; - -export const addCanvasSavedToGalleryListener = () => { +export const addCanvasSavedToGalleryListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: canvasSavedToGallery, effect: async (action, { dispatch, getState }) => { diff --git 
a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess.ts index d194195665..e52df30681 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetAutoProcess.ts @@ -1,5 +1,6 @@ import type { AnyListenerPredicate } from '@reduxjs/toolkit'; import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import type { RootState } from 'app/store/store'; import { controlAdapterImageProcessed } from 'features/controlAdapters/store/actions'; import { @@ -12,8 +13,6 @@ import { } from 'features/controlAdapters/store/controlAdaptersSlice'; import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types'; -import { startAppListening } from '..'; - type AnyControlAdapterParamChangeAction = | ReturnType | ReturnType @@ -67,7 +66,7 @@ const DEBOUNCE_MS = 300; * * The network request is debounced. */ -export const addControlNetAutoProcessListener = () => { +export const addControlNetAutoProcessListener = (startAppListening: AppStartListening) => { startAppListening({ predicate, effect: async (action, { dispatch, cancelActiveListeners, delay }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts index fba274beb8..0055866aa7 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts @@ -1,4 +1,5 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { parseify } from 'common/util/serialize'; import { controlAdapterImageProcessed } from 'features/controlAdapters/store/actions'; import { @@ -16,9 +17,7 @@ import { queueApi } from 'services/api/endpoints/queue'; import type { BatchConfig, ImageDTO } from 'services/api/types'; import { socketInvocationComplete } from 'services/events/actions'; -import { startAppListening } from '..'; - -export const addControlNetImageProcessedListener = () => { +export const addControlNetImageProcessedListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: controlAdapterImageProcessed, effect: async (action, { dispatch, getState, take }) => { @@ -51,6 +50,7 @@ export const addControlNetImageProcessedListener = () => { image: { image_name: ca.controlImage }, }, }, + edges: [], }, runs: 1, }, diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas.ts index 7847448d3e..ed1f4fdd98 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedCanvas.ts @@ -1,5 +1,6 @@ import { logger } from 'app/logging/logger'; import { enqueueRequested } from 'app/store/actions'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import 
openBase64ImageInTab from 'common/util/openBase64ImageInTab'; import { parseify } from 'common/util/serialize'; import { canvasBatchIdAdded, stagingAreaInitialized } from 'features/canvas/store/canvasSlice'; @@ -13,8 +14,6 @@ import { imagesApi } from 'services/api/endpoints/images'; import { queueApi } from 'services/api/endpoints/queue'; import type { ImageDTO } from 'services/api/types'; -import { startAppListening } from '..'; - /** * This listener is responsible for invoking the canvas. This involves a number of steps: * @@ -28,7 +27,7 @@ import { startAppListening } from '..'; * 8. Initialize the staging area if not yet initialized * 9. Dispatch the sessionReadyToInvoke action to invoke the session */ -export const addEnqueueRequestedCanvasListener = () => { +export const addEnqueueRequestedCanvasListener = (startAppListening: AppStartListening) => { startAppListening({ predicate: (action): action is ReturnType<typeof enqueueRequested> => enqueueRequested.match(action) && action.payload.tabName === 'unifiedCanvas', diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts index e1e13fadbe..337c0f4145 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedLinear.ts @@ -1,4 +1,5 @@ import { enqueueRequested } from 'app/store/actions'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { prepareLinearUIBatch } from 'features/nodes/util/graph/buildLinearBatchConfig'; import { buildLinearImageToImageGraph } from 'features/nodes/util/graph/buildLinearImageToImageGraph'; import { buildLinearSDXLImageToImageGraph } from 'features/nodes/util/graph/buildLinearSDXLImageToImageGraph'; @@ -6,9 +7,7 @@ import { buildLinearSDXLTextToImageGraph } from 'features/nodes/util/graph/build import { buildLinearTextToImageGraph } from 'features/nodes/util/graph/buildLinearTextToImageGraph'; import { queueApi } from 'services/api/endpoints/queue'; -import { startAppListening } from '..'; - -export const addEnqueueRequestedLinear = () => { +export const addEnqueueRequestedLinear = (startAppListening: AppStartListening) => { startAppListening({ predicate: (action): action is ReturnType<typeof enqueueRequested> => enqueueRequested.match(action) && (action.payload.tabName === 'txt2img' || action.payload.tabName === 'img2img'), @@ -19,7 +18,7 @@ export const addEnqueueRequestedLinear = () => { let graph; - if (model && model.base_model === 'sdxl') { + if (model && model.base === 'sdxl') { if (action.payload.tabName === 'txt2img') { graph = buildLinearSDXLTextToImageGraph(state); } else { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedNodes.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedNodes.ts index 0ad33057fd..e33f7c964a 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedNodes.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/enqueueRequestedNodes.ts @@ -1,12 +1,11 @@ import { enqueueRequested } from 'app/store/actions'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { buildNodesGraph } from 'features/nodes/util/graph/buildNodesGraph'; import { buildWorkflowWithValidation }
from 'features/nodes/util/workflow/buildWorkflow'; import { queueApi } from 'services/api/endpoints/queue'; import type { BatchConfig } from 'services/api/types'; -import { startAppListening } from '..'; - -export const addEnqueueRequestedNodes = () => { +export const addEnqueueRequestedNodes = (startAppListening: AppStartListening) => { startAppListening({ predicate: (action): action is ReturnType<typeof enqueueRequested> => enqueueRequested.match(action) && action.payload.tabName === 'nodes', diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/galleryImageClicked.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/galleryImageClicked.ts index cc810a2517..67c6d076ee 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/galleryImageClicked.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/galleryImageClicked.ts @@ -1,12 +1,11 @@ import { createAction } from '@reduxjs/toolkit'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { selectListImagesQueryArgs } from 'features/gallery/store/gallerySelectors'; import { selectionChanged } from 'features/gallery/store/gallerySlice'; import { imagesApi } from 'services/api/endpoints/images'; import type { ImageDTO } from 'services/api/types'; import { imagesSelectors } from 'services/api/util'; -import { startAppListening } from '..'; - export const galleryImageClicked = createAction<{ imageDTO: ImageDTO; shiftKey: boolean; @@ -25,7 +24,7 @@ export const galleryImageClicked = createAction<{ * is much more responsive. */ -export const addGalleryImageClickedListener = () => { +export const addGalleryImageClickedListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: galleryImageClicked, effect: async (action, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema.ts index b2d3615909..acb2bdb698 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/getOpenAPISchema.ts @@ -1,13 +1,12 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { parseify } from 'common/util/serialize'; -import { nodeTemplatesBuilt } from 'features/nodes/store/nodeTemplatesSlice'; +import { nodeTemplatesBuilt } from 'features/nodes/store/nodesSlice'; import { parseSchema } from 'features/nodes/util/schema/parseSchema'; import { size } from 'lodash-es'; import { appInfoApi } from 'services/api/endpoints/appInfo'; -import { startAppListening } from '..'; - -export const addGetOpenAPISchemaListener = () => { +export const addGetOpenAPISchemaListener = (startAppListening: AppStartListening) => { startAppListening({ matcher: appInfoApi.endpoints.getOpenAPISchema.matchFulfilled, effect: (action, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard.ts index 61da8ff669..5412e0f236 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard.ts +++
b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageAddedToBoard.ts @@ -1,9 +1,8 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { imagesApi } from 'services/api/endpoints/images'; -import { startAppListening } from '..'; - -export const addImageAddedToBoardFulfilledListener = () => { +export const addImageAddedToBoardFulfilledListener = (startAppListening: AppStartListening) => { startAppListening({ matcher: imagesApi.endpoints.addImageToBoard.matchFulfilled, effect: (action) => { @@ -15,9 +14,7 @@ export const addImageAddedToBoardFulfilledListener = () => { log.debug({ board_id, imageDTO }, 'Image added to board'); }, }); -}; -export const addImageAddedToBoardRejectedListener = () => { startAppListening({ matcher: imagesApi.endpoints.addImageToBoard.matchRejected, effect: (action) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts index 1312e54891..9bbbf80263 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDeleted.ts @@ -1,4 +1,5 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { resetCanvas } from 'features/canvas/store/canvasSlice'; import { controlAdapterImageChanged, @@ -19,9 +20,7 @@ import { api } from 'services/api'; import { imagesApi } from 'services/api/endpoints/images'; import { imagesSelectors } from 'services/api/util'; -import { startAppListening } from '..'; - -export const addRequestedSingleImageDeletionListener = () => { +export const addRequestedSingleImageDeletionListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: imageDeletionConfirmed, effect: async (action, { dispatch, getState, condition }) => { @@ -134,12 +133,7 @@ export const addRequestedSingleImageDeletionListener = () => { } }, }); -}; -/** - * Called when the user requests an image deletion - */ -export const addRequestedMultipleImageDeletionListener = () => { startAppListening({ actionCreator: imageDeletionConfirmed, effect: async (action, { dispatch, getState }) => { @@ -224,24 +218,14 @@ export const addRequestedMultipleImageDeletionListener = () => { } }, }); -}; -/** - * Called when the actual delete request is sent to the server - */ -export const addImageDeletedPendingListener = () => { startAppListening({ matcher: imagesApi.endpoints.deleteImage.matchPending, effect: () => { // }, }); -}; -/** - * Called on successful delete - */ -export const addImageDeletedFulfilledListener = () => { startAppListening({ matcher: imagesApi.endpoints.deleteImage.matchFulfilled, effect: (action) => { @@ -249,12 +233,7 @@ export const addImageDeletedFulfilledListener = () => { log.debug({ imageDTO: action.meta.arg.originalArgs }, 'Image deleted'); }, }); -}; -/** - * Called on failed delete - */ -export const addImageDeletedRejectedListener = () => { startAppListening({ matcher: imagesApi.endpoints.deleteImage.matchRejected, effect: (action) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts index 268aac25d9..5c1f321b64 100644 --- 
a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageDropped.ts @@ -1,5 +1,6 @@ import { createAction } from '@reduxjs/toolkit'; import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { parseify } from 'common/util/serialize'; import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice'; import { @@ -12,14 +13,12 @@ import { fieldImageValueChanged } from 'features/nodes/store/nodesSlice'; import { initialImageChanged, selectOptimalDimension } from 'features/parameters/store/generationSlice'; import { imagesApi } from 'services/api/endpoints/images'; -import { startAppListening } from '../'; - export const dndDropped = createAction<{ overData: TypesafeDroppableData; activeData: TypesafeDraggableData; }>('dnd/dndDropped'); -export const addImageDroppedListener = () => { +export const addImageDroppedListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: dndDropped, effect: async (action, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard.ts index 4c21a750f1..274e4c51c2 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageRemovedFromBoard.ts @@ -1,9 +1,8 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { imagesApi } from 'services/api/endpoints/images'; -import { startAppListening } from '..'; - -export const addImageRemovedFromBoardFulfilledListener = () => { +export const addImageRemovedFromBoardFulfilledListener = (startAppListening: AppStartListening) => { startAppListening({ matcher: imagesApi.endpoints.removeImageFromBoard.matchFulfilled, effect: (action) => { @@ -13,9 +12,7 @@ export const addImageRemovedFromBoardFulfilledListener = () => { log.debug({ imageDTO }, 'Image removed from board'); }, }); -}; -export const addImageRemovedFromBoardRejectedListener = () => { startAppListening({ matcher: imagesApi.endpoints.removeImageFromBoard.matchRejected, effect: (action) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageToDeleteSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageToDeleteSelected.ts index 03921264a2..d20c0c7c23 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageToDeleteSelected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageToDeleteSelected.ts @@ -1,10 +1,9 @@ +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { imageDeletionConfirmed } from 'features/deleteImageModal/store/actions'; import { selectImageUsage } from 'features/deleteImageModal/store/selectors'; import { imagesToDeleteSelected, isModalOpenChanged } from 'features/deleteImageModal/store/slice'; -import { startAppListening } from '..'; - -export const addImageToDeleteSelectedListener = () => { +export const addImageToDeleteSelectedListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: 
imagesToDeleteSelected, effect: async (action, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts index d17727fcdc..2cebf0aef8 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imageUploaded.ts @@ -1,5 +1,6 @@ import type { UseToastOptions } from '@invoke-ai/ui-library'; import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice'; import { controlAdapterImageChanged, @@ -13,9 +14,7 @@ import { omit } from 'lodash-es'; import { boardsApi } from 'services/api/endpoints/boards'; import { imagesApi } from 'services/api/endpoints/images'; -import { startAppListening } from '..'; - -export const addImageUploadedFulfilledListener = () => { +export const addImageUploadedFulfilledListener = (startAppListening: AppStartListening) => { startAppListening({ matcher: imagesApi.endpoints.uploadImage.matchFulfilled, effect: (action, { dispatch, getState }) => { @@ -133,9 +132,7 @@ export const addImageUploadedFulfilledListener = () => { } }, }); -}; -export const addImageUploadedRejectedListener = () => { startAppListening({ matcher: imagesApi.endpoints.uploadImage.matchRejected, effect: (action, { dispatch }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imagesStarred.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imagesStarred.ts index 064e9876fc..74b36e3297 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imagesStarred.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imagesStarred.ts @@ -1,10 +1,9 @@ +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { selectionChanged } from 'features/gallery/store/gallerySlice'; import { imagesApi } from 'services/api/endpoints/images'; import type { ImageDTO } from 'services/api/types'; -import { startAppListening } from '..'; - -export const addImagesStarredListener = () => { +export const addImagesStarredListener = (startAppListening: AppStartListening) => { startAppListening({ matcher: imagesApi.endpoints.starImages.matchFulfilled, effect: async (action, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imagesUnstarred.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imagesUnstarred.ts index 7174bd066d..ebae7885c1 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imagesUnstarred.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/imagesUnstarred.ts @@ -1,10 +1,9 @@ +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { selectionChanged } from 'features/gallery/store/gallerySlice'; import { imagesApi } from 'services/api/endpoints/images'; import type { ImageDTO } from 'services/api/types'; -import { startAppListening } from '..'; - -export const addImagesUnstarredListener = () => { +export const addImagesUnstarredListener = (startAppListening: AppStartListening) => { startAppListening({ matcher: 
imagesApi.endpoints.unstarImages.matchFulfilled, effect: async (action, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/initialImageSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/initialImageSelected.ts index 93e921e911..735ce8367a 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/initialImageSelected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/initialImageSelected.ts @@ -1,12 +1,11 @@ +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { initialImageSelected } from 'features/parameters/store/actions'; import { initialImageChanged } from 'features/parameters/store/generationSlice'; import { addToast } from 'features/system/store/systemSlice'; import { makeToast } from 'features/system/util/makeToast'; import { t } from 'i18next'; -import { startAppListening } from '..'; - -export const addInitialImageSelectedListener = () => { +export const addInitialImageSelectedListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: initialImageSelected, effect: (action, { dispatch }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts index 7638c5522a..bc049cf498 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelSelected.ts @@ -1,4 +1,5 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { controlAdapterIsEnabledChanged, selectControlAdapterAll, @@ -12,9 +13,7 @@ import { makeToast } from 'features/system/util/makeToast'; import { t } from 'i18next'; import { forEach } from 'lodash-es'; -import { startAppListening } from '..'; - -export const addModelSelectedListener = () => { +export const addModelSelectedListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: modelSelected, effect: (action, { getState, dispatch }) => { @@ -30,8 +29,8 @@ export const addModelSelectedListener = () => { const newModel = result.data; - const newBaseModel = newModel.base_model; - const didBaseModelChange = state.generation.model?.base_model !== newBaseModel; + const newBaseModel = newModel.base; + const didBaseModelChange = state.generation.model?.base !== newBaseModel; if (didBaseModelChange) { // we may need to reset some incompatible submodels @@ -39,7 +38,7 @@ export const addModelSelectedListener = () => { // handle incompatible loras forEach(state.lora.loras, (lora, id) => { - if (lora.base_model !== newBaseModel) { + if (lora.model.base !== newBaseModel) { dispatch(loraRemoved(id)); modelsCleared += 1; } @@ -47,14 +46,14 @@ export const addModelSelectedListener = () => { // handle incompatible vae const { vae } = state.generation; - if (vae && vae.base_model !== newBaseModel) { + if (vae && vae.base !== newBaseModel) { dispatch(vaeSelected(null)); modelsCleared += 1; } // handle incompatible controlnets selectControlAdapterAll(state.controlAdapters).forEach((ca) => { - if (ca.model?.base_model !== newBaseModel) { + if (ca.model?.base !== newBaseModel) { dispatch(controlAdapterIsEnabledChanged({ id: ca.id, isEnabled: false })); modelsCleared += 1; 
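/* incompatible control adapters are disabled rather than removed, so their settings are kept if a compatible base model is selected again */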
} diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts index 0ffe88cd07..75c6080bf6 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/modelsLoaded.ts @@ -1,4 +1,5 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { controlAdapterModelCleared, selectAllControlNets, @@ -13,9 +14,7 @@ import { forEach, some } from 'lodash-es'; import { mainModelsAdapterSelectors, modelsApi, vaeModelsAdapterSelectors } from 'services/api/endpoints/models'; import type { TypeGuardFor } from 'services/api/types'; -import { startAppListening } from '..'; - -export const addModelsLoadedListener = () => { +export const addModelsLoadedListener = (startAppListening: AppStartListening) => { startAppListening({ predicate: (action): action is TypeGuardFor<typeof modelsApi.endpoints.getMainModels.matchFulfilled> => modelsApi.endpoints.getMainModels.matchFulfilled(action) && @@ -34,14 +33,7 @@ export const addModelsLoadedListener = () => { return; } - const isCurrentModelAvailable = currentModel - ? models.some( - (m) => - m.model_name === currentModel.model_name && - m.base_model === currentModel.base_model && - m.model_type === currentModel.model_type - ) - : false; + const isCurrentModelAvailable = currentModel ? models.some((m) => m.key === currentModel.key) : false; if (isCurrentModelAvailable) { return; } @@ -74,14 +66,7 @@ export const addModelsLoadedListener = () => { return; } - const isCurrentModelAvailable = currentModel - ? models.some( - (m) => - m.model_name === currentModel.model_name && - m.base_model === currentModel.base_model && - m.model_type === currentModel.model_type - ) - : false; + const isCurrentModelAvailable = currentModel ?
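/* models are now matched by their unique key instead of the old (model_name, base_model, model_type) triple */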
models.some((m) => m.key === currentModel.key) : false; if (!isCurrentModelAvailable) { dispatch(refinerModelChanged(null)); @@ -103,10 +88,7 @@ export const addModelsLoadedListener = () => { return; } - const isCurrentVAEAvailable = some( - action.payload.entities, - (m) => m?.model_name === currentVae?.model_name && m?.base_model === currentVae?.base_model - ); + const isCurrentVAEAvailable = some(action.payload.entities, (m) => m?.key === currentVae?.key); if (isCurrentVAEAvailable) { return; @@ -140,10 +122,7 @@ export const addModelsLoadedListener = () => { const loras = getState().lora.loras; forEach(loras, (lora, id) => { - const isLoRAAvailable = some( - action.payload.entities, - (m) => m?.model_name === lora?.model_name && m?.base_model === lora?.base_model - ); + const isLoRAAvailable = some(action.payload.entities, (m) => m?.key === lora?.model.key); if (isLoRAAvailable) { return; @@ -161,10 +140,7 @@ export const addModelsLoadedListener = () => { log.info({ models: action.payload.entities }, `ControlNet models loaded (${action.payload.ids.length})`); selectAllControlNets(getState().controlAdapters).forEach((ca) => { - const isModelAvailable = some( - action.payload.entities, - (m) => m?.model_name === ca?.model?.model_name && m?.base_model === ca?.model?.base_model - ); + const isModelAvailable = some(action.payload.entities, (m) => m?.key === ca?.model?.key); if (isModelAvailable) { return; @@ -182,10 +158,7 @@ export const addModelsLoadedListener = () => { log.info({ models: action.payload.entities }, `T2I Adapter models loaded (${action.payload.ids.length})`); selectAllT2IAdapters(getState().controlAdapters).forEach((ca) => { - const isModelAvailable = some( - action.payload.entities, - (m) => m?.model_name === ca?.model?.model_name && m?.base_model === ca?.model?.base_model - ); + const isModelAvailable = some(action.payload.entities, (m) => m?.key === ca?.model?.key); if (isModelAvailable) { return; @@ -203,10 +176,7 @@ export const addModelsLoadedListener = () => { log.info({ models: action.payload.entities }, `IP Adapter models loaded (${action.payload.ids.length})`); selectAllIPAdapters(getState().controlAdapters).forEach((ca) => { - const isModelAvailable = some( - action.payload.entities, - (m) => m?.model_name === ca?.model?.model_name && m?.base_model === ca?.model?.base_model - ); + const isModelAvailable = some(action.payload.entities, (m) => m?.key === ca?.model?.key); if (isModelAvailable) { return; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/promptChanged.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/promptChanged.ts index bd6cd502f6..b78ddc3f69 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/promptChanged.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/promptChanged.ts @@ -1,4 +1,5 @@ import { isAnyOf } from '@reduxjs/toolkit'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { combinatorialToggled, isErrorChanged, @@ -13,11 +14,9 @@ import { setPositivePrompt } from 'features/parameters/store/generationSlice'; import { utilitiesApi } from 'services/api/endpoints/utilities'; import { socketConnected } from 'services/events/actions'; -import { startAppListening } from '..'; - const matcher = isAnyOf(setPositivePrompt, combinatorialToggled, maxPromptsChanged, maxPromptsReset, socketConnected); -export const addDynamicPromptsListener = () => { +export const 
addDynamicPromptsListener = (startAppListening: AppStartListening) => { startAppListening({ matcher, effect: async (action, { dispatch, getState, cancelActiveListeners, delay }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketConnected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketConnected.ts index df0de6bbda..4c76474cdc 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketConnected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketConnected.ts @@ -1,4 +1,5 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { $baseUrl } from 'app/store/nanostores/baseUrl'; import { isEqual } from 'lodash-es'; import { atom } from 'nanostores'; @@ -6,13 +7,11 @@ import { api } from 'services/api'; import { queueApi, selectQueueStatus } from 'services/api/endpoints/queue'; import { socketConnected } from 'services/events/actions'; -import { startAppListening } from '../..'; - const log = logger('socketio'); const $isFirstConnection = atom(true); -export const addSocketConnectedEventListener = () => { +export const addSocketConnectedEventListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: socketConnected, effect: async (action, { dispatch, getState, cancelActiveListeners, delay }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketDisconnected.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketDisconnected.ts index 02d3b77f89..be1a7663b3 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketDisconnected.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketDisconnected.ts @@ -1,11 +1,10 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { socketDisconnected } from 'services/events/actions'; -import { startAppListening } from '../..'; - const log = logger('socketio'); -export const addSocketDisconnectedEventListener = () => { +export const addSocketDisconnectedEventListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: socketDisconnected, effect: () => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketGeneratorProgress.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketGeneratorProgress.ts index 0965f41ee1..bb113a09ee 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketGeneratorProgress.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketGeneratorProgress.ts @@ -1,11 +1,10 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { socketGeneratorProgress } from 'services/events/actions'; -import { startAppListening } from '../..'; - const log = logger('socketio'); -export const addGeneratorProgressEventListener = () => { +export const addGeneratorProgressEventListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: socketGeneratorProgress, effect: 
(action) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketGraphExecutionStateComplete.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketGraphExecutionStateComplete.ts index e4f83561c8..5221679232 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketGraphExecutionStateComplete.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketGraphExecutionStateComplete.ts @@ -1,11 +1,10 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { socketGraphExecutionStateComplete } from 'services/events/actions'; -import { startAppListening } from '../..'; - const log = logger('socketio'); -export const addGraphExecutionStateCompleteEventListener = () => { +export const addGraphExecutionStateCompleteEventListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: socketGraphExecutionStateComplete, effect: (action) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts index d49f35cd2a..279f9aac5b 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts @@ -1,33 +1,31 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { parseify } from 'common/util/serialize'; import { addImageToStagingArea } from 'features/canvas/store/canvasSlice'; import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice'; import { IMAGE_CATEGORIES } from 'features/gallery/store/types'; import { isImageOutput } from 'features/nodes/types/common'; -import { LINEAR_UI_OUTPUT, nodeIDDenyList } from 'features/nodes/util/graph/constants'; +import { CANVAS_OUTPUT } from 'features/nodes/util/graph/constants'; import { boardsApi } from 'services/api/endpoints/boards'; import { imagesApi } from 'services/api/endpoints/images'; import { imagesAdapter } from 'services/api/util'; import { socketInvocationComplete } from 'services/events/actions'; -import { startAppListening } from '../..'; - // These nodes output an image, but do not actually *save* an image, so we don't want to handle the gallery logic on them const nodeTypeDenylist = ['load_image', 'image']; const log = logger('socketio'); -export const addInvocationCompleteEventListener = () => { +export const addInvocationCompleteEventListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: socketInvocationComplete, effect: async (action, { dispatch, getState }) => { const { data } = action.payload; log.debug({ data: parseify(data) }, `Invocation complete (${action.payload.data.node.type})`); - const { result, node, queue_batch_id, source_node_id } = data; - + const { result, node, queue_batch_id } = data; // This complete event has an associated image output - if (isImageOutput(result) && !nodeTypeDenylist.includes(node.type) && !nodeIDDenyList.includes(source_node_id)) { + if (isImageOutput(result) && !nodeTypeDenylist.includes(node.type)) { const { 
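/* only the image name is needed here; the full ImageDTO is fetched from the images API below */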
image_name } = result.image; const { canvas, gallery } = getState(); @@ -42,7 +40,7 @@ export const addInvocationCompleteEventListener = () => { imageDTORequest.unsubscribe(); // Add canvas images to the staging area - if (canvas.batchIds.includes(queue_batch_id) && [LINEAR_UI_OUTPUT].includes(data.source_node_id)) { + if (canvas.batchIds.includes(queue_batch_id) && data.source_node_id === CANVAS_OUTPUT) { dispatch(addImageToStagingArea(imageDTO)); } diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationError.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationError.ts index dfc1f0dd58..fb898b4c7a 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationError.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationError.ts @@ -1,11 +1,10 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { socketInvocationError } from 'services/events/actions'; -import { startAppListening } from '../..'; - const log = logger('socketio'); -export const addInvocationErrorEventListener = () => { +export const addInvocationErrorEventListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: socketInvocationError, effect: (action) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationRetrievalError.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationRetrievalError.ts index f02317c236..44da4c0ddb 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationRetrievalError.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationRetrievalError.ts @@ -1,11 +1,10 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { socketInvocationRetrievalError } from 'services/events/actions'; -import { startAppListening } from '../..'; - const log = logger('socketio'); -export const addInvocationRetrievalErrorEventListener = () => { +export const addInvocationRetrievalErrorEventListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: socketInvocationRetrievalError, effect: (action) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationStarted.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationStarted.ts index 12766b2707..baf476a66b 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationStarted.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationStarted.ts @@ -1,11 +1,10 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { socketInvocationStarted } from 'services/events/actions'; -import { startAppListening } from '../..'; - const log = logger('socketio'); -export const addInvocationStartedEventListener = () => { +export const addInvocationStartedEventListener = (startAppListening: AppStartListening) => { startAppListening({ 
actionCreator: socketInvocationStarted, effect: (action) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketModelInstall.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketModelInstall.ts new file mode 100644 index 0000000000..9faedc64d7 --- /dev/null +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketModelInstall.ts @@ -0,0 +1,66 @@ +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; +import { api } from 'services/api'; +import { modelsApi } from 'services/api/endpoints/models'; +import { + socketModelInstallCompleted, + socketModelInstallDownloading, + socketModelInstallError, +} from 'services/events/actions'; + +export const addModelInstallEventListener = (startAppListening: AppStartListening) => { + startAppListening({ + actionCreator: socketModelInstallDownloading, + effect: async (action, { dispatch }) => { + const { bytes, total_bytes, id } = action.payload.data; + + dispatch( + modelsApi.util.updateQueryData('getModelImports', undefined, (draft) => { + const modelImport = draft.find((m) => m.id === id); + if (modelImport) { + modelImport.bytes = bytes; + modelImport.total_bytes = total_bytes; + modelImport.status = 'downloading'; + } + return draft; + }) + ); + }, + }); + + startAppListening({ + actionCreator: socketModelInstallCompleted, + effect: (action, { dispatch }) => { + const { id } = action.payload.data; + + dispatch( + modelsApi.util.updateQueryData('getModelImports', undefined, (draft) => { + const modelImport = draft.find((m) => m.id === id); + if (modelImport) { + modelImport.status = 'completed'; + } + return draft; + }) + ); + dispatch(api.util.invalidateTags([{ type: 'ModelConfig' }])); + }, + }); + + startAppListening({ + actionCreator: socketModelInstallError, + effect: (action, { dispatch }) => { + const { id, error, error_type } = action.payload.data; + + dispatch( + modelsApi.util.updateQueryData('getModelImports', undefined, (draft) => { + const modelImport = draft.find((m) => m.id === id); + if (modelImport) { + modelImport.status = 'error'; + modelImport.error_reason = error_type; + modelImport.error = error; + } + return draft; + }) + ); + }, + }); +}; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketModelLoad.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketModelLoad.ts index aa91c69d75..7009baddf2 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketModelLoad.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketModelLoad.ts @@ -1,11 +1,10 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { socketModelLoadCompleted, socketModelLoadStarted } from 'services/events/actions'; -import { startAppListening } from '../..'; - const log = logger('socketio'); -export const addModelLoadEventListener = () => { +export const addModelLoadEventListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: socketModelLoadStarted, effect: (action) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketQueueItemStatusChanged.ts 
b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketQueueItemStatusChanged.ts index bd5471b299..84073bb427 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketQueueItemStatusChanged.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketQueueItemStatusChanged.ts @@ -1,12 +1,11 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { queueApi, queueItemsAdapter } from 'services/api/endpoints/queue'; import { socketQueueItemStatusChanged } from 'services/events/actions'; -import { startAppListening } from '../..'; - const log = logger('socketio'); -export const addSocketQueueItemStatusChangedEventListener = () => { +export const addSocketQueueItemStatusChangedEventListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: socketQueueItemStatusChanged, effect: async (action, { dispatch }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketSessionRetrievalError.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketSessionRetrievalError.ts index b655a686de..a1a497dc08 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketSessionRetrievalError.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketSessionRetrievalError.ts @@ -1,11 +1,10 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { socketSessionRetrievalError } from 'services/events/actions'; -import { startAppListening } from '../..'; - const log = logger('socketio'); -export const addSessionRetrievalErrorEventListener = () => { +export const addSessionRetrievalErrorEventListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: socketSessionRetrievalError, effect: (action) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketSubscribed.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketSubscribed.ts index df7d5b4e02..48324cb652 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketSubscribed.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketSubscribed.ts @@ -1,11 +1,10 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { socketSubscribedSession } from 'services/events/actions'; -import { startAppListening } from '../..'; - const log = logger('socketio'); -export const addSocketSubscribedEventListener = () => { +export const addSocketSubscribedEventListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: socketSubscribedSession, effect: (action) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketUnsubscribed.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketUnsubscribed.ts index 4552fba2c5..7a76a809d6 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketUnsubscribed.ts +++ 
b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketUnsubscribed.ts @@ -1,10 +1,9 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { socketUnsubscribedSession } from 'services/events/actions'; - -import { startAppListening } from '../..'; const log = logger('socketio'); -export const addSocketUnsubscribedEventListener = () => { +export const addSocketUnsubscribedEventListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: socketUnsubscribedSession, effect: (action) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/stagingAreaImageSaved.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/stagingAreaImageSaved.ts index 8a38be1b77..6816e25bc1 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/stagingAreaImageSaved.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/stagingAreaImageSaved.ts @@ -1,11 +1,10 @@ +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { stagingAreaImageSaved } from 'features/canvas/store/actions'; import { addToast } from 'features/system/store/systemSlice'; import { t } from 'i18next'; import { imagesApi } from 'services/api/endpoints/images'; -import { startAppListening } from '..'; - -export const addStagingAreaImageSavedListener = () => { +export const addStagingAreaImageSavedListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: stagingAreaImageSaved, effect: async (action, { dispatch, getState }) => { diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/updateAllNodesRequested.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/updateAllNodesRequested.ts index 752c3b09df..5ee9de3c11 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/updateAllNodesRequested.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/updateAllNodesRequested.ts @@ -1,4 +1,5 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { updateAllNodesRequested } from 'features/nodes/store/actions'; import { nodeReplaced } from 'features/nodes/store/nodesSlice'; import { NodeUpdateError } from 'features/nodes/types/error'; @@ -8,15 +9,12 @@ import { addToast } from 'features/system/store/systemSlice'; import { makeToast } from 'features/system/util/makeToast'; import { t } from 'i18next'; -import { startAppListening } from '..'; - -export const addUpdateAllNodesRequestedListener = () => { +export const addUpdateAllNodesRequestedListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: updateAllNodesRequested, effect: (action, { dispatch, getState }) => { const log = logger('nodes'); - const nodes = getState().nodes.nodes; - const templates = getState().nodeTemplates.templates; + const { nodes, templates } = getState().nodes; let unableToUpdateCount = 0; diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/upscaleRequested.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/upscaleRequested.ts index 46f55ef21f..ff5d5f24be 100644 --- 
a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/upscaleRequested.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/upscaleRequested.ts @@ -1,5 +1,6 @@ import { createAction } from '@reduxjs/toolkit'; import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { parseify } from 'common/util/serialize'; import { buildAdHocUpscaleGraph } from 'features/nodes/util/graph/buildAdHocUpscaleGraph'; import { createIsAllowedToUpscaleSelector } from 'features/parameters/hooks/useIsAllowedToUpscale'; @@ -8,11 +9,9 @@ import { t } from 'i18next'; import { queueApi } from 'services/api/endpoints/queue'; import type { BatchConfig, ImageDTO } from 'services/api/types'; -import { startAppListening } from '..'; - export const upscaleRequested = createAction<{ imageDTO: ImageDTO }>(`upscale/upscaleRequested`); -export const addUpscaleRequestedListener = () => { +export const addUpscaleRequestedListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: upscaleRequested, effect: async (action, { dispatch, getState }) => { @@ -39,16 +38,12 @@ export const addUpscaleRequestedListener = () => { return; } - const { esrganModelName } = state.postprocessing; - const { autoAddBoardId } = state.gallery; - const enqueueBatchArg: BatchConfig = { prepend: true, batch: { graph: buildAdHocUpscaleGraph({ image_name, - esrganModelName, - autoAddBoardId, + state, }), runs: 1, }, diff --git a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested.ts b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested.ts index 0b37271be7..0227597fe9 100644 --- a/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested.ts +++ b/invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/workflowLoadRequested.ts @@ -1,4 +1,5 @@ import { logger } from 'app/logging/logger'; +import type { AppStartListening } from 'app/store/middleware/listenerMiddleware'; import { parseify } from 'common/util/serialize'; import { workflowLoaded, workflowLoadRequested } from 'features/nodes/store/actions'; import { $flow } from 'features/nodes/store/reactFlowInstance'; @@ -6,20 +7,17 @@ import { WorkflowMigrationError, WorkflowVersionError } from 'features/nodes/typ import { validateWorkflow } from 'features/nodes/util/workflow/validateWorkflow'; import { addToast } from 'features/system/store/systemSlice'; import { makeToast } from 'features/system/util/makeToast'; -import { setActiveTab } from 'features/ui/store/uiSlice'; import { t } from 'i18next'; import { z } from 'zod'; import { fromZodError } from 'zod-validation-error'; -import { startAppListening } from '..'; - -export const addWorkflowLoadRequestedListener = () => { +export const addWorkflowLoadRequestedListener = (startAppListening: AppStartListening) => { startAppListening({ actionCreator: workflowLoadRequested, effect: (action, { dispatch, getState }) => { const log = logger('nodes'); const { workflow, asCopy } = action.payload; - const nodeTemplates = getState().nodeTemplates.templates; + const nodeTemplates = getState().nodes.templates; try { const { workflow: validatedWorkflow, warnings } = validateWorkflow(workflow, nodeTemplates); @@ -53,7 +51,6 @@ export const addWorkflowLoadRequestedListener = () => { }); } - dispatch(setActiveTab('nodes')); requestAnimationFrame(() => { 
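/* defer one frame so the newly loaded nodes have rendered before the viewport is fitted */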
$flow.get()?.fitView(); }); diff --git a/invokeai/frontend/web/src/app/store/nanostores/bulkDownloadId.ts b/invokeai/frontend/web/src/app/store/nanostores/bulkDownloadId.ts new file mode 100644 index 0000000000..4f7118e2eb --- /dev/null +++ b/invokeai/frontend/web/src/app/store/nanostores/bulkDownloadId.ts @@ -0,0 +1,9 @@ +import { atom } from 'nanostores'; + +const DEFAULT_BULK_DOWNLOAD_ID = 'default'; + +/** + * The download id for a bulk download. Used for socket subscriptions. + */ + +export const $bulkDownloadId = atom<string>(DEFAULT_BULK_DOWNLOAD_ID); diff --git a/invokeai/frontend/web/src/app/store/nanostores/store.ts b/invokeai/frontend/web/src/app/store/nanostores/store.ts index aee0f0e6ef..65c59dad5d 100644 --- a/invokeai/frontend/web/src/app/store/nanostores/store.ts +++ b/invokeai/frontend/web/src/app/store/nanostores/store.ts @@ -8,4 +8,26 @@ declare global { } } +/** + * Raised when the redux store is unable to be retrieved. + */ +class ReduxStoreNotInitialized extends Error { + /** + * Create ReduxStoreNotInitialized + * @param {String} message + */ + constructor(message = 'Redux store not initialized') { + super(message); + this.name = this.constructor.name; + } +} + export const $store = atom<Readonly<ReturnType<typeof createStore>> | undefined>(); + +export const getStore = () => { + const store = $store.get(); + if (!store) { + throw new ReduxStoreNotInitialized(); + } + return store; +}; diff --git a/invokeai/frontend/web/src/app/store/store.ts b/invokeai/frontend/web/src/app/store/store.ts index e25e1351eb..b538a3eaeb 100644 --- a/invokeai/frontend/web/src/app/store/store.ts +++ b/invokeai/frontend/web/src/app/store/store.ts @@ -3,6 +3,7 @@ import { autoBatchEnhancer, combineReducers, configureStore } from '@reduxjs/too import { logger } from 'app/logging/logger'; import { idbKeyValDriver } from 'app/store/enhancers/reduxRemember/driver'; import { errorHandler } from 'app/store/enhancers/reduxRemember/errors'; +import type { JSONObject } from 'common/types'; import { canvasPersistConfig, canvasSlice } from 'features/canvas/store/canvasSlice'; import { changeBoardModalSlice } from 'features/changeBoardModal/store/slice'; import { @@ -14,9 +15,8 @@ import { dynamicPromptsPersistConfig, dynamicPromptsSlice } from 'features/dynam import { galleryPersistConfig, gallerySlice } from 'features/gallery/store/gallerySlice'; import { hrfPersistConfig, hrfSlice } from 'features/hrf/store/hrfSlice'; import { loraPersistConfig, loraSlice } from 'features/lora/store/loraSlice'; -import { modelManagerPersistConfig, modelManagerSlice } from 'features/modelManager/store/modelManagerSlice'; +import { modelManagerV2PersistConfig, modelManagerV2Slice } from 'features/modelManagerV2/store/modelManagerV2Slice'; import { nodesPersistConfig, nodesSlice } from 'features/nodes/store/nodesSlice'; -import { nodesTemplatesSlice } from 'features/nodes/store/nodeTemplatesSlice'; import { workflowPersistConfig, workflowSlice } from 'features/nodes/store/workflowSlice'; import { generationPersistConfig, generationSlice } from 'features/parameters/store/generationSlice'; import { postprocessingPersistConfig, postprocessingSlice } from 'features/parameters/store/postprocessingSlice'; @@ -33,7 +33,6 @@ import { rememberEnhancer, rememberReducer } from 'redux-remember'; import { serializeError } from 'serialize-error'; import { api } from 'services/api'; import { authToastMiddleware } from 'services/api/authToastMiddleware'; -import type { JsonObject } from 'type-fest'; import { STORAGE_PREFIX } from './constants'; import { actionSanitizer } from
'./middleware/devtools/actionSanitizer'; @@ -46,7 +45,6 @@ const allReducers = { [gallerySlice.name]: gallerySlice.reducer, [generationSlice.name]: generationSlice.reducer, [nodesSlice.name]: nodesSlice.reducer, - [nodesTemplatesSlice.name]: nodesTemplatesSlice.reducer, [postprocessingSlice.name]: postprocessingSlice.reducer, [systemSlice.name]: systemSlice.reducer, [configSlice.name]: configSlice.reducer, @@ -56,7 +54,7 @@ const allReducers = { [deleteImageModalSlice.name]: deleteImageModalSlice.reducer, [changeBoardModalSlice.name]: changeBoardModalSlice.reducer, [loraSlice.name]: loraSlice.reducer, - [modelManagerSlice.name]: modelManagerSlice.reducer, + [modelManagerV2Slice.name]: modelManagerV2Slice.reducer, [sdxlSlice.name]: sdxlSlice.reducer, [queueSlice.name]: queueSlice.reducer, [workflowSlice.name]: workflowSlice.reducer, @@ -103,7 +101,7 @@ const persistConfigs: { [key in keyof typeof allReducers]?: PersistConfig } = { [dynamicPromptsPersistConfig.name]: dynamicPromptsPersistConfig, [sdxlPersistConfig.name]: sdxlPersistConfig, [loraPersistConfig.name]: loraPersistConfig, - [modelManagerPersistConfig.name]: modelManagerPersistConfig, + [modelManagerV2PersistConfig.name]: modelManagerV2PersistConfig, [hrfPersistConfig.name]: hrfPersistConfig, }; @@ -127,7 +125,7 @@ const unserialize: UnserializeFunction = (data, key) => { { persistedData: parsed, rehydratedData: transformed, - diff: diff(parsed, transformed) as JsonObject, // this is always serializable + diff: diff(parsed, transformed) as JSONObject, // this is always serializable }, `Rehydrated slice "${key}"` ); @@ -138,7 +136,7 @@ const unserialize: UnserializeFunction = (data, key) => { } }; -export const serialize: SerializeFunction = (data, key) => { +const serialize: SerializeFunction = (data, key) => { const persistConfig = persistConfigs[key as keyof typeof persistConfigs]; if (!persistConfig) { throw new Error(`No persist config for slice "${key}"`); @@ -187,7 +185,6 @@ export const createStore = (uniqueStoreKey?: string, persist = true) => }, }); -export type AppGetState = ReturnType<ReturnType<typeof createStore>['getState']>; export type RootState = ReturnType<ReturnType<typeof createStore>['getState']>; // eslint-disable-next-line @typescript-eslint/no-explicit-any export type AppThunkDispatch = ThunkDispatch<RootState, any, UnknownAction>; diff --git a/invokeai/frontend/web/src/app/store/storeHooks.ts b/invokeai/frontend/web/src/app/store/storeHooks.ts index f1a9aa979c..6bc904acb3 100644 --- a/invokeai/frontend/web/src/app/store/storeHooks.ts +++ b/invokeai/frontend/web/src/app/store/storeHooks.ts @@ -1,7 +1,8 @@ import type { AppThunkDispatch, RootState } from 'app/store/store'; import type { TypedUseSelectorHook } from 'react-redux'; -import { useDispatch, useSelector } from 'react-redux'; +import { useDispatch, useSelector, useStore } from 'react-redux'; // Use throughout your app instead of plain `useDispatch` and `useSelector` export const useAppDispatch = () => useDispatch<AppThunkDispatch>(); export const useAppSelector: TypedUseSelectorHook<RootState> = useSelector; +export const useAppStore = () => useStore<RootState>(); diff --git a/invokeai/frontend/web/src/app/types/invokeai.ts b/invokeai/frontend/web/src/app/types/invokeai.ts index d511812cb4..a2b17b483d 100644 --- a/invokeai/frontend/web/src/app/types/invokeai.ts +++ b/invokeai/frontend/web/src/app/types/invokeai.ts @@ -88,7 +88,7 @@ export type AppConfig = { scaledBoundingBoxHeight: NumericalParameterConfig; // initial value comes from model scaledBoundingBoxWidth: NumericalParameterConfig; // initial value comes from model canvasCoherenceStrength: NumericalParameterConfig; -
canvasCoherenceSteps: NumericalParameterConfig; + canvasCoherenceEdgeSize: NumericalParameterConfig; infillTileSize: NumericalParameterConfig; infillPatchmatchDownscaleSize: NumericalParameterConfig; // Misc advanced diff --git a/invokeai/frontend/web/src/common/components/InformationalPopover/constants.ts b/invokeai/frontend/web/src/common/components/InformationalPopover/constants.ts index bdcd1ca9a4..b556816502 100644 --- a/invokeai/frontend/web/src/common/components/InformationalPopover/constants.ts +++ b/invokeai/frontend/web/src/common/components/InformationalPopover/constants.ts @@ -6,35 +6,53 @@ export type Feature = | 'paramNegativeConditioning' | 'paramPositiveConditioning' | 'paramScheduler' - | 'compositingBlur' + | 'compositingMaskBlur' | 'compositingBlurMethod' | 'compositingCoherencePass' | 'compositingCoherenceMode' - | 'compositingCoherenceSteps' - | 'compositingStrength' + | 'compositingCoherenceEdgeSize' + | 'compositingCoherenceMinDenoise' | 'compositingMaskAdjustments' + | 'controlNet' | 'controlNetBeginEnd' | 'controlNetControlMode' + | 'controlNetProcessor' | 'controlNetResizeMode' - | 'controlNet' | 'controlNetWeight' | 'dynamicPrompts' | 'dynamicPromptsMaxPrompts' | 'dynamicPromptsSeedBehaviour' + | 'imageFit' | 'infillMethod' | 'lora' + | 'loraWeight' | 'noiseUseCPU' + | 'paramAspect' | 'paramCFGScale' | 'paramCFGRescaleMultiplier' | 'paramDenoisingStrength' + | 'paramHeight' + | 'paramHrf' | 'paramIterations' | 'paramModel' | 'paramRatio' | 'paramSeed' | 'paramSteps' + | 'paramUpscaleMethod' | 'paramVAE' | 'paramVAEPrecision' - | 'scaleBeforeProcessing'; + | 'paramWidth' + | 'patchmatchDownScaleSize' + | 'refinerModel' + | 'refinerNegativeAestheticScore' + | 'refinerPositiveAestheticScore' + | 'refinerScheduler' + | 'refinerStart' + | 'refinerSteps' + | 'refinerCfgScale' + | 'scaleBeforeProcessing' + | 'seamlessTilingXAxis' + | 'seamlessTilingYAxis'; export type PopoverData = PopoverProps & { image?: string; @@ -46,21 +64,51 @@ export const POPOVER_DATA: { [key in Feature]?: PopoverData } = { paramNegativeConditioning: { placement: 'right', }, + clipSkip: { + href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings', + }, controlNet: { href: 'https://support.invoke.ai/support/solutions/articles/151000105880', }, + controlNetBeginEnd: { + href: 'https://support.invoke.ai/support/solutions/articles/151000178148', + }, + controlNetWeight: { + href: 'https://support.invoke.ai/support/solutions/articles/151000178148', + }, lora: { href: 'https://support.invoke.ai/support/solutions/articles/151000159072', }, + loraWeight: { + href: 'https://support.invoke.ai/support/solutions/articles/151000159072-concepts-low-rank-adaptations-loras-', + }, + compositingMaskBlur: { + href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings', + }, + compositingBlurMethod: { + href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings', + }, compositingCoherenceMode: { - href: 'https://support.invoke.ai/support/solutions/articles/151000158838', + href: 'https://support.invoke.ai/support/solutions/articles/151000158838-compositing-settings', }, infillMethod: { - href: 'https://support.invoke.ai/support/solutions/articles/151000158841', + href: 'https://support.invoke.ai/support/solutions/articles/151000158841-infill-and-scaling', }, scaleBeforeProcessing: { href: 'https://support.invoke.ai/support/solutions/articles/151000158841', }, + paramCFGScale: { + href: 
'https://www.youtube.com/watch?v=1OeHEJrsTpI', }, + paramCFGRescaleMultiplier: { + href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings', + }, + paramDenoisingStrength: { + href: 'https://support.invoke.ai/support/solutions/articles/151000094998-image-to-image', + }, + paramHrf: { + href: 'https://support.invoke.ai/support/solutions/articles/151000096700-how-can-i-get-larger-images-what-does-upscaling-do-', + }, paramIterations: { href: 'https://support.invoke.ai/support/solutions/articles/151000159073', }, @@ -70,7 +118,10 @@ export const POPOVER_DATA: { [key in Feature]?: PopoverData } = { }, paramScheduler: { placement: 'right', - href: 'https://support.invoke.ai/support/solutions/articles/151000159073', + href: 'https://www.youtube.com/watch?v=1OeHEJrsTpI', + }, + paramSeed: { + href: 'https://support.invoke.ai/support/solutions/articles/151000096684-what-is-a-seed-how-do-i-use-it-to-recreate-the-same-image-', }, paramModel: { placement: 'right', @@ -81,15 +132,53 @@ export const POPOVER_DATA: { [key in Feature]?: PopoverData } = { }, controlNetControlMode: { placement: 'right', + href: 'https://support.invoke.ai/support/solutions/articles/151000178148', + }, + controlNetProcessor: { + placement: 'right', + href: 'https://support.invoke.ai/support/solutions/articles/151000105880-using-controlnet', }, controlNetResizeMode: { placement: 'right', + href: 'https://support.invoke.ai/support/solutions/articles/151000178148', }, paramVAE: { placement: 'right', + href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings', }, paramVAEPrecision: { placement: 'right', + href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings', + }, + paramUpscaleMethod: { + href: 'https://support.invoke.ai/support/solutions/articles/151000096700-how-can-i-get-larger-images-what-does-upscaling-do-', + }, + refinerModel: { + href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner', + }, + refinerNegativeAestheticScore: { + href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner', + }, + refinerPositiveAestheticScore: { + href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner', + }, + refinerScheduler: { + href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner', + }, + refinerStart: { + href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner', + }, + refinerSteps: { + href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner', + }, + refinerCfgScale: { + href: 'https://support.invoke.ai/support/solutions/articles/151000178333-using-the-refiner', + }, + seamlessTilingXAxis: { + href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings', + }, + seamlessTilingYAxis: { + href: 'https://support.invoke.ai/support/solutions/articles/151000178161-advanced-settings', }, } as const; diff --git a/invokeai/frontend/web/src/common/components/Nbsp.tsx b/invokeai/frontend/web/src/common/components/Nbsp.tsx deleted file mode 100644 index 05b396239d..0000000000 --- a/invokeai/frontend/web/src/common/components/Nbsp.tsx +++ /dev/null @@ -1 +0,0 @@ -export const Nbsp = () => <>{'\u00A0'}</>; diff --git a/invokeai/frontend/web/src/common/hooks/useDownloadImage.ts b/invokeai/frontend/web/src/common/hooks/useDownloadImage.ts index 3195426da3..26a17e1d0c 100644 --- 
a/invokeai/frontend/web/src/common/hooks/useDownloadImage.ts +++ b/invokeai/frontend/web/src/common/hooks/useDownloadImage.ts @@ -1,22 +1,28 @@ +import { useStore } from '@nanostores/react'; import { useAppToaster } from 'app/components/Toaster'; +import { $authToken } from 'app/store/nanostores/authToken'; import { useAppDispatch } from 'app/store/storeHooks'; import { imageDownloaded } from 'features/gallery/store/actions'; import { useCallback } from 'react'; import { useTranslation } from 'react-i18next'; -import { useImageUrlToBlob } from './useImageUrlToBlob'; - export const useDownloadImage = () => { const toaster = useAppToaster(); const { t } = useTranslation(); - const imageUrlToBlob = useImageUrlToBlob(); const dispatch = useAppDispatch(); + const authToken = useStore($authToken); const downloadImage = useCallback( async (image_url: string, image_name: string) => { try { - const blob = await imageUrlToBlob(image_url); - + const requestOpts = authToken ? { headers: { Authorization: `Bearer ${authToken}`, }, } : {}; + const blob = await fetch(image_url, requestOpts).then((resp) => resp.blob()); if (!blob) { throw new Error('Unable to create Blob'); } @@ -40,7 +46,7 @@ export const useDownloadImage = () => { }); } }, - [t, toaster, imageUrlToBlob, dispatch] + [t, toaster, dispatch, authToken] ); return { downloadImage }; diff --git a/invokeai/frontend/web/src/common/hooks/useGroupedModelCombobox.ts b/invokeai/frontend/web/src/common/hooks/useGroupedModelCombobox.ts index eb55db79ca..2fffd7bda0 100644 --- a/invokeai/frontend/web/src/common/hooks/useGroupedModelCombobox.ts +++ b/invokeai/frontend/web/src/common/hooks/useGroupedModelCombobox.ts @@ -2,15 +2,15 @@ import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library'; import type { EntityState } from '@reduxjs/toolkit'; import { useAppSelector } from 'app/store/storeHooks'; import type { GroupBase } from 'chakra-react-select'; +import type { ModelIdentifierWithBase } from 'features/nodes/types/common'; import { groupBy, map, reduce } from 'lodash-es'; import { useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import type { AnyModelConfigEntity } from 'services/api/endpoints/models'; -import { getModelId } from 'services/api/endpoints/models'; +import type { AnyModelConfig } from 'services/api/types'; -type UseGroupedModelComboboxArg<T extends AnyModelConfigEntity> = { +type UseGroupedModelComboboxArg<T extends AnyModelConfig> = { modelEntities: EntityState<T, string> | undefined; - selectedModel?: Pick<T, 'base_model' | 'model_name' | 'model_type'> | null; + selectedModel?: ModelIdentifierWithBase | null; onChange: (value: T | null) => void; getIsDisabled?: (model: T) => boolean; isLoading?: boolean; @@ -24,26 +24,26 @@ type UseGroupedModelComboboxReturn = { noOptionsMessage: () => string; }; -export const useGroupedModelCombobox = <T extends AnyModelConfigEntity>( +export const useGroupedModelCombobox = <T extends AnyModelConfig>( arg: UseGroupedModelComboboxArg<T> ): UseGroupedModelComboboxReturn => { const { t } = useTranslation(); - const base_model = useAppSelector((s) => s.generation.model?.base_model ?? 'sdxl'); + const base_model = useAppSelector((s) => s.generation.model?.base ??
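/* default to the sdxl base when no main model is selected */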
'sdxl'); const { modelEntities, selectedModel, getIsDisabled, onChange, isLoading } = arg; const options = useMemo<GroupBase<ComboboxOption>[]>(() => { if (!modelEntities) { return []; } const modelEntitiesArray = map(modelEntities.entities); - const groupedModels = groupBy(modelEntitiesArray, 'base_model'); + const groupedModels = groupBy(modelEntitiesArray, 'base'); const _options = reduce( groupedModels, (acc, val, label) => { acc.push({ label, options: val.map((model) => ({ - label: model.model_name, - value: model.id, + label: model.name, + value: model.key, isDisabled: getIsDisabled ? getIsDisabled(model) : false, })), }); @@ -57,8 +57,7 @@ export const useGroupedModelCombobox = <T extends AnyModelConfigEntity>( const value = useMemo( () => - options.flatMap((o) => o.options).find((m) => (selectedModel ? m.value === getModelId(selectedModel) : false)) ?? - null, + options.flatMap((o) => o.options).find((m) => (selectedModel ? m.value === selectedModel.key : false)) ?? null, [options, selectedModel] ); diff --git a/invokeai/frontend/web/src/common/hooks/useImage.ts b/invokeai/frontend/web/src/common/hooks/useImage.ts deleted file mode 100644 index 60c973ce59..0000000000 --- a/invokeai/frontend/web/src/common/hooks/useImage.ts +++ /dev/null @@ -1,102 +0,0 @@ -import { useLayoutEffect, useRef, useState } from 'react'; - -// Adapted from https://github.com/konvajs/use-image - -type CrossOrigin = 'anonymous' | 'use-credentials'; -type ReferrerPolicy = - | 'no-referrer' - | 'no-referrer-when-downgrade' - | 'origin' - | 'origin-when-cross-origin' - | 'same-origin' - | 'strict-origin' - | 'strict-origin-when-cross-origin' - | 'unsafe-url'; -type ImageStatus = 'loaded' | 'loading' | 'failed'; - -export const useImage = ( - url: string, - crossOrigin?: CrossOrigin, - referrerpolicy?: ReferrerPolicy -): [undefined | HTMLImageElement, ImageStatus, Blob | null] => { - // lets use refs for image and status - // so we can update them during render - // to have instant update in status/image when new data comes in - const statusRef = useRef<ImageStatus>('loading'); - const imageRef = useRef<HTMLImageElement>(); - const blobRef = useRef<Blob | null>(null); - - // we are not going to use token - // but we need to just to trigger state update - const [_, setStateToken] = useState(0); - - // keep track of old props to trigger changes - const oldUrl = useRef<string>(); - const oldCrossOrigin = useRef<CrossOrigin>(); - const oldReferrerPolicy = useRef<ReferrerPolicy>(); - - if ( - oldUrl.current !== url || - oldCrossOrigin.current !== crossOrigin || - oldReferrerPolicy.current !== referrerpolicy - ) { - statusRef.current = 'loading'; - imageRef.current = undefined; - oldUrl.current = url; - oldCrossOrigin.current = crossOrigin; - oldReferrerPolicy.current = referrerpolicy; - } - - useLayoutEffect( - function () { - if (!url) { - return; - } - const img = document.createElement('img'); - - function onload() { - statusRef.current = 'loaded'; - imageRef.current = img; - const canvas = document.createElement('canvas'); - canvas.width = img.clientWidth; - canvas.height = img.clientHeight; - - const context = canvas.getContext('2d'); - if (context) { - context.drawImage(img, 0, 0); - canvas.toBlob(function (blob) { - blobRef.current = blob; - }, 'image/png'); - } - setStateToken(Math.random()); - } - - function onerror() { - statusRef.current = 'failed'; - imageRef.current = undefined; - setStateToken(Math.random()); - } - - img.addEventListener('load', onload); - img.addEventListener('error', onerror); - if (crossOrigin) { - img.crossOrigin = crossOrigin; - } - if (referrerpolicy) { - img.referrerPolicy = referrerpolicy; - } - img.src = url; - 
return function cleanup() { - img.removeEventListener('load', onload); - img.removeEventListener('error', onerror); - }; - }, - [url, crossOrigin, referrerpolicy] - ); - - // return array because it is better to use in case of several useImage hooks - // const [background, backgroundStatus] = useImage(url1); - // const [patter] = useImage(url2); - return [imageRef.current, statusRef.current, blobRef.current]; -}; diff --git a/invokeai/frontend/web/src/common/hooks/useIsReadyToEnqueue.ts b/invokeai/frontend/web/src/common/hooks/useIsReadyToEnqueue.ts index 4952fa1c47..b31efed970 100644 --- a/invokeai/frontend/web/src/common/hooks/useIsReadyToEnqueue.ts +++ b/invokeai/frontend/web/src/common/hooks/useIsReadyToEnqueue.ts @@ -8,7 +8,6 @@ import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types'; import { selectDynamicPromptsSlice } from 'features/dynamicPrompts/store/dynamicPromptsSlice'; import { getShouldProcessPrompt } from 'features/dynamicPrompts/util/getShouldProcessPrompt'; import { selectNodesSlice } from 'features/nodes/store/nodesSlice'; -import { selectNodeTemplatesSlice } from 'features/nodes/store/nodeTemplatesSlice'; import { isInvocationNode } from 'features/nodes/types/invocation'; import { selectGenerationSlice } from 'features/parameters/store/generationSlice'; import { selectSystemSlice } from 'features/system/store/systemSlice'; @@ -23,11 +22,10 @@ const selector = createMemoizedSelector( selectGenerationSlice, selectSystemSlice, selectNodesSlice, - selectNodeTemplatesSlice, selectDynamicPromptsSlice, activeTabNameSelector, ], - (controlAdapters, generation, system, nodes, nodeTemplates, dynamicPrompts, activeTabName) => { + (controlAdapters, generation, system, nodes, dynamicPrompts, activeTabName) => { const { initialImage, model, positivePrompt } = generation; const { isConnected } = system; @@ -54,7 +52,7 @@ const selector = createMemoizedSelector( return; } - const nodeTemplate = nodeTemplates.templates[node.data.type]; + const nodeTemplate = nodes.templates[node.data.type]; if (!nodeTemplate) { // Node type not found @@ -107,7 +105,7 @@ const selector = createMemoizedSelector( number: i + 1, }) ); - } else if (ca.model.base_model !== model?.base_model) { + } else if (ca.model.base !== model?.base) { // This should never happen, just a sanity check reasons.push( i18n.t('parameters.invoke.incompatibleBaseModelForControlAdapter', { diff --git a/invokeai/frontend/web/src/common/hooks/useModelCombobox.ts b/invokeai/frontend/web/src/common/hooks/useModelCombobox.ts index 880b316379..e0718d6413 100644 --- a/invokeai/frontend/web/src/common/hooks/useModelCombobox.ts +++ b/invokeai/frontend/web/src/common/hooks/useModelCombobox.ts @@ -1,14 +1,14 @@ import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library'; import type { EntityState } from '@reduxjs/toolkit'; +import type { ModelIdentifierWithBase } from 'features/nodes/types/common'; import { map } from 'lodash-es'; import { useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import type { AnyModelConfigEntity } from 'services/api/endpoints/models'; -import { getModelId } from 'services/api/endpoints/models'; +import type { AnyModelConfig } from 'services/api/types'; -type UseModelComboboxArg<T extends AnyModelConfigEntity> = { +type UseModelComboboxArg<T extends AnyModelConfig> = { modelEntities: EntityState<T, string> | undefined; - selectedModel?: Pick<T, 'base_model' | 'model_name' | 'model_type'> | null; + selectedModel?: ModelIdentifierWithBase | null; onChange: (value: T | null) => void; getIsDisabled?: (model: T) => boolean; optionsFilter?: (model: T) =>
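/* return false to hide a model from the options list */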
boolean; @@ -23,9 +23,7 @@ type UseModelComboboxReturn = { noOptionsMessage: () => string; }; -export const useModelCombobox = ( - arg: UseModelComboboxArg -): UseModelComboboxReturn => { +export const useModelCombobox = (arg: UseModelComboboxArg): UseModelComboboxReturn => { const { t } = useTranslation(); const { modelEntities, selectedModel, getIsDisabled, onChange, isLoading, optionsFilter = () => true } = arg; const options = useMemo(() => { @@ -35,14 +33,14 @@ export const useModelCombobox = ( return map(modelEntities.entities) .filter(optionsFilter) .map((model) => ({ - label: model.model_name, - value: model.id, + label: model.name, + value: model.key, isDisabled: getIsDisabled ? getIsDisabled(model) : false, })); }, [optionsFilter, getIsDisabled, modelEntities]); const value = useMemo( - () => options.find((m) => (selectedModel ? m.value === getModelId(selectedModel) : false)), + () => options.find((m) => (selectedModel ? m.value === selectedModel.key : false)), [options, selectedModel] ); diff --git a/invokeai/frontend/web/src/common/hooks/useModelCustomSelect.ts b/invokeai/frontend/web/src/common/hooks/useModelCustomSelect.ts new file mode 100644 index 0000000000..5626f4c395 --- /dev/null +++ b/invokeai/frontend/web/src/common/hooks/useModelCustomSelect.ts @@ -0,0 +1,88 @@ +import type { Item } from '@invoke-ai/ui-library'; +import type { EntityState } from '@reduxjs/toolkit'; +import { EMPTY_ARRAY } from 'app/store/constants'; +import type { ModelIdentifierWithBase } from 'features/nodes/types/common'; +import { MODEL_TYPE_SHORT_MAP } from 'features/parameters/types/constants'; +import { filter } from 'lodash-es'; +import { useCallback, useMemo } from 'react'; +import { useTranslation } from 'react-i18next'; +import type { AnyModelConfig } from 'services/api/types'; + +type UseModelCustomSelectArg = { + data: EntityState | undefined; + isLoading: boolean; + selectedModel?: ModelIdentifierWithBase | null; + onChange: (value: T | null) => void; + modelFilter?: (model: T) => boolean; + isModelDisabled?: (model: T) => boolean; +}; + +type UseModelCustomSelectReturn = { + selectedItem: Item | null; + items: Item[]; + onChange: (item: Item | null) => void; + placeholder: string; +}; + +const modelFilterDefault = () => true; +const isModelDisabledDefault = () => false; + +export const useModelCustomSelect = ({ + data, + isLoading, + selectedModel, + onChange, + modelFilter = modelFilterDefault, + isModelDisabled = isModelDisabledDefault, +}: UseModelCustomSelectArg): UseModelCustomSelectReturn => { + const { t } = useTranslation(); + + const items: Item[] = useMemo( + () => + data + ? filter(data.entities, modelFilter).map((m) => ({ + label: m.name, + value: m.key, + description: m.description, + group: MODEL_TYPE_SHORT_MAP[m.base], + isDisabled: isModelDisabled(m), + })) + : EMPTY_ARRAY, + [data, isModelDisabled, modelFilter] + ); + + const _onChange = useCallback( + (item: Item | null) => { + if (!item || !data) { + return; + } + const model = data.entities[item.value]; + if (!model) { + return; + } + onChange(model); + }, + [data, onChange] + ); + + const selectedItem = useMemo(() => items.find((o) => o.value === selectedModel?.key) ?? 
null, [selectedModel, items]); + + const placeholder = useMemo(() => { + if (isLoading) { + return t('common.loading'); + } + + if (items.length === 0) { + return t('models.noModelsAvailable'); + } + + return t('models.selectModel'); + }, [isLoading, items, t]); + + return { + items, + onChange: _onChange, + selectedItem, + placeholder, + }; +}; diff --git a/invokeai/frontend/web/src/common/hooks/useSingleAndDoubleClick.ts b/invokeai/frontend/web/src/common/hooks/useSingleAndDoubleClick.ts index 38594369d3..7a02ae54ec 100644 --- a/invokeai/frontend/web/src/common/hooks/useSingleAndDoubleClick.ts +++ b/invokeai/frontend/web/src/common/hooks/useSingleAndDoubleClick.ts @@ -1,7 +1,7 @@ // https://stackoverflow.com/a/73731908 import { useCallback, useEffect, useState } from 'react'; -export type UseSingleAndDoubleClickOptions = { +type UseSingleAndDoubleClickOptions = { onSingleClick: () => void; onDoubleClick: () => void; latency?: number; diff --git a/invokeai/frontend/web/src/common/types.ts b/invokeai/frontend/web/src/common/types.ts new file mode 100644 index 0000000000..f3037dcc2b --- /dev/null +++ b/invokeai/frontend/web/src/common/types.ts @@ -0,0 +1,5 @@ +type JSONValue = string | number | boolean | null | JSONValue[] | { [key: string]: JSONValue }; + +export interface JSONObject { + [k: string]: JSONValue; +} diff --git a/invokeai/frontend/web/src/common/util/generateSeeds.ts b/invokeai/frontend/web/src/common/util/generateSeeds.ts index 588f711148..c79685feda 100644 --- a/invokeai/frontend/web/src/common/util/generateSeeds.ts +++ b/invokeai/frontend/web/src/common/util/generateSeeds.ts @@ -1,7 +1,7 @@ import { NUMPY_RAND_MAX, NUMPY_RAND_MIN } from 'app/constants'; import { random } from 'lodash-es'; -export type GenerateSeedsArg = { +type GenerateSeedsArg = { count: number; start?: number; min?: number; @@ -16,5 +16,3 @@ export const generateSeeds = ({ count, start, min = NUMPY_RAND_MIN, max = NUMPY_ } return seeds; }; - -export const generateOneSeed = (min: number = NUMPY_RAND_MIN, max: number = NUMPY_RAND_MAX) => random(min, max); diff --git a/invokeai/frontend/web/src/common/util/objectKeys.ts b/invokeai/frontend/web/src/common/util/objectKeys.ts new file mode 100644 index 0000000000..bea0905c7f --- /dev/null +++ b/invokeai/frontend/web/src/common/util/objectKeys.ts @@ -0,0 +1,6 @@ +/** + * Get the keys of an object. This is a wrapper around `Object.keys` that types the result as an array of the keys of the object. + * @param obj The object to get the keys of. + * @returns The keys of the object. 
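+ * @example
+ * // Editor's illustration (not in the original file): `k` is typed
+ * // 'width' | 'height' rather than string, so indexing type-checks.
+ * const dims = { width: 512, height: 512 };
+ * for (const k of objectKeys(dims)) {
+ *   console.log(k, dims[k]);
+ * }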
+ */ +export const objectKeys = <T extends Record<string, unknown>>(obj: T) => Object.keys(obj) as Array<keyof T>; diff --git a/invokeai/frontend/web/src/common/util/roundDownToMultiple.ts b/invokeai/frontend/web/src/common/util/roundDownToMultiple.ts index 55915ea7c7..792b6d38e4 100644 --- a/invokeai/frontend/web/src/common/util/roundDownToMultiple.ts +++ b/invokeai/frontend/web/src/common/util/roundDownToMultiple.ts @@ -8,7 +8,3 @@ export const roundDownToMultipleMin = (num: number, multiple: number): number => export const roundToMultiple = (num: number, multiple: number): number => { return Math.round(num / multiple) * multiple; }; - -export const roundToMultipleMin = (num: number, multiple: number): number => { - return Math.max(multiple, roundToMultiple(num, multiple)); -}; diff --git a/invokeai/frontend/web/src/common/util/skipMouseEvent.ts b/invokeai/frontend/web/src/common/util/skipMouseEvent.ts deleted file mode 100644 index f6acc55edd..0000000000 --- a/invokeai/frontend/web/src/common/util/skipMouseEvent.ts +++ /dev/null @@ -1,8 +0,0 @@ -import type { MouseEvent } from 'react'; - -/** - * Prevents the default behavior of the event. - */ -export const skipMouseEvent = (e: MouseEvent) => { - e.preventDefault(); -}; diff --git a/invokeai/frontend/web/src/common/util/stopPastePropagation.ts b/invokeai/frontend/web/src/common/util/stopPastePropagation.ts deleted file mode 100644 index f9195a4f58..0000000000 --- a/invokeai/frontend/web/src/common/util/stopPastePropagation.ts +++ /dev/null @@ -1,5 +0,0 @@ -import type { ClipboardEvent } from 'react'; - -export const stopPastePropagation = (e: ClipboardEvent) => { - e.stopPropagation(); -}; diff --git a/invokeai/frontend/web/src/common/util/toast.ts b/invokeai/frontend/web/src/common/util/toast.ts new file mode 100644 index 0000000000..ac61a4a12d --- /dev/null +++ b/invokeai/frontend/web/src/common/util/toast.ts @@ -0,0 +1,6 @@ +import { createStandaloneToast, theme, TOAST_OPTIONS } from '@invoke-ai/ui-library'; + +export const { toast } = createStandaloneToast({ + theme: theme, + defaultOptions: TOAST_OPTIONS.defaultOptions, +}); diff --git a/invokeai/frontend/web/src/features/canvas/components/IAICanvasMaskCompositer.tsx b/invokeai/frontend/web/src/features/canvas/components/IAICanvasMaskCompositer.tsx index 6098d85359..4fedcaec75 100644 --- a/invokeai/frontend/web/src/features/canvas/components/IAICanvasMaskCompositer.tsx +++ b/invokeai/frontend/web/src/features/canvas/components/IAICanvasMaskCompositer.tsx @@ -9,7 +9,7 @@ import { isNumber } from 'lodash-es'; import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react'; import { Rect } from 'react-konva'; -export const canvasMaskCompositerSelector = createMemoizedSelector(selectCanvasSlice, (canvas) => { +const canvasMaskCompositerSelector = createMemoizedSelector(selectCanvasSlice, (canvas) => { return { stageCoordinates: canvas.stageCoordinates, stageDimensions: canvas.stageDimensions, diff --git a/invokeai/frontend/web/src/features/canvas/store/canvasNanostore.ts b/invokeai/frontend/web/src/features/canvas/store/canvasNanostore.ts index f971886d5e..b225f66677 100644 --- a/invokeai/frontend/web/src/features/canvas/store/canvasNanostore.ts +++ b/invokeai/frontend/web/src/features/canvas/store/canvasNanostore.ts @@ -8,7 +8,7 @@ export const $tool = atom<CanvasTool>('move'); export const $toolStash = atom<CanvasTool | null>(null); export const $isDrawing = atom(false); export const $isMouseOverBoundingBox = atom(false); -export const $isMoveBoundingBoxKeyHeld = atom(false); +const $isMoveBoundingBoxKeyHeld = atom(false);
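// --- begin editor's illustrative aside (a sketch, not code from this repository) ---
// These canvas flags are nanostores atoms rather than Redux state: cheap,
// ephemeral UI state that updates at pointer-event frequency without
// dispatching actions through the store. A minimal sketch of the pattern,
// assuming the `nanostores` and `@nanostores/react` packages this app uses;
// the `$isDragging` atom and its helpers are hypothetical:
import { useStore } from '@nanostores/react';
import { atom } from 'nanostores';

const $isDragging = atom(false);

// Components subscribe to just this atom; only they re-render on change.
export const useIsDragging = () => useStore($isDragging);

// Event handlers update it imperatively, bypassing reducers entirely.
export const startDrag = () => $isDragging.set(true);
export const endDrag = () => $isDragging.set(false);
// --- end aside ---
export const 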
$isMoveStageKeyHeld = atom(false); export const $isMovingBoundingBox = atom(false); export const $isMovingStage = atom(false); diff --git a/invokeai/frontend/web/src/features/canvas/store/canvasSelectors.ts b/invokeai/frontend/web/src/features/canvas/store/canvasSelectors.ts index a337cfaab1..29dc4c9fb8 100644 --- a/invokeai/frontend/web/src/features/canvas/store/canvasSelectors.ts +++ b/invokeai/frontend/web/src/features/canvas/store/canvasSelectors.ts @@ -1,14 +1,8 @@ import { createSelector } from '@reduxjs/toolkit'; -import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { selectCanvasSlice } from './canvasSlice'; -import { isCanvasBaseImage } from './canvasTypes'; export const isStagingSelector = createSelector( selectCanvasSlice, (canvas) => canvas.batchIds.length > 0 || canvas.layerState.stagingArea.images.length > 0 ); - -export const initialCanvasImageSelector = createMemoizedSelector(selectCanvasSlice, (canvas) => - canvas.layerState.objects.find(isCanvasBaseImage) -); diff --git a/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts index cd734d3f00..465bc11409 100644 --- a/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts +++ b/invokeai/frontend/web/src/features/canvas/store/canvasSlice.ts @@ -38,7 +38,7 @@ import { CANVAS_GRID_SIZE_FINE } from './constants'; */ const MAX_HISTORY = 128; -export const initialLayerState: CanvasLayerState = { +const initialLayerState: CanvasLayerState = { objects: [], stagingArea: { images: [], @@ -46,11 +46,10 @@ export const initialLayerState: CanvasLayerState = { }, }; -export const initialCanvasState: CanvasState = { +const initialCanvasState: CanvasState = { _version: 1, boundingBoxCoordinates: { x: 0, y: 0 }, boundingBoxDimensions: { width: 512, height: 512 }, - boundingBoxPreviewFill: { r: 0, g: 0, b: 0, a: 0.5 }, boundingBoxScaleMethod: 'auto', brushColor: { r: 90, g: 90, b: 255, a: 1 }, brushSize: 50, @@ -215,9 +214,6 @@ export const canvasSlice = createSlice({ setStageCoordinates: (state, action: PayloadAction) => { state.stageCoordinates = action.payload; }, - setBoundingBoxPreviewFill: (state, action: PayloadAction) => { - state.boundingBoxPreviewFill = action.payload; - }, setStageScale: (state, action: PayloadAction) => { state.stageScale = action.payload; }, @@ -231,9 +227,6 @@ export const canvasSlice = createSlice({ setShouldLockBoundingBox: (state, action: PayloadAction) => { state.shouldLockBoundingBox = action.payload; }, - toggleShouldLockBoundingBox: (state) => { - state.shouldLockBoundingBox = !state.shouldLockBoundingBox; - }, setShouldShowBoundingBox: (state, action: PayloadAction) => { state.shouldShowBoundingBox = action.payload; }, @@ -573,19 +566,6 @@ export const canvasSlice = createSlice({ }, }), }, - scaledBoundingBoxDimensionsReset: { - reducer: (state, action: PayloadActionWithOptimalDimension) => { - const scaledDimensions = getScaledBoundingBoxDimensions( - state.boundingBoxDimensions, - action.meta.optimalDimension - ); - state.scaledBoundingBoxDimensions = scaledDimensions; - }, - prepare: (payload: void, optimalDimension: number) => ({ - payload: undefined, - meta: { optimalDimension }, - }), - }, setShouldShowStagingImage: (state, action: PayloadAction) => { state.shouldShowStagingImage = action.payload; }, @@ -626,7 +606,7 @@ export const canvasSlice = createSlice({ }, extraReducers: (builder) => { builder.addCase(modelChanged, (state, action) => { - if (action.meta.previousModel?.base_model 
=== action.payload?.base_model) { + if (action.meta.previousModel?.base === action.payload?.base) { // The base model hasn't changed, we don't need to optimize the size return; } @@ -682,7 +662,6 @@ export const { resetCanvasView, setBoundingBoxCoordinates, setBoundingBoxDimensions, - setBoundingBoxPreviewFill, setBoundingBoxScaleMethod, setBrushColor, setBrushSize, @@ -695,7 +674,6 @@ export const { setShouldAutoSave, setShouldCropToBoundingBoxOnSave, setShouldDarkenOutsideBoundingBox, - setShouldLockBoundingBox, setShouldPreserveMaskedArea, setShouldShowBoundingBox, setShouldShowCanvasDebugInfo, @@ -706,7 +684,6 @@ export const { setShouldSnapToGrid, setStageCoordinates, setStageScale, - toggleShouldLockBoundingBox, undo, setScaledBoundingBoxDimensions, setShouldRestrictStrokesToBox, @@ -716,13 +693,12 @@ export const { canvasBatchIdAdded, canvasBatchIdsReset, aspectRatioChanged, - scaledBoundingBoxDimensionsReset, } = canvasSlice.actions; export const selectCanvasSlice = (state: RootState) => state.canvas; /* eslint-disable-next-line @typescript-eslint/no-explicit-any */ -export const migrateCanvasState = (state: any): any => { +const migrateCanvasState = (state: any): any => { if (!('_version' in state)) { state._version = 1; state.aspectRatio = initialAspectRatioState; diff --git a/invokeai/frontend/web/src/features/canvas/store/canvasTypes.ts b/invokeai/frontend/web/src/features/canvas/store/canvasTypes.ts index dc6b764075..82793bffd4 100644 --- a/invokeai/frontend/web/src/features/canvas/store/canvasTypes.ts +++ b/invokeai/frontend/web/src/features/canvas/store/canvasTypes.ts @@ -10,14 +10,12 @@ export const LAYER_NAMES_DICT: { label: string; value: CanvasLayer }[] = [ { label: 'Mask', value: 'mask' }, ]; -export const LAYER_NAMES = ['base', 'mask'] as const; - -export const zBoundingBoxScaleMethod = z.enum(['none', 'auto', 'manual']); +const zBoundingBoxScaleMethod = z.enum(['none', 'auto', 'manual']); export type BoundingBoxScaleMethod = z.infer; export const isBoundingBoxScaleMethod = (v: unknown): v is BoundingBoxScaleMethod => zBoundingBoxScaleMethod.safeParse(v).success; -export type CanvasDrawingTool = 'brush' | 'eraser'; +type CanvasDrawingTool = 'brush' | 'eraser'; export type CanvasTool = CanvasDrawingTool | 'move' | 'colorPicker'; @@ -55,7 +53,7 @@ export type CanvasBaseLine = { clip?: IRect; }; -export type CanvasFillRect = { +type CanvasFillRect = { kind: 'fillRect'; layer: 'base'; x: number; @@ -65,7 +63,7 @@ export type CanvasFillRect = { color: RgbaColor; }; -export type CanvasEraseRect = { +type CanvasEraseRect = { kind: 'eraseRect'; layer: 'base'; x: number; @@ -74,7 +72,7 @@ export type CanvasEraseRect = { height: number; }; -export type CanvasObject = CanvasImage | CanvasBaseLine | CanvasMaskLine | CanvasFillRect | CanvasEraseRect; +type CanvasObject = CanvasImage | CanvasBaseLine | CanvasMaskLine | CanvasFillRect | CanvasEraseRect; export type CanvasLayerState = { objects: CanvasObject[]; @@ -85,11 +83,6 @@ export type CanvasLayerState = { }; }; -export type CanvasSession = { - sessionId: string; - boundingBox: IRect; -}; - // type guards export const isCanvasMaskLine = (obj: CanvasObject): obj is CanvasMaskLine => obj.kind === 'line' && obj.layer === 'mask'; @@ -112,7 +105,6 @@ export interface CanvasState { _version: 1; boundingBoxCoordinates: Vector2d; boundingBoxDimensions: Dimensions; - boundingBoxPreviewFill: RgbaColor; boundingBoxScaleMethod: BoundingBoxScaleMethod; brushColor: RgbaColor; brushSize: number; diff --git 
a/invokeai/frontend/web/src/features/canvas/util/colorToString.ts b/invokeai/frontend/web/src/features/canvas/util/colorToString.ts index 378448dd3f..a4b619c5de 100644 --- a/invokeai/frontend/web/src/features/canvas/util/colorToString.ts +++ b/invokeai/frontend/web/src/features/canvas/util/colorToString.ts @@ -1,16 +1,6 @@ -import type { RgbaColor, RgbColor } from 'react-colorful'; +import type { RgbaColor } from 'react-colorful'; export const rgbaColorToString = (color: RgbaColor): string => { const { r, g, b, a } = color; return `rgba(${r}, ${g}, ${b}, ${a})`; }; - -export const rgbaColorToRgbString = (color: RgbaColor): string => { - const { r, g, b } = color; - return `rgba(${r}, ${g}, ${b})`; -}; - -export const rgbColorToString = (color: RgbColor): string => { - const { r, g, b } = color; - return `rgba(${r}, ${g}, ${b})`; -}; diff --git a/invokeai/frontend/web/src/features/canvas/util/constants.ts b/invokeai/frontend/web/src/features/canvas/util/constants.ts index 2579cc33eb..3291732ecc 100644 --- a/invokeai/frontend/web/src/features/canvas/util/constants.ts +++ b/invokeai/frontend/web/src/features/canvas/util/constants.ts @@ -1,6 +1,3 @@ -// bounding box anchor size -export const TRANSFORMER_ANCHOR_SIZE = 15; - // canvas wheel zoom exponential scale factor export const CANVAS_SCALE_BY = 0.999; diff --git a/invokeai/frontend/web/src/features/canvas/util/roundDimensionsToMultiple.ts b/invokeai/frontend/web/src/features/canvas/util/roundDimensionsToMultiple.ts deleted file mode 100644 index 8e526a54e6..0000000000 --- a/invokeai/frontend/web/src/features/canvas/util/roundDimensionsToMultiple.ts +++ /dev/null @@ -1,11 +0,0 @@ -import { roundToMultiple } from 'common/util/roundDownToMultiple'; -import type { Dimensions } from 'features/canvas/store/canvasTypes'; - -const roundDimensionsToMultiple = (dimensions: Dimensions, multiple: number): Dimensions => { - return { - width: roundToMultiple(dimensions.width, multiple), - height: roundToMultiple(dimensions.height, multiple), - }; -}; - -export default roundDimensionsToMultiple; diff --git a/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterConfig.tsx b/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterConfig.tsx index 6124823808..42499b015c 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterConfig.tsx +++ b/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterConfig.tsx @@ -1,5 +1,6 @@ import { Box, Flex, FormControl, FormLabel, Icon, IconButton, Switch } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import ParamControlAdapterModel from 'features/controlAdapters/components/parameters/ParamControlAdapterModel'; import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled'; import { useControlAdapterType } from 'features/controlAdapters/hooks/useControlAdapterType'; import { @@ -20,7 +21,6 @@ import ControlAdapterShouldAutoConfig from './ControlAdapterShouldAutoConfig'; import ControlNetCanvasImageImports from './imports/ControlNetCanvasImageImports'; import { ParamControlAdapterBeginEnd } from './parameters/ParamControlAdapterBeginEnd'; import ParamControlAdapterControlMode from './parameters/ParamControlAdapterControlMode'; -import ParamControlAdapterModel from './parameters/ParamControlAdapterModel'; import ParamControlAdapterProcessorSelect from './parameters/ParamControlAdapterProcessorSelect'; import 
ParamControlAdapterResizeMode from './parameters/ParamControlAdapterResizeMode'; import ParamControlAdapterWeight from './parameters/ParamControlAdapterWeight'; diff --git a/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterProcessorComponent.tsx b/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterProcessorComponent.tsx index 8679e7aff3..2e37d88e27 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterProcessorComponent.tsx +++ b/invokeai/frontend/web/src/features/controlAdapters/components/ControlAdapterProcessorComponent.tsx @@ -17,7 +17,7 @@ import NormalBaeProcessor from './processors/NormalBaeProcessor'; import PidiProcessor from './processors/PidiProcessor'; import ZoeDepthProcessor from './processors/ZoeDepthProcessor'; -export type Props = { +type Props = { id: string; }; diff --git a/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterBeginEnd.tsx b/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterBeginEnd.tsx index a62f7e7d8a..245c182b9f 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterBeginEnd.tsx +++ b/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterBeginEnd.tsx @@ -1,5 +1,6 @@ import { CompositeRangeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library'; import { useAppDispatch } from 'app/store/storeHooks'; +import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { useControlAdapterBeginEndStepPct } from 'features/controlAdapters/hooks/useControlAdapterBeginEndStepPct'; import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled'; import { @@ -61,12 +62,10 @@ export const ParamControlAdapterBeginEnd = memo(({ id }: Props) => { } return ( - - {t('controlnet.beginEndStepPercent')} + + + {t('controlnet.beginEndStepPercent')} + generation.model); - const ParamControlAdapterModel = ({ id }: ParamControlAdapterModelProps) => { const isEnabled = useControlAdapterIsEnabled(id); const controlAdapterType = useControlAdapterType(id); const model = useControlAdapterModel(id); const dispatch = useAppDispatch(); - const currentBaseModel = useAppSelector((s) => s.generation.model?.base_model); - const mainModel = useAppSelector(selectMainModel); - const { t } = useTranslation(); + const currentBaseModel = useAppSelector((s) => s.generation.model?.base); - const models = useControlAdapterModelEntities(controlAdapterType); + const { data, isLoading } = useControlAdapterModelQuery(controlAdapterType); const _onChange = useCallback( - (model: ControlNetModelConfigEntity | IPAdapterModelConfigEntity | T2IAdapterModelConfigEntity | null) => { + (model: ControlNetModelConfig | IPAdapterModelConfig | T2IAdapterModelConfig | null) => { if (!model) { return; } dispatch( controlAdapterModelChanged({ id, - model: pick(model, 'base_model', 'model_name'), + model: getModelKeyAndBase(model), }) ); }, @@ -55,34 +43,18 @@ const ParamControlAdapterModel = ({ id }: ParamControlAdapterModelProps) => { [controlAdapterType, model] ); - const getIsDisabled = useCallback( - (model: AnyModelConfig): boolean => { - const isCompatible = currentBaseModel === model.base_model; - const hasMainModel = Boolean(currentBaseModel); - return !hasMainModel || !isCompatible; - }, - [currentBaseModel] - ); - - const { options, value, onChange, noOptionsMessage } = 
useGroupedModelCombobox({ - modelEntities: models, - onChange: _onChange, + const { items, selectedItem, onChange, placeholder } = useModelCustomSelect({ + data, + isLoading, selectedModel, - getIsDisabled, + onChange: _onChange, + modelFilter: (model) => model.base === currentBaseModel, }); return ( - - - - - + + + ); }; diff --git a/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterProcessorSelect.tsx b/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterProcessorSelect.tsx index 326dcc412a..5257a47128 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterProcessorSelect.tsx +++ b/invokeai/frontend/web/src/features/controlAdapters/components/parameters/ParamControlAdapterProcessorSelect.tsx @@ -2,6 +2,7 @@ import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library'; import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library'; import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { useControlAdapterIsEnabled } from 'features/controlAdapters/hooks/useControlAdapterIsEnabled'; import { useControlAdapterProcessorNode } from 'features/controlAdapters/hooks/useControlAdapterProcessorNode'; import { CONTROLNET_PROCESSORS } from 'features/controlAdapters/store/constants'; @@ -58,7 +59,9 @@ const ParamControlAdapterProcessorSelect = ({ id }: Props) => { } return ( - {t('controlnet.processor')} + + {t('controlnet.processor')} + ); diff --git a/invokeai/frontend/web/src/features/controlAdapters/hooks/useAddControlAdapter.ts b/invokeai/frontend/web/src/features/controlAdapters/hooks/useAddControlAdapter.ts index 51b36968d2..7fd1088767 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/hooks/useAddControlAdapter.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/hooks/useAddControlAdapter.ts @@ -6,14 +6,14 @@ import { useCallback, useMemo } from 'react'; import { useControlAdapterModels } from './useControlAdapterModels'; export const useAddControlAdapter = (type: ControlAdapterType) => { - const baseModel = useAppSelector((s) => s.generation.model?.base_model); + const baseModel = useAppSelector((s) => s.generation.model?.base); const dispatch = useAppDispatch(); const models = useControlAdapterModels(type); const firstModel = useMemo(() => { // prefer to use a model that matches the base model - const firstCompatibleModel = models.filter((m) => (baseModel ? m.base_model === baseModel : true))[0]; + const firstCompatibleModel = models.filter((m) => (baseModel ? 
m.base === baseModel : true))[0]; if (firstCompatibleModel) { return firstCompatibleModel; diff --git a/invokeai/frontend/web/src/features/controlAdapters/hooks/useControlAdapter.ts b/invokeai/frontend/web/src/features/controlAdapters/hooks/useControlAdapter.ts deleted file mode 100644 index 6fdee62922..0000000000 --- a/invokeai/frontend/web/src/features/controlAdapters/hooks/useControlAdapter.ts +++ /dev/null @@ -1,21 +0,0 @@ -import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; -import { useAppSelector } from 'app/store/storeHooks'; -import { - selectControlAdapterById, - selectControlAdaptersSlice, -} from 'features/controlAdapters/store/controlAdaptersSlice'; -import { useMemo } from 'react'; - -export const useControlAdapter = (id: string) => { - const selector = useMemo( - () => - createMemoizedSelector(selectControlAdaptersSlice, (controlAdapters) => - selectControlAdapterById(controlAdapters, id) - ), - [id] - ); - - const controlAdapter = useAppSelector(selector); - - return controlAdapter; -}; diff --git a/invokeai/frontend/web/src/features/controlAdapters/hooks/useControlAdapterModelEntities.ts b/invokeai/frontend/web/src/features/controlAdapters/hooks/useControlAdapterModelEntities.ts deleted file mode 100644 index 0c8baaacc2..0000000000 --- a/invokeai/frontend/web/src/features/controlAdapters/hooks/useControlAdapterModelEntities.ts +++ /dev/null @@ -1,23 +0,0 @@ -import type { ControlAdapterType } from 'features/controlAdapters/store/types'; -import { - useGetControlNetModelsQuery, - useGetIPAdapterModelsQuery, - useGetT2IAdapterModelsQuery, -} from 'services/api/endpoints/models'; - -export const useControlAdapterModelEntities = (type?: ControlAdapterType) => { - const { data: controlNetModelsData } = useGetControlNetModelsQuery(); - const { data: t2iAdapterModelsData } = useGetT2IAdapterModelsQuery(); - const { data: ipAdapterModelsData } = useGetIPAdapterModelsQuery(); - - if (type === 'controlnet') { - return controlNetModelsData; - } - if (type === 't2i_adapter') { - return t2iAdapterModelsData; - } - if (type === 'ip_adapter') { - return ipAdapterModelsData; - } - return; -}; diff --git a/invokeai/frontend/web/src/features/controlAdapters/hooks/useControlAdapterModelQuery.ts b/invokeai/frontend/web/src/features/controlAdapters/hooks/useControlAdapterModelQuery.ts new file mode 100644 index 0000000000..1d092497af --- /dev/null +++ b/invokeai/frontend/web/src/features/controlAdapters/hooks/useControlAdapterModelQuery.ts @@ -0,0 +1,26 @@ +import type { ControlAdapterType } from 'features/controlAdapters/store/types'; +import { + useGetControlNetModelsQuery, + useGetIPAdapterModelsQuery, + useGetT2IAdapterModelsQuery, +} from 'services/api/endpoints/models'; + +export const useControlAdapterModelQuery = (type: ControlAdapterType) => { + const controlNetModelsQuery = useGetControlNetModelsQuery(); + const t2iAdapterModelsQuery = useGetT2IAdapterModelsQuery(); + const ipAdapterModelsQuery = useGetIPAdapterModelsQuery(); + + if (type === 'controlnet') { + return controlNetModelsQuery; + } + if (type === 't2i_adapter') { + return t2iAdapterModelsQuery; + } + if (type === 'ip_adapter') { + return ipAdapterModelsQuery; + } + + // Assert that the end of the function is not reachable. 
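// --- begin editor's illustrative sketch (not part of this diff) ---
// The `never` assignment above is TypeScript's exhaustiveness-check idiom: it
// only compiles while every member of ControlAdapterType has been handled, so
// adding a new adapter type without a branch becomes a compile-time error
// instead of a silent `undefined`. A self-contained example of the same
// pattern, with hypothetical names:
type Shape = { kind: 'circle'; radius: number } | { kind: 'square'; side: number };

const area = (s: Shape): number => {
  if (s.kind === 'circle') {
    return Math.PI * s.radius ** 2;
  }
  if (s.kind === 'square') {
    return s.side ** 2;
  }
  // If a new Shape variant is added above, `s` is no longer `never` here and
  // this assignment fails to type-check.
  const exhaustiveCheck: never = s;
  return exhaustiveCheck;
};
// --- end sketch ---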
+ const exhaustiveCheck: never = type; + return exhaustiveCheck; +}; diff --git a/invokeai/frontend/web/src/features/controlAdapters/hooks/useControlAdapterType.ts b/invokeai/frontend/web/src/features/controlAdapters/hooks/useControlAdapterType.ts index 4e15dc9e64..fe818f3287 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/hooks/useControlAdapterType.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/hooks/useControlAdapterType.ts @@ -5,14 +5,16 @@ import { selectControlAdaptersSlice, } from 'features/controlAdapters/store/controlAdaptersSlice'; import { useMemo } from 'react'; +import { assert } from 'tsafe'; export const useControlAdapterType = (id: string) => { const selector = useMemo( () => - createMemoizedSelector( - selectControlAdaptersSlice, - (controlAdapters) => selectControlAdapterById(controlAdapters, id)?.type - ), + createMemoizedSelector(selectControlAdaptersSlice, (controlAdapters) => { + const type = selectControlAdapterById(controlAdapters, id)?.type; + assert(type !== undefined, `Control adapter with id ${id} not found`); + return type; + }), [id] ); diff --git a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts index 49b07f16a1..a20e287011 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/store/controlAdaptersSlice.ts @@ -30,20 +30,18 @@ import type { } from './types'; import { isControlNet, isControlNetOrT2IAdapter, isIPAdapter, isT2IAdapter } from './types'; -export const caAdapter = createEntityAdapter({ +const caAdapter = createEntityAdapter({ selectId: (ca) => ca.id, }); -export const caAdapterSelectors = caAdapter.getSelectors(undefined, getSelectorsOptions); +const caAdapterSelectors = caAdapter.getSelectors(undefined, getSelectorsOptions); export const { selectById: selectControlAdapterById, selectAll: selectControlAdapterAll, - selectEntities: selectControlAdapterEntities, selectIds: selectControlAdapterIds, - selectTotal: selectControlAdapterTotal, } = caAdapterSelectors; -export const initialControlAdaptersState: ControlAdaptersState = caAdapter.getInitialState<{ +const initialControlAdaptersState: ControlAdaptersState = caAdapter.getInitialState<{ _version: 1; pendingControlImages: string[]; }>({ @@ -131,22 +129,6 @@ export const controlAdaptersSlice = createSlice({ return { payload: { id, newId: uuidv4() } }; }, }, - controlAdapterAddedFromImage: { - reducer: ( - state, - action: PayloadAction<{ - id: string; - type: ControlAdapterType; - controlImage: string; - }> - ) => { - const { id, type, controlImage } = action.payload; - caAdapter.addOne(state, buildControlAdapter(id, type, { controlImage })); - }, - prepare: (payload: { type: ControlAdapterType; controlImage: string }) => { - return { payload: { ...payload, id: uuidv4() } }; - }, - }, controlAdapterRemoved: (state, action: PayloadAction<{ id: string }>) => { caAdapter.removeOne(state, action.payload.id); }, @@ -236,7 +218,8 @@ export const controlAdaptersSlice = createSlice({ let processorType: ControlAdapterProcessorType | undefined = undefined; for (const modelSubstring in CONTROLADAPTER_MODEL_DEFAULT_PROCESSORS) { - if (model.model_name.includes(modelSubstring)) { + // TODO(MM2): matching modelSubstring to the model key is no longer a valid way to figure out the default processorType + if (model.key.includes(modelSubstring)) { processorType = 
CONTROLADAPTER_MODEL_DEFAULT_PROCESSORS[modelSubstring]; break; } @@ -359,7 +342,8 @@ export const controlAdaptersSlice = createSlice({ let processorType: ControlAdapterProcessorType | undefined = undefined; for (const modelSubstring in CONTROLADAPTER_MODEL_DEFAULT_PROCESSORS) { - if (cn.model?.model_name.includes(modelSubstring)) { + // TODO(MM2): matching modelSubstring to the model key is no longer a valid way to figure out the default processorType + if (cn.model?.key.includes(modelSubstring)) { processorType = CONTROLADAPTER_MODEL_DEFAULT_PROCESSORS[modelSubstring]; break; } @@ -405,7 +389,6 @@ export const { controlAdapterAdded, controlAdapterRecalled, controlAdapterDuplicated, - controlAdapterAddedFromImage, controlAdapterRemoved, controlAdapterImageChanged, controlAdapterProcessedImageChanged, @@ -424,16 +407,12 @@ export const { controlAdapterModelCleared, } = controlAdaptersSlice.actions; -export const isAnyControlAdapterAdded = isAnyOf( - controlAdapterAdded, - controlAdapterAddedFromImage, - controlAdapterRecalled -); +export const isAnyControlAdapterAdded = isAnyOf(controlAdapterAdded, controlAdapterRecalled); export const selectControlAdaptersSlice = (state: RootState) => state.controlAdapters; /* eslint-disable-next-line @typescript-eslint/no-explicit-any */ -export const migrateControlAdaptersState = (state: any): any => { +const migrateControlAdaptersState = (state: any): any => { if (!('_version' in state)) { state._version = 1; } diff --git a/invokeai/frontend/web/src/features/controlAdapters/store/types.ts b/invokeai/frontend/web/src/features/controlAdapters/store/types.ts index d7fac2a0f8..3665355ecf 100644 --- a/invokeai/frontend/web/src/features/controlAdapters/store/types.ts +++ b/invokeai/frontend/web/src/features/controlAdapters/store/types.ts @@ -4,7 +4,6 @@ import type { ParameterIPAdapterModel, ParameterT2IAdapterModel, } from 'features/parameters/types/parameterSchemas'; -import { isObject } from 'lodash-es'; import type { components } from 'services/api/schema'; import type { CannyImageProcessorInvocation, @@ -81,7 +80,7 @@ export type RequiredDepthAnythingImageProcessorInvocation = O.Required< 'type' | 'model_size' | 'resolution' | 'offload' >; -export const zDepthAnythingModelSize = z.enum(['large', 'base', 'small']); +const zDepthAnythingModelSize = z.enum(['large', 'base', 'small']); export type DepthAnythingModelSize = z.infer; export const isDepthAnythingModelSize = (v: unknown): v is DepthAnythingModelSize => zDepthAnythingModelSize.safeParse(v).success; @@ -186,151 +185,9 @@ export type RequiredControlAdapterProcessorNode = > | { type: 'none' }; -/** - * Type guard for CannyImageProcessorInvocation - */ -export const isCannyImageProcessorInvocation = (obj: unknown): obj is CannyImageProcessorInvocation => { - if (isObject(obj) && 'type' in obj && obj.type === 'canny_image_processor') { - return true; - } - return false; -}; - -/** - * Type guard for ColorMapImageProcessorInvocation - */ -export const isColorMapImageProcessorInvocation = (obj: unknown): obj is ColorMapImageProcessorInvocation => { - if (isObject(obj) && 'type' in obj && obj.type === 'color_map_image_processor') { - return true; - } - return false; -}; - -/** - * Type guard for ContentShuffleImageProcessorInvocation - */ -export const isContentShuffleImageProcessorInvocation = ( - obj: unknown -): obj is ContentShuffleImageProcessorInvocation => { - if (isObject(obj) && 'type' in obj && obj.type === 'content_shuffle_image_processor') { - return true; - } - return false; -}; - -/** - * 
Type guard for DepthAnythingImageProcessorInvocation - */ -export const isDepthAnythingImageProcessorInvocation = (obj: unknown): obj is DepthAnythingImageProcessorInvocation => { - if (isObject(obj) && 'type' in obj && obj.type === 'depth_anything_image_processor') { - return true; - } - return false; -}; - -/** - * Type guard for HedImageprocessorInvocation - */ -export const isHedImageprocessorInvocation = (obj: unknown): obj is HedImageProcessorInvocation => { - if (isObject(obj) && 'type' in obj && obj.type === 'hed_image_processor') { - return true; - } - return false; -}; - -/** - * Type guard for LineartAnimeImageProcessorInvocation - */ -export const isLineartAnimeImageProcessorInvocation = (obj: unknown): obj is LineartAnimeImageProcessorInvocation => { - if (isObject(obj) && 'type' in obj && obj.type === 'lineart_anime_image_processor') { - return true; - } - return false; -}; - -/** - * Type guard for LineartImageProcessorInvocation - */ -export const isLineartImageProcessorInvocation = (obj: unknown): obj is LineartImageProcessorInvocation => { - if (isObject(obj) && 'type' in obj && obj.type === 'lineart_image_processor') { - return true; - } - return false; -}; - -/** - * Type guard for MediapipeFaceProcessorInvocation - */ -export const isMediapipeFaceProcessorInvocation = (obj: unknown): obj is MediapipeFaceProcessorInvocation => { - if (isObject(obj) && 'type' in obj && obj.type === 'mediapipe_face_processor') { - return true; - } - return false; -}; - -/** - * Type guard for MidasDepthImageProcessorInvocation - */ -export const isMidasDepthImageProcessorInvocation = (obj: unknown): obj is MidasDepthImageProcessorInvocation => { - if (isObject(obj) && 'type' in obj && obj.type === 'midas_depth_image_processor') { - return true; - } - return false; -}; - -/** - * Type guard for MlsdImageProcessorInvocation - */ -export const isMlsdImageProcessorInvocation = (obj: unknown): obj is MlsdImageProcessorInvocation => { - if (isObject(obj) && 'type' in obj && obj.type === 'mlsd_image_processor') { - return true; - } - return false; -}; - -/** - * Type guard for NormalbaeImageProcessorInvocation - */ -export const isNormalbaeImageProcessorInvocation = (obj: unknown): obj is NormalbaeImageProcessorInvocation => { - if (isObject(obj) && 'type' in obj && obj.type === 'normalbae_image_processor') { - return true; - } - return false; -}; - -/** - * Type guard for DWOpenposeImageProcessorInvocation - */ -export const isDWOpenposeImageProcessorInvocation = (obj: unknown): obj is DWOpenposeImageProcessorInvocation => { - if (isObject(obj) && 'type' in obj && obj.type === 'dw_openpose_image_processor') { - return true; - } - return false; -}; - -/** - * Type guard for PidiImageProcessorInvocation - */ -export const isPidiImageProcessorInvocation = (obj: unknown): obj is PidiImageProcessorInvocation => { - if (isObject(obj) && 'type' in obj && obj.type === 'pidi_image_processor') { - return true; - } - return false; -}; - -/** - * Type guard for ZoeDepthImageProcessorInvocation - */ -export const isZoeDepthImageProcessorInvocation = (obj: unknown): obj is ZoeDepthImageProcessorInvocation => { - if (isObject(obj) && 'type' in obj && obj.type === 'zoe_depth_image_processor') { - return true; - } - return false; -}; - export type ControlMode = NonNullable<components['schemas']['ControlNetInvocation']['control_mode']>; -export const zResizeMode = z.enum(['just_resize', 'crop_resize', 'fill_resize', 'just_resize_simple']); +const zResizeMode = z.enum(['just_resize', 'crop_resize', 'fill_resize', 'just_resize_simple']); export type ResizeMode = z.infer<typeof zResizeMode>;
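// --- begin editor's illustrative sketch (not part of this diff) ---
// The zResizeMode lines above use the zod idiom this codebase favors: define
// the runtime enum once, derive the static type with z.infer, and get a
// runtime guard for free from safeParse. A self-contained example of the same
// pattern, with hypothetical names:
import { z } from 'zod';

const zFruit = z.enum(['apple', 'banana', 'cherry']);
type Fruit = z.infer<typeof zFruit>; // 'apple' | 'banana' | 'cherry'
const isFruit = (v: unknown): v is Fruit => zFruit.safeParse(v).success;

// Narrow untrusted input (e.g., a persisted setting) to the union safely.
const parseFruit = (raw: unknown): Fruit => (isFruit(raw) ? raw : 'apple');
// --- end sketch ---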
export const isResizeMode = (v: unknown): v is ResizeMode => zResizeMode.safeParse(v).success; diff --git a/invokeai/frontend/web/src/features/dnd/components/DndSortable.tsx b/invokeai/frontend/web/src/features/dnd/components/DndSortable.tsx new file mode 100644 index 0000000000..786e1ce9c2 --- /dev/null +++ b/invokeai/frontend/web/src/features/dnd/components/DndSortable.tsx @@ -0,0 +1,23 @@ +import type { DragEndEvent } from '@dnd-kit/core'; +import { SortableContext, verticalListSortingStrategy } from '@dnd-kit/sortable'; +import type { PropsWithChildren } from 'react'; +import { memo } from 'react'; + +import { DndContextTypesafe } from './DndContextTypesafe'; + +type Props = PropsWithChildren & { + items: string[]; + onDragEnd(event: DragEndEvent): void; +}; + +const DndSortable = (props: Props) => { + return ( + + + {props.children} + + + ); +}; + +export default memo(DndSortable); diff --git a/invokeai/frontend/web/src/features/dnd/types/index.ts b/invokeai/frontend/web/src/features/dnd/types/index.ts index 6e680b4ba9..b2b7820762 100644 --- a/invokeai/frontend/web/src/features/dnd/types/index.ts +++ b/invokeai/frontend/web/src/features/dnd/types/index.ts @@ -18,30 +18,26 @@ type BaseDropData = { id: string; }; -export type CurrentImageDropData = BaseDropData & { +type CurrentImageDropData = BaseDropData & { actionType: 'SET_CURRENT_IMAGE'; }; -export type InitialImageDropData = BaseDropData & { +type InitialImageDropData = BaseDropData & { actionType: 'SET_INITIAL_IMAGE'; }; -export type ControlAdapterDropData = BaseDropData & { +type ControlAdapterDropData = BaseDropData & { actionType: 'SET_CONTROL_ADAPTER_IMAGE'; context: { id: string; }; }; -export type IPAdapterImageDropData = BaseDropData & { - actionType: 'SET_IP_ADAPTER_IMAGE'; -}; - export type CanvasInitialImageDropData = BaseDropData & { actionType: 'SET_CANVAS_INITIAL_IMAGE'; }; -export type NodesImageDropData = BaseDropData & { +type NodesImageDropData = BaseDropData & { actionType: 'SET_NODES_IMAGE'; context: { nodeId: string; @@ -71,7 +67,7 @@ type BaseDragData = { id: string; }; -export type NodeFieldDraggableData = BaseDragData & { +type NodeFieldDraggableData = BaseDragData & { payloadType: 'NODE_FIELD'; payload: { nodeId: string; @@ -114,7 +110,7 @@ export interface TypesafeActive extends Omit { data: React.MutableRefObject; } -export interface TypesafeOver extends Omit { +interface TypesafeOver extends Omit { data: React.MutableRefObject; } @@ -127,10 +123,10 @@ interface DragEvent { } export interface DragStartEvent extends Pick {} -export interface DragMoveEvent extends DragEvent {} -export interface DragOverEvent extends DragMoveEvent {} +interface DragMoveEvent extends DragEvent {} +interface DragOverEvent extends DragMoveEvent {} export interface DragEndEvent extends DragEvent {} -export interface DragCancelEvent extends DragEndEvent {} +interface DragCancelEvent extends DragEndEvent {} export interface DndContextTypesafeProps extends Omit { diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCombinatorial.tsx b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCombinatorial.tsx deleted file mode 100644 index 64901ddb86..0000000000 --- a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCombinatorial.tsx +++ /dev/null @@ -1,24 +0,0 @@ -import { FormControl, FormLabel, Switch } from '@invoke-ai/ui-library'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { combinatorialToggled } 
from 'features/dynamicPrompts/store/dynamicPromptsSlice'; -import { memo, useCallback } from 'react'; -import { useTranslation } from 'react-i18next'; - -const ParamDynamicPromptsCombinatorial = () => { - const combinatorial = useAppSelector((s) => s.dynamicPrompts.combinatorial); - const dispatch = useAppDispatch(); - const { t } = useTranslation(); - - const handleChange = useCallback(() => { - dispatch(combinatorialToggled()); - }, [dispatch]); - - return ( - - {t('dynamicPrompts.combinatorial')} - - - ); -}; - -export default memo(ParamDynamicPromptsCombinatorial); diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts b/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts index 7bb0b29659..d73db3e810 100644 --- a/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts +++ b/invokeai/frontend/web/src/features/dynamicPrompts/store/dynamicPromptsSlice.ts @@ -3,11 +3,11 @@ import { createSlice } from '@reduxjs/toolkit'; import type { PersistConfig, RootState } from 'app/store/store'; import { z } from 'zod'; -export const zSeedBehaviour = z.enum(['PER_ITERATION', 'PER_PROMPT']); -export type SeedBehaviour = z.infer; +const zSeedBehaviour = z.enum(['PER_ITERATION', 'PER_PROMPT']); +type SeedBehaviour = z.infer; export const isSeedBehaviour = (v: unknown): v is SeedBehaviour => zSeedBehaviour.safeParse(v).success; -export interface DynamicPromptsState { +interface DynamicPromptsState { _version: 1; maxPrompts: number; combinatorial: boolean; @@ -18,7 +18,7 @@ export interface DynamicPromptsState { seedBehaviour: SeedBehaviour; } -export const initialDynamicPromptsState: DynamicPromptsState = { +const initialDynamicPromptsState: DynamicPromptsState = { _version: 1, maxPrompts: 100, combinatorial: true, @@ -29,11 +29,9 @@ export const initialDynamicPromptsState: DynamicPromptsState = { seedBehaviour: 'PER_ITERATION', }; -const initialState: DynamicPromptsState = initialDynamicPromptsState; - export const dynamicPromptsSlice = createSlice({ name: 'dynamicPrompts', - initialState, + initialState: initialDynamicPromptsState, reducers: { maxPromptsChanged: (state, action: PayloadAction) => { state.maxPrompts = action.payload; @@ -77,7 +75,7 @@ export const { export const selectDynamicPromptsSlice = (state: RootState) => state.dynamicPrompts; /* eslint-disable-next-line @typescript-eslint/no-explicit-any */ -export const migrateDynamicPromptsState = (state: any): any => { +const migrateDynamicPromptsState = (state: any): any => { if (!('_version' in state)) { state._version = 1; } diff --git a/invokeai/frontend/web/src/features/embedding/EmbeddingSelect.tsx b/invokeai/frontend/web/src/features/embedding/EmbeddingSelect.tsx index ffe9d63360..848cb04386 100644 --- a/invokeai/frontend/web/src/features/embedding/EmbeddingSelect.tsx +++ b/invokeai/frontend/web/src/features/embedding/EmbeddingSelect.tsx @@ -6,19 +6,19 @@ import type { EmbeddingSelectProps } from 'features/embedding/types'; import { t } from 'i18next'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; -import type { TextualInversionModelConfigEntity } from 'services/api/endpoints/models'; import { useGetTextualInversionModelsQuery } from 'services/api/endpoints/models'; +import type { TextualInversionModelConfig } from 'services/api/types'; const noOptionsMessage = () => t('embedding.noMatchingEmbedding'); export const EmbeddingSelect = memo(({ onSelect, onClose }: EmbeddingSelectProps) => { const { t } = 
useTranslation(); - const currentBaseModel = useAppSelector((s) => s.generation.model?.base_model); + const currentBaseModel = useAppSelector((s) => s.generation.model?.base); const getIsDisabled = useCallback( - (embedding: TextualInversionModelConfigEntity): boolean => { - const isCompatible = currentBaseModel === embedding.base_model; + (embedding: TextualInversionModelConfig): boolean => { + const isCompatible = currentBaseModel === embedding.base; const hasMainModel = Boolean(currentBaseModel); return !hasMainModel || !isCompatible; }, @@ -27,11 +27,11 @@ export const EmbeddingSelect = memo(({ onSelect, onClose }: EmbeddingSelectProps const { data, isLoading } = useGetTextualInversionModelsQuery(); const _onChange = useCallback( - (embedding: TextualInversionModelConfigEntity | null) => { + (embedding: TextualInversionModelConfig | null) => { if (!embedding) { return; } - onSelect(embedding.model_name); + onSelect(embedding.name); }, [onSelect] ); diff --git a/invokeai/frontend/web/src/features/embedding/usePrompt.ts b/invokeai/frontend/web/src/features/embedding/usePrompt.ts index c3a708e6ce..b5f245d7f2 100644 --- a/invokeai/frontend/web/src/features/embedding/usePrompt.ts +++ b/invokeai/frontend/web/src/features/embedding/usePrompt.ts @@ -4,7 +4,7 @@ import type { ChangeEventHandler, KeyboardEventHandler, RefObject } from 'react' import { useCallback } from 'react'; import { flushSync } from 'react-dom'; -export type UseInsertEmbeddingArg = { +type UseInsertEmbeddingArg = { prompt: string; textareaRef: RefObject; onChange: (v: string) => void; diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx index 490e8eac9e..ad6c37532e 100644 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/Boards/BoardContextMenu.tsx @@ -5,7 +5,6 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { autoAddBoardIdChanged, selectGallerySlice } from 'features/gallery/store/gallerySlice'; import type { BoardId } from 'features/gallery/store/types'; import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; -import { addToast } from 'features/system/store/systemSlice'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { PiDownloadBold, PiPlusBold } from 'react-icons/pi'; @@ -41,35 +40,9 @@ const BoardContextMenu = ({ board, board_id, setBoardToDelete, children }: Props dispatch(autoAddBoardIdChanged(board_id)); }, [board_id, dispatch]); - const handleBulkDownload = useCallback(async () => { - try { - const response = await bulkDownload({ - image_names: [], - board_id: board_id, - }).unwrap(); - - dispatch( - addToast({ - title: t('gallery.preparingDownload'), - status: 'success', - ...(response.response - ? 
{ - description: response.response, - duration: null, - isClosable: true, - } - : {}), - }) - ); - } catch { - dispatch( - addToast({ - title: t('gallery.preparingDownloadFailed'), - status: 'error', - }) - ); - } - }, [t, board_id, bulkDownload, dispatch]); + const handleBulkDownload = useCallback(() => { + bulkDownload({ image_names: [], board_id: board_id }); + }, [board_id, bulkDownload]); const renderMenuFunc = useCallback( () => ( diff --git a/invokeai/frontend/web/src/features/gallery/components/Boards/NoBoardContextMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/Boards/NoBoardContextMenuItems.tsx deleted file mode 100644 index 4c1ae0eb83..0000000000 --- a/invokeai/frontend/web/src/features/gallery/components/Boards/NoBoardContextMenuItems.tsx +++ /dev/null @@ -1,7 +0,0 @@ -import { memo } from 'react'; - -const NoBoardContextMenuItems = () => { - return <>; -}; - -export default memo(NoBoardContextMenuItems); diff --git a/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImageButtons.tsx b/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImageButtons.tsx index e25324b9ea..896b6efef7 100644 --- a/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImageButtons.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/CurrentImage/CurrentImageButtons.tsx @@ -7,11 +7,12 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { DeleteImageButton } from 'features/deleteImageModal/components/DeleteImageButton'; import { imagesToDeleteSelected } from 'features/deleteImageModal/store/slice'; import SingleSelectionMenuItems from 'features/gallery/components/ImageContextMenu/SingleSelectionMenuItems'; +import { useImageActions } from 'features/gallery/hooks/useImageActions'; import { sentImageToImg2Img } from 'features/gallery/store/actions'; import { selectLastSelectedImage } from 'features/gallery/store/gallerySelectors'; import { selectGallerySlice } from 'features/gallery/store/gallerySlice'; +import { parseAndRecallImageDimensions } from 'features/metadata/util/handlers'; import ParamUpscalePopover from 'features/parameters/components/Upscale/ParamUpscaleSettings'; -import { useRecallParameters } from 'features/parameters/hooks/useRecallParameters'; import { initialImageSelected } from 'features/parameters/store/actions'; import { useIsQueueMutationInProgress } from 'features/queue/hooks/useIsQueueMutationInProgress'; import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; @@ -33,7 +34,6 @@ import { PiRulerBold, } from 'react-icons/pi'; import { useGetImageDTOQuery } from 'services/api/endpoints/images'; -import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata'; const selectShouldDisableToolbarButtons = createSelector( selectSystemSlice, @@ -58,11 +58,10 @@ const CurrentImageButtons = () => { const toaster = useAppToaster(); const { t } = useTranslation(); - const { recallBothPrompts, recallSeed, recallWidthAndHeight, recallAllParameters } = useRecallParameters(); - const { currentData: imageDTO } = useGetImageDTOQuery(lastSelectedImage?.image_name ?? 
skipToken); - const { metadata, isLoading: isLoadingMetadata } = useDebouncedMetadata(lastSelectedImage?.image_name); + const { recallAll, remix, recallSeed, recallPrompts, hasMetadata, hasSeed, hasPrompts, isLoadingMetadata } = + useImageActions(lastSelectedImage?.image_name); const { getAndLoadEmbeddedWorkflow, getAndLoadEmbeddedWorkflowResult } = useGetAndLoadEmbeddedWorkflow({}); @@ -74,51 +73,16 @@ const CurrentImageButtons = () => { }, [getAndLoadEmbeddedWorkflow, lastSelectedImage]); useHotkeys('w', handleLoadWorkflow, [lastSelectedImage]); - - const handleClickUseAllParameters = useCallback(() => { - recallAllParameters(metadata); - }, [metadata, recallAllParameters]); - - useHotkeys('a', handleClickUseAllParameters, [metadata]); - - const handleUseSeed = useCallback(() => { - recallSeed(metadata?.seed); - }, [metadata?.seed, recallSeed]); - - useHotkeys('s', handleUseSeed, [metadata]); - - const handleUsePrompt = useCallback(() => { - recallBothPrompts( - metadata?.positive_prompt, - metadata?.negative_prompt, - metadata?.positive_style_prompt, - metadata?.negative_style_prompt - ); - }, [ - metadata?.negative_prompt, - metadata?.positive_prompt, - metadata?.positive_style_prompt, - metadata?.negative_style_prompt, - recallBothPrompts, - ]); - - useHotkeys('p', handleUsePrompt, [metadata]); - - const handleRemixImage = useCallback(() => { - // Recalls all metadata parameters except seed - recallAllParameters({ - ...metadata, - seed: undefined, - }); - }, [metadata, recallAllParameters]); - - useHotkeys('r', handleRemixImage, [metadata]); + useHotkeys('a', recallAll, [recallAll]); + useHotkeys('s', recallSeed, [recallSeed]); + useHotkeys('p', recallPrompts, [recallPrompts]); + useHotkeys('r', remix, [remix]); const handleUseSize = useCallback(() => { - recallWidthAndHeight(metadata?.width, metadata?.height); - }, [metadata?.width, metadata?.height, recallWidthAndHeight]); + parseAndRecallImageDimensions(lastSelectedImage); + }, [lastSelectedImage]); - useHotkeys('d', handleUseSize, [metadata]); + useHotkeys('d', handleUseSize, [handleUseSize]); const handleSendToImageToImage = useCallback(() => { dispatch(sentImageToImg2Img()); @@ -216,36 +180,30 @@ const CurrentImageButtons = () => { icon={} tooltip={`${t('parameters.remixImage')} (R)`} aria-label={`${t('parameters.remixImage')} (R)`} - isDisabled={!metadata?.positive_prompt} - onClick={handleRemixImage} + isDisabled={!hasMetadata} + onClick={remix} /> } tooltip={`${t('parameters.usePrompt')} (P)`} aria-label={`${t('parameters.usePrompt')} (P)`} - isDisabled={!metadata?.positive_prompt} - onClick={handleUsePrompt} + isDisabled={!hasPrompts} + onClick={recallPrompts} /> } tooltip={`${t('parameters.useSeed')} (S)`} aria-label={`${t('parameters.useSeed')} (S)`} - isDisabled={metadata?.seed === null || metadata?.seed === undefined} - onClick={handleUseSeed} + isDisabled={!hasSeed} + onClick={recallSeed} /> } tooltip={`${t('parameters.useSize')} (D)`} aria-label={`${t('parameters.useSize')} (D)`} - isDisabled={ - metadata?.height === null || - metadata?.height === undefined || - metadata?.width === null || - metadata?.width === undefined - } onClick={handleUseSize} /> { icon={} tooltip={`${t('parameters.useAll')} (A)`} aria-label={`${t('parameters.useAll')} (A)`} - isDisabled={!metadata} - onClick={handleClickUseAllParameters} + isDisabled={!hasMetadata} + onClick={recallAll} /> diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/MultipleSelectionMenuItems.tsx 
b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/MultipleSelectionMenuItems.tsx index e8f71c02f3..7b1fa73472 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/MultipleSelectionMenuItems.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/MultipleSelectionMenuItems.tsx @@ -5,7 +5,6 @@ import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { imagesToChangeSelected, isModalOpenChanged } from 'features/changeBoardModal/store/slice'; import { imagesToDeleteSelected } from 'features/deleteImageModal/store/slice'; import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; -import { addToast } from 'features/system/store/systemSlice'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; import { PiDownloadSimpleBold, PiFoldersBold, PiStarBold, PiStarFill, PiTrashSimpleBold } from 'react-icons/pi'; @@ -44,34 +43,9 @@ const MultipleSelectionMenuItems = () => { unstarImages({ imageDTOs: selection }); }, [unstarImages, selection]); - const handleBulkDownload = useCallback(async () => { - try { - const response = await bulkDownload({ - image_names: selection.map((img) => img.image_name), - }).unwrap(); - - dispatch( - addToast({ - title: t('gallery.preparingDownload'), - status: 'success', - ...(response.response - ? { - description: response.response, - duration: null, - isClosable: true, - } - : {}), - }) - ); - } catch { - dispatch( - addToast({ - title: t('gallery.preparingDownloadFailed'), - status: 'error', - }) - ); - } - }, [t, selection, bulkDownload, dispatch]); + const handleBulkDownload = useCallback(() => { + bulkDownload({ image_names: selection.map((img) => img.image_name) }); + }, [selection, bulkDownload]); const areAllStarred = useMemo(() => { return selection.every((img) => img.starred); diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx index f4d9c7a840..6f49f4afcc 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageContextMenu/SingleSelectionMenuItems.tsx @@ -8,8 +8,8 @@ import { useDownloadImage } from 'common/hooks/useDownloadImage'; import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice'; import { imagesToChangeSelected, isModalOpenChanged } from 'features/changeBoardModal/store/slice'; import { imagesToDeleteSelected } from 'features/deleteImageModal/store/slice'; +import { useImageActions } from 'features/gallery/hooks/useImageActions'; import { sentImageToCanvas, sentImageToImg2Img } from 'features/gallery/store/actions'; -import { useRecallParameters } from 'features/parameters/hooks/useRecallParameters'; import { initialImageSelected } from 'features/parameters/store/actions'; import { selectOptimalDimension } from 'features/parameters/store/generationSlice'; import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; @@ -33,7 +33,6 @@ import { PiTrashSimpleBold, } from 'react-icons/pi'; import { useStarImagesMutation, useUnstarImagesMutation } from 'services/api/endpoints/images'; -import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata'; import type { ImageDTO } from 'services/api/types'; type SingleSelectionMenuItemsProps = { @@ -49,7 +48,9 @@ const 
SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => { const isCanvasEnabled = useFeatureStatus('unifiedCanvas').isFeatureEnabled; const customStarUi = useStore($customStarUI); const { downloadImage } = useDownloadImage(); - const { metadata, isLoading: isLoadingMetadata } = useDebouncedMetadata(imageDTO?.image_name); + + const { recallAll, remix, recallSeed, recallPrompts, hasMetadata, hasSeed, hasPrompts, isLoadingMetadata } = + useImageActions(imageDTO?.image_name); const { getAndLoadEmbeddedWorkflow, getAndLoadEmbeddedWorkflowResult } = useGetAndLoadEmbeddedWorkflow({}); @@ -69,28 +70,6 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => { dispatch(imagesToDeleteSelected([imageDTO])); }, [dispatch, imageDTO]); - const { recallBothPrompts, recallSeed, recallAllParameters } = useRecallParameters(); - - // Recall parameters handlers - const handleRecallPrompt = useCallback(() => { - recallBothPrompts( - metadata?.positive_prompt, - metadata?.negative_prompt, - metadata?.positive_style_prompt, - metadata?.negative_style_prompt - ); - }, [ - metadata?.negative_prompt, - metadata?.positive_prompt, - metadata?.positive_style_prompt, - metadata?.negative_style_prompt, - recallBothPrompts, - ]); - - const handleRecallSeed = useCallback(() => { - recallSeed(metadata?.seed); - }, [metadata?.seed, recallSeed]); - const handleSendToImageToImage = useCallback(() => { dispatch(sentImageToImg2Img()); dispatch(initialImageSelected(imageDTO)); @@ -111,18 +90,6 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => { }); }, [dispatch, imageDTO, t, toaster, optimalDimension]); - const handleUseAllParameters = useCallback(() => { - recallAllParameters(metadata); - }, [metadata, recallAllParameters]); - - const handleRemixImage = useCallback(() => { - // Recalls all metadata parameters except seed - recallAllParameters({ - ...metadata, - seed: undefined, - }); - }, [metadata, recallAllParameters]); - const handleChangeBoard = useCallback(() => { dispatch(imagesToChangeSelected([imageDTO])); dispatch(isModalOpenChanged(true)); @@ -171,33 +138,29 @@ const SingleSelectionMenuItems = (props: SingleSelectionMenuItemsProps) => { : } - onClickCapture={handleRemixImage} - isDisabled={ - isLoadingMetadata || (metadata?.positive_prompt === undefined && metadata?.negative_prompt === undefined) - } + onClickCapture={remix} + isDisabled={isLoadingMetadata || !hasMetadata} > {t('parameters.remixImage')} : } - onClickCapture={handleRecallPrompt} - isDisabled={ - isLoadingMetadata || (metadata?.positive_prompt === undefined && metadata?.negative_prompt === undefined) - } + onClickCapture={recallPrompts} + isDisabled={isLoadingMetadata || !hasPrompts} > {t('parameters.usePrompt')} : } - onClickCapture={handleRecallSeed} - isDisabled={isLoadingMetadata || metadata?.seed === undefined} + onClickCapture={recallSeed} + isDisabled={isLoadingMetadata || !hasSeed} > {t('parameters.useSeed')} : } - onClickCapture={handleUseAllParameters} - isDisabled={isLoadingMetadata || !metadata} + onClickCapture={recallAll} + isDisabled={isLoadingMetadata || !hasMetadata} > {t('parameters.useAll')} diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/types.ts b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/types.ts index e43a55270e..1b7286d6a0 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageGrid/types.ts +++ b/invokeai/frontend/web/src/features/gallery/components/ImageGrid/types.ts @@ -2,7 +2,7 @@ import { atom } 
from 'nanostores'; import type { RefObject } from 'react'; import type { ListRange, VirtuosoGridHandle } from 'react-virtuoso'; -export type VirtuosoGridRefs = { +type VirtuosoGridRefs = { virtuosoRef?: RefObject; rootRef?: RefObject; virtuosoRangeRef?: RefObject; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx index e9a1461186..cfa679c805 100644 --- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx +++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx @@ -1,304 +1,54 @@ -import type { - ControlNetMetadataItem, - CoreMetadata, - IPAdapterMetadataItem, - LoRAMetadataItem, - T2IAdapterMetadataItem, -} from 'features/nodes/types/metadata'; -import { useRecallParameters } from 'features/parameters/hooks/useRecallParameters'; -import { - isParameterControlNetModel, - isParameterLoRAModel, - isParameterT2IAdapterModel, -} from 'features/parameters/types/parameterSchemas'; -import { memo, useCallback, useMemo } from 'react'; -import { useTranslation } from 'react-i18next'; - -import ImageMetadataItem from './ImageMetadataItem'; +import { MetadataControlNets } from 'features/metadata/components/MetadataControlNets'; +import { MetadataIPAdapters } from 'features/metadata/components/MetadataIPAdapters'; +import { MetadataItem } from 'features/metadata/components/MetadataItem'; +import { MetadataLoRAs } from 'features/metadata/components/MetadataLoRAs'; +import { MetadataT2IAdapters } from 'features/metadata/components/MetadataT2IAdapters'; +import { handlers } from 'features/metadata/util/handlers'; +import { memo } from 'react'; type Props = { - metadata?: CoreMetadata; + metadata?: unknown; }; const ImageMetadataActions = (props: Props) => { const { metadata } = props; - const { t } = useTranslation(); - - const { - recallPositivePrompt, - recallNegativePrompt, - recallSeed, - recallCfgScale, - recallCfgRescaleMultiplier, - recallModel, - recallScheduler, - recallVaeModel, - recallSteps, - recallWidth, - recallHeight, - recallStrength, - recallHrfEnabled, - recallHrfStrength, - recallHrfMethod, - recallLoRA, - recallControlNet, - recallIPAdapter, - recallT2IAdapter, - recallSDXLPositiveStylePrompt, - recallSDXLNegativeStylePrompt, - } = useRecallParameters(); - - const handleRecallPositivePrompt = useCallback(() => { - recallPositivePrompt(metadata?.positive_prompt); - }, [metadata?.positive_prompt, recallPositivePrompt]); - - const handleRecallNegativePrompt = useCallback(() => { - recallNegativePrompt(metadata?.negative_prompt); - }, [metadata?.negative_prompt, recallNegativePrompt]); - - const handleRecallSDXLPositiveStylePrompt = useCallback(() => { - recallSDXLPositiveStylePrompt(metadata?.positive_style_prompt); - }, [metadata?.positive_style_prompt, recallSDXLPositiveStylePrompt]); - - const handleRecallSDXLNegativeStylePrompt = useCallback(() => { - recallSDXLNegativeStylePrompt(metadata?.negative__style_prompt); - }, [metadata?.negative__style_prompt, recallSDXLNegativeStylePrompt]); - - const handleRecallSeed = useCallback(() => { - recallSeed(metadata?.seed); - }, [metadata?.seed, recallSeed]); - - const handleRecallModel = useCallback(() => { - recallModel(metadata?.model); - }, [metadata?.model, recallModel]); - - const handleRecallWidth = useCallback(() => { - recallWidth(metadata?.width); - }, [metadata?.width, 
recallWidth]); - - const handleRecallHeight = useCallback(() => { - recallHeight(metadata?.height); - }, [metadata?.height, recallHeight]); - - const handleRecallScheduler = useCallback(() => { - recallScheduler(metadata?.scheduler); - }, [metadata?.scheduler, recallScheduler]); - - const handleRecallVaeModel = useCallback(() => { - recallVaeModel(metadata?.vae); - }, [metadata?.vae, recallVaeModel]); - - const handleRecallSteps = useCallback(() => { - recallSteps(metadata?.steps); - }, [metadata?.steps, recallSteps]); - - const handleRecallCfgScale = useCallback(() => { - recallCfgScale(metadata?.cfg_scale); - }, [metadata?.cfg_scale, recallCfgScale]); - - const handleRecallCfgRescaleMultiplier = useCallback(() => { - recallCfgRescaleMultiplier(metadata?.cfg_rescale_multiplier); - }, [metadata?.cfg_rescale_multiplier, recallCfgRescaleMultiplier]); - - const handleRecallStrength = useCallback(() => { - recallStrength(metadata?.strength); - }, [metadata?.strength, recallStrength]); - - const handleRecallHrfEnabled = useCallback(() => { - recallHrfEnabled(metadata?.hrf_enabled); - }, [metadata?.hrf_enabled, recallHrfEnabled]); - - const handleRecallHrfStrength = useCallback(() => { - recallHrfStrength(metadata?.hrf_strength); - }, [metadata?.hrf_strength, recallHrfStrength]); - - const handleRecallHrfMethod = useCallback(() => { - recallHrfMethod(metadata?.hrf_method); - }, [metadata?.hrf_method, recallHrfMethod]); - - const handleRecallLoRA = useCallback( - (lora: LoRAMetadataItem) => { - recallLoRA(lora); - }, - [recallLoRA] - ); - - const handleRecallControlNet = useCallback( - (controlnet: ControlNetMetadataItem) => { - recallControlNet(controlnet); - }, - [recallControlNet] - ); - - const handleRecallIPAdapter = useCallback( - (ipAdapter: IPAdapterMetadataItem) => { - recallIPAdapter(ipAdapter); - }, - [recallIPAdapter] - ); - - const handleRecallT2IAdapter = useCallback( - (ipAdapter: T2IAdapterMetadataItem) => { - recallT2IAdapter(ipAdapter); - }, - [recallT2IAdapter] - ); - - const validControlNets: ControlNetMetadataItem[] = useMemo(() => { - return metadata?.controlnets - ? metadata.controlnets.filter((controlnet) => isParameterControlNetModel(controlnet.control_model)) - : []; - }, [metadata?.controlnets]); - - const validIPAdapters: IPAdapterMetadataItem[] = useMemo(() => { - return metadata?.ipAdapters - ? metadata.ipAdapters.filter((ipAdapter) => isParameterControlNetModel(ipAdapter.ip_adapter_model)) - : []; - }, [metadata?.ipAdapters]); - - const validT2IAdapters: T2IAdapterMetadataItem[] = useMemo(() => { - return metadata?.t2iAdapters - ? 
metadata.t2iAdapters.filter((t2iAdapter) => isParameterT2IAdapterModel(t2iAdapter.t2i_adapter_model)) - : []; - }, [metadata?.t2iAdapters]); - if (!metadata || Object.keys(metadata).length === 0) { return null; } return ( <> - {metadata.created_by && } - {metadata.generation_mode && ( - - )} - {metadata.positive_prompt && ( - - )} - {metadata.negative_prompt && ( - - )} - {metadata.positive_style_prompt && ( - - )} - {metadata.negative_style_prompt && ( - - )} - {metadata.seed !== undefined && metadata.seed !== null && ( - - )} - {metadata.model !== undefined && metadata.model !== null && metadata.model.model_name && ( - - )} - {metadata.width && ( - - )} - {metadata.height && ( - - )} - {metadata.scheduler && ( - - )} - - {metadata.steps && ( - - )} - {metadata.cfg_scale !== undefined && metadata.cfg_scale !== null && ( - - )} - {metadata.cfg_rescale_multiplier !== undefined && metadata.cfg_rescale_multiplier !== null && ( - - )} - {metadata.strength && ( - - )} - {metadata.hrf_enabled && ( - - )} - {metadata.hrf_enabled && metadata.hrf_strength && ( - - )} - {metadata.hrf_enabled && metadata.hrf_method && ( - - )} - {metadata.loras && - metadata.loras.map((lora, index) => { - if (isParameterLoRAModel(lora.lora)) { - return ( - - ); - } - })} - {validControlNets.map((controlnet, index) => ( - - ))} - {validIPAdapters.map((ipAdapter, index) => ( - - ))} - {validT2IAdapters.map((t2iAdapter, index) => ( - - ))} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ); }; diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataItem.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataItem.tsx deleted file mode 100644 index c6dbd16269..0000000000 --- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataItem.tsx +++ /dev/null @@ -1,70 +0,0 @@ -import { ExternalLink, Flex, IconButton, Text, Tooltip } from '@invoke-ai/ui-library'; -import { memo, useCallback } from 'react'; -import { useTranslation } from 'react-i18next'; -import { IoArrowUndoCircleOutline } from 'react-icons/io5'; -import { PiCopyBold } from 'react-icons/pi'; - -type MetadataItemProps = { - isLink?: boolean; - label: string; - onClick?: () => void; - value: number | string | boolean; - labelPosition?: string; - withCopy?: boolean; -}; - -/** - * Component to display an individual metadata item or parameter. - */ -const ImageMetadataItem = ({ label, value, onClick, isLink, labelPosition, withCopy = false }: MetadataItemProps) => { - const { t } = useTranslation(); - - const handleCopy = useCallback(() => navigator.clipboard.writeText(value.toString()), [value]); - - if (!value) { - return null; - } - - return ( - - {onClick && ( - - } - size="xs" - variant="ghost" - fontSize={20} - onClick={onClick} - /> - - )} - {withCopy && ( - - } - size="xs" - variant="ghost" - fontSize={14} - onClick={handleCopy} - /> - - )} - - - {label}: - - {isLink ? 
( - - ) : ( - - {value.toString()} - - )} - - - ); -}; - -export default memo(ImageMetadataItem); diff --git a/invokeai/frontend/web/src/features/gallery/hooks/useGalleryNavigation.ts b/invokeai/frontend/web/src/features/gallery/hooks/useGalleryNavigation.ts index b6fdb183e6..1464c23285 100644 --- a/invokeai/frontend/web/src/features/gallery/hooks/useGalleryNavigation.ts +++ b/invokeai/frontend/web/src/features/gallery/hooks/useGalleryNavigation.ts @@ -105,7 +105,7 @@ const getImageFuncs = { down: getDownImage, }; -export type UseGalleryNavigationReturn = { +type UseGalleryNavigationReturn = { handleLeftImage: () => void; handleRightImage: () => void; handleUpImage: () => void; diff --git a/invokeai/frontend/web/src/features/gallery/hooks/useImageActions.ts b/invokeai/frontend/web/src/features/gallery/hooks/useImageActions.ts new file mode 100644 index 0000000000..c3ae0cea5f --- /dev/null +++ b/invokeai/frontend/web/src/features/gallery/hooks/useImageActions.ts @@ -0,0 +1,62 @@ +import { handlers, parseAndRecallAllMetadata, parseAndRecallPrompts } from 'features/metadata/util/handlers'; +import { useCallback, useEffect, useState } from 'react'; +import { useDebouncedMetadata } from 'services/api/hooks/useDebouncedMetadata'; + +export const useImageActions = (image_name?: string) => { + const { metadata, isLoading: isLoadingMetadata } = useDebouncedMetadata(image_name); + const [hasMetadata, setHasMetadata] = useState(false); + const [hasSeed, setHasSeed] = useState(false); + const [hasPrompts, setHasPrompts] = useState(false); + + useEffect(() => { + const parseMetadata = async () => { + if (metadata) { + setHasMetadata(true); + try { + await handlers.seed.parse(metadata); + setHasSeed(true); + } catch { + setHasSeed(false); + } + + const promptParseResults = await Promise.allSettled([ + handlers.positivePrompt.parse(metadata), + handlers.negativePrompt.parse(metadata), + handlers.sdxlPositiveStylePrompt.parse(metadata), + handlers.sdxlNegativeStylePrompt.parse(metadata), + ]); + if (promptParseResults.some((result) => result.status === 'fulfilled')) { + setHasPrompts(true); + } else { + setHasPrompts(false); + } + } else { + setHasMetadata(false); + setHasSeed(false); + setHasPrompts(false); + } + }; + parseMetadata(); + }, [metadata]); + + const recallAll = useCallback(() => { + parseAndRecallAllMetadata(metadata); + }, [metadata]); + + const remix = useCallback(() => { + // Recalls all metadata parameters except seed + parseAndRecallAllMetadata(metadata, ['seed']); + }, [metadata]); + + const recallSeed = useCallback(() => { + handlers.seed.parse(metadata).then((seed) => { + handlers.seed.recall && handlers.seed.recall(seed); + }); + }, [metadata]); + + const recallPrompts = useCallback(() => { + parseAndRecallPrompts(metadata); + }, [metadata]); + + return { recallAll, remix, recallSeed, recallPrompts, hasMetadata, hasSeed, hasPrompts, isLoadingMetadata }; +}; diff --git a/invokeai/frontend/web/src/features/gallery/store/actions.ts b/invokeai/frontend/web/src/features/gallery/store/actions.ts index e62a350756..0b42890a26 100644 --- a/invokeai/frontend/web/src/features/gallery/store/actions.ts +++ b/invokeai/frontend/web/src/features/gallery/store/actions.ts @@ -1,15 +1,4 @@ import { createAction } from '@reduxjs/toolkit'; -import type { ImageUsage } from 'features/deleteImageModal/store/types'; -import type { BoardDTO } from 'services/api/types'; - -export type RequestedBoardImagesDeletionArg = { - board: BoardDTO; - imagesUsage: ImageUsage; -}; - -export const 
requestedBoardImagesDeletion = createAction( - 'gallery/requestedBoardImagesDeletion' -); export const sentImageToCanvas = createAction('gallery/sentImageToCanvas'); diff --git a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts index f351d91339..b16efc02b3 100644 --- a/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts +++ b/invokeai/frontend/web/src/features/gallery/store/gallerySlice.ts @@ -9,7 +9,7 @@ import type { ImageDTO } from 'services/api/types'; import type { BoardId, GalleryState, GalleryView } from './types'; import { IMAGE_LIMIT, INITIAL_IMAGE_LIMIT } from './types'; -export const initialGalleryState: GalleryState = { +const initialGalleryState: GalleryState = { selection: [], shouldAutoSwitch: true, autoAssignBoardOnClick: true, @@ -117,7 +117,7 @@ const isAnyBoardDeleted = isAnyOf( export const selectGallerySlice = (state: RootState) => state.gallery; /* eslint-disable-next-line @typescript-eslint/no-explicit-any */ -export const migrateGalleryState = (state: any): any => { +const migrateGalleryState = (state: any): any => { if (!('_version' in state)) { state._version = 1; } diff --git a/invokeai/frontend/web/src/features/hrf/components/ParamHrfMethod.tsx b/invokeai/frontend/web/src/features/hrf/components/ParamHrfMethod.tsx index 8a94544233..65f65240fc 100644 --- a/invokeai/frontend/web/src/features/hrf/components/ParamHrfMethod.tsx +++ b/invokeai/frontend/web/src/features/hrf/components/ParamHrfMethod.tsx @@ -1,6 +1,7 @@ import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library'; import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { setHrfMethod } from 'features/hrf/store/hrfSlice'; import { isParameterHRFMethod } from 'features/parameters/types/parameterSchemas'; import { memo, useCallback, useMemo } from 'react'; @@ -30,7 +31,9 @@ const ParamHrfMethodSelect = () => { return ( - {t('hrf.upscaleMethod')} + + {t('hrf.upscaleMethod')} + ); diff --git a/invokeai/frontend/web/src/features/hrf/components/ParamHrfStrength.tsx b/invokeai/frontend/web/src/features/hrf/components/ParamHrfStrength.tsx index c663989b08..3cb9f7e528 100644 --- a/invokeai/frontend/web/src/features/hrf/components/ParamHrfStrength.tsx +++ b/invokeai/frontend/web/src/features/hrf/components/ParamHrfStrength.tsx @@ -1,5 +1,6 @@ import { CompositeNumberInput, CompositeSlider, FormControl, FormLabel } from '@invoke-ai/ui-library'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { setHrfStrength } from 'features/hrf/store/hrfSlice'; import { memo, useCallback } from 'react'; import { useTranslation } from 'react-i18next'; @@ -25,7 +26,9 @@ const ParamHrfStrength = () => { return ( - {t('parameters.denoisingStrength')} + + {`${t('parameters.denoisingStrength')}`} + { return ( - {t('hrf.enableHrf')} + + {t('hrf.enableHrf')} + ); diff --git a/invokeai/frontend/web/src/features/hrf/store/hrfSlice.ts b/invokeai/frontend/web/src/features/hrf/store/hrfSlice.ts index 4c5769550b..46a562d34f 100644 --- a/invokeai/frontend/web/src/features/hrf/store/hrfSlice.ts +++ b/invokeai/frontend/web/src/features/hrf/store/hrfSlice.ts @@ -3,25 +3,23 @@ import { createSlice } from 
'@reduxjs/toolkit'; import type { PersistConfig, RootState } from 'app/store/store'; import type { ParameterHRFMethod, ParameterStrength } from 'features/parameters/types/parameterSchemas'; -export interface HRFState { +interface HRFState { _version: 1; hrfEnabled: boolean; hrfStrength: ParameterStrength; hrfMethod: ParameterHRFMethod; } -export const initialHRFState: HRFState = { +const initialHRFState: HRFState = { _version: 1, hrfStrength: 0.45, hrfEnabled: false, hrfMethod: 'ESRGAN', }; -const initialState: HRFState = initialHRFState; - export const hrfSlice = createSlice({ name: 'hrf', - initialState, + initialState: initialHRFState, reducers: { setHrfStrength: (state, action: PayloadAction) => { state.hrfStrength = action.payload; @@ -40,7 +38,7 @@ export const { setHrfEnabled, setHrfStrength, setHrfMethod } = hrfSlice.actions; export const selectHrfSlice = (state: RootState) => state.hrf; /* eslint-disable-next-line @typescript-eslint/no-explicit-any */ -export const migrateHRFState = (state: any): any => { +const migrateHRFState = (state: any): any => { if (!('_version' in state)) { state._version = 1; } diff --git a/invokeai/frontend/web/src/features/lora/components/LoRACard.tsx b/invokeai/frontend/web/src/features/lora/components/LoRACard.tsx index caedde875a..ddcdd58e75 100644 --- a/invokeai/frontend/web/src/features/lora/components/LoRACard.tsx +++ b/invokeai/frontend/web/src/features/lora/components/LoRACard.tsx @@ -10,40 +10,43 @@ import { Text, } from '@invoke-ai/ui-library'; import { useAppDispatch } from 'app/store/storeHooks'; +import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import type { LoRA } from 'features/lora/store/loraSlice'; import { loraIsEnabledChanged, loraRemoved, loraWeightChanged } from 'features/lora/store/loraSlice'; import { memo, useCallback } from 'react'; import { PiTrashSimpleBold } from 'react-icons/pi'; +import { useGetModelConfigQuery } from 'services/api/endpoints/models'; type LoRACardProps = { lora: LoRA; }; export const LoRACard = memo((props: LoRACardProps) => { - const dispatch = useAppDispatch(); const { lora } = props; + const dispatch = useAppDispatch(); + const { data: loraConfig } = useGetModelConfigQuery(lora.model.key); const handleChange = useCallback( (v: number) => { - dispatch(loraWeightChanged({ id: lora.id, weight: v })); + dispatch(loraWeightChanged({ key: lora.model.key, weight: v })); }, - [dispatch, lora.id] + [dispatch, lora.model.key] ); const handleSetLoraToggle = useCallback(() => { - dispatch(loraIsEnabledChanged({ id: lora.id, isEnabled: !lora.isEnabled })); - }, [dispatch, lora.id, lora.isEnabled]); + dispatch(loraIsEnabledChanged({ key: lora.model.key, isEnabled: !lora.isEnabled })); + }, [dispatch, lora.model.key, lora.isEnabled]); const handleRemoveLora = useCallback(() => { - dispatch(loraRemoved(lora.id)); - }, [dispatch, lora.id]); + dispatch(loraRemoved(lora.model.key)); + }, [dispatch, lora.model.key]); return ( - {lora.model_name} + {loraConfig?.name ?? 
lora.model.key.substring(0, 8)} @@ -57,29 +60,31 @@ export const LoRACard = memo((props: LoRACardProps) => { - - - - + + + + + + ); }); diff --git a/invokeai/frontend/web/src/features/lora/components/LoRAList.tsx b/invokeai/frontend/web/src/features/lora/components/LoRAList.tsx index 9f37454d16..68d259a852 100644 --- a/invokeai/frontend/web/src/features/lora/components/LoRAList.tsx +++ b/invokeai/frontend/web/src/features/lora/components/LoRAList.tsx @@ -18,7 +18,7 @@ export const LoRAList = memo(() => { return ( {lorasArray.map((lora) => ( - + ))} ); diff --git a/invokeai/frontend/web/src/features/lora/components/LoRASelect.tsx b/invokeai/frontend/web/src/features/lora/components/LoRASelect.tsx index 30ef99d2f7..e7d40c5eaf 100644 --- a/invokeai/frontend/web/src/features/lora/components/LoRASelect.tsx +++ b/invokeai/frontend/web/src/features/lora/components/LoRASelect.tsx @@ -2,12 +2,13 @@ import type { ChakraProps } from '@invoke-ai/ui-library'; import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library'; import { createMemoizedSelector } from 'app/store/createMemoizedSelector'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; +import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover'; import { useGroupedModelCombobox } from 'common/hooks/useGroupedModelCombobox'; import { loraAdded, selectLoraSlice } from 'features/lora/store/loraSlice'; import { memo, useCallback, useMemo } from 'react'; import { useTranslation } from 'react-i18next'; -import type { LoRAModelConfigEntity } from 'services/api/endpoints/models'; import { useGetLoRAModelsQuery } from 'services/api/endpoints/models'; +import type { LoRAModelConfig } from 'services/api/types'; const selectAddedLoRAs = createMemoizedSelector(selectLoraSlice, (lora) => lora.loras); @@ -16,17 +17,17 @@ const LoRASelect = () => { const { data, isLoading } = useGetLoRAModelsQuery(); const { t } = useTranslation(); const addedLoRAs = useAppSelector(selectAddedLoRAs); - const currentBaseModel = useAppSelector((s) => s.generation.model?.base_model); + const currentBaseModel = useAppSelector((s) => s.generation.model?.base); - const getIsDisabled = (lora: LoRAModelConfigEntity): boolean => { - const isCompatible = currentBaseModel === lora.base_model; - const isAdded = Boolean(addedLoRAs[lora.id]); + const getIsDisabled = (lora: LoRAModelConfig): boolean => { + const isCompatible = currentBaseModel === lora.base; + const isAdded = Boolean(addedLoRAs[lora.key]); const hasMainModel = Boolean(currentBaseModel); return !hasMainModel || !isCompatible || isAdded; }; const _onChange = useCallback( - (lora: LoRAModelConfigEntity | null) => { + (lora: LoRAModelConfig | null) => { if (!lora) { return; } @@ -57,7 +58,9 @@ const LoRASelect = () => { return ( - {t('models.lora')} + + {t('models.lora')} + = { isEnabled: true, }; -export type LoraState = { +type LoraState = { _version: 1; loras: Record; }; -export const initialLoraState: LoraState = { +const initialLoraState: LoraState = { _version: 1, loras: {}, }; @@ -29,40 +30,28 @@ export const loraSlice = createSlice({ name: 'lora', initialState: initialLoraState, reducers: { - loraAdded: (state, action: PayloadAction) => { - const { model_name, id, base_model } = action.payload; - state.loras[id] = { id, model_name, base_model, ...defaultLoRAConfig }; + loraAdded: (state, action: PayloadAction) => { + const model = getModelKeyAndBase(action.payload); + state.loras[model.key] = { ...defaultLoRAConfig, model }; }, - loraRecalled: 
(state, action: PayloadAction) => { - const { model_name, id, base_model, weight } = action.payload; - state.loras[id] = { id, model_name, base_model, weight, isEnabled: true }; + loraRecalled: (state, action: PayloadAction) => { + state.loras[action.payload.model.key] = action.payload; }, loraRemoved: (state, action: PayloadAction) => { - const id = action.payload; - delete state.loras[id]; + const key = action.payload; + delete state.loras[key]; }, - lorasCleared: (state) => { - state.loras = {}; - }, - loraWeightChanged: (state, action: PayloadAction<{ id: string; weight: number }>) => { - const { id, weight } = action.payload; - const lora = state.loras[id]; + loraWeightChanged: (state, action: PayloadAction<{ key: string; weight: number }>) => { + const { key, weight } = action.payload; + const lora = state.loras[key]; if (!lora) { return; } lora.weight = weight; }, - loraWeightReset: (state, action: PayloadAction) => { - const id = action.payload; - const lora = state.loras[id]; - if (!lora) { - return; - } - lora.weight = defaultLoRAConfig.weight; - }, - loraIsEnabledChanged: (state, action: PayloadAction>) => { - const { id, isEnabled } = action.payload; - const lora = state.loras[id]; + loraIsEnabledChanged: (state, action: PayloadAction<{ key: string; isEnabled: boolean }>) => { + const { key, isEnabled } = action.payload; + const lora = state.loras[key]; if (!lora) { return; } @@ -71,20 +60,12 @@ export const loraSlice = createSlice({ }, }); -export const { - loraAdded, - loraRemoved, - loraWeightChanged, - loraWeightReset, - loraIsEnabledChanged, - lorasCleared, - loraRecalled, -} = loraSlice.actions; +export const { loraAdded, loraRemoved, loraWeightChanged, loraIsEnabledChanged, loraRecalled } = loraSlice.actions; export const selectLoraSlice = (state: RootState) => state.lora; /* eslint-disable-next-line @typescript-eslint/no-explicit-any */ -export const migrateLoRAState = (state: any): any => { +const migrateLoRAState = (state: any): any => { if (!('_version' in state)) { state._version = 1; } diff --git a/invokeai/frontend/web/src/features/metadata/components/MetadataControlNets.tsx b/invokeai/frontend/web/src/features/metadata/components/MetadataControlNets.tsx new file mode 100644 index 0000000000..ee9618ff74 --- /dev/null +++ b/invokeai/frontend/web/src/features/metadata/components/MetadataControlNets.tsx @@ -0,0 +1,72 @@ +import { MetadataItemView } from 'features/metadata/components/MetadataItemView'; +import type { ControlNetConfigMetadata, MetadataHandlers } from 'features/metadata/types'; +import { handlers } from 'features/metadata/util/handlers'; +import { useCallback, useEffect, useMemo, useState } from 'react'; + +type Props = { + metadata: unknown; +}; + +export const MetadataControlNets = ({ metadata }: Props) => { + const [controlNets, setControlNets] = useState([]); + + useEffect(() => { + const parse = async () => { + try { + const parsed = await handlers.controlNets.parse(metadata); + setControlNets(parsed); + } catch (e) { + setControlNets([]); + } + }; + parse(); + }, [metadata]); + + const label = useMemo(() => handlers.controlNets.getLabel(), []); + + return ( + <> + {controlNets.map((controlNet) => ( + + ))} + + ); +}; + +const MetadataViewControlNet = ({ + label, + controlNet, + handlers, +}: { + label: string; + controlNet: ControlNetConfigMetadata; + handlers: MetadataHandlers; +}) => { + const onRecall = useCallback(() => { + if (!handlers.recallItem) { + return; + } + handlers.recallItem(controlNet, true); + }, [handlers, controlNet]); + + 
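// Editor's note (hypothetical consumer, not in this PR): after the loraSlice refactor
// above, entries are keyed by the model's key rather than a separate id, and display
// names are resolved on demand from the model config (see LoRACard's
// useGetModelConfigQuery). Driving the rekeyed slice looks like this:
import { useCallback } from 'react';

import { useAppDispatch } from 'app/store/storeHooks';
import type { LoRA } from 'features/lora/store/loraSlice';
import { loraRemoved, loraWeightChanged } from 'features/lora/store/loraSlice';

const useLoraRowActions = (lora: LoRA) => {
  const dispatch = useAppDispatch();
  const setWeight = useCallback(
    (weight: number) => dispatch(loraWeightChanged({ key: lora.model.key, weight })),
    [dispatch, lora.model.key]
  );
  const remove = useCallback(() => dispatch(loraRemoved(lora.model.key)), [dispatch, lora.model.key]);
  return { setWeight, remove };
};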
const [renderedValue, setRenderedValue] = useState(null); + useEffect(() => { + const _renderValue = async () => { + if (!handlers.renderItemValue) { + setRenderedValue(null); + return; + } + const rendered = await handlers.renderItemValue(controlNet); + setRenderedValue(rendered); + }; + + _renderValue(); + }, [handlers, controlNet]); + + return ; +}; diff --git a/invokeai/frontend/web/src/features/metadata/components/MetadataIPAdapters.tsx b/invokeai/frontend/web/src/features/metadata/components/MetadataIPAdapters.tsx new file mode 100644 index 0000000000..4da27de2cb --- /dev/null +++ b/invokeai/frontend/web/src/features/metadata/components/MetadataIPAdapters.tsx @@ -0,0 +1,67 @@ +import { MetadataItemView } from 'features/metadata/components/MetadataItemView'; +import type { IPAdapterConfigMetadata, MetadataHandlers } from 'features/metadata/types'; +import { handlers } from 'features/metadata/util/handlers'; +import { useCallback, useEffect, useMemo, useState } from 'react'; + +type Props = { + metadata: unknown; +}; + +export const MetadataIPAdapters = ({ metadata }: Props) => { + const [ipAdapters, setIPAdapters] = useState([]); + + useEffect(() => { + const parse = async () => { + try { + const parsed = await handlers.ipAdapters.parse(metadata); + setIPAdapters(parsed); + } catch (e) { + setIPAdapters([]); + } + }; + parse(); + }, [metadata]); + + const label = useMemo(() => handlers.ipAdapters.getLabel(), []); + + return ( + <> + {ipAdapters.map((ipAdapter) => ( + + ))} + + ); +}; + +const MetadataViewIPAdapter = ({ + label, + ipAdapter, + handlers, +}: { + label: string; + ipAdapter: IPAdapterConfigMetadata; + handlers: MetadataHandlers; +}) => { + const onRecall = useCallback(() => { + if (!handlers.recallItem) { + return; + } + handlers.recallItem(ipAdapter, true); + }, [handlers, ipAdapter]); + + const [renderedValue, setRenderedValue] = useState(null); + useEffect(() => { + const _renderValue = async () => { + if (!handlers.renderItemValue) { + setRenderedValue(null); + return; + } + const rendered = await handlers.renderItemValue(ipAdapter); + setRenderedValue(rendered); + }; + + _renderValue(); + }, [handlers, ipAdapter]); + + return ; +}; diff --git a/invokeai/frontend/web/src/features/metadata/components/MetadataItem.tsx b/invokeai/frontend/web/src/features/metadata/components/MetadataItem.tsx new file mode 100644 index 0000000000..66d101f458 --- /dev/null +++ b/invokeai/frontend/web/src/features/metadata/components/MetadataItem.tsx @@ -0,0 +1,33 @@ +import { typedMemo } from '@invoke-ai/ui-library'; +import { MetadataItemView } from 'features/metadata/components/MetadataItemView'; +import { useMetadataItem } from 'features/metadata/hooks/useMetadataItem'; +import type { MetadataHandlers } from 'features/metadata/types'; +import { MetadataParseFailedToken } from 'features/metadata/util/parsers'; + +type MetadataItemProps = { + metadata: unknown; + handlers: MetadataHandlers; + direction?: 'row' | 'column'; +}; + +const _MetadataItem = typedMemo(({ metadata, handlers, direction = 'row' }: MetadataItemProps) => { + const { label, isDisabled, value, renderedValue, onRecall } = useMetadataItem(metadata, handlers); + + if (value === MetadataParseFailedToken) { + return null; + } + + return ( + + ); +}); + +export const MetadataItem = typedMemo(_MetadataItem); + +MetadataItem.displayName = 'MetadataItem'; diff --git a/invokeai/frontend/web/src/features/metadata/components/MetadataItemView.tsx b/invokeai/frontend/web/src/features/metadata/components/MetadataItemView.tsx new file 
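// Editor's sketch (hypothetical viewer, not from this PR): MetadataItem is generic over
// its handler, so a metadata panel renders one row per parameter by pairing the raw
// metadata with the matching handler -- this is what the rewritten ImageMetadataActions
// reduces to. Rows whose parse rejects resolve to MetadataParseFailedToken and render
// null, so absent parameters simply disappear from the list.
import { MetadataItem } from 'features/metadata/components/MetadataItem';
import { handlers } from 'features/metadata/util/handlers';

const ExampleMetadataRows = ({ metadata }: { metadata: unknown }) => (
  <>
    <MetadataItem metadata={metadata} handlers={handlers.seed} />
    <MetadataItem metadata={metadata} handlers={handlers.cfgScale} />
    <MetadataItem metadata={metadata} handlers={handlers.model} />
  </>
);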
mode 100644 index 0000000000..14ccc4ae2b --- /dev/null +++ b/invokeai/frontend/web/src/features/metadata/components/MetadataItemView.tsx @@ -0,0 +1,29 @@ +import { Flex, Text } from '@invoke-ai/ui-library'; +import { RecallButton } from 'features/metadata/components/RecallButton'; +import { memo } from 'react'; + +type MetadataItemViewProps = { + onRecall: () => void; + label: string; + renderedValue: React.ReactNode; + isDisabled: boolean; + direction?: 'row' | 'column'; +}; + +export const MetadataItemView = memo( + ({ label, onRecall, isDisabled, renderedValue, direction = 'row' }: MetadataItemViewProps) => { + return ( + + {onRecall && } + + + {label}: + + {renderedValue} + + + ); + } +); + +MetadataItemView.displayName = 'MetadataItemView'; diff --git a/invokeai/frontend/web/src/features/metadata/components/MetadataLoRAs.tsx b/invokeai/frontend/web/src/features/metadata/components/MetadataLoRAs.tsx new file mode 100644 index 0000000000..7e78985c49 --- /dev/null +++ b/invokeai/frontend/web/src/features/metadata/components/MetadataLoRAs.tsx @@ -0,0 +1,68 @@ +import type { LoRA } from 'features/lora/store/loraSlice'; +import { MetadataItemView } from 'features/metadata/components/MetadataItemView'; +import type { MetadataHandlers } from 'features/metadata/types'; +import { handlers } from 'features/metadata/util/handlers'; +import { useCallback, useEffect, useMemo, useState } from 'react'; + +type Props = { + metadata: unknown; +}; + +export const MetadataLoRAs = ({ metadata }: Props) => { + const [loras, setLoRAs] = useState([]); + + useEffect(() => { + const parse = async () => { + try { + const parsed = await handlers.loras.parse(metadata); + setLoRAs(parsed); + } catch (e) { + setLoRAs([]); + } + }; + parse(); + }, [metadata]); + + const label = useMemo(() => handlers.loras.getLabel(), []); + + return ( + <> + {loras.map((lora) => ( + + ))} + + ); +}; + +const MetadataViewLoRA = ({ + label, + lora, + handlers, +}: { + label: string; + lora: LoRA; + handlers: MetadataHandlers; +}) => { + const onRecall = useCallback(() => { + if (!handlers.recallItem) { + return; + } + handlers.recallItem(lora, true); + }, [handlers, lora]); + + const [renderedValue, setRenderedValue] = useState(null); + useEffect(() => { + const _renderValue = async () => { + if (!handlers.renderItemValue) { + setRenderedValue(null); + return; + } + const rendered = await handlers.renderItemValue(lora); + setRenderedValue(rendered); + }; + + _renderValue(); + }, [handlers, lora]); + + return ; +}; diff --git a/invokeai/frontend/web/src/features/metadata/components/MetadataT2IAdapters.tsx b/invokeai/frontend/web/src/features/metadata/components/MetadataT2IAdapters.tsx new file mode 100644 index 0000000000..82575783e9 --- /dev/null +++ b/invokeai/frontend/web/src/features/metadata/components/MetadataT2IAdapters.tsx @@ -0,0 +1,72 @@ +import { MetadataItemView } from 'features/metadata/components/MetadataItemView'; +import type { MetadataHandlers, T2IAdapterConfigMetadata } from 'features/metadata/types'; +import { handlers } from 'features/metadata/util/handlers'; +import { useCallback, useEffect, useMemo, useState } from 'react'; + +type Props = { + metadata: unknown; +}; + +export const MetadataT2IAdapters = ({ metadata }: Props) => { + const [t2iAdapters, setT2IAdapters] = useState([]); + + useEffect(() => { + const parse = async () => { + try { + const parsed = await handlers.t2iAdapters.parse(metadata); + setT2IAdapters(parsed); + } catch (e) { + setT2IAdapters([]); + } + }; + parse(); + }, [metadata]); + + const 
label = useMemo(() => handlers.t2iAdapters.getLabel(), []); + + return ( + <> + {t2iAdapters.map((t2iAdapter) => ( + + ))} + + ); +}; + +const MetadataViewT2IAdapter = ({ + label, + t2iAdapter, + handlers, +}: { + label: string; + t2iAdapter: T2IAdapterConfigMetadata; + handlers: MetadataHandlers; +}) => { + const onRecall = useCallback(() => { + if (!handlers.recallItem) { + return; + } + handlers.recallItem(t2iAdapter, true); + }, [handlers, t2iAdapter]); + + const [renderedValue, setRenderedValue] = useState(null); + useEffect(() => { + const _renderValue = async () => { + if (!handlers.renderItemValue) { + setRenderedValue(null); + return; + } + const rendered = await handlers.renderItemValue(t2iAdapter); + setRenderedValue(rendered); + }; + + _renderValue(); + }, [handlers, t2iAdapter]); + + return ; +}; diff --git a/invokeai/frontend/web/src/features/metadata/components/RecallButton.tsx b/invokeai/frontend/web/src/features/metadata/components/RecallButton.tsx new file mode 100644 index 0000000000..c531481a89 --- /dev/null +++ b/invokeai/frontend/web/src/features/metadata/components/RecallButton.tsx @@ -0,0 +1,27 @@ +import type { IconButtonProps } from '@invoke-ai/ui-library'; +import { IconButton, Tooltip } from '@invoke-ai/ui-library'; +import { memo } from 'react'; +import { useTranslation } from 'react-i18next'; +import { PiArrowBendUpLeftBold } from 'react-icons/pi'; + +type MetadataItemProps = Omit & { + label: string; +}; + +export const RecallButton = memo(({ label, ...rest }: MetadataItemProps) => { + const { t } = useTranslation(); + + return ( + + } + size="xs" + variant="ghost" + {...rest} + /> + + ); +}); + +RecallButton.displayName = 'RecallButton'; diff --git a/invokeai/frontend/web/src/features/metadata/hooks/useMetadataItem.tsx b/invokeai/frontend/web/src/features/metadata/hooks/useMetadataItem.tsx new file mode 100644 index 0000000000..206d373aa7 --- /dev/null +++ b/invokeai/frontend/web/src/features/metadata/hooks/useMetadataItem.tsx @@ -0,0 +1,76 @@ +import { Text } from '@invoke-ai/ui-library'; +import type { MetadataHandlers } from 'features/metadata/types'; +import { MetadataParseFailedToken, MetadataParsePendingToken } from 'features/metadata/util/parsers'; +import { useCallback, useEffect, useMemo, useState } from 'react'; +import { useTranslation } from 'react-i18next'; + +const Pending = () => { + const { t } = useTranslation(); + return {t('common.loading')}; +}; + +const Failed = () => { + const { t } = useTranslation(); + return {t('metadata.parsingFailed')}; +}; + +export const useMetadataItem = (metadata: unknown, handlers: MetadataHandlers) => { + const [value, setValue] = useState( + MetadataParsePendingToken + ); + const [renderedValueInternal, setRenderedValueInternal] = useState(null); + + useEffect(() => { + const _parse = async () => { + try { + const parsed = await handlers.parse(metadata); + setValue(parsed); + } catch (e) { + setValue(MetadataParseFailedToken); + } + }; + _parse(); + }, [handlers, metadata]); + + const isDisabled = useMemo(() => value === MetadataParsePendingToken || value === MetadataParseFailedToken, [value]); + + const label = useMemo(() => handlers.getLabel(), [handlers]); + + useEffect(() => { + const _renderValue = async () => { + if (value === MetadataParsePendingToken) { + setRenderedValueInternal(null); + return; + } + if (value === MetadataParseFailedToken) { + setRenderedValueInternal(null); + return; + } + + const rendered = await handlers.renderValue(value); + + setRenderedValueInternal(rendered); + }; + + 
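// Editor's sketch (hypothetical hook, not in this PR): MetadataControlNets,
// MetadataIPAdapters, MetadataLoRAs and MetadataT2IAdapters above are four instances of
// one pattern -- parse array metadata asynchronously into local state, clearing it on
// failure. The shared shape, made generic:
import { useEffect, useState } from 'react';

const useParsedMetadataList = <T,>(metadata: unknown, parse: (metadata: unknown) => Promise<T[]>): T[] => {
  const [items, setItems] = useState<T[]>([]);
  useEffect(() => {
    parse(metadata)
      .then(setItems)
      .catch(() => setItems([]));
  }, [metadata, parse]);
  return items;
};

// e.g. const controlNets = useParsedMetadataList(metadata, handlers.controlNets.parse);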
_renderValue(); + }, [handlers, value]); + + const renderedValue = useMemo(() => { + if (value === MetadataParsePendingToken) { + return ; + } + if (value === MetadataParseFailedToken) { + return ; + } + return {renderedValueInternal}; + }, [renderedValueInternal, value]); + + const onRecall = useCallback(() => { + if (!handlers.recall || value === MetadataParsePendingToken || value === MetadataParseFailedToken) { + return null; + } + handlers.recall(value, true); + }, [handlers, value]); + + return { label, isDisabled, value, renderedValue, onRecall }; +}; diff --git a/invokeai/frontend/web/src/features/metadata/types.ts b/invokeai/frontend/web/src/features/metadata/types.ts new file mode 100644 index 0000000000..0791cdf449 --- /dev/null +++ b/invokeai/frontend/web/src/features/metadata/types.ts @@ -0,0 +1,137 @@ +import type { ControlNetConfig, IPAdapterConfig, T2IAdapterConfig } from 'features/controlAdapters/store/types'; +import type { O } from 'ts-toolbelt'; + +/** + * Renders a value of type T as a React node. + */ +export type MetadataRenderValueFunc = (value: T) => Promise; + +/** + * Gets the label of the current metadata item as a string. + */ +export type MetadataGetLabelFunc = () => string; + +/** + * A function that recalls a parsed and validated metadata value. + * + * @param value The value to recall. + */ +export type MetadataRecallFunc = (value: T) => void; + +/** + * An async function that receives metadata and returns a parsed value, throwing if the value is invalid or missing. + * + * The function receives an object of unknown type. It is responsible for extracting the relevant data from the metadata + * and returning a value of type T. + * + * The function should throw a MetadataParseError if the metadata is invalid or missing. + * + * @param metadata The metadata to parse. + * @returns A promise that resolves to the parsed value. + * @throws MetadataParseError if the metadata is invalid or missing. + */ +export type MetadataParseFunc = (metadata: unknown) => Promise; + +/** + * A function that performs additional validation logic before recalling a metadata value. It is called with a parsed + * value and should throw if the validation logic fails. + * + * This function is used in cases where some additional logic is required before recalling. For example, when recalling + * a LoRA, we need to check if it is compatible with the current base model. + * + * @param value The value to validate. + * @returns A promise that resolves to the validated value. + * @throws MetadataParseError if the value is invalid. + */ +export type MetadataValidateFunc = (value: T) => Promise; + +export type MetadataHandlers = { + /** + * Gets the label of the current metadata item as a string. + * + * @returns The label of the current metadata item. + */ + getLabel: MetadataGetLabelFunc; + /** + * An async function that receives metadata and returns a parsed metadata value. + * + * @param metadata The metadata to parse. + * @param withToast Whether to show a toast on success or failure. + * @returns A promise that resolves to the parsed value. + * @throws MetadataParseError if the metadata is invalid or missing. + */ + parse: (metadata: unknown, withToast?: boolean) => Promise; + /** + * An async function that receives a metadata item and returns a parsed metadata item value. + * + * This is only provided if the metadata value is an array. + * + * @param item The item to parse. It should be an item from the array. + * @param withToast Whether to show a toast on success or failure. 
+ * @returns A promise that resolves to the parsed value. + * @throws MetadataParseError if the metadata is invalid or missing. + */ + parseItem?: (item: unknown, withToast?: boolean) => Promise; + /** + * An async function that recalls a parsed metadata value. + * + * This function is only provided if the metadata value can be recalled. + * + * @param value The value to recall. + * @param withToast Whether to show a toast on success or failure. + * @returns A promise that resolves when the recall operation is complete. + */ + recall?: (value: TValue, withToast?: boolean) => Promise; + /** + * An async function that recalls a parsed metadata item value. + * + * This function is only provided if the metadata value is an array and the items can be recalled. + * + * @param item The item to recall. It should be an item from the array. + * @param withToast Whether to show a toast on success or failure. + * @returns A promise that resolves when the recall operation is complete. + */ + recallItem?: (item: TItem, withToast?: boolean) => Promise; + /** + * Renders a parsed metadata value as a React node. + * + * @param value The value to render. + * @returns The rendered value. + */ + renderValue: MetadataRenderValueFunc; + /** + * Renders a parsed metadata item value as a React node. + * + * @param item The item to render. + * @returns The rendered item. + */ + renderItemValue?: MetadataRenderValueFunc; +}; + +// TODO(psyche): The types for item handlers should be able to be inferred from the type of the value: +// type MetadataHandlersInferItem = TValue extends Array ? MetadataParseFunc : never +// While this works for the types as expected, I couldn't satisfy TS in the implementations of the handlers. + +type BuildMetadataHandlersArg = { + parser: MetadataParseFunc; + itemParser?: MetadataParseFunc; + recaller?: MetadataRecallFunc; + itemRecaller?: MetadataRecallFunc; + validator?: MetadataValidateFunc; + itemValidator?: MetadataValidateFunc; + getLabel: MetadataGetLabelFunc; + renderValue?: MetadataRenderValueFunc; + renderItemValue?: MetadataRenderValueFunc; +}; + +export type BuildMetadataHandlers = ( + arg: BuildMetadataHandlersArg +) => MetadataHandlers; + +export type ControlNetConfigMetadata = O.NonNullable; +export type T2IAdapterConfigMetadata = O.NonNullable; +export type IPAdapterConfigMetadata = O.NonNullable; +export type AnyControlAdapterConfigMetadata = + | ControlNetConfigMetadata + | T2IAdapterConfigMetadata + | IPAdapterConfigMetadata; diff --git a/invokeai/frontend/web/src/features/metadata/util/handlers.ts b/invokeai/frontend/web/src/features/metadata/util/handlers.ts new file mode 100644 index 0000000000..09d65b7d92 --- /dev/null +++ b/invokeai/frontend/web/src/features/metadata/util/handlers.ts @@ -0,0 +1,409 @@ +import { objectKeys } from 'common/util/objectKeys'; +import { toast } from 'common/util/toast'; +import type { LoRA } from 'features/lora/store/loraSlice'; +import type { + AnyControlAdapterConfigMetadata, + BuildMetadataHandlers, + MetadataGetLabelFunc, + MetadataHandlers, + MetadataParseFunc, + MetadataRecallFunc, + MetadataRenderValueFunc, + MetadataValidateFunc, +} from 'features/metadata/types'; +import { fetchModelConfig } from 'features/metadata/util/modelFetchingHelpers'; +import { validators } from 'features/metadata/util/validators'; +import type { ModelIdentifierWithBase } from 'features/nodes/types/common'; +import { t } from 'i18next'; + +import { parsers } from './parsers'; +import { recallers } from './recallers'; + +const renderModelConfigValue: 
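// Editor's sketch of the parse contract documented above. The real parsers live in
// features/metadata/util/parsers.ts, outside this hunk; the 'seed' shape here is
// illustrative only. A MetadataParseFunc extracts a single value from the unknown
// metadata object and rejects when it is missing or invalid:
import type { MetadataParseFunc } from 'features/metadata/types';

const parseSeed: MetadataParseFunc<number> = async (metadata) => {
  const seed = (metadata as { seed?: unknown } | null | undefined)?.seed;
  if (typeof seed !== 'number' || !Number.isInteger(seed) || seed < 0) {
    throw new Error('Metadata does not contain a valid seed');
  }
  return seed;
};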
MetadataRenderValueFunc = async (value) => { + try { + const modelConfig = await fetchModelConfig(value.key); + return `${modelConfig.name} (${modelConfig.base.toUpperCase()})`; + } catch { + return `${value.key} (${value.base.toUpperCase()})`; + } +}; +const renderLoRAValue: MetadataRenderValueFunc = async (value) => { + try { + const modelConfig = await fetchModelConfig(value.model.key); + return `${modelConfig.name} (${modelConfig.base.toUpperCase()}) - ${value.weight}`; + } catch { + return `${value.model.key} (${value.model.base.toUpperCase()}) - ${value.weight}`; + } +}; +const renderControlAdapterValue: MetadataRenderValueFunc = async (value) => { + try { + const modelConfig = await fetchModelConfig(value.model.key ?? 'none'); + return `${modelConfig.name} (${modelConfig.base.toUpperCase()}) - ${value.weight}`; + } catch { + return `${value.model.key} (${value.model.base.toUpperCase()}) - ${value.weight}`; + } +}; + +const parameterSetToast = (parameter: string, description?: string) => { + toast({ + title: t('toast.parameterSet', { parameter }), + description, + status: 'info', + duration: 2500, + isClosable: true, + }); +}; + +const parameterNotSetToast = (parameter: string, description?: string) => { + toast({ + title: t('toast.parameterNotSet', { parameter }), + description, + status: 'warning', + duration: 2500, + isClosable: true, + }); +}; + +// const allParameterSetToast = (description?: string) => { +// toast({ +// title: t('toast.parametersSet'), +// status: 'info', +// description, +// duration: 2500, +// isClosable: true, +// }); +// }; + +// const allParameterNotSetToast = (description?: string) => { +// toast({ +// title: t('toast.parametersNotSet'), +// status: 'warning', +// description, +// duration: 2500, +// isClosable: true, +// }); +// }; + +const buildParse = + (arg: { + parser: MetadataParseFunc; + getLabel: MetadataGetLabelFunc; + }): MetadataHandlers['parse'] => + async (metadata, withToast = false) => { + try { + const parsed = await arg.parser(metadata); + withToast && parameterSetToast(arg.getLabel()); + return parsed; + } catch (e) { + withToast && parameterNotSetToast(arg.getLabel(), (e as Error).message); + throw e; + } + }; + +const buildParseItem = + (arg: { + itemParser: MetadataParseFunc; + getLabel: MetadataGetLabelFunc; + }): MetadataHandlers['parseItem'] => + async (item, withToast = false) => { + try { + const parsed = await arg.itemParser(item); + withToast && parameterSetToast(arg.getLabel()); + return parsed; + } catch (e) { + withToast && parameterNotSetToast(arg.getLabel(), (e as Error).message); + throw e; + } + }; + +const buildRecall = + (arg: { + recaller: MetadataRecallFunc; + validator?: MetadataValidateFunc; + getLabel: MetadataGetLabelFunc; + }): NonNullable['recall']> => + async (value, withToast = false) => { + try { + arg.validator && (await arg.validator(value)); + await arg.recaller(value); + withToast && parameterSetToast(arg.getLabel()); + } catch (e) { + withToast && parameterNotSetToast(arg.getLabel(), (e as Error).message); + throw e; + } + }; + +const buildRecallItem = + (arg: { + itemRecaller: MetadataRecallFunc; + itemValidator?: MetadataValidateFunc; + getLabel: MetadataGetLabelFunc; + }): NonNullable['recallItem']> => + async (item, withToast = false) => { + try { + arg.itemValidator && (await arg.itemValidator(item)); + await arg.itemRecaller(item); + withToast && parameterSetToast(arg.getLabel()); + } catch (e) { + withToast && parameterNotSetToast(arg.getLabel(), (e as Error).message); + throw e; + } + }; + +const 
resolveToString = (value: unknown) => new Promise((resolve) => resolve(String(value))); + +const buildHandlers: BuildMetadataHandlers = ({ + getLabel, + parser, + itemParser, + recaller, + itemRecaller, + validator, + itemValidator, + renderValue, + renderItemValue, +}) => ({ + parse: buildParse({ parser, getLabel }), + parseItem: itemParser ? buildParseItem({ itemParser, getLabel }) : undefined, + recall: recaller ? buildRecall({ recaller, validator, getLabel }) : undefined, + recallItem: itemRecaller ? buildRecallItem({ itemRecaller, itemValidator, getLabel }) : undefined, + getLabel, + renderValue: renderValue ?? resolveToString, + renderItemValue: renderItemValue ?? resolveToString, +}); + +export const handlers = { + // Misc + createdBy: buildHandlers({ getLabel: () => t('metadata.createdBy'), parser: parsers.createdBy }), + generationMode: buildHandlers({ getLabel: () => t('metadata.generationMode'), parser: parsers.generationMode }), + + // Core parameters + cfgRescaleMultiplier: buildHandlers({ + getLabel: () => t('metadata.cfgRescaleMultiplier'), + parser: parsers.cfgRescaleMultiplier, + recaller: recallers.cfgRescaleMultiplier, + }), + cfgScale: buildHandlers({ + getLabel: () => t('metadata.cfgScale'), + parser: parsers.cfgScale, + recaller: recallers.cfgScale, + }), + height: buildHandlers({ getLabel: () => t('metadata.height'), parser: parsers.height, recaller: recallers.height }), + negativePrompt: buildHandlers({ + getLabel: () => t('metadata.negativePrompt'), + parser: parsers.negativePrompt, + recaller: recallers.negativePrompt, + }), + positivePrompt: buildHandlers({ + getLabel: () => t('metadata.positivePrompt'), + parser: parsers.positivePrompt, + recaller: recallers.positivePrompt, + }), + scheduler: buildHandlers({ + getLabel: () => t('metadata.scheduler'), + parser: parsers.scheduler, + recaller: recallers.scheduler, + }), + sdxlNegativeStylePrompt: buildHandlers({ + getLabel: () => t('sdxl.negStylePrompt'), + parser: parsers.sdxlNegativeStylePrompt, + recaller: recallers.sdxlNegativeStylePrompt, + }), + sdxlPositiveStylePrompt: buildHandlers({ + getLabel: () => t('sdxl.posStylePrompt'), + parser: parsers.sdxlPositiveStylePrompt, + recaller: recallers.sdxlPositiveStylePrompt, + }), + seed: buildHandlers({ getLabel: () => t('metadata.seed'), parser: parsers.seed, recaller: recallers.seed }), + steps: buildHandlers({ getLabel: () => t('metadata.steps'), parser: parsers.steps, recaller: recallers.steps }), + strength: buildHandlers({ + getLabel: () => t('metadata.strength'), + parser: parsers.strength, + recaller: recallers.strength, + }), + width: buildHandlers({ getLabel: () => t('metadata.width'), parser: parsers.width, recaller: recallers.width }), + + // HRF + hrfEnabled: buildHandlers({ + getLabel: () => t('hrf.metadata.enabled'), + parser: parsers.hrfEnabled, + recaller: recallers.hrfEnabled, + }), + hrfMethod: buildHandlers({ + getLabel: () => t('hrf.metadata.method'), + parser: parsers.hrfMethod, + recaller: recallers.hrfMethod, + }), + hrfStrength: buildHandlers({ + getLabel: () => t('hrf.metadata.strength'), + parser: parsers.hrfStrength, + recaller: recallers.hrfStrength, + }), + + // Refiner + refinerCFGScale: buildHandlers({ + getLabel: () => t('sdxl.cfgScale'), + parser: parsers.refinerCFGScale, + recaller: recallers.refinerCFGScale, + }), + refinerModel: buildHandlers({ + getLabel: () => t('sdxl.refinerModel'), + parser: parsers.refinerModel, + recaller: recallers.refinerModel, + validator: validators.refinerModel, + }), + refinerNegativeAestheticScore: 
buildHandlers({
+    getLabel: () => t('sdxl.negAestheticScore'),
+    parser: parsers.refinerNegativeAestheticScore,
+    recaller: recallers.refinerNegativeAestheticScore,
+  }),
+  refinerPositiveAestheticScore: buildHandlers({
+    getLabel: () => t('sdxl.posAestheticScore'),
+    parser: parsers.refinerPositiveAestheticScore,
+    recaller: recallers.refinerPositiveAestheticScore,
+  }),
+  refinerScheduler: buildHandlers({
+    getLabel: () => t('sdxl.scheduler'),
+    parser: parsers.refinerScheduler,
+    recaller: recallers.refinerScheduler,
+  }),
+  refinerStart: buildHandlers({
+    getLabel: () => t('sdxl.refiner_start'),
+    parser: parsers.refinerStart,
+    recaller: recallers.refinerStart,
+  }),
+  refinerSteps: buildHandlers({
+    getLabel: () => t('sdxl.refiner_steps'),
+    parser: parsers.refinerSteps,
+    recaller: recallers.refinerSteps,
+  }),
+
+  // Models
+  model: buildHandlers({
+    getLabel: () => t('metadata.model'),
+    parser: parsers.mainModel,
+    recaller: recallers.model,
+    renderValue: renderModelConfigValue,
+  }),
+  vae: buildHandlers({
+    getLabel: () => t('metadata.vae'),
+    parser: parsers.vaeModel,
+    recaller: recallers.vae,
+    renderValue: renderModelConfigValue,
+    validator: validators.vaeModel,
+  }),
+
+  // Arrays of models
+  controlNets: buildHandlers({
+    getLabel: () => t('common.controlNet'),
+    parser: parsers.controlNets,
+    itemParser: parsers.controlNet,
+    recaller: recallers.controlNets,
+    itemRecaller: recallers.controlNet,
+    validator: validators.controlNets,
+    itemValidator: validators.controlNet,
+    renderItemValue: renderControlAdapterValue,
+  }),
+  ipAdapters: buildHandlers({
+    getLabel: () => t('common.ipAdapter'),
+    parser: parsers.ipAdapters,
+    itemParser: parsers.ipAdapter,
+    recaller: recallers.ipAdapters,
+    itemRecaller: recallers.ipAdapter,
+    validator: validators.ipAdapters,
+    itemValidator: validators.ipAdapter,
+    renderItemValue: renderControlAdapterValue,
+  }),
+  loras: buildHandlers({
+    getLabel: () => t('models.lora'),
+    parser: parsers.loras,
+    itemParser: parsers.lora,
+    recaller: recallers.loras,
+    itemRecaller: recallers.lora,
+    validator: validators.loras,
+    itemValidator: validators.lora,
+    renderItemValue: renderLoRAValue,
+  }),
+  t2iAdapters: buildHandlers({
+    getLabel: () => t('common.t2iAdapter'),
+    parser: parsers.t2iAdapters,
+    itemParser: parsers.t2iAdapter,
+    recaller: recallers.t2iAdapters,
+    itemRecaller: recallers.t2iAdapter,
+    validator: validators.t2iAdapters,
+    itemValidator: validators.t2iAdapter,
+    renderItemValue: renderControlAdapterValue,
+  }),
+} as const;
+
+export const parseAndRecallPrompts = async (metadata: unknown) => {
+  const results = await Promise.allSettled([
+    handlers.positivePrompt.parse(metadata).then((positivePrompt) => {
+      if (!handlers.positivePrompt.recall) {
+        return;
+      }
+      handlers.positivePrompt?.recall(positivePrompt);
+    }),
+    handlers.negativePrompt.parse(metadata).then((negativePrompt) => {
+      if (!handlers.negativePrompt.recall) {
+        return;
+      }
+      handlers.negativePrompt?.recall(negativePrompt);
+    }),
+    handlers.sdxlPositiveStylePrompt.parse(metadata).then((sdxlPositiveStylePrompt) => {
+      if (!handlers.sdxlPositiveStylePrompt.recall) {
+        return;
+      }
+      handlers.sdxlPositiveStylePrompt?.recall(sdxlPositiveStylePrompt);
+    }),
+    handlers.sdxlNegativeStylePrompt.parse(metadata).then((sdxlNegativeStylePrompt) => {
+      if (!handlers.sdxlNegativeStylePrompt.recall) {
+        return;
+      }
+      handlers.sdxlNegativeStylePrompt?.recall(sdxlNegativeStylePrompt);
+    }),
+  ]);
+  if (results.some((result) => result.status === 'fulfilled')) {
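+    // At least one prompt field was parsed and recalled; show a single summary
+    // toast rather than one toast per field.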
+    parameterSetToast(t('metadata.allPrompts'));
+  }
+};
+
+export const parseAndRecallImageDimensions = async (metadata: unknown) => {
+  const results = await Promise.allSettled([
+    handlers.width.parse(metadata).then((width) => {
+      if (!handlers.width.recall) {
+        return;
+      }
+      handlers.width?.recall(width);
+    }),
+    handlers.height.parse(metadata).then((height) => {
+      if (!handlers.height.recall) {
+        return;
+      }
+      handlers.height?.recall(height);
+    }),
+  ]);
+  if (results.some((result) => result.status === 'fulfilled')) {
+    parameterSetToast(t('metadata.imageDimensions'));
+  }
+};
+
+export const parseAndRecallAllMetadata = async (metadata: unknown, skip: (keyof typeof handlers)[] = []) => {
+  const results = await Promise.allSettled(
+    objectKeys(handlers)
+      .filter((key) => !skip.includes(key))
+      .map((key) => {
+        const { parse, recall } = handlers[key];
+        return parse(metadata).then((value) => {
+          if (!recall) {
+            return;
+          }
+          /* @ts-expect-error The return type of parse and the input type of recall are guaranteed to be compatible. */
+          recall(value);
+        });
+      })
+  );
+  if (results.some((result) => result.status === 'fulfilled')) {
+    parameterSetToast(t('toast.parametersSet'));
+  }
+};
diff --git a/invokeai/frontend/web/src/features/metadata/util/modelFetchingHelpers.ts b/invokeai/frontend/web/src/features/metadata/util/modelFetchingHelpers.ts
new file mode 100644
index 0000000000..3c0745917a
--- /dev/null
+++ b/invokeai/frontend/web/src/features/metadata/util/modelFetchingHelpers.ts
@@ -0,0 +1,112 @@
+import { getStore } from 'app/store/nanostores/store';
+import type { ModelIdentifierWithBase } from 'features/nodes/types/common';
+import { isModelIdentifier, isModelIdentifierV2 } from 'features/nodes/types/common';
+import { modelsApi } from 'services/api/endpoints/models';
+import type { AnyModelConfig, BaseModelType, ModelType } from 'services/api/types';
+
+/**
+ * Raised when a model config is unable to be fetched.
+ */
+class ModelConfigNotFoundError extends Error {
+  /**
+   * Create ModelConfigNotFoundError
+   * @param {String} message
+   */
+  constructor(message: string) {
+    super(message);
+    this.name = this.constructor.name;
+  }
+}
+
+/**
+ * Raised when a fetched model config is of an unexpected type.
+ */
+export class InvalidModelConfigError extends Error {
+  /**
+   * Create InvalidModelConfigError
+   * @param {String} message
+   */
+  constructor(message: string) {
+    super(message);
+    this.name = this.constructor.name;
+  }
+}
+
+/**
+ * Fetches the model config for a given model key.
+ * @param key The model key.
+ * @returns A promise that resolves to the model config.
+ * @throws {ModelConfigNotFoundError} If the model config is unable to be fetched.
+ */
+export const fetchModelConfig = async (key: string): Promise<AnyModelConfig> => {
+  const { dispatch } = getStore();
+  try {
+    const req = dispatch(modelsApi.endpoints.getModelConfig.initiate(key));
+    req.unsubscribe();
+    return await req.unwrap();
+  } catch {
+    throw new ModelConfigNotFoundError(`Unable to retrieve model config for key ${key}`);
+  }
+};
+
+/**
+ * Fetches the model config for a given model name, base model, and model type. This provides backwards compatibility
+ * for MM1 model identifiers.
+ * @param name The model name.
+ * @param base The base model.
+ * @param type The model type.
+ * @returns A promise that resolves to the model config.
+ * @throws {ModelConfigNotFoundError} If the model config is unable to be fetched.
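+ * @example
+ * // Illustrative sketch - the attribute values here are hypothetical:
+ * const config = await fetchModelConfigByAttrs('stable-diffusion-v1-5', 'sd-1', 'main');
+ * // config.key is the MM2 key for the matched model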
+ */
+const fetchModelConfigByAttrs = async (name: string, base: BaseModelType, type: ModelType): Promise<AnyModelConfig> => {
+  const { dispatch } = getStore();
+  try {
+    const req = dispatch(modelsApi.endpoints.getModelConfigByAttrs.initiate({ name, base, type }));
+    req.unsubscribe();
+    return await req.unwrap();
+  } catch {
+    throw new ModelConfigNotFoundError(`Unable to retrieve model config for name/base/type ${name}/${base}/${type}`);
+  }
+};
+
+/**
+ * Fetches the model config for a given model key and type, and ensures that the model config is of a specific type.
+ * @param key The model key.
+ * @param typeGuard A type guard function that checks if the model config is of the expected type.
+ * @returns A promise that resolves to the model config. The model config is guaranteed to be of the expected type.
+ * @throws {InvalidModelConfigError} If the model config is unable to be fetched or is of an unexpected type.
+ */
+export const fetchModelConfigWithTypeGuard = async <T extends AnyModelConfig>(
+  key: string,
+  typeGuard: (config: AnyModelConfig) => config is T
+) => {
+  const modelConfig = await fetchModelConfig(key);
+  if (!typeGuard(modelConfig)) {
+    throw new InvalidModelConfigError(`Invalid model type for key ${key}: ${modelConfig.type}`);
+  }
+  return modelConfig;
+};
+
+/**
+ * Fetches the model key from a model identifier. This includes fetching the key for MM1 format model identifiers.
+ * @param modelIdentifier The model identifier. The MM2 format `{key: string}` simply extracts the key. The MM1 format
+ * `{model_name: string, base_model: BaseModelType}` must do a network request to fetch the key.
+ * @param type The type of model to fetch. This is used to fetch the key for MM1 format model identifiers.
+ * @param message An optional custom message to include in the error if the model identifier is invalid.
+ * @returns A promise that resolves to the model key.
+ * @throws {InvalidModelConfigError} If the model identifier is invalid.
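+ * @example
+ * // Illustrative sketch - both identifier formats resolve to a key (values hypothetical):
+ * await getModelKey({ key: 'abc123' }, 'main'); // MM2: returns 'abc123' directly
+ * await getModelKey({ model_name: 'my-model', base_model: 'sd-1' }, 'main'); // MM1: resolved via fetchModelConfigByAttrs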
+ */
+export const getModelKey = async (modelIdentifier: unknown, type: ModelType, message?: string): Promise<string> => {
+  if (isModelIdentifier(modelIdentifier)) {
+    return modelIdentifier.key;
+  }
+  if (isModelIdentifierV2(modelIdentifier)) {
+    return (await fetchModelConfigByAttrs(modelIdentifier.model_name, modelIdentifier.base_model, type)).key;
+  }
+  throw new InvalidModelConfigError(message || `Invalid model identifier: ${modelIdentifier}`);
+};
+
+export const getModelKeyAndBase = (modelConfig: AnyModelConfig): ModelIdentifierWithBase => ({
+  key: modelConfig.key,
+  base: modelConfig.base,
+});
diff --git a/invokeai/frontend/web/src/features/metadata/util/parsers.ts b/invokeai/frontend/web/src/features/metadata/util/parsers.ts
new file mode 100644
index 0000000000..30ec37991c
--- /dev/null
+++ b/invokeai/frontend/web/src/features/metadata/util/parsers.ts
@@ -0,0 +1,413 @@
+import { CONTROLNET_PROCESSORS } from 'features/controlAdapters/store/constants';
+import {
+  initialControlNet,
+  initialIPAdapter,
+  initialT2IAdapter,
+} from 'features/controlAdapters/util/buildControlAdapter';
+import type { LoRA } from 'features/lora/store/loraSlice';
+import { defaultLoRAConfig } from 'features/lora/store/loraSlice';
+import type {
+  ControlNetConfigMetadata,
+  IPAdapterConfigMetadata,
+  MetadataParseFunc,
+  T2IAdapterConfigMetadata,
+} from 'features/metadata/types';
+import { fetchModelConfigWithTypeGuard, getModelKey } from 'features/metadata/util/modelFetchingHelpers';
+import {
+  zControlField,
+  zIPAdapterField,
+  zModelIdentifierWithBase,
+  zT2IAdapterField,
+} from 'features/nodes/types/common';
+import type {
+  ParameterCFGRescaleMultiplier,
+  ParameterCFGScale,
+  ParameterHeight,
+  ParameterHRFEnabled,
+  ParameterHRFMethod,
+  ParameterModel,
+  ParameterNegativePrompt,
+  ParameterNegativeStylePromptSDXL,
+  ParameterPositivePrompt,
+  ParameterPositiveStylePromptSDXL,
+  ParameterScheduler,
+  ParameterSDXLRefinerModel,
+  ParameterSDXLRefinerNegativeAestheticScore,
+  ParameterSDXLRefinerPositiveAestheticScore,
+  ParameterSDXLRefinerStart,
+  ParameterSeed,
+  ParameterSteps,
+  ParameterStrength,
+  ParameterVAEModel,
+  ParameterWidth,
+} from 'features/parameters/types/parameterSchemas';
+import {
+  isParameterCFGRescaleMultiplier,
+  isParameterCFGScale,
+  isParameterHeight,
+  isParameterHRFEnabled,
+  isParameterHRFMethod,
+  isParameterLoRAWeight,
+  isParameterNegativePrompt,
+  isParameterNegativeStylePromptSDXL,
+  isParameterPositivePrompt,
+  isParameterPositiveStylePromptSDXL,
+  isParameterScheduler,
+  isParameterSDXLRefinerNegativeAestheticScore,
+  isParameterSDXLRefinerPositiveAestheticScore,
+  isParameterSDXLRefinerStart,
+  isParameterSeed,
+  isParameterSteps,
+  isParameterStrength,
+  isParameterWidth,
+} from 'features/parameters/types/parameterSchemas';
+import { get, isArray, isString } from 'lodash-es';
+import {
+  isControlNetModelConfig,
+  isIPAdapterModelConfig,
+  isLoRAModelConfig,
+  isNonRefinerMainModelConfig,
+  isRefinerMainModelModelConfig,
+  isT2IAdapterModelConfig,
+  isVAEModelConfig,
+} from 'services/api/types';
+import { v4 as uuidv4 } from 'uuid';
+
+export const MetadataParsePendingToken = Symbol('pending');
+export const MetadataParseFailedToken = Symbol('failed');
+/**
+ * Raised when metadata parsing fails.
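+ * Parsers reject with this error when a property is missing or fails its type
+ * guard; the handler layer catches it and shows the "parameter not set" toast.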
+ */
+class MetadataParseError extends Error {
+  /**
+   * Create MetadataParseError
+   * @param {String} message
+   */
+  constructor(message: string) {
+    super(message);
+    this.name = this.constructor.name;
+  }
+}
+
+/**
+ * An async function that gets a property from an object and validates its type using a type guard. If the property is
+ * missing or invalid, the function should throw a MetadataParseError.
+ * @param obj The object to get the property from.
+ * @param property The property to get.
+ * @param typeGuard A type guard function to check the type of the property. Provide `undefined` to opt out of type
+ * validation and always return the property value.
+ * @returns A promise that resolves to the property value if it exists and is of the expected type.
+ * @throws MetadataParseError if a type guard is provided and the property is not of the expected type.
+ */
+const getProperty = <T = unknown>(
+  obj: unknown,
+  property: string,
+  typeGuard: (val: unknown) => val is T = (val: unknown): val is T => true
+): Promise<T> => {
+  return new Promise<T>((resolve, reject) => {
+    const val = get(obj, property) as unknown;
+    if (typeGuard(val)) {
+      resolve(val);
+    }
+    reject(new MetadataParseError(`Property ${property} is not of expected type`));
+  });
+};
+
+const parseCreatedBy: MetadataParseFunc<string> = (metadata) => getProperty(metadata, 'created_by', isString);
+
+const parseGenerationMode: MetadataParseFunc<string> = (metadata) => getProperty(metadata, 'generation_mode', isString);
+
+const parsePositivePrompt: MetadataParseFunc<ParameterPositivePrompt> = (metadata) =>
+  getProperty(metadata, 'positive_prompt', isParameterPositivePrompt);
+
+const parseNegativePrompt: MetadataParseFunc<ParameterNegativePrompt> = (metadata) =>
+  getProperty(metadata, 'negative_prompt', isParameterNegativePrompt);
+
+const parseSDXLPositiveStylePrompt: MetadataParseFunc<ParameterPositiveStylePromptSDXL> = (metadata) =>
+  getProperty(metadata, 'positive_style_prompt', isParameterPositiveStylePromptSDXL);
+
+const parseSDXLNegativeStylePrompt: MetadataParseFunc<ParameterNegativeStylePromptSDXL> = (metadata) =>
+  getProperty(metadata, 'negative_style_prompt', isParameterNegativeStylePromptSDXL);
+
+const parseSeed: MetadataParseFunc<ParameterSeed> = (metadata) => getProperty(metadata, 'seed', isParameterSeed);
+
+const parseCFGScale: MetadataParseFunc<ParameterCFGScale> = (metadata) =>
+  getProperty(metadata, 'cfg_scale', isParameterCFGScale);
+
+const parseCFGRescaleMultiplier: MetadataParseFunc<ParameterCFGRescaleMultiplier> = (metadata) =>
+  getProperty(metadata, 'cfg_rescale_multiplier', isParameterCFGRescaleMultiplier);
+
+const parseScheduler: MetadataParseFunc<ParameterScheduler> = (metadata) =>
+  getProperty(metadata, 'scheduler', isParameterScheduler);
+
+const parseWidth: MetadataParseFunc<ParameterWidth> = (metadata) => getProperty(metadata, 'width', isParameterWidth);
+
+const parseHeight: MetadataParseFunc<ParameterHeight> = (metadata) =>
+  getProperty(metadata, 'height', isParameterHeight);
+
+const parseSteps: MetadataParseFunc<ParameterSteps> = (metadata) => getProperty(metadata, 'steps', isParameterSteps);
+
+const parseStrength: MetadataParseFunc<ParameterStrength> = (metadata) =>
+  getProperty(metadata, 'strength', isParameterStrength);
+
+const parseHRFEnabled: MetadataParseFunc<ParameterHRFEnabled> = (metadata) =>
+  getProperty(metadata, 'hrf_enabled', isParameterHRFEnabled);
+
+const parseHRFStrength: MetadataParseFunc<ParameterStrength> = (metadata) =>
+  getProperty(metadata, 'hrf_strength', isParameterStrength);
+
+const parseHRFMethod: MetadataParseFunc<ParameterHRFMethod> = (metadata) =>
+  getProperty(metadata, 'hrf_method', isParameterHRFMethod);
+
+const parseRefinerSteps: MetadataParseFunc<ParameterSteps> = (metadata) =>
+  getProperty(metadata, 'refiner_steps', isParameterSteps);
+
+const parseRefinerCFGScale: MetadataParseFunc<ParameterCFGScale> = (metadata) =>
getProperty(metadata, 'refiner_cfg_scale', isParameterCFGScale); + +const parseRefinerScheduler: MetadataParseFunc = (metadata) => + getProperty(metadata, 'refiner_scheduler', isParameterScheduler); + +const parseRefinerPositiveAestheticScore: MetadataParseFunc = (metadata) => + getProperty(metadata, 'refiner_positive_aesthetic_score', isParameterSDXLRefinerPositiveAestheticScore); + +const parseRefinerNegativeAestheticScore: MetadataParseFunc = (metadata) => + getProperty(metadata, 'refiner_negative_aesthetic_score', isParameterSDXLRefinerNegativeAestheticScore); + +const parseRefinerStart: MetadataParseFunc = (metadata) => + getProperty(metadata, 'refiner_start', isParameterSDXLRefinerStart); + +const parseMainModel: MetadataParseFunc = async (metadata) => { + const model = await getProperty(metadata, 'model', undefined); + const key = await getModelKey(model, 'main'); + const mainModelConfig = await fetchModelConfigWithTypeGuard(key, isNonRefinerMainModelConfig); + const modelIdentifier = zModelIdentifierWithBase.parse(mainModelConfig); + return modelIdentifier; +}; + +const parseRefinerModel: MetadataParseFunc = async (metadata) => { + const refiner_model = await getProperty(metadata, 'refiner_model', undefined); + const key = await getModelKey(refiner_model, 'main'); + const refinerModelConfig = await fetchModelConfigWithTypeGuard(key, isRefinerMainModelModelConfig); + const modelIdentifier = zModelIdentifierWithBase.parse(refinerModelConfig); + return modelIdentifier; +}; + +const parseVAEModel: MetadataParseFunc = async (metadata) => { + const vae = await getProperty(metadata, 'vae', undefined); + const key = await getModelKey(vae, 'vae'); + const vaeModelConfig = await fetchModelConfigWithTypeGuard(key, isVAEModelConfig); + const modelIdentifier = zModelIdentifierWithBase.parse(vaeModelConfig); + return modelIdentifier; +}; + +const parseLoRA: MetadataParseFunc = async (metadataItem) => { + // Previously, the LoRA model identifier parts were stored in the LoRA metadata: `{key: ..., weight: 0.75}` + const modelV1 = await getProperty(metadataItem, 'lora', undefined); + // Now, the LoRA model is stored in a `model` property of the LoRA metadata: `{model: {key: ...}, weight: 0.75}` + const modelV2 = await getProperty(metadataItem, 'model', undefined); + const weight = await getProperty(metadataItem, 'weight', undefined); + const key = await getModelKey(modelV2 ?? modelV1, 'lora'); + const loraModelConfig = await fetchModelConfigWithTypeGuard(key, isLoRAModelConfig); + + return { + model: zModelIdentifierWithBase.parse(loraModelConfig), + weight: isParameterLoRAWeight(weight) ? 
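+      // Fall back to the default weight if the metadata value fails the LoRA weight type guard.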
weight : defaultLoRAConfig.weight, + isEnabled: true, + }; +}; + +const parseAllLoRAs: MetadataParseFunc = async (metadata) => { + const lorasRaw = await getProperty(metadata, 'loras', isArray); + const parseResults = await Promise.allSettled(lorasRaw.map((lora) => parseLoRA(lora))); + const loras = parseResults + .filter((result): result is PromiseFulfilledResult => result.status === 'fulfilled') + .map((result) => result.value); + return loras; +}; + +const parseControlNet: MetadataParseFunc = async (metadataItem) => { + const control_model = await getProperty(metadataItem, 'control_model'); + const key = await getModelKey(control_model, 'controlnet'); + const controlNetModel = await fetchModelConfigWithTypeGuard(key, isControlNetModelConfig); + + const image = zControlField.shape.image.nullish().catch(null).parse(getProperty(metadataItem, 'image')); + const control_weight = zControlField.shape.control_weight + .nullish() + .catch(null) + .parse(getProperty(metadataItem, 'control_weight')); + const begin_step_percent = zControlField.shape.begin_step_percent + .nullish() + .catch(null) + .parse(getProperty(metadataItem, 'begin_step_percent')); + const end_step_percent = zControlField.shape.end_step_percent + .nullish() + .catch(null) + .parse(getProperty(metadataItem, 'end_step_percent')); + const control_mode = zControlField.shape.control_mode + .nullish() + .catch(null) + .parse(getProperty(metadataItem, 'control_mode')); + const resize_mode = zControlField.shape.resize_mode + .nullish() + .catch(null) + .parse(getProperty(metadataItem, 'resize_mode')); + + const processorType = 'none'; + const processorNode = CONTROLNET_PROCESSORS.none.default; + + const controlNet: ControlNetConfigMetadata = { + type: 'controlnet', + isEnabled: true, + model: zModelIdentifierWithBase.parse(controlNetModel), + weight: typeof control_weight === 'number' ? control_weight : initialControlNet.weight, + beginStepPct: begin_step_percent ?? initialControlNet.beginStepPct, + endStepPct: end_step_percent ?? initialControlNet.endStepPct, + controlMode: control_mode ?? initialControlNet.controlMode, + resizeMode: resize_mode ?? initialControlNet.resizeMode, + controlImage: image?.image_name ?? null, + processedControlImage: image?.image_name ?? 
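+    // Metadata stores only the original control image, so it is reused as the
+    // processed image (no processor is recorded, hence processorType 'none').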
null, + processorType, + processorNode, + shouldAutoConfig: true, + id: uuidv4(), + }; + + return controlNet; +}; + +const parseAllControlNets: MetadataParseFunc = async (metadata) => { + const controlNetsRaw = await getProperty(metadata, 'controlnets', isArray); + const parseResults = await Promise.allSettled(controlNetsRaw.map((cn) => parseControlNet(cn))); + const controlNets = parseResults + .filter((result): result is PromiseFulfilledResult => result.status === 'fulfilled') + .map((result) => result.value); + return controlNets; +}; + +const parseT2IAdapter: MetadataParseFunc = async (metadataItem) => { + const t2i_adapter_model = await getProperty(metadataItem, 't2i_adapter_model'); + const key = await getModelKey(t2i_adapter_model, 't2i_adapter'); + const t2iAdapterModel = await fetchModelConfigWithTypeGuard(key, isT2IAdapterModelConfig); + + const image = zT2IAdapterField.shape.image.nullish().catch(null).parse(getProperty(metadataItem, 'image')); + const weight = zT2IAdapterField.shape.weight.nullish().catch(null).parse(getProperty(metadataItem, 'weight')); + const begin_step_percent = zT2IAdapterField.shape.begin_step_percent + .nullish() + .catch(null) + .parse(getProperty(metadataItem, 'begin_step_percent')); + const end_step_percent = zT2IAdapterField.shape.end_step_percent + .nullish() + .catch(null) + .parse(getProperty(metadataItem, 'end_step_percent')); + const resize_mode = zT2IAdapterField.shape.resize_mode + .nullish() + .catch(null) + .parse(getProperty(metadataItem, 'resize_mode')); + + const processorType = 'none'; + const processorNode = CONTROLNET_PROCESSORS.none.default; + + const t2iAdapter: T2IAdapterConfigMetadata = { + type: 't2i_adapter', + isEnabled: true, + model: zModelIdentifierWithBase.parse(t2iAdapterModel), + weight: typeof weight === 'number' ? weight : initialT2IAdapter.weight, + beginStepPct: begin_step_percent ?? initialT2IAdapter.beginStepPct, + endStepPct: end_step_percent ?? initialT2IAdapter.endStepPct, + resizeMode: resize_mode ?? initialT2IAdapter.resizeMode, + controlImage: image?.image_name ?? null, + processedControlImage: image?.image_name ?? 
null, + processorType, + processorNode, + shouldAutoConfig: true, + id: uuidv4(), + }; + + return t2iAdapter; +}; + +const parseAllT2IAdapters: MetadataParseFunc = async (metadata) => { + const t2iAdaptersRaw = await getProperty(metadata, 't2iAdapters', isArray); + const parseResults = await Promise.allSettled(t2iAdaptersRaw.map((t2iAdapter) => parseT2IAdapter(t2iAdapter))); + const t2iAdapters = parseResults + .filter((result): result is PromiseFulfilledResult => result.status === 'fulfilled') + .map((result) => result.value); + return t2iAdapters; +}; + +const parseIPAdapter: MetadataParseFunc = async (metadataItem) => { + const ip_adapter_model = await getProperty(metadataItem, 'ip_adapter_model'); + const key = await getModelKey(ip_adapter_model, 'ip_adapter'); + const ipAdapterModel = await fetchModelConfigWithTypeGuard(key, isIPAdapterModelConfig); + + const image = zIPAdapterField.shape.image.nullish().catch(null).parse(getProperty(metadataItem, 'image')); + const weight = zIPAdapterField.shape.weight.nullish().catch(null).parse(getProperty(metadataItem, 'weight')); + const begin_step_percent = zIPAdapterField.shape.begin_step_percent + .nullish() + .catch(null) + .parse(getProperty(metadataItem, 'begin_step_percent')); + const end_step_percent = zIPAdapterField.shape.end_step_percent + .nullish() + .catch(null) + .parse(getProperty(metadataItem, 'end_step_percent')); + + const ipAdapter: IPAdapterConfigMetadata = { + id: uuidv4(), + type: 'ip_adapter', + isEnabled: true, + model: zModelIdentifierWithBase.parse(ipAdapterModel), + controlImage: image?.image_name ?? null, + weight: weight ?? initialIPAdapter.weight, + beginStepPct: begin_step_percent ?? initialIPAdapter.beginStepPct, + endStepPct: end_step_percent ?? initialIPAdapter.endStepPct, + }; + + return ipAdapter; +}; + +const parseAllIPAdapters: MetadataParseFunc = async (metadata) => { + const ipAdaptersRaw = await getProperty(metadata, 'ipAdapters', isArray); + const parseResults = await Promise.allSettled(ipAdaptersRaw.map((ipAdapter) => parseIPAdapter(ipAdapter))); + const ipAdapters = parseResults + .filter((result): result is PromiseFulfilledResult => result.status === 'fulfilled') + .map((result) => result.value); + return ipAdapters; +}; + +export const parsers = { + createdBy: parseCreatedBy, + generationMode: parseGenerationMode, + positivePrompt: parsePositivePrompt, + negativePrompt: parseNegativePrompt, + sdxlPositiveStylePrompt: parseSDXLPositiveStylePrompt, + sdxlNegativeStylePrompt: parseSDXLNegativeStylePrompt, + seed: parseSeed, + cfgScale: parseCFGScale, + cfgRescaleMultiplier: parseCFGRescaleMultiplier, + scheduler: parseScheduler, + width: parseWidth, + height: parseHeight, + steps: parseSteps, + strength: parseStrength, + hrfEnabled: parseHRFEnabled, + hrfStrength: parseHRFStrength, + hrfMethod: parseHRFMethod, + refinerSteps: parseRefinerSteps, + refinerCFGScale: parseRefinerCFGScale, + refinerScheduler: parseRefinerScheduler, + refinerPositiveAestheticScore: parseRefinerPositiveAestheticScore, + refinerNegativeAestheticScore: parseRefinerNegativeAestheticScore, + refinerStart: parseRefinerStart, + mainModel: parseMainModel, + refinerModel: parseRefinerModel, + vaeModel: parseVAEModel, + lora: parseLoRA, + loras: parseAllLoRAs, + controlNet: parseControlNet, + controlNets: parseAllControlNets, + t2iAdapter: parseT2IAdapter, + t2iAdapters: parseAllT2IAdapters, + ipAdapter: parseIPAdapter, + ipAdapters: parseAllIPAdapters, +} as const; diff --git 
a/invokeai/frontend/web/src/features/metadata/util/recallers.ts b/invokeai/frontend/web/src/features/metadata/util/recallers.ts new file mode 100644 index 0000000000..f6036fe0cf --- /dev/null +++ b/invokeai/frontend/web/src/features/metadata/util/recallers.ts @@ -0,0 +1,241 @@ +import { getStore } from 'app/store/nanostores/store'; +import { controlAdapterRecalled } from 'features/controlAdapters/store/controlAdaptersSlice'; +import { setHrfEnabled, setHrfMethod, setHrfStrength } from 'features/hrf/store/hrfSlice'; +import type { LoRA } from 'features/lora/store/loraSlice'; +import { loraRecalled } from 'features/lora/store/loraSlice'; +import type { + ControlNetConfigMetadata, + IPAdapterConfigMetadata, + MetadataRecallFunc, + T2IAdapterConfigMetadata, +} from 'features/metadata/types'; +import { modelSelected } from 'features/parameters/store/actions'; +import { + heightRecalled, + setCfgRescaleMultiplier, + setCfgScale, + setImg2imgStrength, + setNegativePrompt, + setPositivePrompt, + setScheduler, + setSeed, + setSteps, + vaeSelected, + widthRecalled, +} from 'features/parameters/store/generationSlice'; +import type { + ParameterCFGRescaleMultiplier, + ParameterCFGScale, + ParameterHeight, + ParameterHRFEnabled, + ParameterHRFMethod, + ParameterModel, + ParameterNegativePrompt, + ParameterNegativeStylePromptSDXL, + ParameterPositivePrompt, + ParameterPositiveStylePromptSDXL, + ParameterScheduler, + ParameterSDXLRefinerModel, + ParameterSDXLRefinerNegativeAestheticScore, + ParameterSDXLRefinerPositiveAestheticScore, + ParameterSDXLRefinerStart, + ParameterSeed, + ParameterSteps, + ParameterStrength, + ParameterVAEModel, + ParameterWidth, +} from 'features/parameters/types/parameterSchemas'; +import { + refinerModelChanged, + setNegativeStylePromptSDXL, + setPositiveStylePromptSDXL, + setRefinerCFGScale, + setRefinerNegativeAestheticScore, + setRefinerPositiveAestheticScore, + setRefinerScheduler, + setRefinerStart, + setRefinerSteps, +} from 'features/sdxl/store/sdxlSlice'; + +const recallPositivePrompt: MetadataRecallFunc = (positivePrompt) => { + getStore().dispatch(setPositivePrompt(positivePrompt)); +}; + +const recallNegativePrompt: MetadataRecallFunc = (negativePrompt) => { + getStore().dispatch(setNegativePrompt(negativePrompt)); +}; + +const recallSDXLPositiveStylePrompt: MetadataRecallFunc = (positiveStylePrompt) => { + getStore().dispatch(setPositiveStylePromptSDXL(positiveStylePrompt)); +}; + +const recallSDXLNegativeStylePrompt: MetadataRecallFunc = (negativeStylePrompt) => { + getStore().dispatch(setNegativeStylePromptSDXL(negativeStylePrompt)); +}; + +const recallSeed: MetadataRecallFunc = (seed) => { + getStore().dispatch(setSeed(seed)); +}; + +const recallCFGScale: MetadataRecallFunc = (cfgScale) => { + getStore().dispatch(setCfgScale(cfgScale)); +}; + +const recallCFGRescaleMultiplier: MetadataRecallFunc = (cfgRescaleMultiplier) => { + getStore().dispatch(setCfgRescaleMultiplier(cfgRescaleMultiplier)); +}; + +const recallScheduler: MetadataRecallFunc = (scheduler) => { + getStore().dispatch(setScheduler(scheduler)); +}; + +const recallWidth: MetadataRecallFunc = (width) => { + getStore().dispatch(widthRecalled(width)); +}; + +const recallHeight: MetadataRecallFunc = (height) => { + getStore().dispatch(heightRecalled(height)); +}; + +const recallSteps: MetadataRecallFunc = (steps) => { + getStore().dispatch(setSteps(steps)); +}; + +const recallStrength: MetadataRecallFunc = (strength) => { + getStore().dispatch(setImg2imgStrength(strength)); +}; + +const 
recallHRFEnabled: MetadataRecallFunc = (hrfEnabled) => { + getStore().dispatch(setHrfEnabled(hrfEnabled)); +}; + +const recallHRFStrength: MetadataRecallFunc = (hrfStrength) => { + getStore().dispatch(setHrfStrength(hrfStrength)); +}; + +const recallHRFMethod: MetadataRecallFunc = (hrfMethod) => { + getStore().dispatch(setHrfMethod(hrfMethod)); +}; + +const recallRefinerSteps: MetadataRecallFunc = (refinerSteps) => { + getStore().dispatch(setRefinerSteps(refinerSteps)); +}; + +const recallRefinerCFGScale: MetadataRecallFunc = (refinerCFGScale) => { + getStore().dispatch(setRefinerCFGScale(refinerCFGScale)); +}; + +const recallRefinerScheduler: MetadataRecallFunc = (refinerScheduler) => { + getStore().dispatch(setRefinerScheduler(refinerScheduler)); +}; + +const recallRefinerPositiveAestheticScore: MetadataRecallFunc = ( + refinerPositiveAestheticScore +) => { + getStore().dispatch(setRefinerPositiveAestheticScore(refinerPositiveAestheticScore)); +}; + +const recallRefinerNegativeAestheticScore: MetadataRecallFunc = ( + refinerNegativeAestheticScore +) => { + getStore().dispatch(setRefinerNegativeAestheticScore(refinerNegativeAestheticScore)); +}; + +const recallRefinerStart: MetadataRecallFunc = (refinerStart) => { + getStore().dispatch(setRefinerStart(refinerStart)); +}; + +const recallModel: MetadataRecallFunc = (model) => { + getStore().dispatch(modelSelected(model)); +}; + +const recallRefinerModel: MetadataRecallFunc = (refinerModel) => { + getStore().dispatch(refinerModelChanged(refinerModel)); +}; + +const recallVAE: MetadataRecallFunc = (vaeModel) => { + if (!vaeModel) { + getStore().dispatch(vaeSelected(null)); + return; + } + getStore().dispatch(vaeSelected(vaeModel)); +}; + +const recallLoRA: MetadataRecallFunc = (lora) => { + getStore().dispatch(loraRecalled(lora)); +}; + +const recallAllLoRAs: MetadataRecallFunc = (loras) => { + const { dispatch } = getStore(); + loras.forEach((lora) => { + dispatch(loraRecalled(lora)); + }); +}; + +const recallControlNet: MetadataRecallFunc = (controlNet) => { + getStore().dispatch(controlAdapterRecalled(controlNet)); +}; + +const recallControlNets: MetadataRecallFunc = (controlNets) => { + const { dispatch } = getStore(); + controlNets.forEach((controlNet) => { + dispatch(controlAdapterRecalled(controlNet)); + }); +}; + +const recallT2IAdapter: MetadataRecallFunc = (t2iAdapter) => { + getStore().dispatch(controlAdapterRecalled(t2iAdapter)); +}; + +const recallT2IAdapters: MetadataRecallFunc = (t2iAdapters) => { + const { dispatch } = getStore(); + t2iAdapters.forEach((t2iAdapter) => { + dispatch(controlAdapterRecalled(t2iAdapter)); + }); +}; + +const recallIPAdapter: MetadataRecallFunc = (ipAdapter) => { + getStore().dispatch(controlAdapterRecalled(ipAdapter)); +}; + +const recallIPAdapters: MetadataRecallFunc = (ipAdapters) => { + const { dispatch } = getStore(); + ipAdapters.forEach((ipAdapter) => { + dispatch(controlAdapterRecalled(ipAdapter)); + }); +}; + +export const recallers = { + positivePrompt: recallPositivePrompt, + negativePrompt: recallNegativePrompt, + sdxlPositiveStylePrompt: recallSDXLPositiveStylePrompt, + sdxlNegativeStylePrompt: recallSDXLNegativeStylePrompt, + seed: recallSeed, + cfgScale: recallCFGScale, + cfgRescaleMultiplier: recallCFGRescaleMultiplier, + scheduler: recallScheduler, + width: recallWidth, + height: recallHeight, + steps: recallSteps, + strength: recallStrength, + hrfEnabled: recallHRFEnabled, + hrfStrength: recallHRFStrength, + hrfMethod: recallHRFMethod, + refinerSteps: recallRefinerSteps, + 
refinerCFGScale: recallRefinerCFGScale, + refinerScheduler: recallRefinerScheduler, + refinerPositiveAestheticScore: recallRefinerPositiveAestheticScore, + refinerNegativeAestheticScore: recallRefinerNegativeAestheticScore, + refinerStart: recallRefinerStart, + model: recallModel, + refinerModel: recallRefinerModel, + vae: recallVAE, + lora: recallLoRA, + loras: recallAllLoRAs, + controlNets: recallControlNets, + controlNet: recallControlNet, + t2iAdapters: recallT2IAdapters, + t2iAdapter: recallT2IAdapter, + ipAdapters: recallIPAdapters, + ipAdapter: recallIPAdapter, +} as const; diff --git a/invokeai/frontend/web/src/features/metadata/util/validators.ts b/invokeai/frontend/web/src/features/metadata/util/validators.ts new file mode 100644 index 0000000000..66454778f2 --- /dev/null +++ b/invokeai/frontend/web/src/features/metadata/util/validators.ts @@ -0,0 +1,122 @@ +import { getStore } from 'app/store/nanostores/store'; +import type { LoRA } from 'features/lora/store/loraSlice'; +import type { + ControlNetConfigMetadata, + IPAdapterConfigMetadata, + MetadataValidateFunc, + T2IAdapterConfigMetadata, +} from 'features/metadata/types'; +import { InvalidModelConfigError } from 'features/metadata/util/modelFetchingHelpers'; +import type { ParameterSDXLRefinerModel, ParameterVAEModel } from 'features/parameters/types/parameterSchemas'; +import type { BaseModelType } from 'services/api/types'; + +/** + * Checks the given base model type against the currently-selected model's base type and throws an error if they are + * incompatible. + * @param base The base model type to validate. + * @param message An optional message to use in the error if the base model is incompatible. + */ +const validateBaseCompatibility = (base?: BaseModelType, message?: string) => { + if (!base) { + throw new InvalidModelConfigError(message || 'Missing base'); + } + const currentBase = getStore().getState().generation.model?.base; + if (currentBase && base !== currentBase) { + throw new InvalidModelConfigError(message || `Incompatible base models: ${base} and ${currentBase}`); + } +}; + +const validateRefinerModel: MetadataValidateFunc = (refinerModel) => { + validateBaseCompatibility('sdxl', 'Refiner incompatible with currently-selected model'); + return new Promise((resolve) => resolve(refinerModel)); +}; + +const validateVAEModel: MetadataValidateFunc = (vaeModel) => { + validateBaseCompatibility(vaeModel.base, 'VAE incompatible with currently-selected model'); + return new Promise((resolve) => resolve(vaeModel)); +}; + +const validateLoRA: MetadataValidateFunc = (lora) => { + validateBaseCompatibility(lora.model.base, 'LoRA incompatible with currently-selected model'); + return new Promise((resolve) => resolve(lora)); +}; + +const validateLoRAs: MetadataValidateFunc = (loras) => { + const validatedLoRAs: LoRA[] = []; + loras.forEach((lora) => { + try { + validateBaseCompatibility(lora.model.base, 'LoRA incompatible with currently-selected model'); + validatedLoRAs.push(lora); + } catch { + // This is a no-op - we want to continue validating the rest of the LoRAs, and an empty list is valid. 
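+      // A LoRA whose base does not match the current main model is dropped
+      // silently rather than failing the whole recall.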
+ } + }); + return new Promise((resolve) => resolve(validatedLoRAs)); +}; + +const validateControlNet: MetadataValidateFunc = (controlNet) => { + validateBaseCompatibility(controlNet.model?.base, 'ControlNet incompatible with currently-selected model'); + return new Promise((resolve) => resolve(controlNet)); +}; + +const validateControlNets: MetadataValidateFunc = (controlNets) => { + const validatedControlNets: ControlNetConfigMetadata[] = []; + controlNets.forEach((controlNet) => { + try { + validateBaseCompatibility(controlNet.model?.base, 'ControlNet incompatible with currently-selected model'); + validatedControlNets.push(controlNet); + } catch { + // This is a no-op - we want to continue validating the rest of the ControlNets, and an empty list is valid. + } + }); + return new Promise((resolve) => resolve(validatedControlNets)); +}; + +const validateT2IAdapter: MetadataValidateFunc = (t2iAdapter) => { + validateBaseCompatibility(t2iAdapter.model?.base, 'T2I Adapter incompatible with currently-selected model'); + return new Promise((resolve) => resolve(t2iAdapter)); +}; + +const validateT2IAdapters: MetadataValidateFunc = (t2iAdapters) => { + const validatedT2IAdapters: T2IAdapterConfigMetadata[] = []; + t2iAdapters.forEach((t2iAdapter) => { + try { + validateBaseCompatibility(t2iAdapter.model?.base, 'T2I Adapter incompatible with currently-selected model'); + validatedT2IAdapters.push(t2iAdapter); + } catch { + // This is a no-op - we want to continue validating the rest of the T2I Adapters, and an empty list is valid. + } + }); + return new Promise((resolve) => resolve(validatedT2IAdapters)); +}; + +const validateIPAdapter: MetadataValidateFunc = (ipAdapter) => { + validateBaseCompatibility(ipAdapter.model?.base, 'IP Adapter incompatible with currently-selected model'); + return new Promise((resolve) => resolve(ipAdapter)); +}; + +const validateIPAdapters: MetadataValidateFunc = (ipAdapters) => { + const validatedIPAdapters: IPAdapterConfigMetadata[] = []; + ipAdapters.forEach((ipAdapter) => { + try { + validateBaseCompatibility(ipAdapter.model?.base, 'IP Adapter incompatible with currently-selected model'); + validatedIPAdapters.push(ipAdapter); + } catch { + // This is a no-op - we want to continue validating the rest of the IP Adapters, and an empty list is valid. 
+ } + }); + return new Promise((resolve) => resolve(validatedIPAdapters)); +}; + +export const validators = { + refinerModel: validateRefinerModel, + vaeModel: validateVAEModel, + lora: validateLoRA, + loras: validateLoRAs, + controlNet: validateControlNet, + controlNets: validateControlNets, + t2iAdapter: validateT2IAdapter, + t2iAdapters: validateT2IAdapters, + ipAdapter: validateIPAdapter, + ipAdapters: validateIPAdapters, +} as const; diff --git a/invokeai/frontend/web/src/features/modelManager/components/SyncModels/SyncModelsButton.tsx b/invokeai/frontend/web/src/features/modelManager/components/SyncModels/SyncModelsButton.tsx deleted file mode 100644 index 8a49bc2585..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/components/SyncModels/SyncModelsButton.tsx +++ /dev/null @@ -1,32 +0,0 @@ -import type { ButtonProps } from '@invoke-ai/ui-library'; -import { Button } from '@invoke-ai/ui-library'; -import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus'; -import { memo } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiArrowsClockwiseBold } from 'react-icons/pi'; - -import { useSyncModels } from './useSyncModels'; - -export const SyncModelsButton = memo((props: Omit) => { - const { t } = useTranslation(); - const { syncModels, isLoading } = useSyncModels(); - const isSyncModelEnabled = useFeatureStatus('syncModels').isFeatureEnabled; - - if (!isSyncModelEnabled) { - return null; - } - - return ( - - ); -}); - -SyncModelsButton.displayName = 'SyncModelsButton'; diff --git a/invokeai/frontend/web/src/features/modelManager/store/modelManagerSlice.ts b/invokeai/frontend/web/src/features/modelManager/store/modelManagerSlice.ts deleted file mode 100644 index c450e64b3c..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/store/modelManagerSlice.ts +++ /dev/null @@ -1,47 +0,0 @@ -import type { PayloadAction } from '@reduxjs/toolkit'; -import { createSlice } from '@reduxjs/toolkit'; -import type { PersistConfig, RootState } from 'app/store/store'; - -type ModelManagerState = { - _version: 1; - searchFolder: string | null; - advancedAddScanModel: string | null; -}; - -export const initialModelManagerState: ModelManagerState = { - _version: 1, - searchFolder: null, - advancedAddScanModel: null, -}; - -export const modelManagerSlice = createSlice({ - name: 'modelmanager', - initialState: initialModelManagerState, - reducers: { - setSearchFolder: (state, action: PayloadAction) => { - state.searchFolder = action.payload; - }, - setAdvancedAddScanModel: (state, action: PayloadAction) => { - state.advancedAddScanModel = action.payload; - }, - }, -}); - -export const { setSearchFolder, setAdvancedAddScanModel } = modelManagerSlice.actions; - -export const selectModelManagerSlice = (state: RootState) => state.modelmanager; - -/* eslint-disable-next-line @typescript-eslint/no-explicit-any */ -export const migrateModelManagerState = (state: any): any => { - if (!('_version' in state)) { - state._version = 1; - } - return state; -}; - -export const modelManagerPersistConfig: PersistConfig = { - name: modelManagerSlice.name, - initialState: initialModelManagerState, - migrate: migrateModelManagerState, - persistDenylist: [], -}; diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/AddModels.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/AddModels.tsx deleted file mode 100644 index cb50334c99..0000000000 --- 
a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/AddModels.tsx +++ /dev/null @@ -1,31 +0,0 @@ -import { Button, ButtonGroup, Flex } from '@invoke-ai/ui-library'; -import { memo, useCallback, useState } from 'react'; -import { useTranslation } from 'react-i18next'; - -import AdvancedAddModels from './AdvancedAddModels'; -import SimpleAddModels from './SimpleAddModels'; - -const AddModels = () => { - const { t } = useTranslation(); - const [addModelMode, setAddModelMode] = useState<'simple' | 'advanced'>('simple'); - const handleAddModelSimple = useCallback(() => setAddModelMode('simple'), []); - const handleAddModelAdvanced = useCallback(() => setAddModelMode('advanced'), []); - return ( - - - - - - - {addModelMode === 'simple' && } - {addModelMode === 'advanced' && } - - - ); -}; - -export default memo(AddModels); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/AdvancedAddCheckpoint.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/AdvancedAddCheckpoint.tsx deleted file mode 100644 index 31f4312eb5..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/AdvancedAddCheckpoint.tsx +++ /dev/null @@ -1,168 +0,0 @@ -import { Button, Checkbox, Flex, FormControl, FormErrorMessage, FormLabel, Input } from '@invoke-ai/ui-library'; -import { useAppDispatch } from 'app/store/storeHooks'; -import { setAdvancedAddScanModel } from 'features/modelManager/store/modelManagerSlice'; -import BaseModelSelect from 'features/modelManager/subpanels/shared/BaseModelSelect'; -import CheckpointConfigsSelect from 'features/modelManager/subpanels/shared/CheckpointConfigsSelect'; -import ModelVariantSelect from 'features/modelManager/subpanels/shared/ModelVariantSelect'; -import { addToast } from 'features/system/store/systemSlice'; -import { makeToast } from 'features/system/util/makeToast'; -import type { CSSProperties, FocusEventHandler } from 'react'; -import { memo, useCallback, useState } from 'react'; -import type { SubmitHandler } from 'react-hook-form'; -import { useForm } from 'react-hook-form'; -import { useTranslation } from 'react-i18next'; -import { useAddMainModelsMutation } from 'services/api/endpoints/models'; -import type { CheckpointModelConfig } from 'services/api/types'; - -import { getModelName } from './util'; - -type AdvancedAddCheckpointProps = { - model_path?: string; -}; - -const AdvancedAddCheckpoint = (props: AdvancedAddCheckpointProps) => { - const { t } = useTranslation(); - const dispatch = useAppDispatch(); - const { model_path } = props; - - const { - register, - handleSubmit, - control, - getValues, - setValue, - formState: { errors }, - reset, - } = useForm({ - defaultValues: { - model_name: model_path ? getModelName(model_path) : '', - base_model: 'sd-1', - model_type: 'main', - path: model_path ? 
model_path : '', - description: '', - model_format: 'checkpoint', - error: undefined, - vae: '', - variant: 'normal', - config: 'configs\\stable-diffusion\\v1-inference.yaml', - }, - mode: 'onChange', - }); - - const [addMainModel] = useAddMainModelsMutation(); - - const [useCustomConfig, setUseCustomConfig] = useState(false); - - const onSubmit = useCallback>( - (values) => { - addMainModel({ - body: values, - }) - .unwrap() - .then((_) => { - dispatch( - addToast( - makeToast({ - title: t('modelManager.modelAdded', { - modelName: values.model_name, - }), - status: 'success', - }) - ) - ); - reset(); - - // Close Advanced Panel in Scan Models tab - if (model_path) { - dispatch(setAdvancedAddScanModel(null)); - } - }) - .catch((error) => { - if (error) { - dispatch( - addToast( - makeToast({ - title: t('toast.modelAddFailed'), - status: 'error', - }) - ) - ); - } - }); - }, - [addMainModel, dispatch, model_path, reset, t] - ); - - const onBlur: FocusEventHandler = useCallback( - (e) => { - if (getValues().model_name === '') { - const modelName = getModelName(e.currentTarget.value); - if (modelName) { - setValue('model_name', modelName as string); - } - } - }, - [getValues, setValue] - ); - - const handleChangeUseCustomConfig = useCallback(() => setUseCustomConfig((prev) => !prev), []); - - return ( -

- - - {t('modelManager.model')} - value.trim().length > 3 || 'Must be at least 3 characters', - })} - /> - {errors.model_name?.message && {errors.model_name?.message}} - - control={control} name="base_model" /> - - {t('modelManager.modelLocation')} - value.trim().length > 0 || 'Must provide a path', - onBlur, - })} - /> - {errors.path?.message && {errors.path?.message}} - - - {t('modelManager.description')} - - - - {t('modelManager.vaeLocation')} - - - control={control} name="variant" /> - - {!useCustomConfig ? ( - - ) : ( - - {t('modelManager.config')} - - - )} - - {t('modelManager.useCustomConfig')} - - - - - -
- ); -}; - -const formStyles: CSSProperties = { - width: '100%', -}; - -export default memo(AdvancedAddCheckpoint); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/AdvancedAddDiffusers.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/AdvancedAddDiffusers.tsx deleted file mode 100644 index 07b28d35fd..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/AdvancedAddDiffusers.tsx +++ /dev/null @@ -1,148 +0,0 @@ -import { Button, Flex, FormControl, FormErrorMessage, FormLabel, Input } from '@invoke-ai/ui-library'; -import { useAppDispatch } from 'app/store/storeHooks'; -import { setAdvancedAddScanModel } from 'features/modelManager/store/modelManagerSlice'; -import BaseModelSelect from 'features/modelManager/subpanels/shared/BaseModelSelect'; -import ModelVariantSelect from 'features/modelManager/subpanels/shared/ModelVariantSelect'; -import { addToast } from 'features/system/store/systemSlice'; -import { makeToast } from 'features/system/util/makeToast'; -import type { CSSProperties, FocusEventHandler } from 'react'; -import { memo, useCallback } from 'react'; -import type { SubmitHandler } from 'react-hook-form'; -import { useForm } from 'react-hook-form'; -import { useTranslation } from 'react-i18next'; -import { useAddMainModelsMutation } from 'services/api/endpoints/models'; -import type { DiffusersModelConfig } from 'services/api/types'; - -import { getModelName } from './util'; - -type AdvancedAddDiffusersProps = { - model_path?: string; -}; - -const AdvancedAddDiffusers = (props: AdvancedAddDiffusersProps) => { - const { t } = useTranslation(); - const dispatch = useAppDispatch(); - const { model_path } = props; - - const [addMainModel] = useAddMainModelsMutation(); - - const { - register, - handleSubmit, - control, - getValues, - setValue, - formState: { errors }, - reset, - } = useForm({ - defaultValues: { - model_name: model_path ? getModelName(model_path, false) : '', - base_model: 'sd-1', - model_type: 'main', - path: model_path ? model_path : '', - description: '', - model_format: 'diffusers', - error: undefined, - vae: '', - variant: 'normal', - }, - mode: 'onChange', - }); - - const onSubmit = useCallback>( - (values) => { - addMainModel({ - body: values, - }) - .unwrap() - .then((_) => { - dispatch( - addToast( - makeToast({ - title: t('modelManager.modelAdded', { - modelName: values.model_name, - }), - status: 'success', - }) - ) - ); - reset(); - // Close Advanced Panel in Scan Models tab - if (model_path) { - dispatch(setAdvancedAddScanModel(null)); - } - }) - .catch((error) => { - if (error) { - dispatch( - addToast( - makeToast({ - title: t('toast.modelAddFailed'), - status: 'error', - }) - ) - ); - } - }); - }, - [addMainModel, dispatch, model_path, reset, t] - ); - - const onBlur: FocusEventHandler = useCallback( - (e) => { - if (getValues().model_name === '') { - const modelName = getModelName(e.currentTarget.value, false); - if (modelName) { - setValue('model_name', modelName as string); - } - } - }, - [getValues, setValue] - ); - - return ( -
- - - {t('modelManager.name')} - value.trim().length > 3 || 'Must be at least 3 characters', - })} - /> - {errors.model_name?.message && {errors.model_name?.message}} - - control={control} name="base_model" /> - - {t('modelManager.modelLocation')} - value.trim().length > 0 || 'Must provide a path', - onBlur, - })} - /> - {errors.path?.message && {errors.path?.message}} - - - {t('modelManager.description')} - - - - {t('modelManager.vaeLocation')} - - - control={control} name="variant" /> - - - -
- ); -}; - -const formStyles: CSSProperties = { - width: '100%', -}; - -export default memo(AdvancedAddDiffusers); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/AdvancedAddModels.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/AdvancedAddModels.tsx deleted file mode 100644 index fc03c6bb1f..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/AdvancedAddModels.tsx +++ /dev/null @@ -1,50 +0,0 @@ -import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library'; -import { Combobox, Flex, FormControl, FormLabel } from '@invoke-ai/ui-library'; -import { memo, useCallback, useMemo, useState } from 'react'; -import { useTranslation } from 'react-i18next'; -import { z } from 'zod'; - -import AdvancedAddCheckpoint from './AdvancedAddCheckpoint'; -import AdvancedAddDiffusers from './AdvancedAddDiffusers'; - -export const zManualAddMode = z.enum(['diffusers', 'checkpoint']); -export type ManualAddMode = z.infer; -export const isManualAddMode = (v: unknown): v is ManualAddMode => zManualAddMode.safeParse(v).success; - -const AdvancedAddModels = () => { - const [advancedAddMode, setAdvancedAddMode] = useState('diffusers'); - - const { t } = useTranslation(); - const handleChange: ComboboxOnChange = useCallback((v) => { - if (!isManualAddMode(v?.value)) { - return; - } - setAdvancedAddMode(v.value); - }, []); - - const options: ComboboxOption[] = useMemo( - () => [ - { label: t('modelManager.diffusersModels'), value: 'diffusers' }, - { label: t('modelManager.checkpointOrSafetensors'), value: 'checkpoint' }, - ], - [t] - ); - - const value = useMemo(() => options.find((o) => o.value === advancedAddMode), [options, advancedAddMode]); - - return ( - - - {t('modelManager.modelType')} - - - - - {advancedAddMode === 'diffusers' && } - {advancedAddMode === 'checkpoint' && } - - - ); -}; - -export default memo(AdvancedAddModels); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/FoundModelsList.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/FoundModelsList.tsx deleted file mode 100644 index 206a955e29..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/FoundModelsList.tsx +++ /dev/null @@ -1,176 +0,0 @@ -import { Button, Flex, FormControl, FormLabel, Input, Text } from '@invoke-ai/ui-library'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent'; -import { setAdvancedAddScanModel } from 'features/modelManager/store/modelManagerSlice'; -import { addToast } from 'features/system/store/systemSlice'; -import { makeToast } from 'features/system/util/makeToast'; -import { difference, forEach, intersection, map, values } from 'lodash-es'; -import type { ChangeEvent, MouseEvent } from 'react'; -import { memo, useCallback, useState } from 'react'; -import { useTranslation } from 'react-i18next'; -import { ALL_BASE_MODELS } from 'services/api/constants'; -import type { SearchFolderResponse } from 'services/api/endpoints/models'; -import { - useGetMainModelsQuery, - useGetModelsInFolderQuery, - useImportMainModelsMutation, -} from 'services/api/endpoints/models'; - -const FoundModelsList = () => { - const searchFolder = useAppSelector((s) => s.modelmanager.searchFolder); - const [nameFilter, setNameFilter] = useState(''); - - // Get paths of models that are already installed - const { data: 
installedModels } = useGetMainModelsQuery(ALL_BASE_MODELS); - - // Get all model paths from a given directory - const { foundModels, alreadyInstalled, filteredModels } = useGetModelsInFolderQuery( - { - search_path: searchFolder ? searchFolder : '', - }, - { - selectFromResult: ({ data }) => { - const installedModelValues = values(installedModels?.entities); - const installedModelPaths = map(installedModelValues, 'path'); - // Only select models those that aren't already installed to Invoke - const notInstalledModels = difference(data, installedModelPaths); - const alreadyInstalled = intersection(data, installedModelPaths); - return { - foundModels: data, - alreadyInstalled: foundModelsFilter(alreadyInstalled, nameFilter), - filteredModels: foundModelsFilter(notInstalledModels, nameFilter), - }; - }, - } - ); - - const [importMainModel, { isLoading }] = useImportMainModelsMutation(); - const dispatch = useAppDispatch(); - const { t } = useTranslation(); - - const quickAddHandler = useCallback( - (e: MouseEvent) => { - const model_name = e.currentTarget.id.split('\\').splice(-1)[0]; - importMainModel({ - body: { - location: e.currentTarget.id, - }, - }) - .unwrap() - .then((_) => { - dispatch( - addToast( - makeToast({ - title: `Added Model: ${model_name}`, - status: 'success', - }) - ) - ); - }) - .catch((error) => { - if (error) { - dispatch( - addToast( - makeToast({ - title: t('toast.modelAddFailed'), - status: 'error', - }) - ) - ); - } - }); - }, - [dispatch, importMainModel, t] - ); - - const handleSearchFilter = useCallback((e: ChangeEvent) => { - setNameFilter(e.target.value); - }, []); - - const handleClickSetAdvanced = useCallback((model: string) => dispatch(setAdvancedAddScanModel(model)), [dispatch]); - - const renderModels = ({ models, showActions = true }: { models: string[]; showActions?: boolean }) => { - return models.map((model) => { - return ( - - - {model.split('\\').slice(-1)[0]} - - {model} - - - {showActions ? 
( - - - - - ) : ( - - {t('common.installed')} - - )} - - ); - }); - }; - - const renderFoundModels = () => { - if (!searchFolder) { - return null; - } - - if (!foundModels || foundModels.length === 0) { - return ( - - {t('modelManager.noModels')} - - ); - } - - return ( - - - {t('modelManager.search')} - - - - - {t('modelManager.modelsFound')}: {foundModels.length} - - - {t('common.notInstalled')}: {filteredModels.length} - - - - - - {renderModels({ models: filteredModels })} - {renderModels({ models: alreadyInstalled, showActions: false })} - - - - ); - }; - - return renderFoundModels(); -}; - -const foundModelsFilter = (data: SearchFolderResponse | undefined, nameFilter: string) => { - const filteredModels: SearchFolderResponse = []; - forEach(data, (model) => { - if (!model) { - return null; - } - - if (model.includes(nameFilter)) { - filteredModels.push(model); - } - }); - return filteredModels; -}; - -export default memo(FoundModelsList); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/ScanAdvancedAddModels.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/ScanAdvancedAddModels.tsx deleted file mode 100644 index 445eb7ed5a..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/ScanAdvancedAddModels.tsx +++ /dev/null @@ -1,95 +0,0 @@ -import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library'; -import { Box, Combobox, Flex, FormControl, FormLabel, IconButton, Text } from '@invoke-ai/ui-library'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { setAdvancedAddScanModel } from 'features/modelManager/store/modelManagerSlice'; -import { memo, useCallback, useEffect, useMemo, useState } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiXBold } from 'react-icons/pi'; - -import AdvancedAddCheckpoint from './AdvancedAddCheckpoint'; -import AdvancedAddDiffusers from './AdvancedAddDiffusers'; -import type { ManualAddMode } from './AdvancedAddModels'; -import { isManualAddMode } from './AdvancedAddModels'; - -const ScanAdvancedAddModels = () => { - const advancedAddScanModel = useAppSelector((s) => s.modelmanager.advancedAddScanModel); - - const { t } = useTranslation(); - - const options: ComboboxOption[] = useMemo( - () => [ - { label: t('modelManager.diffusersModels'), value: 'diffusers' }, - { label: t('modelManager.checkpointOrSafetensors'), value: 'checkpoint' }, - ], - [t] - ); - - const [advancedAddMode, setAdvancedAddMode] = useState('diffusers'); - - const [isCheckpoint, setIsCheckpoint] = useState(true); - - useEffect(() => { - advancedAddScanModel && ['.ckpt', '.safetensors', '.pth', '.pt'].some((ext) => advancedAddScanModel.endsWith(ext)) - ? setAdvancedAddMode('checkpoint') - : setAdvancedAddMode('diffusers'); - }, [advancedAddScanModel, setAdvancedAddMode, isCheckpoint]); - - const dispatch = useAppDispatch(); - - const handleClickSetAdvanced = useCallback(() => dispatch(setAdvancedAddScanModel(null)), [dispatch]); - - const handleChangeAddMode = useCallback((v) => { - if (!isManualAddMode(v?.value)) { - return; - } - setAdvancedAddMode(v.value); - if (v.value === 'checkpoint') { - setIsCheckpoint(true); - } else { - setIsCheckpoint(false); - } - }, []); - - const value = useMemo(() => options.find((o) => o.value === advancedAddMode), [options, advancedAddMode]); - - if (!advancedAddScanModel) { - return null; - } - - return ( - - - - {isCheckpoint || advancedAddMode === 'checkpoint' ? 
'Add Checkpoint Model' : 'Add Diffusers Model'} - - } - aria-label={t('modelManager.closeAdvanced')} - onClick={handleClickSetAdvanced} - size="sm" - /> - - - {t('modelManager.modelType')} - - - {isCheckpoint ? ( - - ) : ( - - )} - - ); -}; - -export default memo(ScanAdvancedAddModels); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/ScanModels.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/ScanModels.tsx deleted file mode 100644 index f3287270a2..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/ScanModels.tsx +++ /dev/null @@ -1,22 +0,0 @@ -import { Flex } from '@invoke-ai/ui-library'; -import { memo } from 'react'; - -import FoundModelsList from './FoundModelsList'; -import ScanAdvancedAddModels from './ScanAdvancedAddModels'; -import SearchFolderForm from './SearchFolderForm'; - -const ScanModels = () => { - return ( - - - - - - - - - - ); -}; - -export default memo(ScanModels); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/SearchFolderForm.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/SearchFolderForm.tsx deleted file mode 100644 index dc1d39f7aa..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/SearchFolderForm.tsx +++ /dev/null @@ -1,103 +0,0 @@ -import { Flex, IconButton, Input, Text } from '@invoke-ai/ui-library'; -import { useForm } from '@mantine/form'; -import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; -import { setAdvancedAddScanModel, setSearchFolder } from 'features/modelManager/store/modelManagerSlice'; -import type { CSSProperties } from 'react'; -import { memo, useCallback } from 'react'; -import { useTranslation } from 'react-i18next'; -import { PiArrowsCounterClockwiseBold, PiMagnifyingGlassBold, PiTrashSimpleBold } from 'react-icons/pi'; -import { useGetModelsInFolderQuery } from 'services/api/endpoints/models'; - -type SearchFolderForm = { - folder: string; -}; - -function SearchFolderForm() { - const dispatch = useAppDispatch(); - const { t } = useTranslation(); - - const searchFolder = useAppSelector((s) => s.modelmanager.searchFolder); - - const { refetch: refetchFoundModels } = useGetModelsInFolderQuery({ - search_path: searchFolder ? searchFolder : '', - }); - - const searchFolderForm = useForm({ - initialValues: { - folder: '', - }, - }); - - const searchFolderFormSubmitHandler = useCallback( - (values: SearchFolderForm) => { - dispatch(setSearchFolder(values.folder)); - }, - [dispatch] - ); - - const scanAgainHandler = useCallback(() => { - refetchFoundModels(); - }, [refetchFoundModels]); - - const handleClickClearCheckpointFolder = useCallback(() => { - dispatch(setSearchFolder(null)); - dispatch(setAdvancedAddScanModel(null)); - }, [dispatch]); - - return ( -
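The useEffect deleted from ScanAdvancedAddModels above keys the add mode off the scanned file's extension. A standalone sketch of that decision, assuming the extension list is exactly the one in the deleted component:

```ts
type ManualAddMode = 'diffusers' | 'checkpoint';

const CHECKPOINT_EXTENSIONS = ['.ckpt', '.safetensors', '.pth', '.pt'];

// Single-file weights open the checkpoint form; anything else (e.g. a
// diffusers folder path) opens the diffusers form.
const inferAddMode = (modelPath: string | null): ManualAddMode =>
  modelPath && CHECKPOINT_EXTENSIONS.some((ext) => modelPath.endsWith(ext)) ? 'checkpoint' : 'diffusers';

inferAddMode('/models/foo.safetensors'); // 'checkpoint'
inferAddMode('/models/some-diffusers-folder'); // 'diffusers'
inferAddMode(null); // 'diffusers'
```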
searchFolderFormSubmitHandler(values))} style={formStyles}> - - - - {t('common.folder')} - - {!searchFolder ? ( - - ) : ( - - {searchFolder} - - )} - - - - {!searchFolder ? ( - } - fontSize={18} - size="sm" - type="submit" - /> - ) : ( - } - onClick={scanAgainHandler} - fontSize={18} - size="sm" - /> - )} - - } - size="sm" - onClick={handleClickClearCheckpointFolder} - isDisabled={!searchFolder} - colorScheme="red" - /> - - -
- ); -} - -export default memo(SearchFolderForm); - -const formStyles: CSSProperties = { - width: '100%', -}; diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/SimpleAddModels.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/SimpleAddModels.tsx deleted file mode 100644 index d7f705aedc..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/SimpleAddModels.tsx +++ /dev/null @@ -1,93 +0,0 @@ -import type { ComboboxOption } from '@invoke-ai/ui-library'; -import { Button, Combobox, Flex, FormControl, FormLabel, Input } from '@invoke-ai/ui-library'; -import { useForm } from '@mantine/form'; -import { useAppDispatch } from 'app/store/storeHooks'; -import { addToast } from 'features/system/store/systemSlice'; -import { makeToast } from 'features/system/util/makeToast'; -import type { CSSProperties } from 'react'; -import { memo } from 'react'; -import { useTranslation } from 'react-i18next'; -import { useImportMainModelsMutation } from 'services/api/endpoints/models'; - -const options: ComboboxOption[] = [ - { label: 'None', value: 'none' }, - { label: 'v_prediction', value: 'v_prediction' }, - { label: 'epsilon', value: 'epsilon' }, - { label: 'sample', value: 'sample' }, -]; - -type ExtendedImportModelConfig = { - location: string; - prediction_type?: 'v_prediction' | 'epsilon' | 'sample' | 'none' | undefined; -}; - -const SimpleAddModels = () => { - const dispatch = useAppDispatch(); - const { t } = useTranslation(); - - const [importMainModel, { isLoading }] = useImportMainModelsMutation(); - - const addModelForm = useForm({ - initialValues: { - location: '', - prediction_type: undefined, - }, - }); - - const handleAddModelSubmit = (values: ExtendedImportModelConfig) => { - const importModelResponseBody = { - location: values.location, - prediction_type: values.prediction_type === 'none' ? undefined : values.prediction_type, - }; - - importMainModel({ body: importModelResponseBody }) - .unwrap() - .then((_) => { - dispatch( - addToast( - makeToast({ - title: t('toast.modelAddedSimple'), - status: 'success', - }) - ) - ); - addModelForm.reset(); - }) - .catch((error) => { - if (error) { - dispatch( - addToast( - makeToast({ - title: `${error.data.detail} `, - status: 'error', - }) - ) - ); - } - }); - }; - - return ( -
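SearchFolderForm, deleted just above, never fetches anything itself: submitting only writes the folder into the model manager slice, and FoundModelsList's useGetModelsInFolderQuery re-runs whenever that value changes (the scan-again button is a plain refetch). A rough sketch of that contract, with a hypothetical reduced stand-in for the deleted modelManagerSlice:

```ts
import { createSlice, type PayloadAction } from '@reduxjs/toolkit';

// Hypothetical reduced shape of the deleted modelManagerSlice.
type ScanState = {
  searchFolder: string | null; // folder to scan; null = nothing scanned yet
  advancedAddScanModel: string | null; // model picked for the advanced add form
};

const initialState: ScanState = { searchFolder: null, advancedAddScanModel: null };

const scanSlice = createSlice({
  name: 'modelmanager',
  initialState,
  reducers: {
    setSearchFolder: (state, action: PayloadAction<string | null>) => {
      state.searchFolder = action.payload;
    },
    setAdvancedAddScanModel: (state, action: PayloadAction<string | null>) => {
      state.advancedAddScanModel = action.payload;
    },
  },
});

export const { setSearchFolder, setAdvancedAddScanModel } = scanSlice.actions;

// Submit:        dispatch(setSearchFolder(values.folder));
// Clear (trash): dispatch(setSearchFolder(null)); dispatch(setAdvancedAddScanModel(null));
```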
handleAddModelSubmit(v))} style={formStyles}> - - - {t('modelManager.modelLocation')} - - - - {t('modelManager.predictionType')} - - - - -
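One detail in SimpleAddModels worth calling out: the prediction-type combobox needs a concrete 'None' entry, but the import endpoint expects the field to be absent, so the submit handler maps 'none' back to undefined. Just that mapping, with the body shape taken from the deleted component rather than the API schema:

```ts
type PredictionType = 'v_prediction' | 'epsilon' | 'sample';

type ImportBody = {
  location: string;
  prediction_type?: PredictionType;
};

// 'none' exists only in the UI; drop it so the server can autodetect.
const toImportBody = (location: string, predictionType: PredictionType | 'none'): ImportBody => ({
  location,
  prediction_type: predictionType === 'none' ? undefined : predictionType,
});

toImportBody('/models/foo.safetensors', 'none'); // { location: '...', prediction_type: undefined }
toImportBody('/models/foo.safetensors', 'v_prediction'); // { location: '...', prediction_type: 'v_prediction' }
```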
- ); -}; - -const formStyles: CSSProperties = { - width: '100%', -}; - -export default memo(SimpleAddModels); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/util.ts b/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/util.ts deleted file mode 100644 index 7314a86908..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/AddModelsPanel/util.ts +++ /dev/null @@ -1,15 +0,0 @@ -export function getModelName(filepath: string, isCheckpoint: boolean = true) { - let regex; - if (isCheckpoint) { - regex = new RegExp('[^\\\\/]+(?=\\.)'); - } else { - regex = new RegExp('[^\\\\/]+(?=[\\\\/]?$)'); - } - - const match = filepath.match(regex); - if (match) { - return match[0]; - } else { - return ''; - } -} diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/ImportModelsPanel.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/ImportModelsPanel.tsx deleted file mode 100644 index 18fcef9614..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/ImportModelsPanel.tsx +++ /dev/null @@ -1,34 +0,0 @@ -import { Button, ButtonGroup, Flex } from '@invoke-ai/ui-library'; -import { memo, useCallback, useState } from 'react'; -import { useTranslation } from 'react-i18next'; - -import AddModels from './AddModelsPanel/AddModels'; -import ScanModels from './AddModelsPanel/ScanModels'; - -type AddModelTabs = 'add' | 'scan'; - -const ImportModelsPanel = () => { - const [addModelTab, setAddModelTab] = useState('add'); - const { t } = useTranslation(); - - const handleClickAddTab = useCallback(() => setAddModelTab('add'), []); - const handleClickScanTab = useCallback(() => setAddModelTab('scan'), []); - - return ( - - - - - - - {addModelTab === 'add' && } - {addModelTab === 'scan' && } - - ); -}; - -export default memo(ImportModelsPanel); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/MergeModelsPanel.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/MergeModelsPanel.tsx deleted file mode 100644 index fe90db5b8a..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/MergeModelsPanel.tsx +++ /dev/null @@ -1,352 +0,0 @@ -import type { ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library'; -import { - Button, - Checkbox, - Combobox, - CompositeNumberInput, - CompositeSlider, - Flex, - FormControl, - FormHelperText, - FormLabel, - Input, - Radio, - RadioGroup, - Text, - Tooltip, -} from '@invoke-ai/ui-library'; -import { useAppDispatch } from 'app/store/storeHooks'; -import { addToast } from 'features/system/store/systemSlice'; -import { makeToast } from 'features/system/util/makeToast'; -import { pickBy } from 'lodash-es'; -import type { ChangeEvent } from 'react'; -import { memo, useCallback, useMemo, useState } from 'react'; -import { useTranslation } from 'react-i18next'; -import { ALL_BASE_MODELS } from 'services/api/constants'; -import { useGetMainModelsQuery, useMergeMainModelsMutation } from 'services/api/endpoints/models'; -import type { BaseModelType, MergeModelConfig } from 'services/api/types'; - -const baseModelTypeSelectOptions: ComboboxOption[] = [ - { label: 'Stable Diffusion 1', value: 'sd-1' }, - { label: 'Stable Diffusion 2', value: 'sd-2' }, -]; - -type MergeInterpolationMethods = 'weighted_sum' | 'sigmoid' | 'inv_sigmoid' | 'add_difference'; - -const MergeModelsPanel = () => { - const { t } = useTranslation(); - const dispatch = useAppDispatch(); - - const { data } = 
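The two regexes in the deleted util.ts are easy to misread, so here is the same getModelName with worked examples; behavior is intended to match the original:

```ts
// Extract a display name from a model filepath.
// - checkpoint: filename without extension -> 'sd15' from 'C:\models\sd15.safetensors'
// - diffusers:  last path segment as-is    -> 'my-model' from '/models/my-model/'
export function getModelName(filepath: string, isCheckpoint: boolean = true) {
  const regex = isCheckpoint
    ? new RegExp('[^\\\\/]+(?=\\.)') // run of non-slash chars followed by a dot
    : new RegExp('[^\\\\/]+(?=[\\\\/]?$)'); // last run of non-slash chars, optional trailing slash
  const match = filepath.match(regex);
  return match ? match[0] : '';
}

getModelName('C:\\models\\sd15.safetensors', true); // 'sd15'
getModelName('/models/my-diffusers-model/', false); // 'my-diffusers-model'
getModelName('no-extension', true); // '' (no dot, no match)
```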
useGetMainModelsQuery(ALL_BASE_MODELS); - - const [mergeModels, { isLoading }] = useMergeMainModelsMutation(); - - const [baseModel, setBaseModel] = useState('sd-1'); - const valueBaseModel = useMemo(() => baseModelTypeSelectOptions.find((o) => o.value === baseModel), [baseModel]); - const sd1DiffusersModels = pickBy( - data?.entities, - (value, _) => value?.model_format === 'diffusers' && value?.base_model === 'sd-1' - ); - - const sd2DiffusersModels = pickBy( - data?.entities, - (value, _) => value?.model_format === 'diffusers' && value?.base_model === 'sd-2' - ); - - const modelsMap = useMemo(() => { - return { - 'sd-1': sd1DiffusersModels, - 'sd-2': sd2DiffusersModels, - }; - }, [sd1DiffusersModels, sd2DiffusersModels]); - - const [modelOne, setModelOne] = useState( - Object.keys(modelsMap[baseModel as keyof typeof modelsMap])?.[0] ?? null - ); - const [modelTwo, setModelTwo] = useState( - Object.keys(modelsMap[baseModel as keyof typeof modelsMap])?.[1] ?? null - ); - const [modelThree, setModelThree] = useState(null); - - const [mergedModelName, setMergedModelName] = useState(''); - const [modelMergeAlpha, setModelMergeAlpha] = useState(0.5); - - const [modelMergeInterp, setModelMergeInterp] = useState('weighted_sum'); - - const [modelMergeSaveLocType, setModelMergeSaveLocType] = useState<'root' | 'custom'>('root'); - - const [modelMergeCustomSaveLoc, setModelMergeCustomSaveLoc] = useState(''); - - const [modelMergeForce, setModelMergeForce] = useState(false); - - const optionsModelOne = useMemo( - () => - Object.keys(modelsMap[baseModel as keyof typeof modelsMap]) - .filter((model) => model !== modelTwo && model !== modelThree) - .map((model) => ({ label: model, value: model })), - [modelsMap, baseModel, modelTwo, modelThree] - ); - - const optionsModelTwo = useMemo( - () => - Object.keys(modelsMap[baseModel as keyof typeof modelsMap]) - .filter((model) => model !== modelOne && model !== modelThree) - .map((model) => ({ label: model, value: model })), - [modelsMap, baseModel, modelOne, modelThree] - ); - - const optionsModelThree = useMemo( - () => - Object.keys(modelsMap[baseModel as keyof typeof modelsMap]) - .filter((model) => model !== modelOne && model !== modelTwo) - .map((model) => ({ label: model, value: model })), - [modelsMap, baseModel, modelOne, modelTwo] - ); - - const onChangeBaseModel = useCallback((v) => { - if (!v) { - return; - } - if (!(v.value === 'sd-1' || v.value === 'sd-2')) { - return; - } - setBaseModel(v.value); - setModelOne(null); - setModelTwo(null); - }, []); - - const onChangeModelOne = useCallback((v) => { - if (!v) { - return; - } - setModelOne(v.value); - }, []); - const onChangeModelTwo = useCallback((v) => { - if (!v) { - return; - } - setModelTwo(v.value); - }, []); - const onChangeModelThree = useCallback((v) => { - if (!v) { - setModelThree(null); - setModelMergeInterp('add_difference'); - } else { - setModelThree(v.value); - setModelMergeInterp('weighted_sum'); - } - }, []); - - const valueModelOne = useMemo(() => optionsModelOne.find((o) => o.value === modelOne), [modelOne, optionsModelOne]); - const valueModelTwo = useMemo(() => optionsModelTwo.find((o) => o.value === modelTwo), [modelTwo, optionsModelTwo]); - const valueModelThree = useMemo( - () => optionsModelThree.find((o) => o.value === modelThree), - [modelThree, optionsModelThree] - ); - - const handleChangeMergedModelName = useCallback( - (e: ChangeEvent) => setMergedModelName(e.target.value), - [] - ); - const handleChangeModelMergeAlpha = useCallback((v: number) => 
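MergeModelsPanel builds three option lists above, each excluding the models selected in the other two comboboxes so a model cannot be merged with itself. The pattern in isolation, over plain strings instead of ComboboxOption objects:

```ts
// Candidates for one select: every model except those taken by the other selects.
const optionsExcluding = (allModels: string[], ...taken: (string | null)[]) =>
  allModels.filter((model) => !taken.includes(model));

const all = ['modelA', 'modelB', 'modelC'];
optionsExcluding(all, 'modelB', null); // ['modelA', 'modelC'] (select one, when two = modelB)
optionsExcluding(all, 'modelA', null); // ['modelB', 'modelC'] (select two, when one = modelA)
```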
setModelMergeAlpha(v), []); - const handleResetModelMergeAlpha = useCallback(() => setModelMergeAlpha(0.5), []); - const handleChangeMergeInterp = useCallback((v: MergeInterpolationMethods) => setModelMergeInterp(v), []); - const handleChangeMergeSaveLocType = useCallback((v: 'root' | 'custom') => setModelMergeSaveLocType(v), []); - const handleChangeMergeCustomSaveLoc = useCallback( - (e: ChangeEvent) => setModelMergeCustomSaveLoc(e.target.value), - [] - ); - const handleChangeModelMergeForce = useCallback( - (e: ChangeEvent) => setModelMergeForce(e.target.checked), - [] - ); - - const mergeModelsHandler = useCallback(() => { - const models_names: string[] = []; - - let modelsToMerge: (string | null)[] = [modelOne, modelTwo, modelThree]; - modelsToMerge = modelsToMerge.filter((model) => model !== null); - modelsToMerge.forEach((model) => { - const n = model?.split('/')?.[2]; - if (n) { - models_names.push(n); - } - }); - - const mergeModelsInfo: MergeModelConfig['body'] = { - model_names: models_names, - merged_model_name: mergedModelName !== '' ? mergedModelName : models_names.join('-'), - alpha: modelMergeAlpha, - interp: modelMergeInterp, - force: modelMergeForce, - merge_dest_directory: modelMergeSaveLocType === 'root' ? undefined : modelMergeCustomSaveLoc, - }; - - mergeModels({ - base_model: baseModel, - body: { body: mergeModelsInfo }, - }) - .unwrap() - .then((_) => { - dispatch( - addToast( - makeToast({ - title: t('modelManager.modelsMerged'), - status: 'success', - }) - ) - ); - }) - .catch((error) => { - if (error) { - dispatch( - addToast( - makeToast({ - title: t('modelManager.modelsMergeFailed'), - status: 'error', - }) - ) - ); - } - }); - }, [ - baseModel, - dispatch, - mergeModels, - mergedModelName, - modelMergeAlpha, - modelMergeCustomSaveLoc, - modelMergeForce, - modelMergeInterp, - modelMergeSaveLocType, - modelOne, - modelThree, - modelTwo, - t, - ]); - - return ( - - - {t('modelManager.modelMergeHeaderHelp1')} - - {t('modelManager.modelMergeHeaderHelp2')} - - - - - - {t('modelManager.modelType')} - - - - {t('modelManager.modelOne')} - - - - {t('modelManager.modelTwo')} - - - - {t('modelManager.modelThree')} - - - - - - {t('modelManager.mergedModelName')} - - - - - - {t('modelManager.alpha')} - - - {t('modelManager.modelMergeAlphaHelp')} - - - - - - {t('modelManager.interpolationType')} - - - - {modelThree === null ? 
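The mergeModelsHandler above derives the API model names from the third path segment of each selected ID and falls back to a hyphen-joined name when no merged name is given. A sketch of that request-body construction; the 'base/type/name' ID shape is an assumption inferred from the split('/')[2] in the deleted code:

```ts
type MergeBody = {
  model_names: string[];
  merged_model_name: string;
  alpha: number;
  interp: 'weighted_sum' | 'sigmoid' | 'inv_sigmoid' | 'add_difference';
  force: boolean;
  merge_dest_directory?: string;
};

const buildMergeBody = (
  selected: (string | null)[], // e.g. 'sd-1/main/my-model'; the third slot may be null
  mergedModelName: string,
  alpha: number,
  interp: MergeBody['interp'],
  force: boolean,
  customDir?: string // undefined = save into the InvokeAI root
): MergeBody => {
  // Keep only the chosen models and take the name segment of each ID.
  const model_names = selected
    .filter((id): id is string => id !== null)
    .map((id) => id.split('/')[2])
    .filter((name): name is string => !!name);

  return {
    model_names,
    merged_model_name: mergedModelName !== '' ? mergedModelName : model_names.join('-'),
    alpha,
    interp,
    force,
    merge_dest_directory: customDir,
  };
};

// buildMergeBody(['sd-1/main/a', 'sd-1/main/b', null], '', 0.5, 'weighted_sum', false)
// -> { model_names: ['a', 'b'], merged_model_name: 'a-b', alpha: 0.5, ... }
```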
( - <> - - {t('modelManager.weightedSum')} - - - {t('modelManager.sigmoid')} - - - {t('modelManager.inverseSigmoid')} - - - ) : ( - - - {t('modelManager.addDifference')} - - - )} - - - - - - - - {t('modelManager.mergedModelSaveLocation')} - - - - - {t('modelManager.invokeAIFolder')} - - - - {t('modelManager.custom')} - - - - - - {modelMergeSaveLocType === 'custom' && ( - - {t('modelManager.mergedModelCustomSaveLocation')} - - - )} - - - - {t('modelManager.ignoreMismatch')} - - - - - - ); -}; - -export default memo(MergeModelsPanel); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel.tsx deleted file mode 100644 index 6b9abdbfec..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel.tsx +++ /dev/null @@ -1,67 +0,0 @@ -import { Flex, Text } from '@invoke-ai/ui-library'; -import { memo, useState } from 'react'; -import { useTranslation } from 'react-i18next'; -import { ALL_BASE_MODELS } from 'services/api/constants'; -import type { - DiffusersModelConfigEntity, - LoRAModelConfigEntity, - MainModelConfigEntity, -} from 'services/api/endpoints/models'; -import { useGetLoRAModelsQuery, useGetMainModelsQuery } from 'services/api/endpoints/models'; - -import CheckpointModelEdit from './ModelManagerPanel/CheckpointModelEdit'; -import DiffusersModelEdit from './ModelManagerPanel/DiffusersModelEdit'; -import LoRAModelEdit from './ModelManagerPanel/LoRAModelEdit'; -import ModelList from './ModelManagerPanel/ModelList'; - -const ModelManagerPanel = () => { - const [selectedModelId, setSelectedModelId] = useState(); - const { mainModel } = useGetMainModelsQuery(ALL_BASE_MODELS, { - selectFromResult: ({ data }) => ({ - mainModel: selectedModelId ? data?.entities[selectedModelId] : undefined, - }), - }); - const { loraModel } = useGetLoRAModelsQuery(undefined, { - selectFromResult: ({ data }) => ({ - loraModel: selectedModelId ? data?.entities[selectedModelId] : undefined, - }), - }); - - const model = mainModel ? 
mainModel : loraModel; - - return ( - - - - - ); -}; - -type ModelEditProps = { - model: MainModelConfigEntity | LoRAModelConfigEntity | undefined; -}; - -const ModelEdit = (props: ModelEditProps) => { - const { t } = useTranslation(); - const { model } = props; - - if (model?.model_format === 'checkpoint') { - return ; - } - - if (model?.model_format === 'diffusers') { - return ; - } - - if (model?.model_type === 'lora') { - return ; - } - - return ( - - {t('modelManager.noModelSelected')} - - ); -}; - -export default memo(ModelManagerPanel); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/CheckpointModelEdit.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/CheckpointModelEdit.tsx deleted file mode 100644 index f4d271187d..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/CheckpointModelEdit.tsx +++ /dev/null @@ -1,187 +0,0 @@ -import { - Badge, - Button, - Checkbox, - Divider, - Flex, - FormControl, - FormErrorMessage, - FormLabel, - Input, - Text, -} from '@invoke-ai/ui-library'; -import { useAppDispatch } from 'app/store/storeHooks'; -import BaseModelSelect from 'features/modelManager/subpanels/shared/BaseModelSelect'; -import CheckpointConfigsSelect from 'features/modelManager/subpanels/shared/CheckpointConfigsSelect'; -import ModelVariantSelect from 'features/modelManager/subpanels/shared/ModelVariantSelect'; -import { MODEL_TYPE_MAP } from 'features/parameters/types/constants'; -import { addToast } from 'features/system/store/systemSlice'; -import { makeToast } from 'features/system/util/makeToast'; -import { memo, useCallback, useEffect, useState } from 'react'; -import type { SubmitHandler } from 'react-hook-form'; -import { useForm } from 'react-hook-form'; -import { useTranslation } from 'react-i18next'; -import type { CheckpointModelConfigEntity } from 'services/api/endpoints/models'; -import { useGetCheckpointConfigsQuery, useUpdateMainModelsMutation } from 'services/api/endpoints/models'; -import type { CheckpointModelConfig } from 'services/api/types'; - -import ModelConvert from './ModelConvert'; - -type CheckpointModelEditProps = { - model: CheckpointModelConfigEntity; -}; - -const CheckpointModelEdit = (props: CheckpointModelEditProps) => { - const { model } = props; - - const [updateMainModel, { isLoading }] = useUpdateMainModelsMutation(); - const { data: availableCheckpointConfigs } = useGetCheckpointConfigsQuery(); - - const [useCustomConfig, setUseCustomConfig] = useState(false); - - useEffect(() => { - if (!availableCheckpointConfigs?.includes(model.config)) { - setUseCustomConfig(true); - } - }, [availableCheckpointConfigs, model.config]); - - const dispatch = useAppDispatch(); - const { t } = useTranslation(); - - const { - register, - handleSubmit, - control, - formState: { errors }, - reset, - } = useForm({ - defaultValues: { - model_name: model.model_name ? model.model_name : '', - base_model: model.base_model, - model_type: 'main', - path: model.path ? model.path : '', - description: model.description ? model.description : '', - model_format: 'checkpoint', - vae: model.vae ? model.vae : '', - config: model.config ? 
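ModelEdit in the deleted ModelManagerPanel is a plain dispatch on the selected entity, and the order matters: model_format is checked before model_type, so format wins when both could apply. A type-level sketch with simplified stand-ins for the config entity types:

```ts
// Simplified stand-ins for MainModelConfigEntity / LoRAModelConfigEntity.
type ModelConfig =
  | { model_type: 'main'; model_format: 'checkpoint' | 'diffusers'; model_name: string }
  | { model_type: 'lora'; model_format: string; model_name: string };

type EditPanel = 'checkpoint-edit' | 'diffusers-edit' | 'lora-edit' | 'none-selected';

// Checks run in the same order as the deleted component.
const pickEditPanel = (model: ModelConfig | undefined): EditPanel => {
  if (model?.model_format === 'checkpoint') return 'checkpoint-edit';
  if (model?.model_format === 'diffusers') return 'diffusers-edit';
  if (model?.model_type === 'lora') return 'lora-edit';
  return 'none-selected'; // nothing selected: show the placeholder text
};
```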
model.config : '', - variant: model.variant, - }, - mode: 'onChange', - }); - - const handleChangeUseCustomConfig = useCallback(() => setUseCustomConfig((prev) => !prev), []); - - const onSubmit = useCallback>( - (values) => { - const responseBody = { - base_model: model.base_model, - model_name: model.model_name, - body: values, - }; - updateMainModel(responseBody) - .unwrap() - .then((payload) => { - reset(payload as CheckpointModelConfig, { keepDefaultValues: true }); - dispatch( - addToast( - makeToast({ - title: t('modelManager.modelUpdated'), - status: 'success', - }) - ) - ); - }) - .catch((_) => { - reset(); - dispatch( - addToast( - makeToast({ - title: t('modelManager.modelUpdateFailed'), - status: 'error', - }) - ) - ); - }); - }, - [dispatch, model.base_model, model.model_name, reset, t, updateMainModel] - ); - - return ( - - - - - {model.model_name} - - - {MODEL_TYPE_MAP[model.base_model]} {t('modelManager.model')} - - - {![''].includes(model.base_model) ? ( - - ) : ( - - {t('modelManager.conversionNotSupported')} - - )} - - - - -
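CheckpointModelEdit here, and DiffusersModelEdit and LoRAModelEdit below, all share this submit pattern: unwrap the RTK Query mutation, on success reset the form to the server payload with keepDefaultValues so the dirty state clears, on failure reset to the previous defaults and toast. A condensed sketch with the toast plumbing swapped for console logging and a hypothetical updateModel trigger:

```ts
import { useForm } from 'react-hook-form';

type ModelForm = { model_name: string; path: string; description: string };

// updateModel stands in for the unwrapped RTK Query mutation trigger.
const useModelSubmit = (updateModel: (body: ModelForm) => Promise<ModelForm>) => {
  const { handleSubmit, reset } = useForm<ModelForm>({ mode: 'onChange' });

  const onSubmit = (values: ModelForm) =>
    updateModel(values)
      .then((payload) => {
        // Re-seed the form from the server response; keepDefaultValues keeps
        // the original defaults so only the current values change.
        reset(payload, { keepDefaultValues: true });
        console.log('model updated');
      })
      .catch(() => {
        reset(); // roll back to the last known-good defaults
        console.error('model update failed');
      });

  return handleSubmit(onSubmit);
};
```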
- - - {t('modelManager.name')} - value.trim().length > 3 || 'Must be at least 3 characters', - })} - /> - {errors.model_name?.message && {errors.model_name?.message}} - - - {t('modelManager.description')} - - - control={control} name="base_model" /> - control={control} name="variant" /> - - {t('modelManager.modelLocation')} - value.trim().length > 0 || 'Must provide a path', - })} - /> - {errors.path?.message && {errors.path?.message}} - - - {t('modelManager.vaeLocation')} - - - - - {!useCustomConfig ? ( - - ) : ( - - {t('modelManager.config')} - - - )} - - {t('modelManager.useCustomConfig')} - - - - - - -
-
-
- ); -}; - -export default memo(CheckpointModelEdit); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/DiffusersModelEdit.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/DiffusersModelEdit.tsx deleted file mode 100644 index 4670f32157..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/DiffusersModelEdit.tsx +++ /dev/null @@ -1,135 +0,0 @@ -import { Button, Divider, Flex, FormControl, FormErrorMessage, FormLabel, Input, Text } from '@invoke-ai/ui-library'; -import { useAppDispatch } from 'app/store/storeHooks'; -import BaseModelSelect from 'features/modelManager/subpanels/shared/BaseModelSelect'; -import ModelVariantSelect from 'features/modelManager/subpanels/shared/ModelVariantSelect'; -import { MODEL_TYPE_MAP } from 'features/parameters/types/constants'; -import { addToast } from 'features/system/store/systemSlice'; -import { makeToast } from 'features/system/util/makeToast'; -import { memo, useCallback } from 'react'; -import type { SubmitHandler } from 'react-hook-form'; -import { useForm } from 'react-hook-form'; -import { useTranslation } from 'react-i18next'; -import type { DiffusersModelConfigEntity } from 'services/api/endpoints/models'; -import { useUpdateMainModelsMutation } from 'services/api/endpoints/models'; -import type { DiffusersModelConfig } from 'services/api/types'; - -type DiffusersModelEditProps = { - model: DiffusersModelConfigEntity; -}; - -const DiffusersModelEdit = (props: DiffusersModelEditProps) => { - const { model } = props; - - const [updateMainModel, { isLoading }] = useUpdateMainModelsMutation(); - - const dispatch = useAppDispatch(); - const { t } = useTranslation(); - - const { - register, - handleSubmit, - control, - formState: { errors }, - reset, - } = useForm({ - defaultValues: { - model_name: model.model_name ? model.model_name : '', - base_model: model.base_model, - model_type: 'main', - path: model.path ? model.path : '', - description: model.description ? model.description : '', - model_format: 'diffusers', - vae: model.vae ? model.vae : '', - variant: model.variant, - }, - mode: 'onChange', - }); - - const onSubmit = useCallback>( - (values) => { - const responseBody = { - base_model: model.base_model, - model_name: model.model_name, - body: values, - }; - - updateMainModel(responseBody) - .unwrap() - .then((payload) => { - reset(payload as DiffusersModelConfig, { keepDefaultValues: true }); - dispatch( - addToast( - makeToast({ - title: t('modelManager.modelUpdated'), - status: 'success', - }) - ) - ); - }) - .catch((_) => { - reset(); - dispatch( - addToast( - makeToast({ - title: t('modelManager.modelUpdateFailed'), - status: 'error', - }) - ) - ); - }); - }, - [dispatch, model.base_model, model.model_name, reset, t, updateMainModel] - ); - - return ( - - - - {model.model_name} - - - {MODEL_TYPE_MAP[model.base_model]} {t('modelManager.model')} - - - - -
- - - {t('modelManager.name')} - value.trim().length > 3 || 'Must be at least 3 characters', - })} - /> - {errors.model_name?.message && {errors.model_name?.message}} - - - {t('modelManager.description')} - - - control={control} name="base_model" /> - control={control} name="variant" /> - - {t('modelManager.modelLocation')} - value.trim().length > 0 || 'Must provide a path', - })} - /> - {errors.path?.message && {errors.path?.message}} - - - {t('modelManager.vaeLocation')} - - - - -
-
- ); -}; - -export default memo(DiffusersModelEdit); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/LoRAModelEdit.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/LoRAModelEdit.tsx deleted file mode 100644 index 2baf735bee..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/LoRAModelEdit.tsx +++ /dev/null @@ -1,129 +0,0 @@ -import { Button, Divider, Flex, FormControl, FormErrorMessage, FormLabel, Input, Text } from '@invoke-ai/ui-library'; -import { useAppDispatch } from 'app/store/storeHooks'; -import BaseModelSelect from 'features/modelManager/subpanels/shared/BaseModelSelect'; -import { LORA_MODEL_FORMAT_MAP, MODEL_TYPE_MAP } from 'features/parameters/types/constants'; -import { addToast } from 'features/system/store/systemSlice'; -import { makeToast } from 'features/system/util/makeToast'; -import { memo, useCallback } from 'react'; -import type { SubmitHandler } from 'react-hook-form'; -import { useForm } from 'react-hook-form'; -import { useTranslation } from 'react-i18next'; -import type { LoRAModelConfigEntity } from 'services/api/endpoints/models'; -import { useUpdateLoRAModelsMutation } from 'services/api/endpoints/models'; -import type { LoRAModelConfig } from 'services/api/types'; - -type LoRAModelEditProps = { - model: LoRAModelConfigEntity; -}; - -const LoRAModelEdit = (props: LoRAModelEditProps) => { - const { model } = props; - - const [updateLoRAModel, { isLoading }] = useUpdateLoRAModelsMutation(); - - const dispatch = useAppDispatch(); - const { t } = useTranslation(); - - const { - register, - handleSubmit, - control, - formState: { errors }, - reset, - } = useForm({ - defaultValues: { - model_name: model.model_name ? model.model_name : '', - base_model: model.base_model, - model_type: 'lora', - path: model.path ? model.path : '', - description: model.description ? model.description : '', - model_format: model.model_format, - }, - mode: 'onChange', - }); - - const onSubmit = useCallback>( - (values) => { - const responseBody = { - base_model: model.base_model, - model_name: model.model_name, - body: values, - }; - - updateLoRAModel(responseBody) - .unwrap() - .then((payload) => { - reset(payload as LoRAModelConfig, { keepDefaultValues: true }); - dispatch( - addToast( - makeToast({ - title: t('modelManager.modelUpdated'), - status: 'success', - }) - ) - ); - }) - .catch((_) => { - reset(); - dispatch( - addToast( - makeToast({ - title: t('modelManager.modelUpdateFailed'), - status: 'error', - }) - ) - ); - }); - }, - [dispatch, model.base_model, model.model_name, reset, t, updateLoRAModel] - ); - - return ( - - - - {model.model_name} - - - {MODEL_TYPE_MAP[model.base_model]} {t('modelManager.model')} ⋅ {LORA_MODEL_FORMAT_MAP[model.model_format]}{' '} - {t('common.format')} - - - - -
- - - {t('modelManager.name')} - value.trim().length > 3 || 'Must be at least 3 characters', - })} - /> - {errors.model_name?.message && {errors.model_name?.message}} - - - {t('modelManager.description')} - - - control={control} name="base_model" /> - - - {t('modelManager.modelLocation')} - value.trim().length > 0 || 'Must provide a path', - })} - /> - {errors.path?.message && {errors.path?.message}} - - - -
-
- ); -}; - -export default memo(LoRAModelEdit); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/ModelConvert.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/ModelConvert.tsx deleted file mode 100644 index 6e34d5039e..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/ModelConvert.tsx +++ /dev/null @@ -1,165 +0,0 @@ -import { - Button, - ConfirmationAlertDialog, - Flex, - FormControl, - FormLabel, - Input, - ListItem, - Radio, - RadioGroup, - Text, - Tooltip, - UnorderedList, - useDisclosure, -} from '@invoke-ai/ui-library'; -import { useAppDispatch } from 'app/store/storeHooks'; -import { addToast } from 'features/system/store/systemSlice'; -import { makeToast } from 'features/system/util/makeToast'; -import type { ChangeEvent } from 'react'; -import { memo, useCallback, useEffect, useState } from 'react'; -import { useTranslation } from 'react-i18next'; -import { useConvertMainModelsMutation } from 'services/api/endpoints/models'; -import type { CheckpointModelConfig } from 'services/api/types'; - -interface ModelConvertProps { - model: CheckpointModelConfig; -} - -type SaveLocation = 'InvokeAIRoot' | 'Custom'; - -const ModelConvert = (props: ModelConvertProps) => { - const { model } = props; - const dispatch = useAppDispatch(); - const { t } = useTranslation(); - const [convertModel, { isLoading }] = useConvertMainModelsMutation(); - const { isOpen, onOpen, onClose } = useDisclosure(); - const [saveLocation, setSaveLocation] = useState('InvokeAIRoot'); - const [customSaveLocation, setCustomSaveLocation] = useState(''); - - useEffect(() => { - setSaveLocation('InvokeAIRoot'); - }, [model]); - - const modelConvertCancelHandler = useCallback(() => { - setSaveLocation('InvokeAIRoot'); - }, []); - - const handleChangeSaveLocation = useCallback((v: string) => { - setSaveLocation(v as SaveLocation); - }, []); - const handleChangeCustomSaveLocation = useCallback((e: ChangeEvent) => { - setCustomSaveLocation(e.target.value); - }, []); - - const modelConvertHandler = useCallback(() => { - const queryArg = { - base_model: model.base_model, - model_name: model.model_name, - convert_dest_directory: saveLocation === 'Custom' ? 
customSaveLocation : undefined, - }; - - if (saveLocation === 'Custom' && customSaveLocation === '') { - dispatch( - addToast( - makeToast({ - title: t('modelManager.noCustomLocationProvided'), - status: 'error', - }) - ) - ); - return; - } - - dispatch( - addToast( - makeToast({ - title: `${t('modelManager.convertingModelBegin')}: ${model.model_name}`, - status: 'info', - }) - ) - ); - - convertModel(queryArg) - .unwrap() - .then(() => { - dispatch( - addToast( - makeToast({ - title: `${t('modelManager.modelConverted')}: ${model.model_name}`, - status: 'success', - }) - ) - ); - }) - .catch(() => { - dispatch( - addToast( - makeToast({ - title: `${t('modelManager.modelConversionFailed')}: ${model.model_name}`, - status: 'error', - }) - ) - ); - }); - }, [convertModel, customSaveLocation, dispatch, model.base_model, model.model_name, saveLocation, t]); - - return ( - <> - - - - {t('modelManager.convertToDiffusersHelpText1')} - - {t('modelManager.convertToDiffusersHelpText2')} - {t('modelManager.convertToDiffusersHelpText3')} - {t('modelManager.convertToDiffusersHelpText4')} - {t('modelManager.convertToDiffusersHelpText5')} - - {t('modelManager.convertToDiffusersHelpText6')} - - - - - {t('modelManager.convertToDiffusersSaveLocation')} - - - - - {t('modelManager.invokeRoot')} - - - - {t('modelManager.custom')} - - - - - {saveLocation === 'Custom' && ( - - {t('modelManager.customSaveLocation')} - - - )} - - - - ); -}; - -export default memo(ModelConvert); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/ModelList.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/ModelList.tsx deleted file mode 100644 index 94db3d20c3..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerPanel/ModelList.tsx +++ /dev/null @@ -1,204 +0,0 @@ -import { Button, ButtonGroup, Flex, FormControl, FormLabel, Input, Spinner, Text } from '@invoke-ai/ui-library'; -import type { EntityState } from '@reduxjs/toolkit'; -import { forEach } from 'lodash-es'; -import type { ChangeEvent, PropsWithChildren } from 'react'; -import { memo, useCallback, useState } from 'react'; -import { useTranslation } from 'react-i18next'; -import { ALL_BASE_MODELS } from 'services/api/constants'; -import type { LoRAModelConfigEntity, MainModelConfigEntity } from 'services/api/endpoints/models'; -import { useGetLoRAModelsQuery, useGetMainModelsQuery } from 'services/api/endpoints/models'; - -import ModelListItem from './ModelListItem'; - -type ModelListProps = { - selectedModelId: string | undefined; - setSelectedModelId: (name: string | undefined) => void; -}; - -type ModelFormat = 'all' | 'checkpoint' | 'diffusers'; - -type ModelType = 'main' | 'lora'; - -type CombinedModelFormat = ModelFormat | 'lora'; - -const ModelList = (props: ModelListProps) => { - const { selectedModelId, setSelectedModelId } = props; - const { t } = useTranslation(); - const [nameFilter, setNameFilter] = useState(''); - const [modelFormatFilter, setModelFormatFilter] = useState('all'); - - const { filteredDiffusersModels, isLoadingDiffusersModels } = useGetMainModelsQuery(ALL_BASE_MODELS, { - selectFromResult: ({ data, isLoading }) => ({ - filteredDiffusersModels: modelsFilter(data, 'main', 'diffusers', nameFilter), - isLoadingDiffusersModels: isLoading, - }), - }); - - const { filteredCheckpointModels, isLoadingCheckpointModels } = useGetMainModelsQuery(ALL_BASE_MODELS, { - selectFromResult: ({ data, isLoading }) => ({ - filteredCheckpointModels: modelsFilter(data, 
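The convert handler above guards the custom destination before firing the mutation: choosing 'Custom' with an empty path short-circuits with an error toast, and 'InvokeAIRoot' sends convert_dest_directory as undefined. The guard in isolation (returning null where the component toasts):

```ts
type SaveLocation = 'InvokeAIRoot' | 'Custom';

type ConvertArg = { model_name: string; convert_dest_directory?: string };

// Returns the mutation argument, or null when no custom location was provided.
const buildConvertArg = (modelName: string, saveLocation: SaveLocation, customDir: string): ConvertArg | null => {
  if (saveLocation === 'Custom' && customDir === '') {
    return null; // invalid: toast an error and skip the API call
  }
  return {
    model_name: modelName,
    convert_dest_directory: saveLocation === 'Custom' ? customDir : undefined,
  };
};

buildConvertArg('sd15', 'Custom', ''); // null
buildConvertArg('sd15', 'InvokeAIRoot', ''); // { model_name: 'sd15', convert_dest_directory: undefined }
```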
'main', 'checkpoint', nameFilter), - isLoadingCheckpointModels: isLoading, - }), - }); - - const { filteredLoraModels, isLoadingLoraModels } = useGetLoRAModelsQuery(undefined, { - selectFromResult: ({ data, isLoading }) => ({ - filteredLoraModels: modelsFilter(data, 'lora', undefined, nameFilter), - isLoadingLoraModels: isLoading, - }), - }); - - const handleSearchFilter = useCallback((e: ChangeEvent) => { - setNameFilter(e.target.value); - }, []); - - return ( - - - - - - - - - - - {t('modelManager.search')} - - - - - {/* Diffusers List */} - {isLoadingDiffusersModels && } - {['all', 'diffusers'].includes(modelFormatFilter) && - !isLoadingDiffusersModels && - filteredDiffusersModels.length > 0 && ( - - )} - {/* Checkpoints List */} - {isLoadingCheckpointModels && } - {['all', 'checkpoint'].includes(modelFormatFilter) && - !isLoadingCheckpointModels && - filteredCheckpointModels.length > 0 && ( - - )} - - {/* LoRAs List */} - {isLoadingLoraModels && } - {['all', 'lora'].includes(modelFormatFilter) && !isLoadingLoraModels && filteredLoraModels.length > 0 && ( - - )} - - - - ); -}; - -export default memo(ModelList); - -const modelsFilter = ( - data: EntityState | undefined, - model_type: ModelType, - model_format: ModelFormat | undefined, - nameFilter: string -) => { - const filteredModels: T[] = []; - forEach(data?.entities, (model) => { - if (!model) { - return; - } - - const matchesFilter = model.model_name.toLowerCase().includes(nameFilter.toLowerCase()); - - const matchesFormat = model_format === undefined || model.model_format === model_format; - const matchesType = model.model_type === model_type; - - if (matchesFilter && matchesFormat && matchesType) { - filteredModels.push(model); - } - }); - return filteredModels; -}; - -const StyledModelContainer = memo((props: PropsWithChildren) => { - return ( - - {props.children} - - ); -}); - -StyledModelContainer.displayName = 'StyledModelContainer'; - -type ModelListWrapperProps = { - title: string; - modelList: MainModelConfigEntity[] | LoRAModelConfigEntity[]; - selected: ModelListProps; -}; - -const ModelListWrapper = memo((props: ModelListWrapperProps) => { - const { title, modelList, selected } = props; - return ( - - - - {title} - - {modelList.map((model) => ( - - ))} - - - ); -}); - -ModelListWrapper.displayName = 'ModelListWrapper'; - -const FetchingModelsLoader = memo(({ loadingMessage }: { loadingMessage?: string }) => { - return ( - - - - {loadingMessage ? 
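modelsFilter above walks an RTK entity map and applies three independent predicates: a case-insensitive name match, an optional format match (the LoRA list passes undefined), and an exact type match. The same logic over a plain array, which is easier to unit test:

```ts
type ModelEntity = { model_name: string; model_type: 'main' | 'lora'; model_format: string };

const filterModels = <T extends ModelEntity>(
  models: T[],
  modelType: ModelEntity['model_type'],
  modelFormat: string | undefined, // undefined = accept any format
  nameFilter: string
): T[] =>
  models.filter(
    (model) =>
      model.model_name.toLowerCase().includes(nameFilter.toLowerCase()) &&
      (modelFormat === undefined || model.model_format === modelFormat) &&
      model.model_type === modelType
  );

// filterModels(all, 'main', 'diffusers', 'XL') keeps diffusers main models whose
// name contains 'xl' in any casing.
```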
loadingMessage : 'Fetching...'} - - - ); -}); - -FetchingModelsLoader.displayName = 'FetchingModelsLoader'; diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerSettingsPanel.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerSettingsPanel.tsx deleted file mode 100644 index 50118d9b97..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerSettingsPanel.tsx +++ /dev/null @@ -1,14 +0,0 @@ -import { Flex } from '@invoke-ai/ui-library'; -import { memo } from 'react'; - -import SyncModels from './ModelManagerSettingsPanel/SyncModels'; - -const ModelManagerSettingsPanel = () => { - return ( - - - - ); -}; - -export default memo(ModelManagerSettingsPanel); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerSettingsPanel/SyncModels.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerSettingsPanel/SyncModels.tsx deleted file mode 100644 index a4af3e1517..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/ModelManagerSettingsPanel/SyncModels.tsx +++ /dev/null @@ -1,22 +0,0 @@ -import { Flex, Text } from '@invoke-ai/ui-library'; -import { SyncModelsButton } from 'features/modelManager/components/SyncModels/SyncModelsButton'; -import { memo } from 'react'; -import { useTranslation } from 'react-i18next'; - -const SyncModels = () => { - const { t } = useTranslation(); - - return ( - - - {t('modelManager.syncModels')} - - {t('modelManager.syncModelsDesc')} - - - - - ); -}; - -export default memo(SyncModels); diff --git a/invokeai/frontend/web/src/features/modelManager/subpanels/shared/CheckpointConfigsSelect.tsx b/invokeai/frontend/web/src/features/modelManager/subpanels/shared/CheckpointConfigsSelect.tsx deleted file mode 100644 index 569f1abbba..0000000000 --- a/invokeai/frontend/web/src/features/modelManager/subpanels/shared/CheckpointConfigsSelect.tsx +++ /dev/null @@ -1,32 +0,0 @@ -import type { ChakraProps, ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library'; -import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library'; -import { memo, useCallback, useMemo } from 'react'; -import { useController, type UseControllerProps } from 'react-hook-form'; -import { useTranslation } from 'react-i18next'; -import { useGetCheckpointConfigsQuery } from 'services/api/endpoints/models'; -import type { CheckpointModelConfig } from 'services/api/types'; - -const sx: ChakraProps['sx'] = { w: 'full' }; - -const CheckpointConfigsSelect = (props: UseControllerProps) => { - const { data } = useGetCheckpointConfigsQuery(); - const { t } = useTranslation(); - const options = useMemo(() => (data ? 
data.map((i) => ({ label: i, value: i })) : []), [data]); - const { field } = useController(props); - const value = useMemo(() => options.find((o) => o.value === field.value), [field.value, options]); - const onChange = useCallback( - (v) => { - field.onChange(v?.value); - }, - [field] - ); - - return ( - - {t('modelManager.configFile')} - - - ); -}; - -export default memo(CheckpointConfigsSelect); diff --git a/invokeai/frontend/web/src/features/modelManager/components/SyncModels/SyncModelsIconButton.tsx b/invokeai/frontend/web/src/features/modelManagerV2/components/SyncModels/SyncModelsIconButton.tsx similarity index 100% rename from invokeai/frontend/web/src/features/modelManager/components/SyncModels/SyncModelsIconButton.tsx rename to invokeai/frontend/web/src/features/modelManagerV2/components/SyncModels/SyncModelsIconButton.tsx diff --git a/invokeai/frontend/web/src/features/modelManager/components/SyncModels/useSyncModels.ts b/invokeai/frontend/web/src/features/modelManagerV2/components/SyncModels/useSyncModels.ts similarity index 100% rename from invokeai/frontend/web/src/features/modelManager/components/SyncModels/useSyncModels.ts rename to invokeai/frontend/web/src/features/modelManagerV2/components/SyncModels/useSyncModels.ts diff --git a/invokeai/frontend/web/src/features/modelManagerV2/store/modelManagerV2Slice.ts b/invokeai/frontend/web/src/features/modelManagerV2/store/modelManagerV2Slice.ts new file mode 100644 index 0000000000..46f8979c8e --- /dev/null +++ b/invokeai/frontend/web/src/features/modelManagerV2/store/modelManagerV2Slice.ts @@ -0,0 +1,58 @@ +import type { PayloadAction } from '@reduxjs/toolkit'; +import { createSlice } from '@reduxjs/toolkit'; +import type { PersistConfig } from 'app/store/store'; + +type ModelManagerState = { + _version: 1; + selectedModelKey: string | null; + selectedModelMode: 'edit' | 'view'; + searchTerm: string; + filteredModelType: string | null; +}; + +const initialModelManagerState: ModelManagerState = { + _version: 1, + selectedModelKey: null, + selectedModelMode: 'view', + filteredModelType: null, + searchTerm: '', +}; + +export const modelManagerV2Slice = createSlice({ + name: 'modelmanagerV2', + initialState: initialModelManagerState, + reducers: { + setSelectedModelKey: (state, action: PayloadAction) => { + state.selectedModelMode = 'view'; + state.selectedModelKey = action.payload; + }, + setSelectedModelMode: (state, action: PayloadAction<'view' | 'edit'>) => { + state.selectedModelMode = action.payload; + }, + setSearchTerm: (state, action: PayloadAction) => { + state.searchTerm = action.payload; + }, + + setFilteredModelType: (state, action: PayloadAction) => { + state.filteredModelType = action.payload; + }, + }, +}); + +export const { setSelectedModelKey, setSearchTerm, setFilteredModelType, setSelectedModelMode } = + modelManagerV2Slice.actions; + +/* eslint-disable-next-line @typescript-eslint/no-explicit-any */ +const migrateModelManagerState = (state: any): any => { + if (!('_version' in state)) { + state._version = 1; + } + return state; +}; + +export const modelManagerV2PersistConfig: PersistConfig = { + name: modelManagerV2Slice.name, + initialState: initialModelManagerState, + migrate: migrateModelManagerState, + persistDenylist: [], +}; diff --git a/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/AdvancedImport.tsx b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/AdvancedImport.tsx new file mode 100644 index 0000000000..56abf7b09a --- /dev/null +++ 
b/invokeai/frontend/web/src/features/modelManagerV2/subpanels/AddModelPanel/AdvancedImport.tsx @@ -0,0 +1,228 @@ +import { Button, Flex, FormControl, FormErrorMessage, FormLabel, Input, Text, Textarea } from '@invoke-ai/ui-library'; +import { useAppDispatch } from 'app/store/storeHooks'; +import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent'; +import BaseModelSelect from 'features/modelManagerV2/subpanels/ModelPanel/Fields/BaseModelSelect'; +import BooleanSelect from 'features/modelManagerV2/subpanels/ModelPanel/Fields/BooleanSelect'; +import ModelFormatSelect from 'features/modelManagerV2/subpanels/ModelPanel/Fields/ModelFormatSelect'; +import ModelTypeSelect from 'features/modelManagerV2/subpanels/ModelPanel/Fields/ModelTypeSelect'; +import ModelVariantSelect from 'features/modelManagerV2/subpanels/ModelPanel/Fields/ModelVariantSelect'; +import PredictionTypeSelect from 'features/modelManagerV2/subpanels/ModelPanel/Fields/PredictionTypeSelect'; +import RepoVariantSelect from 'features/modelManagerV2/subpanels/ModelPanel/Fields/RepoVariantSelect'; +import { addToast } from 'features/system/store/systemSlice'; +import { makeToast } from 'features/system/util/makeToast'; +import { isNil, omitBy } from 'lodash-es'; +import { useCallback, useEffect } from 'react'; +import type { SubmitHandler } from 'react-hook-form'; +import { useForm } from 'react-hook-form'; +import { useTranslation } from 'react-i18next'; +import { useInstallModelMutation } from 'services/api/endpoints/models'; +import type { AnyModelConfig } from 'services/api/types'; + +export const AdvancedImport = () => { + const dispatch = useAppDispatch(); + + const [installModel] = useInstallModelMutation(); + + const { t } = useTranslation(); + + const { + register, + handleSubmit, + control, + formState: { errors }, + setValue, + resetField, + reset, + watch, + } = useForm({ + defaultValues: { + name: '', + base: 'sd-1', + type: 'main', + path: '', + description: '', + format: 'diffusers', + vae: '', + variant: 'normal', + }, + mode: 'onChange', + }); + + const onSubmit = useCallback>( + (values) => { + installModel({ + source: values.path, + config: omitBy(values, isNil), + }) + .unwrap() + .then((_) => { + dispatch( + addToast( + makeToast({ + title: t('modelManager.modelAdded', { + modelName: values.name, + }), + status: 'success', + }) + ) + ); + reset(); + }) + .catch((error) => { + if (error) { + dispatch( + addToast( + makeToast({ + title: t('toast.modelAddFailed'), + status: 'error', + }) + ) + ); + } + }); + }, + [installModel, dispatch, t, reset] + ); + + const watchedModelType = watch('type'); + const watchedModelFormat = watch('format'); + + useEffect(() => { + if (watchedModelType === 'main') { + setValue('format', 'diffusers'); + setValue('repo_variant', ''); + setValue('variant', 'normal'); + } + if (watchedModelType === 'lora') { + setValue('format', 'lycoris'); + } else if (watchedModelType === 'embedding') { + setValue('format', 'embedding_file'); + } else if (watchedModelType === 'ip_adapter') { + setValue('format', 'invokeai'); + } else { + setValue('format', 'diffusers'); + } + resetField('upcast_attention'); + resetField('ztsnr_training'); + resetField('vae'); + resetField('config'); + resetField('prediction_type'); + resetField('image_encoder_model_id'); + }, [watchedModelType, resetField, setValue]); + + return ( + +
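The new modelManagerV2Slice added above follows the app's persistence convention: the state carries a _version stamp and the persist config ships a migrate function that upgrades older persisted shapes in place. A minimal illustration of how that migration behaves on its own (the PersistConfig wiring is the app's; this only exercises the function):

```ts
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
const migrate = (state: any): any => {
  // Pre-versioned persisted state gets stamped as version 1; everything else
  // passes through untouched.
  if (!('_version' in state)) {
    state._version = 1;
  }
  return state;
};

migrate({ selectedModelKey: 'abc' }); // -> { selectedModelKey: 'abc', _version: 1 }
migrate({ _version: 1, searchTerm: 'sd' }); // unchanged
```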
+ + + + {t('modelManager.modelType')} + control={control} name="type" /> + + + {t('modelManager.advancedImportInfo')} + + + + + + + {t('modelManager.name')} + value.trim().length >= 3 || 'Must be at least 3 characters', + })} + /> + {errors.name?.message && {errors.name?.message}} + + + + + + {t('modelManager.description')} +
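The useEffect in AdvancedImport above re-seeds the format field whenever the model type changes: 'lora' gets 'lycoris', 'embedding' gets 'embedding_file', 'ip_adapter' gets 'invokeai', and everything else (including 'main') falls back to 'diffusers', with the type-specific optional fields reset alongside. The decision table as a pure function:

```ts
type ModelType = 'main' | 'lora' | 'embedding' | 'ip_adapter';

// Default model format per model type, mirroring the branches in the effect.
const defaultFormatFor = (type: ModelType): string => {
  switch (type) {
    case 'lora':
      return 'lycoris';
    case 'embedding':
      return 'embedding_file';
    case 'ip_adapter':
      return 'invokeai';
    default:
      return 'diffusers'; // 'main' and anything unrecognized
  }
};

defaultFormatFor('lora'); // 'lycoris'
defaultFormatFor('main'); // 'diffusers'
```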