diff --git a/.github/workflows/test-invoke-conda.yml b/.github/workflows/test-invoke-conda.yml deleted file mode 100644 index fd6caf47fe..0000000000 --- a/.github/workflows/test-invoke-conda.yml +++ /dev/null @@ -1,161 +0,0 @@ -name: Test invoke.py -on: - push: - branches: - - 'main' - pull_request: - branches: - - 'main' - types: - - 'ready_for_review' - - 'opened' - - 'synchronize' - - 'converted_to_draft' - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - fail_if_pull_request_is_draft: - if: github.event.pull_request.draft == true - runs-on: ubuntu-22.04 - steps: - - name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass. - run: exit 1 - - matrix: - if: github.event.pull_request.draft == false - strategy: - matrix: - stable-diffusion-model: - - 'stable-diffusion-1.5' - environment-yaml: - - environment-lin-amd.yml - - environment-lin-cuda.yml - - environment-mac.yml - - environment-win-cuda.yml - include: - - environment-yaml: environment-lin-amd.yml - os: ubuntu-22.04 - curl-command: curl - github-env: $GITHUB_ENV - default-shell: bash -l {0} - - environment-yaml: environment-lin-cuda.yml - os: ubuntu-22.04 - curl-command: curl - github-env: $GITHUB_ENV - default-shell: bash -l {0} - - environment-yaml: environment-mac.yml - os: macos-12 - curl-command: curl - github-env: $GITHUB_ENV - default-shell: bash -l {0} - - environment-yaml: environment-win-cuda.yml - os: windows-2022 - curl-command: curl.exe - github-env: $env:GITHUB_ENV - default-shell: pwsh - - stable-diffusion-model: stable-diffusion-1.5 - stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt - stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1 - stable-diffusion-model-dl-name: v1-5-pruned-emaonly.ckpt - name: ${{ matrix.environment-yaml }} on ${{ matrix.os }} - runs-on: ${{ matrix.os }} - env: - CONDA_ENV_NAME: invokeai - INVOKEAI_ROOT: '${{ github.workspace }}/invokeai' - defaults: - run: - shell: ${{ matrix.default-shell }} - steps: - - name: Checkout sources - id: checkout-sources - uses: actions/checkout@v3 - - - name: create models.yaml from example - run: | - mkdir -p ${{ env.INVOKEAI_ROOT }}/configs - cp configs/models.yaml.example ${{ env.INVOKEAI_ROOT }}/configs/models.yaml - - - name: create environment.yml - run: cp "environments-and-requirements/${{ matrix.environment-yaml }}" environment.yml - - - name: Use cached conda packages - id: use-cached-conda-packages - uses: actions/cache@v3 - with: - path: ~/conda_pkgs_dir - key: conda-pkgs-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles(matrix.environment-yaml) }} - - - name: Activate Conda Env - id: activate-conda-env - uses: conda-incubator/setup-miniconda@v2 - with: - activate-environment: ${{ env.CONDA_ENV_NAME }} - environment-file: environment.yml - miniconda-version: latest - - - name: set test prompt to main branch validation - if: ${{ github.ref == 'refs/heads/main' }} - run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }} - - - name: set test prompt to development branch validation - if: ${{ github.ref == 'refs/heads/development' }} - run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }} - - - name: set test prompt to Pull Request validation - if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }} - run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" 
>> ${{ matrix.github-env }} - - - name: Use Cached Stable Diffusion Model - id: cache-sd-model - uses: actions/cache@v3 - env: - cache-name: cache-${{ matrix.stable-diffusion-model }} - with: - path: ${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }} - key: ${{ env.cache-name }} - - - name: Download ${{ matrix.stable-diffusion-model }} - id: download-stable-diffusion-model - if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }} - run: | - mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}" - ${{ matrix.curl-command }} -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" -L ${{ matrix.stable-diffusion-model-url }} - - - name: run configure_invokeai.py - id: run-preload-models - run: | - python scripts/configure_invokeai.py --skip-sd-weights --yes - - - name: cat invokeai.init - id: cat-invokeai - run: cat ${{ env.INVOKEAI_ROOT }}/invokeai.init - - - name: Run the tests - id: run-tests - if: matrix.os != 'windows-2022' - run: | - time python scripts/invoke.py \ - --no-patchmatch \ - --no-nsfw_checker \ - --model ${{ matrix.stable-diffusion-model }} \ - --from_file ${{ env.TEST_PROMPTS }} \ - --root="${{ env.INVOKEAI_ROOT }}" \ - --outdir="${{ env.INVOKEAI_ROOT }}/outputs" - - - name: export conda env - id: export-conda-env - if: matrix.os != 'windows-2022' - run: | - mkdir -p outputs/img-samples - conda env export --name ${{ env.CONDA_ENV_NAME }} > ${{ env.INVOKEAI_ROOT }}/outputs/environment-${{ runner.os }}-${{ runner.arch }}.yml - - - name: Archive results - if: matrix.os != 'windows-2022' - id: archive-results - uses: actions/upload-artifact@v3 - with: - name: results_${{ matrix.requirements-file }}_${{ matrix.python-version }} - path: ${{ env.INVOKEAI_ROOT }}/outputs diff --git a/.github/workflows/test-invoke-pip.yml b/.github/workflows/test-invoke-pip.yml index b12a7ca879..34c90f10c5 100644 --- a/.github/workflows/test-invoke-pip.yml +++ b/.github/workflows/test-invoke-pip.yml @@ -4,8 +4,6 @@ on: branches: - 'main' pull_request: - branches: - - 'main' types: - 'ready_for_review' - 'opened' @@ -17,14 +15,14 @@ concurrency: cancel-in-progress: true jobs: - fail_if_pull_request_is_draft: - if: github.event.pull_request.draft == true - runs-on: ubuntu-18.04 - steps: - - name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass. - run: exit 1 + # fail_if_pull_request_is_draft: + # if: github.event.pull_request.draft == true && github.head_ref != 'dev/diffusers' + # runs-on: ubuntu-18.04 + # steps: + # - name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass. 
+ # run: exit 1 matrix: - if: github.event.pull_request.draft == false + if: github.event.pull_request.draft == false || github.head_ref == 'dev/diffusers' strategy: matrix: stable-diffusion-model: @@ -40,26 +38,23 @@ jobs: include: - requirements-file: requirements-lin-cuda.txt os: ubuntu-22.04 - curl-command: curl github-env: $GITHUB_ENV - requirements-file: requirements-lin-amd.txt os: ubuntu-22.04 - curl-command: curl github-env: $GITHUB_ENV - requirements-file: requirements-mac-mps-cpu.txt os: macOS-12 - curl-command: curl github-env: $GITHUB_ENV - requirements-file: requirements-win-colab-cuda.txt os: windows-2022 - curl-command: curl.exe github-env: $env:GITHUB_ENV - - stable-diffusion-model: stable-diffusion-1.5 - stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt - stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1 - stable-diffusion-model-dl-name: v1-5-pruned-emaonly.ckpt name: ${{ matrix.requirements-file }} on ${{ matrix.python-version }} runs-on: ${{ matrix.os }} + env: + INVOKE_MODEL_RECONFIGURE: '--yes' + INVOKEAI_ROOT: '${{ github.workspace }}/invokeai' + PYTHONUNBUFFERED: 1 + HAVE_SECRETS: ${{ secrets.HUGGINGFACE_TOKEN != '' }} steps: - name: Checkout sources id: checkout-sources @@ -77,10 +72,17 @@ jobs: echo "INVOKEAI_ROOT=${{ github.workspace }}/invokeai" >> ${{ matrix.github-env }} echo "INVOKEAI_OUTDIR=${{ github.workspace }}/invokeai/outputs" >> ${{ matrix.github-env }} - - name: create models.yaml from example - run: | - mkdir -p ${{ env.INVOKEAI_ROOT }}/configs - cp configs/models.yaml.example ${{ env.INVOKEAI_ROOT }}/configs/models.yaml + - name: Use Cached diffusers-1.5 + id: cache-sd-model + uses: actions/cache@v3 + env: + cache-name: huggingface-${{ matrix.stable-diffusion-model }} + with: + path: | + ${{ env.INVOKEAI_ROOT }}/models/runwayml + ${{ env.INVOKEAI_ROOT }}/models/stabilityai + ${{ env.INVOKEAI_ROOT }}/models/CompVis + key: ${{ env.cache-name }} - name: set test prompt to main branch validation if: ${{ github.ref == 'refs/heads/main' }} @@ -110,30 +112,31 @@ jobs: - name: install requirements run: pip3 install -r '${{ matrix.requirements-file }}' - - name: Use Cached Stable Diffusion Model - id: cache-sd-model - uses: actions/cache@v3 - env: - cache-name: cache-${{ matrix.stable-diffusion-model }} - with: - path: ${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }} - key: ${{ env.cache-name }} - - - name: Download ${{ matrix.stable-diffusion-model }} - id: download-stable-diffusion-model - if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }} - run: | - mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}" - ${{ matrix.curl-command }} -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" -L ${{ matrix.stable-diffusion-model-url }} - - name: run configure_invokeai.py id: run-preload-models - run: python3 scripts/configure_invokeai.py --skip-sd-weights --yes + env: + HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGINGFACE_TOKEN }} + run: > + configure_invokeai.py + --yes + --full-precision # can't use fp16 weights without a GPU - name: Run the tests id: run-tests if: matrix.os != 'windows-2022' - run: python3 scripts/invoke.py --no-patchmatch --no-nsfw_checker --model ${{ matrix.stable-diffusion-model }} --from_file ${{ env.TEST_PROMPTS }} --root="${{ env.INVOKEAI_ROOT }}" --outdir="${{ env.INVOKEAI_OUTDIR 
}}" + env: + # Set offline mode to make sure configure preloaded successfully. + HF_HUB_OFFLINE: 1 + HF_DATASETS_OFFLINE: 1 + TRANSFORMERS_OFFLINE: 1 + run: > + python3 scripts/invoke.py + --no-patchmatch + --no-nsfw_checker + --model ${{ matrix.stable-diffusion-model }} + --from_file ${{ env.TEST_PROMPTS }} + --root="${{ env.INVOKEAI_ROOT }}" + --outdir="${{ env.INVOKEAI_OUTDIR }}" - name: Archive results id: archive-results diff --git a/README.md b/README.md index d01b89023f..07501a4242 100644 --- a/README.md +++ b/README.md @@ -8,12 +8,10 @@ [![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link] -[![CI checks on main badge]][CI checks on main link] [![CI checks on dev badge]][CI checks on dev link] [![latest commit to dev badge]][latest commit to dev link] +[![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link] [![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] -[CI checks on dev badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github -[CI checks on dev link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment [CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github [CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml [discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord @@ -26,19 +24,13 @@ [github open prs link]: https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen [github stars badge]: https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github [github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers -[latest commit to dev badge]: https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900 -[latest commit to dev link]: https://github.com/invoke-ai/InvokeAI/commits/development +[latest commit to main badge]: https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/main?icon=github&color=yellow&label=last%20dev%20commit&cache=900 +[latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main [latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github [latest release link]: https://github.com/invoke-ai/InvokeAI/releases -This is a fork of -[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion), -the open source text-to-image generator. It provides a streamlined -process with various new features and options to aid the image -generation process. It runs on Windows, macOS and Linux machines, with -GPU cards with as little as 4 GB of RAM. It provides both a polished -Web interface (see below), and an easy-to-use command-line interface. +InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products. 
**Quick links**: [[How to Install](#installation)] [Discord Server] [Documentation and Tutorials] [Code and Downloads] [Bug Reports] [Discussion, Ideas & Q&A] @@ -46,6 +38,9 @@ _Note: InvokeAI is rapidly evolving. Please use the [Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature requests. Be sure to use the provided templates. They will help us diagnose issues faster._ + +![canvas preview](docs/assets/canvas_preview.png) + # Getting Started with InvokeAI For full installation and upgrade instructions, please see: @@ -58,10 +53,7 @@ For full installation and upgrade instructions, please see: 5. Wait a while, until it is done. 6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh` 7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090. -8. Type `banana sushi` in the box on the top left and click `Invoke`: - -
- +8. Type `banana sushi` in the box on the top left and click `Invoke` ## Table of Contents @@ -76,7 +68,7 @@ For full installation and upgrade instructions, please see: 8. [Support](#support) 9. [Further Reading](#further-reading) -### Installation +## Installation This fork is supported across Linux, Windows and Macintosh. Linux users can use either an Nvidia-based card (with CUDA support) or an @@ -108,52 +100,42 @@ to render 512x512 images. - At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies. -**Note** +## Features -If you have a Nvidia 10xx series card (e.g. the 1080ti), please -run the dream script in full-precision mode as shown below. +Feature documentation can be reviewed by navigating to [the InvokeAI Documentation page](https://invoke-ai.github.io/InvokeAI/features/) -Similarly, specify full-precision mode on Apple M1 hardware. +### *Web Server & UI* +InvokeAI offers a locally hosted Web Server & React Frontend, with an industry leading user experience. The Web-based UI allows for simple and intuitive workflows, and is responsive for use on mobile devices and tablets accessing the web server. -Precision is auto configured based on the device. If however you encounter -errors like 'expected type Float but found Half' or 'not implemented for Half' -you can try starting `invoke.py` with the `--precision=float32` flag to your initialization command +### *Unified Canvas* +The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/outpainting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more. -```bash -(invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32 -``` -Or by updating your InvokeAI configuration file with this argument. +### *Advanced Prompt Syntax* +InvokeAI's advanced prompt syntax allows for token weighting, cross-attention control, and prompt blending, allowing for fine-tuned tweaking of your invocations and exploration of the latent space. -### Features +### *Command Line Interface* +For users utilizing a terminal-based environment, or who want to take advantage of CLI features, InvokeAI offers an extensive and actively supported command-line interface that provides the full suite of generation functionality available in the tool. 
-#### Major Features +### Other features +- *Support for both ckpt and diffusers models* +- *SD 2.0, 2.1 support* +- *Noise Control & Thresholding* +- *Popular Sampler Support* +- *Upscaling & Face Restoration Tools* +- *Embedding Manager & Support* +- *Model Manager & Support* -- [Web Server](https://invoke-ai.github.io/InvokeAI/features/WEB/) -- [Interactive Command Line Interface](https://invoke-ai.github.io/InvokeAI/features/CLI/) -- [Image To Image](https://invoke-ai.github.io/InvokeAI/features/IMG2IMG/) -- [Inpainting Support](https://invoke-ai.github.io/InvokeAI/features/INPAINTING/) -- [Outpainting Support](https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/) -- [Upscaling, face-restoration and outpainting](https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/) -- [Reading Prompts From File](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#reading-prompts-from-a-file) -- [Prompt Blending](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#prompt-blending) -- [Thresholding and Perlin Noise Initialization Options](https://invoke-ai.github.io/InvokeAI/features/OTHER/#thresholding-and-perlin-noise-initialization-options) -- [Negative/Unconditioned Prompts](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts) -- [Variations](https://invoke-ai.github.io/InvokeAI/features/VARIATIONS/) -- [Personalizing Text-to-Image Generation](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/) -- [Simplified API for text to image generation](https://invoke-ai.github.io/InvokeAI/features/OTHER/#simplified-api) - -#### Other Features - -- [Google Colab](https://invoke-ai.github.io/InvokeAI/features/OTHER/#google-colab) -- [Seamless Tiling](https://invoke-ai.github.io/InvokeAI/features/OTHER/#seamless-tiling) -- [Shortcut: Reusing Seeds](https://invoke-ai.github.io/InvokeAI/features/OTHER/#shortcuts-reusing-seeds) -- [Preload Models](https://invoke-ai.github.io/InvokeAI/features/OTHER/#preload-models) +### Coming Soon +- *Node-Based Architecture & UI* +- And more... ### Latest Changes -For our latest changes, view our [Release Notes](https://github.com/invoke-ai/InvokeAI/releases) +For our latest changes, view our [Release +Notes](https://github.com/invoke-ai/InvokeAI/releases) and the +[CHANGELOG](docs/CHANGELOG.md). -### Troubleshooting +## Troubleshooting Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation problems and other issues. @@ -183,13 +165,7 @@ their time, hard work and effort. ### Support -For support, please use this repository's GitHub Issues tracking service. Feel free to send me an -email if you use and like the script. +For support, please use this repository's GitHub Issues tracking service, or join the Discord. -Original portions of the software are Copyright (c) 2022 -[Lincoln D. Stein](https://github.com/lstein) +Original portions of the software are Copyright (c) 2023 by respective contributors. -### Further Reading - -Please see the original README for more information on this software and underlying algorithm, -located in the file [README-CompViz.md](https://invoke-ai.github.io/InvokeAI/other/README-CompViz/). 
diff --git a/backend/invoke_ai_web_server.py b/backend/invoke_ai_web_server.py index 02e8504589..1720c44f28 100644 --- a/backend/invoke_ai_web_server.py +++ b/backend/invoke_ai_web_server.py @@ -1,35 +1,34 @@ -import eventlet +import base64 import glob +import io +import json +import math +import mimetypes import os import shutil -import mimetypes import traceback -import math -import io -import base64 -import os -import json +from threading import Event +from uuid import uuid4 -from werkzeug.utils import secure_filename +import eventlet +from PIL import Image +from PIL.Image import Image as ImageType from flask import Flask, redirect, send_from_directory, request, make_response from flask_socketio import SocketIO -from PIL import Image, ImageOps -from PIL.Image import Image as ImageType -from uuid import uuid4 -from threading import Event +from werkzeug.utils import secure_filename -from ldm.generate import Generate -from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash -from ldm.invoke.conditioning import get_tokens_for_prompt, get_prompt_structure -from ldm.invoke.globals import Globals -from ldm.invoke.pngwriter import PngWriter, retrieve_metadata -from ldm.invoke.prompt_parser import split_weighted_subprompts, Blend -from ldm.invoke.generator.inpaint import infill_methods - -from backend.modules.parameters import parameters_to_command from backend.modules.get_canvas_generation_mode import ( get_canvas_generation_mode, ) +from backend.modules.parameters import parameters_to_command +from ldm.generate import Generate +from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash +from ldm.invoke.conditioning import get_tokens_for_prompt, get_prompt_structure +from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState +from ldm.invoke.generator.inpaint import infill_methods +from ldm.invoke.globals import Globals +from ldm.invoke.pngwriter import PngWriter, retrieve_metadata +from ldm.invoke.prompt_parser import split_weighted_subprompts, Blend # Loading Arguments opt = Args() @@ -304,7 +303,7 @@ class InvokeAIWebServer: def handle_request_capabilities(): print(f">> System config requested") config = self.get_system_config() - config["model_list"] = self.generate.model_cache.list_models() + config["model_list"] = self.generate.model_manager.list_models() config["infill_methods"] = infill_methods() socketio.emit("systemConfig", config) @@ -317,11 +316,11 @@ class InvokeAIWebServer: {'search_folder': None, 'found_models': None}, ) else: - search_folder, found_models = self.generate.model_cache.search_models(search_folder) + search_folder, found_models = self.generate.model_manager.search_models(search_folder) socketio.emit( "foundModels", {'search_folder': search_folder, 'found_models': found_models}, - ) + ) except Exception as e: self.socketio.emit("error", {"message": (str(e))}) print("\n") @@ -335,18 +334,20 @@ class InvokeAIWebServer: model_name = new_model_config['name'] del new_model_config['name'] model_attributes = new_model_config + if len(model_attributes['vae']) == 0: + del model_attributes['vae'] update = False - current_model_list = self.generate.model_cache.list_models() + current_model_list = self.generate.model_manager.list_models() if model_name in current_model_list: update = True print(f">> Adding New Model: {model_name}") - self.generate.model_cache.add_model( + self.generate.model_manager.add_model( model_name=model_name, model_attributes=model_attributes, clobber=True) - 
self.generate.model_cache.commit(opt.conf) + self.generate.model_manager.commit(opt.conf) - new_model_list = self.generate.model_cache.list_models() + new_model_list = self.generate.model_manager.list_models() socketio.emit( "newModelAdded", {"new_model_name": model_name, @@ -364,9 +365,9 @@ class InvokeAIWebServer: def handle_delete_model(model_name: str): try: print(f">> Deleting Model: {model_name}") - self.generate.model_cache.del_model(model_name) - self.generate.model_cache.commit(opt.conf) - updated_model_list = self.generate.model_cache.list_models() + self.generate.model_manager.del_model(model_name) + self.generate.model_manager.commit(opt.conf) + updated_model_list = self.generate.model_manager.list_models() socketio.emit( "modelDeleted", {"deleted_model_name": model_name, @@ -385,7 +386,7 @@ class InvokeAIWebServer: try: print(f">> Model change requested: {model_name}") model = self.generate.set_model(model_name) - model_list = self.generate.model_cache.list_models() + model_list = self.generate.model_manager.list_models() if model is None: socketio.emit( "modelChangeFailed", @@ -797,7 +798,7 @@ class InvokeAIWebServer: # App Functions def get_system_config(self): - model_list: dict = self.generate.model_cache.list_models() + model_list: dict = self.generate.model_manager.list_models() active_model_name = None for model_name, model_dict in model_list.items(): @@ -1205,9 +1206,16 @@ class InvokeAIWebServer: print(generation_parameters) + def diffusers_step_callback_adapter(*cb_args, **kwargs): + if isinstance(cb_args[0], PipelineIntermediateState): + progress_state: PipelineIntermediateState = cb_args[0] + return image_progress(progress_state.latents, progress_state.step) + else: + return image_progress(*cb_args, **kwargs) + self.generate.prompt2image( **generation_parameters, - step_callback=image_progress, + step_callback=diffusers_step_callback_adapter, image_callback=image_done ) diff --git a/backend/modules/parameters.py b/backend/modules/parameters.py index 10af5ece3a..9055297671 100644 --- a/backend/modules/parameters.py +++ b/backend/modules/parameters.py @@ -12,6 +12,8 @@ SAMPLER_CHOICES = [ "k_heun", "k_lms", "plms", + # diffusers: + "pndm", ] diff --git a/binary_installer/requirements.in b/binary_installer/requirements.in index b4436a6ec0..66e0618f78 100644 --- a/binary_installer/requirements.in +++ b/binary_installer/requirements.in @@ -2,9 +2,10 @@ --extra-index-url https://download.pytorch.org/whl/torch_stable.html --extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org -accelerate~=0.14 +accelerate~=0.15 albumentations -diffusers +diffusers[torch]~=0.11 +einops eventlet flask_cors flask_socketio diff --git a/configs/INITIAL_MODELS.yaml b/configs/INITIAL_MODELS.yaml index 6cfb863df8..45cead0541 100644 --- a/configs/INITIAL_MODELS.yaml +++ b/configs/INITIAL_MODELS.yaml @@ -1,72 +1,76 @@ stable-diffusion-1.5: - description: The newest Stable Diffusion version 1.5 weight file (4.27 GB) + description: Stable Diffusion version 1.5 weight file (4.27 GB) repo_id: runwayml/stable-diffusion-v1-5 - config: v1-inference.yaml - file: v1-5-pruned-emaonly.ckpt - recommended: true - width: 512 - height: 512 + format: diffusers + recommended: True + vae: + repo_id: stabilityai/sd-vae-ft-mse + default: True +stable-diffusion-2.1: + description: Stable Diffusion version 2.1 diffusers model (5.21 GB) + repo_id: stabilityai/stable-diffusion-2-1 + format: diffusers + recommended: True inpainting-1.5: description: RunwayML SD 1.5 model 
optimized for inpainting (4.27 GB) repo_id: runwayml/stable-diffusion-inpainting config: v1-inpainting-inference.yaml file: sd-v1-5-inpainting.ckpt - recommended: True - width: 512 - height: 512 -ft-mse-improved-autoencoder-840000: - description: StabilityAI improved autoencoder fine-tuned for human faces (recommended; 335 MB) - repo_id: stabilityai/sd-vae-ft-mse-original - config: VAE/default - file: vae-ft-mse-840000-ema-pruned.ckpt + format: ckpt + vae: + repo_id: stabilityai/sd-vae-ft-mse-original + file: vae-ft-mse-840000-ema-pruned.ckpt recommended: True width: 512 height: 512 stable-diffusion-1.4: description: The original Stable Diffusion version 1.4 weight file (4.27 GB) - repo_id: CompVis/stable-diffusion-v-1-4-original - config: v1-inference.yaml - file: sd-v1-4.ckpt + repo_id: CompVis/stable-diffusion-v1-4 recommended: False - width: 512 - height: 512 + format: diffusers + vae: + repo_id: stabilityai/sd-vae-ft-mse +waifu-diffusion-1.4: + description: Waifu diffusion 1.4 + format: diffusers + repo_id: hakurei/waifu-diffusion waifu-diffusion-1.3: description: Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB) repo_id: hakurei/waifu-diffusion-v1-3 config: v1-inference.yaml file: model-epoch09-float32.ckpt + format: ckpt + vae: + repo_id: stabilityai/sd-vae-ft-mse-original + file: vae-ft-mse-840000-ema-pruned.ckpt recommended: False width: 512 height: 512 trinart-2.0: description: An SD model finetuned with ~40,000 assorted high resolution manga/anime-style pictures (2.13 GB) repo_id: naclbit/trinart_stable_diffusion_v2 - config: v1-inference.yaml - file: trinart2_step95000.ckpt + format: diffusers recommended: False - width: 512 - height: 512 -trinart_characters-1.0: - description: An SD model finetuned with 19.2M anime/manga style images (2.13 GB) - repo_id: naclbit/trinart_characters_19.2m_stable_diffusion_v1 + vae: + repo_id: stabilityai/sd-vae-ft-mse +trinart_characters-2.0: + description: An SD model finetuned with 19.2M anime/manga style images (4.27 GB) + repo_id: naclbit/trinart_derrida_characters_v2_stable_diffusion config: v1-inference.yaml - file: trinart_characters_it4_v1.ckpt - recommended: False - width: 512 - height: 512 -trinart_vae: - description: Custom autoencoder for trinart_characters - repo_id: naclbit/trinart_characters_19.2m_stable_diffusion_v1 - config: VAE/trinart - file: autoencoder_fix_kl-f8-trinart_characters.ckpt + file: derrida_final.ckpt + format: ckpt + vae: + repo_id: naclbit/trinart_derrida_characters_v2_stable_diffusion + file: autoencoder_fix_kl-f8-trinart_characters.ckpt recommended: False width: 512 height: 512 papercut-1.0: description: SD 1.5 fine-tuned for papercut art (use "PaperCut" in your prompts) (2.13 GB) repo_id: Fictiverse/Stable_Diffusion_PaperCut_Model - config: v1-inference.yaml - file: PaperCut_v1.ckpt + format: diffusers + vae: + repo_id: stabilityai/sd-vae-ft-mse recommended: False width: 512 height: 512 @@ -75,6 +79,27 @@ voxel_art-1.0: repo_id: Fictiverse/Stable_Diffusion_VoxelArt_Model config: v1-inference.yaml file: VoxelArt_v1.ckpt + format: ckpt + vae: + repo_id: stabilityai/sd-vae-ft-mse + recommended: False + width: 512 + height: 512 +ft-mse-improved-autoencoder-840000: + description: StabilityAI improved autoencoder fine-tuned for human faces. 
Use with legacy .ckpt models ONLY (335 MB) + repo_id: stabilityai/sd-vae-ft-mse-original + format: ckpt + config: VAE/default + file: vae-ft-mse-840000-ema-pruned.ckpt + recommended: False + width: 512 + height: 512 +trinart_vae: + description: Custom autoencoder for trinart_characters for legacy .ckpt models only (335 MB) + repo_id: naclbit/trinart_characters_19.2m_stable_diffusion_v1 + config: VAE/trinart + format: ckpt + file: autoencoder_fix_kl-f8-trinart_characters.ckpt recommended: False width: 512 height: 512 diff --git a/configs/models.yaml.example b/configs/models.yaml.example index 61e1fe24be..98f8f77e62 100644 --- a/configs/models.yaml.example +++ b/configs/models.yaml.example @@ -5,6 +5,25 @@ # model requires a model config file, a weights file, # and the width and height of the images it # was trained on. +diffusers-1.4: + description: ๐Ÿค—๐Ÿงจ Stable Diffusion v1.4 + format: diffusers + repo_id: CompVis/stable-diffusion-v1-4 +diffusers-1.5: + description: ๐Ÿค—๐Ÿงจ Stable Diffusion v1.5 + format: diffusers + repo_id: runwayml/stable-diffusion-v1-5 + default: true +diffusers-1.5+mse: + description: ๐Ÿค—๐Ÿงจ Stable Diffusion v1.5 + MSE-finetuned VAE + format: diffusers + repo_id: runwayml/stable-diffusion-v1-5 + vae: + repo_id: stabilityai/sd-vae-ft-mse +diffusers-inpainting-1.5: + description: ๐Ÿค—๐Ÿงจ inpainting for Stable Diffusion v1.5 + format: diffusers + repo_id: runwayml/stable-diffusion-inpainting stable-diffusion-1.5: description: The newest Stable Diffusion version 1.5 weight file (4.27 GB) weights: models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt @@ -12,7 +31,6 @@ stable-diffusion-1.5: width: 512 height: 512 vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt - default: true stable-diffusion-1.4: description: Stable Diffusion inference model version 1.4 config: configs/stable-diffusion/v1-inference.yaml diff --git a/configs/stable-diffusion/v2-inference-v.yaml b/configs/stable-diffusion/v2-inference-v.yaml new file mode 100644 index 0000000000..8ec8dfbfef --- /dev/null +++ b/configs/stable-diffusion/v2-inference-v.yaml @@ -0,0 +1,68 @@ +model: + base_learning_rate: 1.0e-4 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + parameterization: "v" + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False # we set this to false because this is an inference only config + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + use_fp16: True + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_head_channels: 64 # need to fix for flash-attn + use_spatial_transformer: True + use_linear_in_transformer: True + transformer_depth: 1 + context_dim: 1024 + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + #attn_type: "vanilla-xformers" + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: 
ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder + params: + freeze: True + layer: "penultimate" diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 639a9a9466..e9461264db 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -4,6 +4,97 @@ title: Changelog # :octicons-log-16: **Changelog** +## v2.3.0 (15 January 2023) + +**Transition to diffusers** + +Version 2.3 provides support for both the traditional `.ckpt` weight +checkpoint files as well as the HuggingFace `diffusers` format. This +introduces several changes you should know about. + +1. The models.yaml format has been updated. There are now two + different types of configuration stanzas. The traditional ckpt + one will look like this, with a `format` of `ckpt` and a + `weights` field that points to the absolute or ROOTDIR-relative + location of the ckpt file. + + ``` + inpainting-1.5: + description: RunwayML SD 1.5 model optimized for inpainting (4.27 GB) + repo_id: runwayml/stable-diffusion-inpainting + format: ckpt + width: 512 + height: 512 + weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt + config: configs/stable-diffusion/v1-inpainting-inference.yaml + vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt + ``` + + A configuration stanza for a diffusers model hosted at HuggingFace will look like this, + with a `format` of `diffusers` and a `repo_id` that points to the + repository ID of the model on HuggingFace: + + ``` + stable-diffusion-2.1: + description: Stable Diffusion version 2.1 diffusers model (5.21 GB) + repo_id: stabilityai/stable-diffusion-2-1 + format: diffusers + ``` + + A configuration stanza for a diffusers model stored locally should + look like this, with a `format` of `diffusers`, but a `path` field + that points at the directory that contains `model_index.json`: + + ``` + waifu-diffusion: + description: Latest waifu diffusion 1.4 + format: diffusers + path: models/diffusers/hakurei-haifu-diffusion-1.4 + ``` + +2. The format of the models directory has changed to mimic the + HuggingFace cache directory. By default, diffusers models are + now automatically downloaded and retrieved from the directory + `ROOTDIR/models/diffusers`, while other models are stored in + the directory `ROOTDIR/models/hub`. This organization is the + same as that used by HuggingFace for its cache management. + + This allows you to share diffusers and ckpt model files easily with + other machine learning applications that use the HuggingFace + libraries. To do this, set the environment variable HF_HOME + before starting up InvokeAI to tell it what directory to + cache models in. To tell InvokeAI to use the standard HuggingFace + cache directory, you would set HF_HOME like this (Linux/Mac): + + `export HF_HOME=~/.cache/huggingface` + +3. If you upgrade to InvokeAI 2.3.* from an earlier version, there + will be a one-time migration from the old models directory format + to the new one. You will see a message about this the first time + you start `invoke.py`. + +4. Both the front and back ends of the model manager have been + rewritten to accommodate diffusers. You can import models using + their local file paths, their URLs, or their HuggingFace + repo_ids. On the command line, all these syntaxes work: + + ``` + !import_model stabilityai/stable-diffusion-2-1-base + !import_model /opt/sd-models/sd-1.4.ckpt + !import_model https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/blob/main/PaperCut_v1.ckpt + ``` + +**KNOWN BUGS (15 January 2023)** + +1. 
On CUDA systems, the 768 pixel stable-diffusion-2.0 and + stable-diffusion-2.1 models can only be run as `diffusers` models + when the `xformer` library is installed and configured. Without + `xformers`, InvokeAI returns black images. + +2. Inpainting and outpainting have regressed in quality. + +Both these issues are being actively worked on. + ## v2.2.4 (11 December 2022) **the `invokeai` directory** diff --git a/docs/assets/canvas_preview.png b/docs/assets/canvas_preview.png new file mode 100644 index 0000000000..dba4ee2ca2 Binary files /dev/null and b/docs/assets/canvas_preview.png differ diff --git a/docs/installation/020_INSTALL_MANUAL.md b/docs/installation/020_INSTALL_MANUAL.md index 7f1e152945..c1181db347 100644 --- a/docs/installation/020_INSTALL_MANUAL.md +++ b/docs/installation/020_INSTALL_MANUAL.md @@ -12,17 +12,18 @@ title: Installing Manually ## Introduction -You have two choices for manual installation, the [first -one](#PIP_method) uses basic Python virtual environment (`venv`) -commands and the PIP package manager. The [second one](#Conda_method) -based on the Anaconda3 package manager (`conda`). Both methods require -you to enter commands on the terminal, also known as the "console". +You have two choices for manual installation. +The [first one](#pip-Install) uses basic Python virtual environment (`venv`) +command and `pip` package manager. +The [second one](#Conda-method) uses Anaconda3 package manager (`conda`). +Both methods require you to enter commands on the terminal, also known as the +"console". -Note that the conda install method is currently deprecated and will not -be supported at some point in the future. +Note that the `conda` installation method is currently deprecated and will +not be supported at some point in the future. -On Windows systems you are encouraged to install and use the -[Powershell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3), +On Windows systems, you are encouraged to install and use the +[PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3), which provides compatibility with Linux and Mac shells and nice features such as command-line completion. @@ -37,7 +38,7 @@ manager, please follow these steps: ```bash python -V ``` - + 2. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from GitHub: @@ -52,15 +53,15 @@ manager, please follow these steps: environment named `invokeai`: ```bash - python -mvenv invokeai + python -m venv invokeai source invokeai/bin/activate ``` -4. Make sure that pip is installed in your virtual environment an up to date: +4. Make sure that pip is installed in your virtual environment an up to date: ```bash - python -mensurepip --upgrade - python -mpip install --upgrade pip + python -m ensurepip --upgrade + python -m pip install --upgrade pip ``` 5. Pick the correct `requirements*.txt` file for your hardware and operating @@ -199,20 +200,20 @@ manager, please follow these steps: You can permanently set the location of the runtime directory by setting the environment variable INVOKEAI_ROOT to the path of the directory. -9. Render away! +9. Render away! Browse the [features](../features/CLI.md) section to learn about all the things you can do with InvokeAI. Note that some GPUs are slow to warm up. In particular, when using an AMD card with the ROCm driver, you may have to wait for over a minute the first - time you try to generate an image. 
Fortunately, after the warm up period + time you try to generate an image. Fortunately, after the warm-up period rendering will be fast. -10. Subsequently, to relaunch the script, be sure to run "conda activate - invokeai", enter the `InvokeAI` directory, and then launch the invoke - script. If you forget to activate the 'invokeai' environment, the script - will fail with multiple `ModuleNotFound` errors. +10. Subsequently, to relaunch the script, be sure to enter `InvokeAI` directory, + activate the virtual environment, and then launch `invoke.py` script. + If you forget to activate the virtual environment, + the script will fail with multiple `ModuleNotFound` errors. !!! tip diff --git a/environments-and-requirements/environment-lin-aarch64.yml b/environments-and-requirements/environment-lin-aarch64.yml index c1e7553a28..9dc49c1255 100644 --- a/environments-and-requirements/environment-lin-aarch64.yml +++ b/environments-and-requirements/environment-lin-aarch64.yml @@ -28,13 +28,18 @@ dependencies: - torch-fidelity=0.3.0 - torchmetrics=0.7.0 - torchvision - - transformers=4.21.3 + - transformers~=4.25 - pip: + - accelerate + - diffusers[torch]~=0.11 - getpass_asterisk + - huggingface-hub>=0.11.1 - omegaconf==2.1.1 - picklescan - pyreadline3 - realesrgan + - requests==2.25.1 + - safetensors - taming-transformers-rom1504 - test-tube>=0.7.5 - git+https://github.com/openai/CLIP.git@main#egg=clip diff --git a/environments-and-requirements/environment-lin-amd.yml b/environments-and-requirements/environment-lin-amd.yml index 42ebf37266..cbddbb5fc8 100644 --- a/environments-and-requirements/environment-lin-amd.yml +++ b/environments-and-requirements/environment-lin-amd.yml @@ -9,14 +9,16 @@ dependencies: - numpy=1.23.3 - pip: - --extra-index-url https://download.pytorch.org/whl/rocm5.2/ + - accelerate - albumentations==0.4.3 - - diffusers==0.6.0 + - diffusers[torch]~=0.11 - einops==0.3.0 - eventlet - flask==2.1.3 - flask_cors==3.0.10 - flask_socketio==5.3.0 - getpass_asterisk + - huggingface-hub>=0.11.1 - imageio-ffmpeg==0.4.2 - imageio==2.9.0 - kornia==0.6.0 @@ -28,6 +30,8 @@ dependencies: - pyreadline3 - pytorch-lightning==1.7.7 - realesrgan + - requests==2.25.1 + - safetensors - send2trash==1.8.0 - streamlit==1.12.0 - taming-transformers-rom1504 @@ -38,7 +42,7 @@ dependencies: - torchaudio - torchmetrics==0.7.0 - torchvision - - transformers==4.21.3 + - transformers~=4.25 - git+https://github.com/openai/CLIP.git@main#egg=clip - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg diff --git a/environments-and-requirements/environment-lin-cuda.yml b/environments-and-requirements/environment-lin-cuda.yml index ce60cfd96b..20cc636624 100644 --- a/environments-and-requirements/environment-lin-cuda.yml +++ b/environments-and-requirements/environment-lin-cuda.yml @@ -12,14 +12,16 @@ dependencies: - pytorch=1.12.1 - cudatoolkit=11.6 - pip: + - accelerate~=0.13 - albumentations==0.4.3 - - diffusers==0.6.0 + - diffusers[torch]~=0.11 - einops==0.3.0 - eventlet - flask==2.1.3 - flask_cors==3.0.10 - flask_socketio==5.3.0 - getpass_asterisk + - huggingface-hub>=0.11.1 - imageio-ffmpeg==0.4.2 - imageio==2.9.0 - kornia==0.6.0 @@ -31,13 +33,15 @@ dependencies: - pyreadline3 - pytorch-lightning==1.7.7 - realesrgan + - requests==2.25.1 + - safetensors~=0.2 - send2trash==1.8.0 - streamlit==1.12.0 - taming-transformers-rom1504 - test-tube>=0.7.5 - torch-fidelity==0.3.0 - torchmetrics==0.7.0 - - 
transformers==4.21.3 + - transformers~=4.25 - git+https://github.com/openai/CLIP.git@main#egg=clip - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg diff --git a/environments-and-requirements/environment-mac.yml b/environments-and-requirements/environment-mac.yml index 3e78d9ac95..7ced428ebf 100644 --- a/environments-and-requirements/environment-mac.yml +++ b/environments-and-requirements/environment-mac.yml @@ -1,6 +1,7 @@ name: invokeai channels: - pytorch + - huggingface - conda-forge - defaults dependencies: @@ -19,10 +20,9 @@ dependencies: # sed -E 's/invokeai/invokeai-updated/;20,99s/- ([^=]+)==.+/- \1/' environment-mac.yml > environment-mac-updated.yml # CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac-updated.yml && conda list -n invokeai-updated | awk ' {print " - " $1 "==" $2;} ' # ``` - + - accelerate - albumentations=1.2 - coloredlogs=15.0 - - diffusers=0.6 - einops=0.3 - eventlet - grpcio=1.46 @@ -49,10 +49,14 @@ dependencies: - sympy=1.10 - send2trash=1.8 - tensorboard=2.10 - - transformers=4.23 + - transformers~=4.25 - pip: + - diffusers[torch]~=0.11 + - safetensors~=0.2 - getpass_asterisk + - huggingface-hub - picklescan + - requests==2.25.1 - taming-transformers-rom1504 - test-tube==0.7.5 - git+https://github.com/openai/CLIP.git@main#egg=clip diff --git a/environments-and-requirements/environment-win-cuda.yml b/environments-and-requirements/environment-win-cuda.yml index 580f84f8ec..c7ad599641 100644 --- a/environments-and-requirements/environment-win-cuda.yml +++ b/environments-and-requirements/environment-win-cuda.yml @@ -12,14 +12,16 @@ dependencies: - pytorch=1.12.1 - cudatoolkit=11.6 - pip: + - accelerate - albumentations==0.4.3 - - diffusers==0.6.0 + - diffusers[torch]~=0.11 - einops==0.3.0 - eventlet - flask==2.1.3 - flask_cors==3.0.10 - flask_socketio==5.3.0 - getpass_asterisk + - huggingface-hub>=0.11.1 - imageio-ffmpeg==0.4.2 - imageio==2.9.0 - kornia==0.6.0 @@ -31,13 +33,16 @@ dependencies: - pyreadline3 - pytorch-lightning==1.7.7 - realesrgan + - requests==2.25.1 + - safetensors - send2trash==1.8.0 - streamlit==1.12.0 - taming-transformers-rom1504 - test-tube>=0.7.5 - torch-fidelity==0.3.0 - torchmetrics==0.7.0 - - transformers==4.21.3 + - transformers~=4.25 + - windows-curses - git+https://github.com/openai/CLIP.git@main#egg=clip - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg diff --git a/environments-and-requirements/requirements-base.txt b/environments-and-requirements/requirements-base.txt index f52df20b5c..a11b7fa39e 100644 --- a/environments-and-requirements/requirements-base.txt +++ b/environments-and-requirements/requirements-base.txt @@ -1,6 +1,9 @@ # pip will resolve the version which matches torch +accelerate albumentations -diffusers==0.10.* +datasets +diffusers[torch]~=0.11 +dnspython==2.2.1 einops eventlet facexlib @@ -14,6 +17,7 @@ huggingface-hub>=0.11.1 imageio imageio-ffmpeg kornia +npyscreen numpy==1.23.* omegaconf opencv-python @@ -25,6 +29,7 @@ pyreadline3 pytorch-lightning==1.7.7 realesrgan requests==2.25.1 +safetensors scikit-image>=0.19 send2trash streamlit @@ -32,7 +37,8 @@ taming-transformers-rom1504 test-tube>=0.7.5 torch-fidelity torchmetrics -transformers==4.25.* +transformers~=4.25 +windows-curses; sys_platform == 'win32' https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip#egg=k-diffusion 
https://github.com/invoke-ai/PyPatchMatch/archive/refs/tags/0.1.5.zip#egg=pypatchmatch https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip#egg=clip diff --git a/frontend/dist/assets/index-legacy-5c5a479d.js b/frontend/dist/assets/index-legacy-474a75fe.js similarity index 78% rename from frontend/dist/assets/index-legacy-5c5a479d.js rename to frontend/dist/assets/index-legacy-474a75fe.js index 3babd3134a..b48c03f8f8 100644 --- a/frontend/dist/assets/index-legacy-5c5a479d.js +++ b/frontend/dist/assets/index-legacy-474a75fe.js @@ -18,7 +18,7 @@ * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ -var Y=a.exports,Z=G.exports;function X(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;n