Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
Merge branch 'diffusers_cross_attention_control_reimplementation' of github.com:damian0815/InvokeAI into diffusers_cross_attention_control_reimplementation
This commit is contained in commit c52dd7e3f4.
.github/workflows/test-invoke-pip.yml (vendored): 142 lines changed
@@ -8,141 +8,133 @@ on:
- 'ready_for_review'
- 'opened'
- 'synchronize'
- 'converted_to_draft'

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true

jobs:
# fail_if_pull_request_is_draft:
# if: github.event.pull_request.draft == true && github.head_ref != 'dev/diffusers'
# runs-on: ubuntu-18.04
# steps:
# - name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass.
# run: exit 1
matrix:
if: github.event.pull_request.draft == false || github.head_ref == 'dev/diffusers'
if: github.event.pull_request.draft == false
strategy:
matrix:
stable-diffusion-model:
- stable-diffusion-1.5
requirements-file:
- requirements-lin-cuda.txt
- requirements-lin-amd.txt
- requirements-mac-mps-cpu.txt
- requirements-win-colab-cuda.txt
python-version:
# - '3.9'
- '3.10'
pytorch:
- linux-cuda-11_6
- linux-cuda-11_7
- linux-rocm-5_2
- linux-cpu
- macos-default
- windows-cpu
- windows-cuda-11_6
- windows-cuda-11_7
include:
- requirements-file: requirements-lin-cuda.txt
- pytorch: linux-cuda-11_6
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/cu116'
github-env: $GITHUB_ENV
- pytorch: linux-cuda-11_7
os: ubuntu-22.04
github-env: $GITHUB_ENV
- requirements-file: requirements-lin-amd.txt
- pytorch: linux-rocm-5_2
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
github-env: $GITHUB_ENV
- requirements-file: requirements-mac-mps-cpu.txt
- pytorch: linux-cpu
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/cpu'
github-env: $GITHUB_ENV
- pytorch: macos-default
os: macOS-12
github-env: $GITHUB_ENV
- requirements-file: requirements-win-colab-cuda.txt
- pytorch: windows-cpu
os: windows-2022
github-env: $env:GITHUB_ENV
name: ${{ matrix.requirements-file }} on ${{ matrix.python-version }}
- pytorch: windows-cuda-11_6
os: windows-2022
extra-index-url: 'https://download.pytorch.org/whl/cu116'
github-env: $env:GITHUB_ENV
- pytorch: windows-cuda-11_7
os: windows-2022
extra-index-url: 'https://download.pytorch.org/whl/cu117'
github-env: $env:GITHUB_ENV
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
runs-on: ${{ matrix.os }}
env:
INVOKE_MODEL_RECONFIGURE: '--yes'
INVOKEAI_ROOT: '${{ github.workspace }}/invokeai'
PYTHONUNBUFFERED: 1
HAVE_SECRETS: ${{ secrets.HUGGINGFACE_TOKEN != '' }}
steps:
- name: Checkout sources
id: checkout-sources
uses: actions/checkout@v3

- name: set INVOKEAI_ROOT Windows
if: matrix.os == 'windows-2022'
run: |
echo "INVOKEAI_ROOT=${{ github.workspace }}\invokeai" >> ${{ matrix.github-env }}
echo "INVOKEAI_OUTDIR=${{ github.workspace }}\invokeai\outputs" >> ${{ matrix.github-env }}

- name: set INVOKEAI_ROOT others
if: matrix.os != 'windows-2022'
run: |
echo "INVOKEAI_ROOT=${{ github.workspace }}/invokeai" >> ${{ matrix.github-env }}
echo "INVOKEAI_OUTDIR=${{ github.workspace }}/invokeai/outputs" >> ${{ matrix.github-env }}

- name: Use Cached diffusers-1.5
id: cache-sd-model
uses: actions/cache@v3
env:
cache-name: huggingface-${{ matrix.stable-diffusion-model }}
- name: setup python
uses: actions/setup-python@v4
with:
path: |
${{ env.INVOKEAI_ROOT }}/models/runwayml
${{ env.INVOKEAI_ROOT }}/models/stabilityai
${{ env.INVOKEAI_ROOT }}/models/CompVis
key: ${{ env.cache-name }}
python-version: ${{ matrix.python-version }}

- name: Set Cache-Directory Windows
if: runner.os == 'Windows'
id: set-cache-dir-windows
run: |
echo "CACHE_DIR=$HOME\invokeai\models" >> ${{ matrix.github-env }}
echo "PIP_NO_CACHE_DIR=1" >> ${{ matrix.github-env }}

- name: Set Cache-Directory others
if: runner.os != 'Windows'
id: set-cache-dir-others
run: echo "CACHE_DIR=$HOME/invokeai/models" >> ${{ matrix.github-env }}

- name: set test prompt to main branch validation
if: ${{ github.ref == 'refs/heads/main' }}
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}

- name: set test prompt to development branch validation
if: ${{ github.ref == 'refs/heads/development' }}
run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }}

- name: set test prompt to Pull Request validation
if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
if: ${{ github.ref != 'refs/heads/main' }}
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}

- name: create requirements.txt
run: cp 'environments-and-requirements/${{ matrix.requirements-file }}' '${{ matrix.requirements-file }}'
- name: install invokeai
run: pip3 install --use-pep517 -e .
env:
PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}

- name: setup python
uses: actions/setup-python@v4
- name: Use Cached models
id: cache-sd-model
uses: actions/cache@v3
env:
cache-name: huggingface-models
with:
python-version: ${{ matrix.python-version }}
# cache: 'pip'
# cache-dependency-path: ${{ matrix.requirements-file }}
path: ${{ env.CACHE_DIR }}
key: ${{ env.cache-name }}
enableCrossOsArchive: true

- name: install dependencies
run: pip3 install --upgrade pip setuptools wheel

- name: install requirements
run: pip3 install -r '${{ matrix.requirements-file }}'

- name: run configure_invokeai.py
- name: run configure_invokeai
id: run-preload-models
env:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGINGFACE_TOKEN }}
run: >
configure_invokeai.py
configure_invokeai
--yes
--default_only
--full-precision # can't use fp16 weights without a GPU
--full-precision
# can't use fp16 weights without a GPU

- name: Run the tests
if: runner.os != 'Windows'
id: run-tests
if: matrix.os != 'windows-2022'
env:
# Set offline mode to make sure configure preloaded successfully.
HF_HUB_OFFLINE: 1
HF_DATASETS_OFFLINE: 1
TRANSFORMERS_OFFLINE: 1
run: >
python3 scripts/invoke.py
invoke
--no-patchmatch
--no-nsfw_checker
--model ${{ matrix.stable-diffusion-model }}
--from_file ${{ env.TEST_PROMPTS }}
--root="${{ env.INVOKEAI_ROOT }}"
--outdir="${{ env.INVOKEAI_OUTDIR }}"

- name: Archive results
id: archive-results
if: matrix.os != 'windows-2022'
uses: actions/upload-artifact@v3
with:
name: results_${{ matrix.requirements-file }}_${{ matrix.python-version }}
name: results_${{ matrix.pytorch }}_${{ matrix.python-version }}
path: ${{ env.INVOKEAI_ROOT }}/outputs
README.md: 15 lines changed
@@ -1,6 +1,6 @@
<div align="center">

![project logo](docs/assets/invoke_ai_banner.png)
![project logo](https://github.com/mauwii/InvokeAI/raw/main/docs/assets/invoke_ai_banner.png)

# InvokeAI: A Stable Diffusion Toolkit

@@ -28,6 +28,7 @@
[latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases

</div>

InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products.
@@ -38,8 +39,11 @@ _Note: InvokeAI is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
requests. Be sure to use the provided templates. They will help us diagnose issues faster._

<div align="center">

![canvas preview](docs/assets/canvas_preview.png)
![canvas preview](https://github.com/mauwii/InvokeAI/raw/main/docs/assets/canvas_preview.png)

</div>

# Getting Started with InvokeAI

@@ -81,6 +85,7 @@ instructions, please see:
InvokeAI is supported across Linux, Windows and macOS. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver).

#### System

You will need one of the following:
@@ -105,18 +110,23 @@ to render 512x512 images.
Feature documentation can be reviewed by navigating to [the InvokeAI Documentation page](https://invoke-ai.github.io/InvokeAI/features/)

### *Web Server & UI*

InvokeAI offers a locally hosted Web Server & React Frontend, with an industry leading user experience. The Web-based UI allows for simple and intuitive workflows, and is responsive for use on mobile devices and tablets accessing the web server.

### *Unified Canvas*

The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/outpainting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more.

### *Advanced Prompt Syntax*

InvokeAI's advanced prompt syntax allows for token weighting, cross-attention control, and prompt blending, allowing for fine-tuned tweaking of your invocations and exploration of the latent space.

### *Command Line Interface*

For users utilizing a terminal-based environment, or who want to take advantage of CLI features, InvokeAI offers an extensive and actively supported command-line interface that provides the full suite of generation functionality available in the tool.

### Other features

- *Support for both ckpt and diffusers models*
- *SD 2.0, 2.1 support*
- *Noise Control & Thresholding*
@@ -126,6 +136,7 @@ For users utilizing a terminal-based environment, or who want to take advantage
- *Model Manager & Support*

### Coming Soon

- *Node-Based Architecture & UI*
- And more...
@@ -93,6 +93,7 @@ voxel_art-1.0:
format: ckpt
vae:
repo_id: stabilityai/sd-vae-ft-mse
file: vae-ft-mse-840000-ema-pruned.ckpt
recommended: False
width: 512
height: 512
@@ -102,7 +103,7 @@ ft-mse-improved-autoencoder-840000:
format: ckpt
config: VAE/default
file: vae-ft-mse-840000-ema-pruned.ckpt
recommended: False
recommended: True
width: 512
height: 512
trinart_vae:
docs/assets/textual-inversion/ti-frontend.png (BIN, new file, 124 KiB): binary file not shown
docs/features/MODEL_MERGING.md (new file): 77 lines
@@ -0,0 +1,77 @@
---
title: Model Merging
---

# :material-image-off: Model Merging

## How to Merge Models

As of version 2.3, InvokeAI comes with a script that allows you to merge two or three diffusers-type models into a new merged model. The resulting model will combine characteristics of the originals, and can be used to teach an old model new tricks.

You may run the merge script by starting the invoke launcher (`invoke.sh` or `invoke.bat`) and choosing the option for _merge models_. This will launch a text-based interactive user interface that prompts you to select the models to merge, how to merge them, and the merged model name.

Alternatively you may activate InvokeAI's virtual environment from the command line, and call the script via `merge_models_fe.py` (the "fe" stands for "front end"). There is also a version that accepts command-line arguments, which you can run with the command `merge_models.py`.
The user interface for the text-based interactive script is straightforward. It shows you a series of setting fields. Use control-N (^N) to move to the next field, and control-P (^P) to move to the previous one. You can also use TAB and shift-TAB to move forward and backward. Once you are in a multiple choice field, use the up and down cursor arrows to move to your desired selection, and press <SPACE> or <ENTER> to select it. Change text fields by typing in them, and adjust scrollbars using the left and right arrow keys.

Once you are happy with your settings, press the OK button. Note that there may be two pages of settings, depending on the height of your screen, and the OK button may be on the second page. Advance past the last field of the first page to get to the second page, and reverse this to get back.

If the merge runs successfully, it will create a new diffusers model under the selected name and register it with InvokeAI.

## The Settings

* Model Selection -- there are three multiple choice fields that display all the diffusers-style models that InvokeAI knows about. If you do not see the model you are looking for, then it is probably a legacy checkpoint model and needs to be converted using the `invoke.py` command-line client and its `!optimize` command. You must select at least two models to merge. The third can be left at "None" if you desire.
* Alpha -- This is the ratio to use when combining models. It ranges from 0 to 1. The higher the value, the more weight is given to the second and (optionally) third models. So if you have two models named "A" and "B", an alpha value of 0.25 will give you a merged model that is 75% A and 25% B (see the sketch after this list).
* Interpolation Method -- This is the method used to combine weights. The options are "weighted_sum" (the default), "sigmoid", "inv_sigmoid" and "add_difference". Each produces slightly different results. When three models are in use, only "add_difference" is available. (TODO: cite a reference that describes what these interpolation methods actually do and how to decide among them).

* Force -- Not all models are compatible with each other. The merge script will check for compatibility and refuse to merge ones that are incompatible. Set this checkbox to try merging anyway.

* Name for merged model - This is the name for the new model. Please use InvokeAI conventions - only alphanumeric letters and the characters ".+-".
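To make the Alpha setting concrete, here is a minimal sketch of how the "weighted_sum" and "add_difference" interpolations combine weights. This is an illustration only, not InvokeAI's actual merge code; the dict-of-tensors representation and the helper names are assumptions.

```python
def weighted_sum(theta_a, theta_b, alpha):
    # alpha=0 returns model A unchanged; alpha=1 returns model B.
    # With alpha=0.25 the result is 75% A and 25% B, as described above.
    return {k: (1 - alpha) * theta_a[k] + alpha * theta_b[k] for k in theta_a}

def add_difference(theta_a, theta_b, theta_c, alpha):
    # Three-model merge: the difference between B and C is treated as a
    # "delta" that is added onto A, scaled by alpha.
    return {k: theta_a[k] + alpha * (theta_b[k] - theta_c[k]) for k in theta_a}
```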
## Caveats

This is a new script and may contain bugs.
@@ -10,83 +10,263 @@ You may personalize the generated images to provide your own styles or objects
by training a new LDM checkpoint and introducing a new vocabulary to the fixed
model as a (.pt) embeddings file. Alternatively, you may use or train
HuggingFace Concepts embeddings files (.bin) from
<https://huggingface.co/sd-concepts-library> and its associated notebooks.
<https://huggingface.co/sd-concepts-library> and its associated
notebooks.

## **Training**
## **Hardware and Software Requirements**

To train, prepare a folder that contains images sized at 512x512 and execute the
following:
You will need a GPU to perform training in a reasonable length of
time, and at least 12 GB of VRAM. We recommend using the [`xformers`
library](../installation/070_INSTALL_XFORMERS) to accelerate the
training process further. During training, about ~8 GB is temporarily
needed in order to store intermediate models, checkpoints and logs.

### WINDOWS
## **Preparing for Training**

As the default backend is not available on Windows, if you're using that
platform, set the environment variable `PL_TORCH_DISTRIBUTED_BACKEND` to `gloo`
To train, prepare a folder that contains 3-5 images that illustrate
the object or concept. It is good to provide a variety of examples or
poses to avoid overtraining the system. Format these images as PNG
(preferred) or JPG. You do not need to resize or crop the images in
advance, but for more control you may wish to do so.

```bash
python3 ./main.py -t \
--base ./configs/stable-diffusion/v1-finetune.yaml \
--actual_resume ./models/ldm/stable-diffusion-v1/model.ckpt \
-n my_cat \
--gpus 0 \
--data_root D:/textual-inversion/my_cat \
--init_word 'cat'
```
Place the training images in a directory on the machine InvokeAI runs
on. We recommend placing them in a subdirectory of the
`text-inversion-training-data` folder located in the InvokeAI root
directory, ordinarily `~/invokeai` (Linux/Mac), or
`C:\Users\your_name\invokeai` (Windows). For example, to create an
embedding for the "psychedelic" style, you'd place the training images
into the directory
`~/invokeai/text-inversion-training-data/psychedelic`.
## **Launching Training Using the Console Front End**

InvokeAI 2.3 and higher comes with a text console-based training front
end. From within the `invoke.sh`/`invoke.bat` Invoke launcher script,
start the front end by selecting choice (3):

```sh
Do you want to generate images using the
1. command-line
2. browser-based UI
3. textual inversion training
4. open the developer console
Please enter 1, 2, 3, or 4: [1] 3
```

During the training process, files will be created in
`/logs/[project][time][project]/` where you can see the process.
From the command line, with the InvokeAI virtual environment active,
you can launch the front end with the command
`textual_inversion_fe`.

Conditioning contains the training prompts inputs, reconstruction the input
images for the training epoch samples, samples scaled for a sample of the prompt
and one with the init word provided.
This will launch a text-based front end that will look like this:

On an RTX3090, the process for SD will take ~1h @1.6 iterations/sec.
<figure markdown>
![ti-frontend](../assets/textual-inversion/ti-frontend.png)
</figure>

!!! note
The interface is keyboard-based. Move from field to field using
control-N (^N) to move to the next field and control-P (^P) to the
previous one. <Tab> and <shift-TAB> work as well. Once a field is
active, use the cursor keys. In a checkbox group, use the up and down
cursor keys to move from choice to choice, and <space> to select a
choice. In a scrollbar, use the left and right cursor keys to increase
and decrease the value of the scroll. In textfields, type the desired
values.

According to the associated paper, the optimal number of
images is 3-5. Your model may not converge if you use more images than
that.
The number of parameters may look intimidating, but in most cases the
predefined defaults work fine. The red circled fields in the above
illustration are the ones you will adjust most frequently.

Training will run indefinitely, but you may wish to stop it (with ctrl-c) before
the heat death of the universe, when you find a low loss epoch or around ~5000
iterations. Note that you can set a fixed limit on the number of training steps
by decreasing the "max_steps" option in
configs/stable_diffusion/v1-finetune.yaml (currently set to 4000000)
### Model Name

## **Run the Model**
This will list all the diffusers models that are currently
installed. Select the one you wish to use as the basis for your
embedding. Be aware that if you use a SD-1.X-based model for your
training, you will only be able to use this embedding with other
SD-1.X-based models. Similarly, if you train on SD-2.X, you will only
be able to use the embeddings with models based on SD-2.X.

Once the model is trained, specify the trained .pt or .bin file when starting
invoke using
### Trigger Term

```bash
python3 ./scripts/invoke.py \
--embedding_path /path/to/embedding.pt
```
This is the prompt term you will use to trigger the embedding. Type a
single word or phrase you wish to use as the trigger, example
"psychedelic" (without angle brackets). Within InvokeAI, you will then
be able to activate the trigger using the syntax `<psychedelic>`.

### Initializer

This is a single character that is used internally during the training
process as a placeholder for the trigger term. It defaults to "*" and
can usually be left alone.

### Resume from last saved checkpoint

As training proceeds, textual inversion will write a series of
intermediate files that can be used to resume training from where it
was left off in the case of an interruption. This checkbox will be
automatically selected if you provide a previously used trigger term
and at least one checkpoint file is found on disk.

Note that as of 20 January 2023, resume does not seem to be working
properly due to an issue with the upstream code.

### Data Training Directory

This is the location of the images to be used for training. When you
select a trigger term like "my-trigger", the frontend will prepopulate
this field with `~/invokeai/text-inversion-training-data/my-trigger`,
but you can change the path to wherever you want.

### Output Destination Directory

This is the location of the logs, checkpoint files, and embedding
files created during training. When you select a trigger term like
"my-trigger", the frontend will prepopulate this field with
`~/invokeai/text-inversion-output/my-trigger`, but you can change the
path to wherever you want.

### Image resolution

The images in the training directory will be automatically scaled to
the value you use here. For best results, you will want to use the
same default resolution as the underlying model (512 pixels for
SD-1.5, 768 for the larger version of SD-2.1).

### Center crop images

If this is selected, your images will be center cropped to make them
square before resizing them to the desired resolution. Center cropping
can indiscriminately cut off the top of subjects' heads for portrait
aspect images, so if you have images like this, you may wish to use a
photoeditor to manually crop them to a square aspect ratio.

### Mixed precision

Select the floating point precision for the embedding. "no" will
result in full 32-bit precision, "fp16" will provide 16-bit
precision, and "bf16" will provide mixed precision (only available
when XFormers is used).

### Max training steps

How many steps the training will take before the model converges. Most
training sets will converge with 2000-3000 steps.

### Batch size

This adjusts how many training images are processed simultaneously in
each step. Higher values will cause the training process to run more
quickly, but use more memory. The default size will run with GPUs with
as little as 12 GB.

### Learning rate

The rate at which the system adjusts its internal weights during
training. Higher values risk overtraining (getting the same image each
time), and lower values will take more steps to train a good
model. The default of 0.0005 is conservative; you may wish to increase
it to 0.005 to speed up training.

### Scale learning rate by number of GPUs, steps and batch size

If this is selected (the default) the system will adjust the provided
learning rate to improve performance.
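The documentation does not spell out the formula, but the conventional "scale_lr" rule multiplies the base rate by the factors that enlarge each optimizer step. The sketch below shows that convention as an assumption, not a statement about InvokeAI's exact code:

```python
def scaled_learning_rate(base_lr, batch_size, grad_accum_steps, num_gpus):
    # Grow the rate in proportion to the effective samples per optimizer step.
    return base_lr * batch_size * grad_accum_steps * num_gpus

# Example: the default 0.0005 with batch size 8, 4 accumulation steps and
# 1 GPU becomes 0.0005 * 8 * 4 * 1 = 0.016.
```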
### Use xformers acceleration

This will activate XFormers memory-efficient attention. You need to
have XFormers installed for this to have an effect.

### Learning rate scheduler

This adjusts how the learning rate changes over the course of
training. The default "constant" means to use a constant learning rate
for the entire training session. The other values scale the learning
rate according to various formulas.

Only "constant" is supported by the XFormers library.

### Gradient accumulation steps

This is a parameter that allows you to use bigger batch sizes than
your GPU's VRAM would ordinarily accommodate, at the cost of some
performance.
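Concretely, gradients from several small batches are summed before each optimizer step, emulating one larger batch. The runnable PyTorch sketch below is a generic illustration with stand-in model and data, not InvokeAI's training loop:

```python
import torch

model = torch.nn.Linear(4, 1)                       # stand-in model
optimizer = torch.optim.SGD(model.parameters(), lr=0.0005)
data = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(8)]
accum_steps = 4                                     # "Gradient accumulation steps"

for step, (x, y) in enumerate(data):
    # Scale each loss so the summed gradient matches one batch of 8 * 4 samples.
    loss = torch.nn.functional.mse_loss(model(x), y) / accum_steps
    loss.backward()                                 # gradients accumulate across calls
    if (step + 1) % accum_steps == 0:
        optimizer.step()                            # one update per accum_steps batches
        optimizer.zero_grad()
```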
### Warmup steps

If "constant_with_warmup" is selected in the learning rate scheduler,
then this provides the number of warmup steps. Warmup steps have a
very low learning rate, and are one way of preventing early
overtraining.
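The "constant_with_warmup" schedule can be pictured as a linear ramp followed by a plateau; a sketch of the usual convention (an assumption, not InvokeAI's exact scheduler code):

```python
def lr_at_step(step, base_lr, warmup_steps):
    # Ramp linearly from 0 up to base_lr over warmup_steps, then hold constant.
    return base_lr * min(1.0, step / warmup_steps)
```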
## The training run

Start the training run by advancing to the OK button (bottom right)
and pressing <enter>. A series of progress messages will be displayed
as the training process proceeds. This may take an hour or two,
depending on settings and the speed of your system. Various log and
checkpoint files will be written into the output directory (ordinarily
`~/invokeai/text-inversion-output/my-model/`).

At the end of successful training, the system will copy the file
`learned_embeds.bin` into the InvokeAI root directory's `embeddings`
directory, using a subdirectory named after the trigger token. For
example, if the trigger token was `psychedelic`, then look for the
embeddings file in
`~/invokeai/embeddings/psychedelic/learned_embeds.bin`.

You may now launch InvokeAI and try out a prompt that uses the trigger
term. For example `a plate of banana sushi in <psychedelic> style`.
## **Training with the Command-Line Script**

InvokeAI also comes with a traditional command-line script for
launching textual inversion training. It is named
`textual_inversion`, and can be launched from within the
"developer's console", or from the command line after activating
InvokeAI's virtual environment.

It accepts a large number of arguments, which can be summarized by
passing the `--help` argument:

```sh
textual_inversion --help
```

Then, to utilize your subject at the invoke prompt

```bash
invoke> "a photo of *"
```
Typical usage is shown here:
```sh
python textual_inversion.py \
--model=stable-diffusion-1.5 \
--resolution=512 \
--learnable_property=style \
--initializer_token='*' \
--placeholder_token='<psychedelic>' \
--train_data_dir=/home/lstein/invokeai/training-data/psychedelic \
--output_dir=/home/lstein/invokeai/text-inversion-training/psychedelic \
--scale_lr \
--train_batch_size=8 \
--gradient_accumulation_steps=4 \
--max_train_steps=3000 \
--learning_rate=0.0005 \
--resume_from_checkpoint=latest \
--lr_scheduler=constant \
--mixed_precision=fp16 \
--only_save_embeds
```

This also works with image2image
## Reading

```bash
invoke> "waterfall and rainbow in the style of *" --init_img=./init-images/crude_drawing.png --strength=0.5 -s100 -n4
```
For more information on textual inversion, please see the following
resources:

For .pt files it's also possible to train multiple tokens (modify the
placeholder string in `configs/stable-diffusion/v1-finetune.yaml`) and combine
LDM checkpoints using:

```bash
python3 ./scripts/merge_embeddings.py \
--manager_ckpts /path/to/first/embedding.pt \
[</path/to/second/embedding.pt>,[...]] \
--output_path /path/to/output/embedding.pt
```

Credit goes to rinongal and the repository

Please see [the repository](https://github.com/rinongal/textual_inversion) and
* The [textual inversion repository](https://github.com/rinongal/textual_inversion) and
associated paper for details and limitations.
* [HuggingFace's textual inversion training
page](https://huggingface.co/docs/diffusers/training/text_inversion)
* [HuggingFace example script
documentation](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion)
(Note that this script is similar, but not identical, to
`textual_inversion`; it produces embed files that are completely compatible.)

---

copyright (c) 2023, Lincoln Stein and the InvokeAI Development Team
@@ -157,6 +157,8 @@ images in full-precision mode:
<!-- seperator -->
- [Prompt Engineering](features/PROMPTS.md)
<!-- seperator -->
- [Model Merging](features/MODEL_MERGING.md)
<!-- seperator -->
- Miscellaneous
- [NSFW Checker](features/NSFW.md)
- [Embiggen upscaling](features/EMBIGGEN.md)
@@ -119,10 +119,8 @@ manager, please follow these steps:

6. Run PIP

Be sure that the `invokeai` environment is active before doing this:

```bash
pip install --prefer-binary -r requirements.txt
pip --python invokeai install --use-pep517 .
```

7. Set up the runtime directory
@@ -137,7 +135,7 @@ manager, please follow these steps:
default to `invokeai` in your home directory.

```bash
configure_invokeai.py --root_dir ~/Programs/invokeai
configure_invokeai --root_dir ~/Programs/invokeai
```

The script `configure_invokeai.py` will interactively guide you through the
@@ -452,7 +450,7 @@ time. Note that this method only works with the PIP method.
step.

3. Run one additional step while you are in the source code repository
directory `pip install .` (note the dot at the end).
directory `pip install --use-pep517 .` (note the dot at the end).

4. That's all! Now, whenever you activate the virtual environment,
`invoke.py` will know where to look for the runtime directory without
@@ -18,7 +18,13 @@ Windows systems with no extra intervention.

## Macintosh

PyPatchMatch is not currently supported, but the team is working on it.
You need to have opencv installed so that pypatchmatch can be built:

```bash
brew install opencv
```

The next time you start `invoke`, after successfully installing opencv, pypatchmatch will be built.

## Linux

@@ -39,23 +45,16 @@ Prior to installing PyPatchMatch, you need to take the following steps:
sudo apt install python3-opencv libopencv-dev
```

3. Fix the naming of the `opencv` package configuration file:

```sh
cd /usr/lib/x86_64-linux-gnu/pkgconfig/
ln -sf opencv4.pc opencv.pc
```

4. Activate the environment you use for invokeai, either with `conda` or with a
3. Activate the environment you use for invokeai, either with `conda` or with a
virtual environment.

5. Install pypatchmatch:
4. Install pypatchmatch:

```sh
pip install "git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch"
pip install pypatchmatch
```

6. Confirm that pypatchmatch is installed. At the command-line prompt enter
5. Confirm that pypatchmatch is installed. At the command-line prompt enter
`python`, and then at the `>>>` line type
`from patchmatch import patch_match`: It should look like the following:
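The expected confirmation is roughly the session below (a sketch: the exact output varies by system, and the first import may pause while pypatchmatch compiles its C++ extension):

```python
# At the Python >>> prompt:
from patchmatch import patch_match  # completes without error once the build succeeds
```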
|
@ -254,65 +254,10 @@ steps:
|
||||
source invokeai/bin/activate
|
||||
```
|
||||
|
||||
4. Pick the correct `requirements*.txt` file for your hardware and operating
|
||||
system.
|
||||
|
||||
We have created a series of environment files suited for different operating
|
||||
systems and GPU hardware. They are located in the
|
||||
`environments-and-requirements` directory:
|
||||
|
||||
<figure markdown>
|
||||
|
||||
| filename | OS |
|
||||
| :---------------------------------: | :-------------------------------------------------------------: |
|
||||
| requirements-lin-amd.txt | Linux with an AMD (ROCm) GPU |
|
||||
| requirements-lin-arm64.txt | Linux running on arm64 systems |
|
||||
| requirements-lin-cuda.txt | Linux with an NVIDIA (CUDA) GPU |
|
||||
| requirements-mac-mps-cpu.txt | Macintoshes with MPS acceleration |
|
||||
| requirements-lin-win-colab-cuda.txt | Windows with an NVIDA (CUDA) GPU<br>(supports Google Colab too) |
|
||||
|
||||
</figure>
|
||||
|
||||
Select the appropriate requirements file, and make a link to it from
|
||||
`requirements.txt` in the top-level InvokeAI directory. The command to do
|
||||
this from the top-level directory is:
|
||||
|
||||
!!! example ""
|
||||
|
||||
=== "Macintosh and Linux"
|
||||
|
||||
!!! info "Replace `xxx` and `yyy` with the appropriate OS and GPU codes."
|
||||
4. Run PIP
|
||||
|
||||
```bash
|
||||
ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt
|
||||
```
|
||||
|
||||
=== "Windows"
|
||||
|
||||
!!! info "on Windows, admin privileges are required to make links, so we use the copy command instead"
|
||||
|
||||
```cmd
|
||||
copy environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt
|
||||
```
|
||||
|
||||
!!! warning
|
||||
|
||||
Please do not link or copy `environments-and-requirements/requirements-base.txt`.
|
||||
This is a base requirements file that does not have the platform-specific
|
||||
libraries. Also, be sure to link or copy the platform-specific file to
|
||||
a top-level file named `requirements.txt` as shown here. Running pip on
|
||||
a requirements file in a subdirectory will not work as expected.
|
||||
|
||||
When this is done, confirm that a file named `requirements.txt` has been
|
||||
created in the InvokeAI root directory and that it points to the correct
|
||||
file in `environments-and-requirements`.
|
||||
|
||||
5. Run PIP
|
||||
|
||||
Be sure that the `invokeai` environment is active before doing this:
|
||||
|
||||
```bash
|
||||
pip install --prefer-binary -r requirements.txt
|
||||
pip --python invokeai install --use-pep517 .
|
||||
```
|
||||
|
||||
---
|
||||
|
File diff suppressed because one or more lines are too long
frontend/dist/assets/index.0dadf5d0.css (vendored): 1 line changed; diff suppressed because one or more lines are too long
frontend/dist/assets/index.1b59e83a.js (vendored): 625 lines changed; diff suppressed because one or more lines are too long
frontend/dist/assets/index.8badc8b4.css (vendored, new file): 1 line; diff suppressed because one or more lines are too long
frontend/dist/assets/index.dd470915.js (vendored, new file): 625 lines; diff suppressed because one or more lines are too long
frontend/dist/index.html (vendored): 6 lines changed
@@ -7,8 +7,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
<script type="module" crossorigin src="./assets/index.1b59e83a.js"></script>
<link rel="stylesheet" href="./assets/index.0dadf5d0.css">
<script type="module" crossorigin src="./assets/index.dd470915.js"></script>
<link rel="stylesheet" href="./assets/index.8badc8b4.css">
<script type="module">try{import.meta.url;import("_").catch(()=>1);}catch(e){}window.__vite_is_modern_browser=true;</script>
<script type="module">!function(){if(window.__vite_is_modern_browser)return;console.warn("vite: loading legacy build because dynamic import or import.meta.url is unsupported, syntax error above should be ignored");var e=document.getElementById("vite-legacy-polyfill"),n=document.createElement("script");n.src=e.src,n.onload=function(){System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))},document.body.appendChild(n)}();</script>
</head>
@@ -18,6 +18,6 @@

<script nomodule>!function(){var e=document,t=e.createElement("script");if(!("noModule"in t)&&"onbeforeload"in t){var n=!1;e.addEventListener("beforeload",(function(e){if(e.target===t)n=!0;else if(!e.target.hasAttribute("nomodule")||!n)return;e.preventDefault()}),!0),t.type="module",t.src=".",e.head.appendChild(t),t.remove()}}();</script>
<script nomodule crossorigin id="vite-legacy-polyfill" src="./assets/polyfills-legacy-dde3a68a.js"></script>
<script nomodule crossorigin id="vite-legacy-entry" data-src="./assets/index-legacy-474a75fe.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
<script nomodule crossorigin id="vite-legacy-entry" data-src="./assets/index-legacy-6edbec57.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
</body>
</html>
frontend/dist/locales/common/en-US.json (vendored): 4 lines changed
@@ -17,6 +17,9 @@
  "langPortuguese": "Portuguese",
  "langFrench": "French",
  "langPolish": "Polish",
  "langSimplifiedChinese": "Simplified Chinese",
  "langSpanish": "Spanish",
  "langJapanese": "Japanese",
  "text2img": "Text To Image",
  "img2img": "Image To Image",
  "unifiedCanvas": "Unified Canvas",
@@ -32,6 +35,7 @@
  "upload": "Upload",
  "close": "Close",
  "load": "Load",
  "back": "Back",
  "statusConnected": "Connected",
  "statusDisconnected": "Disconnected",
  "statusError": "Error",
frontend/dist/locales/common/en.json (vendored): 2 lines changed
@@ -19,6 +19,7 @@
  "langPolish": "Polish",
  "langSimplifiedChinese": "Simplified Chinese",
  "langSpanish": "Spanish",
  "langJapanese": "Japanese",
  "text2img": "Text To Image",
  "img2img": "Image To Image",
  "unifiedCanvas": "Unified Canvas",
@@ -34,6 +35,7 @@
  "upload": "Upload",
  "close": "Close",
  "load": "Load",
  "back": "Back",
  "statusConnected": "Connected",
  "statusDisconnected": "Disconnected",
  "statusError": "Error",
frontend/dist/locales/common/ja.json (vendored, new file): 60 lines
@@ -0,0 +1,60 @@
{
  "hotkeysLabel": "Hotkeys",
  "themeLabel": "テーマ",
  "languagePickerLabel": "言語選択",
  "reportBugLabel": "バグ報告",
  "githubLabel": "Github",
  "discordLabel": "Discord",
  "settingsLabel": "設定",
  "darkTheme": "ダーク",
  "lightTheme": "ライト",
  "greenTheme": "緑",
  "langEnglish": "English",
  "langRussian": "Russian",
  "langItalian": "Italian",
  "langBrPortuguese": "Portuguese (Brazilian)",
  "langGerman": "German",
  "langPortuguese": "Portuguese",
  "langFrench": "French",
  "langPolish": "Polish",
  "langSimplifiedChinese": "Simplified Chinese",
  "langSpanish": "Spanish",
  "text2img": "Text To Image",
  "img2img": "Image To Image",
  "unifiedCanvas": "Unified Canvas",
  "nodes": "Nodes",
  "nodesDesc": "現在、画像生成のためのノードベースシステムを開発中です。機能についてのアップデートにご期待ください。",
  "postProcessing": "後処理",
  "postProcessDesc1": "Invoke AIは、多彩な後処理の機能を備えています。アップスケーリングと顔修復は、すでにWebUI上で利用可能です。これらは、[Text To Image]および[Image To Image]タブの[詳細オプション]メニューからアクセスできます。また、現在の画像表示の上やビューア内の画像アクションボタンを使って、画像を直接処理することもできます。",
  "postProcessDesc2": "より高度な後処理の機能を実現するための専用UIを近日中にリリース予定です。",
  "postProcessDesc3": "Invoke AI CLIでは、この他にもEmbiggenをはじめとする様々な機能を利用することができます。",
  "training": "追加学習",
  "trainingDesc1": "Textual InversionとDreamboothを使って、WebUIから独自のEmbeddingとチェックポイントを追加学習するための専用ワークフローです。",
  "trainingDesc2": "InvokeAIは、すでにメインスクリプトを使ったTextual Inversionによるカスタム埋め込み追加学習にも対応しています。",
  "upload": "アップロード",
  "close": "閉じる",
  "load": "ロード",
  "back": "戻る",
  "statusConnected": "接続済",
  "statusDisconnected": "切断済",
  "statusError": "エラー",
  "statusPreparing": "準備中",
  "statusProcessingCanceled": "処理をキャンセル",
  "statusProcessingComplete": "処理完了",
  "statusGenerating": "生成中",
  "statusGeneratingTextToImage": "Text To Imageで生成中",
  "statusGeneratingImageToImage": "Image To Imageで生成中",
  "statusGeneratingInpainting": "Generating Inpainting",
  "statusGeneratingOutpainting": "Generating Outpainting",
  "statusGenerationComplete": "生成完了",
  "statusIterationComplete": "Iteration Complete",
  "statusSavingImage": "画像を保存",
  "statusRestoringFaces": "顔の修復",
  "statusRestoringFacesGFPGAN": "顔の修復 (GFPGAN)",
  "statusRestoringFacesCodeFormer": "顔の修復 (CodeFormer)",
  "statusUpscaling": "アップスケーリング",
  "statusUpscalingESRGAN": "アップスケーリング (ESRGAN)",
  "statusLoadingModel": "モデルを読み込む",
  "statusModelChanged": "モデルを変更"
}
frontend/dist/locales/gallery/ja.json (vendored, new file): 17 lines
@@ -0,0 +1,17 @@
{
  "generations": "Generations",
  "showGenerations": "Show Generations",
  "uploads": "アップロード",
  "showUploads": "アップロードした画像を見る",
  "galleryImageSize": "画像のサイズ",
  "galleryImageResetSize": "サイズをリセット",
  "gallerySettings": "ギャラリーの設定",
  "maintainAspectRatio": "アスペクト比を維持",
  "autoSwitchNewImages": "Auto-Switch to New Images",
  "singleColumnLayout": "シングルカラムレイアウト",
  "pinGallery": "ギャラリーにピン留め",
  "allImagesLoaded": "すべての画像を読み込む",
  "loadMore": "さらに読み込む",
  "noImagesInGallery": "ギャラリーに画像がありません"
}
frontend/dist/locales/hotkeys/ja.json (vendored, new file): 208 lines
@@ -0,0 +1,208 @@
{
  "keyboardShortcuts": "キーボードショートカット",
  "appHotkeys": "アプリのホットキー",
  "generalHotkeys": "Generalのホットキー",
  "galleryHotkeys": "ギャラリーのホットキー",
  "unifiedCanvasHotkeys": "Unified Canvasのホットキー",
  "invoke": {
    "title": "Invoke",
    "desc": "画像を生成"
  },
  "cancel": {
    "title": "キャンセル",
    "desc": "画像の生成をキャンセル"
  },
  "focusPrompt": {
    "title": "Focus Prompt",
    "desc": "プロンプトテキストボックスにフォーカス"
  },
  "toggleOptions": {
    "title": "オプションパネルのトグル",
    "desc": "オプションパネルの開閉"
  },
  "pinOptions": {
    "title": "ピン",
    "desc": "オプションパネルを固定"
  },
  "toggleViewer": {
    "title": "ビュワーのトグル",
    "desc": "ビュワーを開閉"
  },
  "toggleGallery": {
    "title": "ギャラリーのトグル",
    "desc": "ギャラリードロワーの開閉"
  },
  "maximizeWorkSpace": {
    "title": "作業領域の最大化",
    "desc": "パネルを閉じて、作業領域を最大に"
  },
  "changeTabs": {
    "title": "タブの切替",
    "desc": "他の作業領域と切替"
  },
  "consoleToggle": {
    "title": "コンソールのトグル",
    "desc": "コンソールの開閉"
  },
  "setPrompt": {
    "title": "プロンプトをセット",
    "desc": "現在の画像のプロンプトを使用"
  },
  "setSeed": {
    "title": "シード値をセット",
    "desc": "現在の画像のシード値を使用"
  },
  "setParameters": {
    "title": "パラメータをセット",
    "desc": "現在の画像のすべてのパラメータを使用"
  },
  "restoreFaces": {
    "title": "顔の修復",
    "desc": "現在の画像を修復"
  },
  "upscale": {
    "title": "アップスケール",
    "desc": "現在の画像をアップスケール"
  },
  "showInfo": {
    "title": "情報を見る",
    "desc": "現在の画像のメタデータ情報を表示"
  },
  "sendToImageToImage": {
    "title": "Image To Imageに転送",
    "desc": "現在の画像をImage to Imageに転送"
  },
  "deleteImage": {
    "title": "画像を削除",
    "desc": "現在の画像を削除"
  },
  "closePanels": {
    "title": "パネルを閉じる",
    "desc": "開いているパネルを閉じる"
  },
  "previousImage": {
    "title": "前の画像",
    "desc": "ギャラリー内の1つ前の画像を表示"
  },
  "nextImage": {
    "title": "次の画像",
    "desc": "ギャラリー内の1つ後の画像を表示"
  },
  "toggleGalleryPin": {
    "title": "ギャラリードロワーの固定",
    "desc": "ギャラリーをUIにピン留め/解除"
  },
  "increaseGalleryThumbSize": {
    "title": "ギャラリーの画像を拡大",
    "desc": "ギャラリーのサムネイル画像を拡大"
  },
  "decreaseGalleryThumbSize": {
    "title": "ギャラリーの画像サイズを縮小",
    "desc": "ギャラリーのサムネイル画像を縮小"
  },
  "selectBrush": {
    "title": "ブラシを選択",
    "desc": "ブラシを選択"
  },
  "selectEraser": {
    "title": "消しゴムを選択",
    "desc": "消しゴムを選択"
  },
  "decreaseBrushSize": {
    "title": "ブラシサイズを縮小",
    "desc": "ブラシ/消しゴムのサイズを縮小"
  },
  "increaseBrushSize": {
    "title": "ブラシサイズを拡大",
    "desc": "ブラシ/消しゴムのサイズを拡大"
  },
  "decreaseBrushOpacity": {
    "title": "ブラシの不透明度を下げる",
    "desc": "キャンバスブラシの不透明度を下げる"
  },
  "increaseBrushOpacity": {
    "title": "ブラシの不透明度を上げる",
    "desc": "キャンバスブラシの不透明度を上げる"
  },
  "moveTool": {
    "title": "Move Tool",
    "desc": "Allows canvas navigation"
  },
  "fillBoundingBox": {
    "title": "バウンディングボックスを塗りつぶす",
    "desc": "ブラシの色でバウンディングボックス領域を塗りつぶす"
  },
  "eraseBoundingBox": {
    "title": "バウンディングボックスを消す",
    "desc": "バウンディングボックス領域を消す"
  },
  "colorPicker": {
    "title": "カラーピッカーを選択",
    "desc": "カラーピッカーを選択"
  },
  "toggleSnap": {
    "title": "Toggle Snap",
    "desc": "Toggles Snap to Grid"
  },
  "quickToggleMove": {
    "title": "Quick Toggle Move",
    "desc": "Temporarily toggles Move mode"
  },
  "toggleLayer": {
    "title": "レイヤーを切替",
    "desc": "マスク/ベースレイヤの選択を切替"
  },
  "clearMask": {
    "title": "マスクを消す",
    "desc": "マスク全体を消す"
  },
  "hideMask": {
    "title": "マスクを非表示",
    "desc": "マスクを表示/非表示"
  },
  "showHideBoundingBox": {
    "title": "バウンディングボックスを表示/非表示",
    "desc": "バウンディングボックスの表示/非表示を切替"
  },
  "mergeVisible": {
    "title": "Merge Visible",
    "desc": "Merge all visible layers of canvas"
  },
  "saveToGallery": {
    "title": "ギャラリーに保存",
    "desc": "現在のキャンバスをギャラリーに保存"
  },
  "copyToClipboard": {
    "title": "クリップボードにコピー",
    "desc": "現在のキャンバスをクリップボードにコピー"
  },
  "downloadImage": {
    "title": "画像をダウンロード",
    "desc": "現在の画像をダウンロード"
  },
  "undoStroke": {
    "title": "Undo Stroke",
    "desc": "Undo a brush stroke"
  },
  "redoStroke": {
    "title": "Redo Stroke",
    "desc": "Redo a brush stroke"
  },
  "resetView": {
    "title": "キャンバスをリセット",
    "desc": "キャンバスをリセット"
  },
  "previousStagingImage": {
    "title": "Previous Staging Image",
    "desc": "Previous Staging Area Image"
  },
  "nextStagingImage": {
    "title": "Next Staging Image",
    "desc": "Next Staging Area Image"
  },
  "acceptStagingImage": {
    "title": "Accept Staging Image",
    "desc": "Accept Current Staging Area Image"
  }
}
frontend/dist/locales/modelmanager/en-US.json (vendored): 19 lines changed
@@ -1,12 +1,18 @@
{
  "modelManager": "Model Manager",
  "model": "Model",
  "allModels": "All Models",
  "checkpointModels": "Checkpoints",
  "diffusersModels": "Diffusers",
  "safetensorModels": "SafeTensors",
  "modelAdded": "Model Added",
  "modelUpdated": "Model Updated",
  "modelEntryDeleted": "Model Entry Deleted",
  "cannotUseSpaces": "Cannot Use Spaces",
  "addNew": "Add New",
  "addNewModel": "Add New Model",
  "addCheckpointModel": "Add Checkpoint / Safetensor Model",
  "addDiffuserModel": "Add Diffusers",
  "addManually": "Add Manually",
  "manual": "Manual",
  "name": "Name",
@@ -17,8 +23,12 @@
  "configValidationMsg": "Path to the config file of your model.",
  "modelLocation": "Model Location",
  "modelLocationValidationMsg": "Path to where your model is located.",
  "repo_id": "Repo ID",
  "repoIDValidationMsg": "Online repository of your model",
  "vaeLocation": "VAE Location",
  "vaeLocationValidationMsg": "Path to where your VAE is located.",
  "vaeRepoID": "VAE Repo ID",
  "vaeRepoIDValidationMsg": "Online repository of your VAE",
  "width": "Width",
  "widthValidationMsg": "Default width of your model.",
  "height": "Height",
@@ -34,6 +44,7 @@
  "checkpointFolder": "Checkpoint Folder",
  "clearCheckpointFolder": "Clear Checkpoint Folder",
  "findModels": "Find Models",
  "scanAgain": "Scan Again",
  "modelsFound": "Models Found",
  "selectFolder": "Select Folder",
  "selected": "Selected",
@@ -42,9 +53,15 @@
  "showExisting": "Show Existing",
  "addSelected": "Add Selected",
  "modelExists": "Model Exists",
  "selectAndAdd": "Select and Add Models Listed Below",
  "noModelsFound": "No Models Found",
  "delete": "Delete",
  "deleteModel": "Delete Model",
  "deleteConfig": "Delete Config",
  "deleteMsg1": "Are you sure you want to delete this model entry from InvokeAI?",
  "deleteMsg2": "This will not delete the model checkpoint file from your disk. You can readd them if you wish to."
  "deleteMsg2": "This will not delete the model checkpoint file from your disk. You can readd them if you wish to.",
  "formMessageDiffusersModelLocation": "Diffusers Model Location",
  "formMessageDiffusersModelLocationDesc": "Please enter at least one.",
  "formMessageDiffusersVAELocation": "VAE Location",
  "formMessageDiffusersVAELocationDesc": "If not provided, InvokeAI will look for the VAE file inside the model location given above."
}
frontend/dist/locales/modelmanager/en.json (vendored): 16 lines changed
@@ -1,12 +1,18 @@
{
  "modelManager": "Model Manager",
  "model": "Model",
  "allModels": "All Models",
  "checkpointModels": "Checkpoints",
  "diffusersModels": "Diffusers",
  "safetensorModels": "SafeTensors",
  "modelAdded": "Model Added",
  "modelUpdated": "Model Updated",
  "modelEntryDeleted": "Model Entry Deleted",
  "cannotUseSpaces": "Cannot Use Spaces",
  "addNew": "Add New",
  "addNewModel": "Add New Model",
  "addCheckpointModel": "Add Checkpoint / Safetensor Model",
  "addDiffuserModel": "Add Diffusers",
  "addManually": "Add Manually",
  "manual": "Manual",
  "name": "Name",
@@ -17,8 +23,12 @@
  "configValidationMsg": "Path to the config file of your model.",
  "modelLocation": "Model Location",
  "modelLocationValidationMsg": "Path to where your model is located.",
  "repo_id": "Repo ID",
  "repoIDValidationMsg": "Online repository of your model",
  "vaeLocation": "VAE Location",
  "vaeLocationValidationMsg": "Path to where your VAE is located.",
  "vaeRepoID": "VAE Repo ID",
  "vaeRepoIDValidationMsg": "Online repository of your VAE",
  "width": "Width",
  "widthValidationMsg": "Default width of your model.",
  "height": "Height",
@@ -49,5 +59,9 @@
  "deleteModel": "Delete Model",
  "deleteConfig": "Delete Config",
  "deleteMsg1": "Are you sure you want to delete this model entry from InvokeAI?",
  "deleteMsg2": "This will not delete the model checkpoint file from your disk. You can readd them if you wish to."
  "deleteMsg2": "This will not delete the model checkpoint file from your disk. You can readd them if you wish to.",
  "formMessageDiffusersModelLocation": "Diffusers Model Location",
  "formMessageDiffusersModelLocationDesc": "Please enter at least one.",
  "formMessageDiffusersVAELocation": "VAE Location",
  "formMessageDiffusersVAELocationDesc": "If not provided, InvokeAI will look for the VAE file inside the model location given above."
}
frontend/dist/locales/modelmanager/ja.json (vendored, new file): 68 lines
@@ -0,0 +1,68 @@
{
  "modelManager": "モデルマネージャ",
  "model": "モデル",
  "allModels": "すべてのモデル",
  "checkpointModels": "Checkpoints",
  "diffusersModels": "Diffusers",
  "safetensorModels": "SafeTensors",
  "modelAdded": "モデルを追加",
  "modelUpdated": "モデルをアップデート",
  "modelEntryDeleted": "Model Entry Deleted",
  "cannotUseSpaces": "Cannot Use Spaces",
  "addNew": "新規に追加",
  "addNewModel": "新規モデル追加",
  "addCheckpointModel": "Checkpointを追加 / Safetensorモデル",
  "addDiffuserModel": "Diffusersを追加",
  "addManually": "手動で追加",
  "manual": "手動",
  "name": "名前",
  "nameValidationMsg": "モデルの名前を入力",
  "description": "概要",
  "descriptionValidationMsg": "モデルの概要を入力",
  "config": "Config",
  "configValidationMsg": "モデルの設定ファイルへのパス",
  "modelLocation": "モデルの場所",
  "modelLocationValidationMsg": "モデルが配置されている場所へのパス。",
  "repo_id": "Repo ID",
  "repoIDValidationMsg": "モデルのリモートリポジトリ",
  "vaeLocation": "VAEの場所",
  "vaeLocationValidationMsg": "Vaeが配置されている場所へのパス",
  "vaeRepoID": "VAE Repo ID",
  "vaeRepoIDValidationMsg": "Vaeのリモートリポジトリ",
  "width": "幅",
  "widthValidationMsg": "モデルのデフォルトの幅",
  "height": "高さ",
  "heightValidationMsg": "モデルのデフォルトの高さ",
  "addModel": "モデルを追加",
  "updateModel": "モデルをアップデート",
  "availableModels": "モデルを有効化",
  "search": "検索",
  "load": "Load",
  "active": "active",
  "notLoaded": "読み込まれていません",
  "cached": "キャッシュ済",
  "checkpointFolder": "Checkpointフォルダ",
  "clearCheckpointFolder": "Checkpointフォルダ内を削除",
  "findModels": "モデルを見つける",
  "scanAgain": "再度スキャン",
  "modelsFound": "モデルを発見",
  "selectFolder": "フォルダを選択",
  "selected": "選択済",
  "selectAll": "すべて選択",
  "deselectAll": "すべて選択解除",
  "showExisting": "既存を表示",
  "addSelected": "選択済を追加",
  "modelExists": "モデルの有無",
  "selectAndAdd": "以下のモデルを選択し、追加できます。",
  "noModelsFound": "モデルが見つかりません。",
  "delete": "削除",
  "deleteModel": "モデルを削除",
  "deleteConfig": "設定を削除",
  "deleteMsg1": "InvokeAIからこのモデルエントリーを削除してよろしいですか?",
  "deleteMsg2": "これは、ドライブからモデルのCheckpointファイルを削除するものではありません。必要であればそれらを読み込むことができます。",
  "formMessageDiffusersModelLocation": "Diffusersモデルの場所",
  "formMessageDiffusersModelLocationDesc": "最低でも1つは入力してください。",
  "formMessageDiffusersVAELocation": "VAEの場所s",
  "formMessageDiffusersVAELocationDesc": "指定しない場合、InvokeAIは上記のモデルの場所にあるVAEファイルを探します。"
}
63
frontend/dist/locales/options/ja.json
vendored
Normal file
@ -0,0 +1,63 @@
{
"images": "画像",
"steps": "ステップ数",
"cfgScale": "CFG Scale",
"width": "幅",
"height": "高さ",
"sampler": "Sampler",
"seed": "シード値",
"randomizeSeed": "ランダムなシード値",
"shuffle": "シャッフル",
"noiseThreshold": "Noise Threshold",
"perlinNoise": "Perlin Noise",
"variations": "Variations",
"variationAmount": "Variation Amount",
"seedWeights": "シード値の重み",
"faceRestoration": "顔の修復",
"restoreFaces": "顔の修復",
"type": "Type",
"strength": "強度",
"upscaling": "アップスケーリング",
"upscale": "アップスケール",
"upscaleImage": "画像をアップスケール",
"scale": "Scale",
"otherOptions": "その他のオプション",
"seamlessTiling": "Seamless Tiling",
"hiresOptim": "High Res Optimization",
"imageFit": "Fit Initial Image To Output Size",
"codeformerFidelity": "Fidelity",
"seamSize": "Seam Size",
"seamBlur": "Seam Blur",
"seamStrength": "Seam Strength",
"seamSteps": "Seam Steps",
"inpaintReplace": "Inpaint Replace",
"scaleBeforeProcessing": "処理前のスケール",
"scaledWidth": "幅のスケール",
"scaledHeight": "高さのスケール",
"infillMethod": "Infill Method",
"tileSize": "Tile Size",
"boundingBoxHeader": "バウンディングボックス",
"seamCorrectionHeader": "Seam Correction",
"infillScalingHeader": "Infill and Scaling",
"img2imgStrength": "Image To Imageの強度",
"toggleLoopback": "Toggle Loopback",
"invoke": "Invoke",
"cancel": "キャンセル",
"promptPlaceholder": "Type prompt here. [negative tokens], (upweight)++, (downweight)--, swap and blend are available (see docs)",
"sendTo": "転送",
"sendToImg2Img": "Image to Imageに転送",
"sendToUnifiedCanvas": "Unified Canvasに転送",
"copyImageToLink": "Copy Image To Link",
"downloadImage": "画像をダウンロード",
"openInViewer": "ビュワーを開く",
"closeViewer": "ビュワーを閉じる",
"usePrompt": "プロンプトを使用",
"useSeed": "シード値を使用",
"useAll": "すべてを使用",
"useInitImg": "Use Initial Image",
"info": "情報",
"deleteImage": "画像を削除",
"initialImage": "Inital Image",
"showOptionsPanel": "オプションパネルを表示"
}
14
frontend/dist/locales/settings/ja.json
vendored
Normal file
@ -0,0 +1,14 @@
{
"models": "モデル",
"displayInProgress": "生成中の画像を表示する",
"saveSteps": "nステップごとに画像を保存",
"confirmOnDelete": "削除時に確認",
"displayHelpIcons": "ヘルプアイコンを表示",
"useCanvasBeta": "キャンバスレイアウト(Beta)を使用する",
"enableImageDebugging": "画像のデバッグを有効化",
"resetWebUI": "WebUIをリセット",
"resetWebUIDesc1": "WebUIのリセットは、画像と保存された設定のキャッシュをリセットするだけです。画像を削除するわけではありません。",
"resetWebUIDesc2": "もしギャラリーに画像が表示されないなど、何か問題が発生した場合はGitHubにissueを提出する前にリセットを試してください。",
"resetComplete": "WebUIはリセットされました。F5を押して再読み込みしてください。"
}
32
frontend/dist/locales/toast/ja.json
vendored
Normal file
@ -0,0 +1,32 @@
{
"tempFoldersEmptied": "Temp Folder Emptied",
"uploadFailed": "アップロード失敗",
"uploadFailedMultipleImagesDesc": "一度にアップロードできる画像は1枚のみです。",
"uploadFailedUnableToLoadDesc": "ファイルを読み込むことができません。",
"downloadImageStarted": "画像ダウンロード開始",
"imageCopied": "画像をコピー",
"imageLinkCopied": "画像のURLをコピー",
"imageNotLoaded": "画像を読み込めません。",
"imageNotLoadedDesc": "Image To Imageに転送する画像が見つかりません。",
"imageSavedToGallery": "画像をギャラリーに保存する",
"canvasMerged": "Canvas Merged",
"sentToImageToImage": "Image To Imageに転送",
"sentToUnifiedCanvas": "Unified Canvasに転送",
"parametersSet": "Parameters Set",
"parametersNotSet": "Parameters Not Set",
"parametersNotSetDesc": "この画像にはメタデータがありません。",
"parametersFailed": "パラメータ読み込みの不具合",
"parametersFailedDesc": "initイメージを読み込めません。",
"seedSet": "Seed Set",
"seedNotSet": "Seed Not Set",
"seedNotSetDesc": "この画像のシード値が見つかりません。",
"promptSet": "Prompt Set",
"promptNotSet": "Prompt Not Set",
"promptNotSetDesc": "この画像のプロンプトが見つかりませんでした。",
"upscalingFailed": "アップスケーリング失敗",
"faceRestoreFailed": "顔の修復に失敗",
"metadataLoadFailed": "メタデータの読み込みに失敗。",
"initialImageSet": "Initial Image Set",
"initialImageNotSet": "Initial Image Not Set",
"initialImageNotSetDesc": "Could not load initial image"
}
16
frontend/dist/locales/tooltip/it.json
vendored
@ -1 +1,15 @@
{}
{
"feature": {
"prompt": "Questo è il campo del prompt. Il prompt include oggetti di generazione e termini stilistici. Puoi anche aggiungere il peso (importanza del token) nel prompt, ma i comandi e i parametri dell'interfaccia a linea di comando non funzioneranno.",
"gallery": "Galleria visualizza le generazioni dalla cartella degli output man mano che vengono create. Le impostazioni sono memorizzate all'interno di file e accessibili dal menu contestuale.",
"other": "Queste opzioni abiliteranno modalità di elaborazione alternative per Invoke. 'Piastrella senza cuciture' creerà modelli ripetuti nell'output. 'Ottimizzzazione Alta risoluzione' è la generazione in due passaggi con 'Immagine a Immagine': usa questa impostazione quando vuoi un'immagine più grande e più coerente senza artefatti. Ci vorrà più tempo del solito 'Testo a Immagine'.",
"seed": "Il valore del Seme influenza il rumore iniziale da cui è formata l'immagine. Puoi usare i semi già esistenti dalle immagini precedenti. 'Soglia del rumore' viene utilizzato per mitigare gli artefatti a valori CFG elevati (provare l'intervallo 0-10) e Perlin per aggiungere il rumore Perlin durante la generazione: entrambi servono per aggiungere variazioni ai risultati.",
"variations": "Prova una variazione con un valore compreso tra 0.1 e 1.0 per modificare il risultato per un dato seme. Variazioni interessanti del seme sono comprese tra 0.1 e 0.3.",
"upscale": "Utilizza ESRGAN per ingrandire l'immagine subito dopo la generazione.",
"faceCorrection": "Correzione del volto con GFPGAN o Codeformer: l'algoritmo rileva i volti nell'immagine e corregge eventuali difetti. Un valore alto cambierà maggiormente l'immagine, dando luogo a volti più attraenti. Codeformer con una maggiore fedeltà preserva l'immagine originale a scapito di una correzione facciale più forte.",
"imageToImage": "Da Immagine a Immagine carica qualsiasi immagine come iniziale, che viene quindi utilizzata per generarne una nuova in base al prompt. Più alto è il valore, più cambierà l'immagine risultante. Sono possibili valori da 0.0 a 1.0, l'intervallo consigliato è 0.25-0.75",
"boundingBox": "Il riquadro di selezione è lo stesso delle impostazioni Larghezza e Altezza per da Testo a Immagine o da Immagine a Immagine. Verrà elaborata solo l'area nella casella.",
"seamCorrection": "Controlla la gestione delle giunzioni visibili che si verificano tra le immagini generate sulla tela.",
"infillAndScaling": "Gestisce i metodi di riempimento (utilizzati su aree mascherate o cancellate dell'area di disegno) e il ridimensionamento (utile per i riquadri di selezione di piccole dimensioni)."
}
}
16
frontend/dist/locales/tooltip/ja.json
vendored
Normal file
@ -0,0 +1,16 @@
{
"feature": {
"prompt": "これはプロンプトフィールドです。プロンプトには生成オブジェクトや文法用語が含まれます。プロンプトにも重み(Tokenの重要度)を付けることができますが、CLIコマンドやパラメータは機能しません。",
"gallery": "ギャラリーは、出力先フォルダから生成物を表示します。設定はファイル内に保存され、コンテキストメニューからアクセスできます。.",
"other": "These options will enable alternative processing modes for Invoke. 'Seamless tiling' will create repeating patterns in the output. 'High resolution' is generation in two steps with img2img: use this setting when you want a larger and more coherent image without artifacts. It will take longer that usual txt2img.",
"seed": "シード値は、画像が形成される際の初期ノイズに影響します。以前の画像から既に存在するシードを使用することができます。ノイズしきい値は高いCFG値でのアーティファクトを軽減するために使用され、Perlinは生成中にPerlinノイズを追加します(0-10の範囲を試してみてください): どちらも出力にバリエーションを追加するのに役立ちます。",
"variations": "0.1から1.0の間の値で試し、付与されたシードに対する結果を変えてみてください。面白いバリュエーションは0.1〜0.3の間です。",
"upscale": "生成直後の画像をアップスケールするには、ESRGANを使用します。",
"faceCorrection": "GFPGANまたはCodeformerによる顔の修復: 画像内の顔を検出し不具合を修正するアルゴリズムです。高い値を設定すると画像がより変化し、より魅力的な顔になります。Codeformerは顔の修復を犠牲にして、元の画像をできる限り保持します。",
"imageToImage": "Image To Imageは任意の画像を初期値として読み込み、プロンプトとともに新しい画像を生成するために使用されます。値が高いほど結果画像はより変化します。0.0から1.0までの値が可能で、推奨範囲は0.25から0.75です。",
"boundingBox": "バウンディングボックスは、Text To ImageまたはImage To Imageの幅/高さの設定と同じです。ボックス内の領域のみが処理されます。",
"seamCorrection": "キャンバス上の生成された画像間に発生する可視可能な境界の処理を制御します。",
"infillAndScaling": "Manage infill methods (used on masked or erased areas of the canvas) and scaling (useful for small bounding box sizes)."
}
}
15
frontend/dist/locales/tooltips/it.json
vendored
@ -1,15 +0,0 @@
{
"feature": {
"prompt": "Questo è il campo del prompt. Il prompt include oggetti di generazione e termini stilistici. Puoi anche aggiungere il peso (importanza del token) nel prompt, ma i comandi e i parametri dell'interfaccia a linea di comando non funzioneranno.",
"gallery": "Galleria visualizza le generazioni dalla cartella degli output man mano che vengono create. Le impostazioni sono memorizzate all'interno di file e accessibili dal menu contestuale.",
"other": "Queste opzioni abiliteranno modalità di elaborazione alternative per Invoke. 'Piastrella senza cuciture' creerà modelli ripetuti nell'output. 'Ottimizzzazione Alta risoluzione' è la generazione in due passaggi con 'Immagine a Immagine': usa questa impostazione quando vuoi un'immagine più grande e più coerente senza artefatti. Ci vorrà più tempo del solito 'Testo a Immagine'.",
"seed": "Il valore del Seme influenza il rumore iniziale da cui è formata l'immagine. Puoi usare i semi già esistenti dalle immagini precedenti. 'Soglia del rumore' viene utilizzato per mitigare gli artefatti a valori CFG elevati (provare l'intervallo 0-10) e Perlin per aggiungere il rumore Perlin durante la generazione: entrambi servono per aggiungere variazioni ai risultati.",
"variations": "Prova una variazione con un valore compreso tra 0.1 e 1.0 per modificare il risultato per un dato seme. Variazioni interessanti del seme sono comprese tra 0.1 e 0.3.",
"upscale": "Utilizza ESRGAN per ingrandire l'immagine subito dopo la generazione.",
"faceCorrection": "Correzione del volto con GFPGAN o Codeformer: l'algoritmo rileva i volti nell'immagine e corregge eventuali difetti. Un valore alto cambierà maggiormente l'immagine, dando luogo a volti più attraenti. Codeformer con una maggiore fedeltà preserva l'immagine originale a scapito di una correzione facciale più forte.",
"imageToImage": "Da Immagine a Immagine carica qualsiasi immagine come iniziale, che viene quindi utilizzata per generarne una nuova in base al prompt. Più alto è il valore, più cambierà l'immagine risultante. Sono possibili valori da 0.0 a 1.0, l'intervallo consigliato è 0.25-0.75",
"boundingBox": "Il riquadro di selezione è lo stesso delle impostazioni Larghezza e Altezza per dat Testo a Immagine o da Immagine a Immagine. Verrà elaborata solo l'area nella casella.",
"seamCorrection": "Controlla la gestione delle giunzioni visibili che si verificano tra le immagini generate sulla tela.",
"infillAndScaling": "Gestisce i metodi di riempimento (utilizzati su aree mascherate o cancellate dell'area di disegno) e il ridimensionamento (utile per i riquadri di selezione di piccole dimensioni)."
}
}
60
frontend/dist/locales/unifiedcanvas/ja.json
vendored
Normal file
@ -0,0 +1,60 @@
{
"layer": "Layer",
"base": "Base",
"mask": "マスク",
"maskingOptions": "マスクのオプション",
"enableMask": "マスクを有効化",
"preserveMaskedArea": "マスク領域の保存",
"clearMask": "マスクを解除",
"brush": "ブラシ",
"eraser": "消しゴム",
"fillBoundingBox": "バウンディングボックスの塗りつぶし",
"eraseBoundingBox": "バウンディングボックスの消去",
"colorPicker": "カラーピッカー",
"brushOptions": "ブラシオプション",
"brushSize": "サイズ",
"move": "Move",
"resetView": "Reset View",
"mergeVisible": "Merge Visible",
"saveToGallery": "ギャラリーに保存",
"copyToClipboard": "クリップボードにコピー",
"downloadAsImage": "画像としてダウンロード",
"undo": "取り消し",
"redo": "やり直し",
"clearCanvas": "キャンバスを片付ける",
"canvasSettings": "キャンバスの設定",
"showIntermediates": "Show Intermediates",
"showGrid": "グリッドを表示",
"snapToGrid": "Snap to Grid",
"darkenOutsideSelection": "外周を暗くする",
"autoSaveToGallery": "ギャラリーに自動保存",
"saveBoxRegionOnly": "ボックス領域のみ保存",
"limitStrokesToBox": "Limit Strokes to Box",
"showCanvasDebugInfo": "キャンバスのデバッグ情報を表示",
"clearCanvasHistory": "キャンバスの履歴を削除",
"clearHistory": "履歴を削除",
"clearCanvasHistoryMessage": "履歴を消去すると現在のキャンバスは残りますが、取り消しややり直しの履歴は不可逆的に消去されます。",
"clearCanvasHistoryConfirm": "履歴を削除しますか?",
"emptyTempImageFolder": "Empty Temp Image Folde",
"emptyFolder": "空のフォルダ",
"emptyTempImagesFolderMessage": "一時フォルダを空にすると、Unified Canvasも完全にリセットされます。これには、すべての取り消し/やり直しの履歴、ステージング領域の画像、およびキャンバスのベースレイヤーが含まれます。",
"emptyTempImagesFolderConfirm": "一時フォルダを削除しますか?",
"activeLayer": "Active Layer",
"canvasScale": "Canvas Scale",
"boundingBox": "バウンディングボックス",
"scaledBoundingBox": "Scaled Bounding Box",
"boundingBoxPosition": "バウンディングボックスの位置",
"canvasDimensions": "キャンバスの大きさ",
"canvasPosition": "キャンバスの位置",
"cursorPosition": "カーソルの位置",
"previous": "前",
"next": "次",
"accept": "同意",
"showHide": "表示/非表示",
"discardAll": "すべて破棄",
"betaClear": "Clear",
"betaDarkenOutside": "Darken Outside",
"betaLimitToBox": "Limit To Box",
"betaPreserveMasked": "Preserve Masked"
}
@ -19,6 +19,7 @@
"langPolish": "Polish",
"langSimplifiedChinese": "Simplified Chinese",
"langSpanish": "Spanish",
"langJapanese": "Japanese",
"text2img": "Text To Image",
"img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas",
@ -19,6 +19,7 @@
"langPolish": "Polish",
"langSimplifiedChinese": "Simplified Chinese",
"langSpanish": "Spanish",
"langJapanese": "Japanese",
"text2img": "Text To Image",
"img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas",
60
frontend/public/locales/common/ja.json
Normal file
@ -0,0 +1,60 @@
{
"hotkeysLabel": "Hotkeys",
"themeLabel": "テーマ",
"languagePickerLabel": "言語選択",
"reportBugLabel": "バグ報告",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "設定",
"darkTheme": "ダーク",
"lightTheme": "ライト",
"greenTheme": "緑",
"langEnglish": "English",
"langRussian": "Russian",
"langItalian": "Italian",
"langBrPortuguese": "Portuguese (Brazilian)",
"langGerman": "German",
"langPortuguese": "Portuguese",
"langFrench": "French",
"langPolish": "Polish",
"langSimplifiedChinese": "Simplified Chinese",
"langSpanish": "Spanish",
"text2img": "Text To Image",
"img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas",
"nodes": "Nodes",
"nodesDesc": "現在、画像生成のためのノードベースシステムを開発中です。機能についてのアップデートにご期待ください。",
"postProcessing": "後処理",
"postProcessDesc1": "Invoke AIは、多彩な後処理の機能を備えています。アップスケーリングと顔修復は、すでにWebUI上で利用可能です。これらは、[Text To Image]および[Image To Image]タブの[詳細オプション]メニューからアクセスできます。また、現在の画像表示の上やビューア内の画像アクションボタンを使って、画像を直接処理することもできます。",
"postProcessDesc2": "より高度な後処理の機能を実現するための専用UIを近日中にリリース予定です。",
"postProcessDesc3": "Invoke AI CLIでは、この他にもEmbiggenをはじめとする様々な機能を利用することができます。",
"training": "追加学習",
"trainingDesc1": "Textual InversionとDreamboothを使って、WebUIから独自のEmbeddingとチェックポイントを追加学習するための専用ワークフローです。",
"trainingDesc2": "InvokeAIは、すでにメインスクリプトを使ったTextual Inversionによるカスタム埋め込み追加学習にも対応しています。",
"upload": "アップロード",
"close": "閉じる",
"load": "ロード",
"back": "戻る",
"statusConnected": "接続済",
"statusDisconnected": "切断済",
"statusError": "エラー",
"statusPreparing": "準備中",
"statusProcessingCanceled": "処理をキャンセル",
"statusProcessingComplete": "処理完了",
"statusGenerating": "生成中",
"statusGeneratingTextToImage": "Text To Imageで生成中",
"statusGeneratingImageToImage": "Image To Imageで生成中",
"statusGeneratingInpainting": "Generating Inpainting",
"statusGeneratingOutpainting": "Generating Outpainting",
"statusGenerationComplete": "生成完了",
"statusIterationComplete": "Iteration Complete",
"statusSavingImage": "画像を保存",
"statusRestoringFaces": "顔の修復",
"statusRestoringFacesGFPGAN": "顔の修復 (GFPGAN)",
"statusRestoringFacesCodeFormer": "顔の修復 (CodeFormer)",
"statusUpscaling": "アップスケーリング",
"statusUpscalingESRGAN": "アップスケーリング (ESRGAN)",
"statusLoadingModel": "モデルを読み込む",
"statusModelChanged": "モデルを変更"
}
17
frontend/public/locales/gallery/ja.json
Normal file
@ -0,0 +1,17 @@
{
"generations": "Generations",
"showGenerations": "Show Generations",
"uploads": "アップロード",
"showUploads": "アップロードした画像を見る",
"galleryImageSize": "画像のサイズ",
"galleryImageResetSize": "サイズをリセット",
"gallerySettings": "ギャラリーの設定",
"maintainAspectRatio": "アスペクト比を維持",
"autoSwitchNewImages": "Auto-Switch to New Images",
"singleColumnLayout": "シングルカラムレイアウト",
"pinGallery": "ギャラリーにピン留め",
"allImagesLoaded": "すべての画像を読み込む",
"loadMore": "さらに読み込む",
"noImagesInGallery": "ギャラリーに画像がありません"
}
208
frontend/public/locales/hotkeys/ja.json
Normal file
@ -0,0 +1,208 @@
{
"keyboardShortcuts": "キーボードショートカット",
"appHotkeys": "アプリのホットキー",
"generalHotkeys": "Generalのホットキー",
"galleryHotkeys": "ギャラリーのホットキー",
"unifiedCanvasHotkeys": "Unified Canvasのホットキー",
"invoke": {
"title": "Invoke",
"desc": "画像を生成"
},
"cancel": {
"title": "キャンセル",
"desc": "画像の生成をキャンセル"
},
"focusPrompt": {
"title": "Focus Prompt",
"desc": "プロンプトテキストボックスにフォーカス"
},
"toggleOptions": {
"title": "オプションパネルのトグル",
"desc": "オプションパネルの開閉"
},
"pinOptions": {
"title": "ピン",
"desc": "オプションパネルを固定"
},
"toggleViewer": {
"title": "ビュワーのトグル",
"desc": "ビュワーを開閉"
},
"toggleGallery": {
"title": "ギャラリーのトグル",
"desc": "ギャラリードロワーの開閉"
},
"maximizeWorkSpace": {
"title": "作業領域の最大化",
"desc": "パネルを閉じて、作業領域を最大に"
},
"changeTabs": {
"title": "タブの切替",
"desc": "他の作業領域と切替"
},
"consoleToggle": {
"title": "コンソールのトグル",
"desc": "コンソールの開閉"
},
"setPrompt": {
"title": "プロンプトをセット",
"desc": "現在の画像のプロンプトを使用"
},
"setSeed": {
"title": "シード値をセット",
"desc": "現在の画像のシード値を使用"
},
"setParameters": {
"title": "パラメータをセット",
"desc": "現在の画像のすべてのパラメータを使用"
},
"restoreFaces": {
"title": "顔の修復",
"desc": "現在の画像を修復"
},
"upscale": {
"title": "アップスケール",
"desc": "現在の画像をアップスケール"
},
"showInfo": {
"title": "情報を見る",
"desc": "現在の画像のメタデータ情報を表示"
},
"sendToImageToImage": {
"title": "Image To Imageに転送",
"desc": "現在の画像をImage to Imageに転送"
},
"deleteImage": {
"title": "画像を削除",
"desc": "現在の画像を削除"
},
"closePanels": {
"title": "パネルを閉じる",
"desc": "開いているパネルを閉じる"
},
"previousImage": {
"title": "前の画像",
"desc": "ギャラリー内の1つ前の画像を表示"
},
"nextImage": {
"title": "次の画像",
"desc": "ギャラリー内の1つ後の画像を表示"
},
"toggleGalleryPin": {
"title": "ギャラリードロワーの固定",
"desc": "ギャラリーをUIにピン留め/解除"
},
"increaseGalleryThumbSize": {
"title": "ギャラリーの画像を拡大",
"desc": "ギャラリーのサムネイル画像を拡大"
},
"decreaseGalleryThumbSize": {
"title": "ギャラリーの画像サイズを縮小",
"desc": "ギャラリーのサムネイル画像を縮小"
},
"selectBrush": {
"title": "ブラシを選択",
"desc": "ブラシを選択"
},
"selectEraser": {
"title": "消しゴムを選択",
"desc": "消しゴムを選択"
},
"decreaseBrushSize": {
"title": "ブラシサイズを縮小",
"desc": "ブラシ/消しゴムのサイズを縮小"
},
"increaseBrushSize": {
"title": "ブラシサイズを拡大",
"desc": "ブラシ/消しゴムのサイズを拡大"
},
"decreaseBrushOpacity": {
"title": "ブラシの不透明度を下げる",
"desc": "キャンバスブラシの不透明度を下げる"
},
"increaseBrushOpacity": {
"title": "ブラシの不透明度を上げる",
"desc": "キャンバスブラシの不透明度を上げる"
},
"moveTool": {
"title": "Move Tool",
"desc": "Allows canvas navigation"
},
"fillBoundingBox": {
"title": "バウンディングボックスを塗りつぶす",
"desc": "ブラシの色でバウンディングボックス領域を塗りつぶす"
},
"eraseBoundingBox": {
"title": "バウンディングボックスを消す",
"desc": "バウンディングボックス領域を消す"
},
"colorPicker": {
"title": "カラーピッカーを選択",
"desc": "カラーピッカーを選択"
},
"toggleSnap": {
"title": "Toggle Snap",
"desc": "Toggles Snap to Grid"
},
"quickToggleMove": {
"title": "Quick Toggle Move",
"desc": "Temporarily toggles Move mode"
},
"toggleLayer": {
"title": "レイヤーを切替",
"desc": "マスク/ベースレイヤの選択を切替"
},
"clearMask": {
"title": "マスクを消す",
"desc": "マスク全体を消す"
},
"hideMask": {
"title": "マスクを非表示",
"desc": "マスクを表示/非表示"
},
"showHideBoundingBox": {
"title": "バウンディングボックスを表示/非表示",
"desc": "バウンディングボックスの表示/非表示を切替"
},
"mergeVisible": {
"title": "Merge Visible",
"desc": "Merge all visible layers of canvas"
},
"saveToGallery": {
"title": "ギャラリーに保存",
"desc": "現在のキャンバスをギャラリーに保存"
},
"copyToClipboard": {
"title": "クリップボードにコピー",
"desc": "現在のキャンバスをクリップボードにコピー"
},
"downloadImage": {
"title": "画像をダウンロード",
"desc": "現在の画像をダウンロード"
},
"undoStroke": {
"title": "Undo Stroke",
"desc": "Undo a brush stroke"
},
"redoStroke": {
"title": "Redo Stroke",
"desc": "Redo a brush stroke"
},
"resetView": {
"title": "キャンバスをリセット",
"desc": "キャンバスをリセット"
},
"previousStagingImage": {
"title": "Previous Staging Image",
"desc": "Previous Staging Area Image"
},
"nextStagingImage": {
"title": "Next Staging Image",
"desc": "Next Staging Area Image"
},
"acceptStagingImage": {
"title": "Accept Staging Image",
"desc": "Accept Current Staging Area Image"
}
}
68
frontend/public/locales/modelmanager/ja.json
Normal file
@ -0,0 +1,68 @@
{
"modelManager": "モデルマネージャ",
"model": "モデル",
"allModels": "すべてのモデル",
"checkpointModels": "Checkpoints",
"diffusersModels": "Diffusers",
"safetensorModels": "SafeTensors",
"modelAdded": "モデルを追加",
"modelUpdated": "モデルをアップデート",
"modelEntryDeleted": "Model Entry Deleted",
"cannotUseSpaces": "Cannot Use Spaces",
"addNew": "新規に追加",
"addNewModel": "新規モデル追加",
"addCheckpointModel": "Checkpointを追加 / Safetensorモデル",
"addDiffuserModel": "Diffusersを追加",
"addManually": "手動で追加",
"manual": "手動",
"name": "名前",
"nameValidationMsg": "モデルの名前を入力",
"description": "概要",
"descriptionValidationMsg": "モデルの概要を入力",
"config": "Config",
"configValidationMsg": "モデルの設定ファイルへのパス",
"modelLocation": "モデルの場所",
"modelLocationValidationMsg": "モデルが配置されている場所へのパス。",
"repo_id": "Repo ID",
"repoIDValidationMsg": "モデルのリモートリポジトリ",
"vaeLocation": "VAEの場所",
"vaeLocationValidationMsg": "Vaeが配置されている場所へのパス",
"vaeRepoID": "VAE Repo ID",
"vaeRepoIDValidationMsg": "Vaeのリモートリポジトリ",
"width": "幅",
"widthValidationMsg": "モデルのデフォルトの幅",
"height": "高さ",
"heightValidationMsg": "モデルのデフォルトの高さ",
"addModel": "モデルを追加",
"updateModel": "モデルをアップデート",
"availableModels": "モデルを有効化",
"search": "検索",
"load": "Load",
"active": "active",
"notLoaded": "読み込まれていません",
"cached": "キャッシュ済",
"checkpointFolder": "Checkpointフォルダ",
"clearCheckpointFolder": "Checkpointフォルダ内を削除",
"findModels": "モデルを見つける",
"scanAgain": "再度スキャン",
"modelsFound": "モデルを発見",
"selectFolder": "フォルダを選択",
"selected": "選択済",
"selectAll": "すべて選択",
"deselectAll": "すべて選択解除",
"showExisting": "既存を表示",
"addSelected": "選択済を追加",
"modelExists": "モデルの有無",
"selectAndAdd": "以下のモデルを選択し、追加できます。",
"noModelsFound": "モデルが見つかりません。",
"delete": "削除",
"deleteModel": "モデルを削除",
"deleteConfig": "設定を削除",
"deleteMsg1": "InvokeAIからこのモデルエントリーを削除してよろしいですか?",
"deleteMsg2": "これは、ドライブからモデルのCheckpointファイルを削除するものではありません。必要であればそれらを読み込むことができます。",
"formMessageDiffusersModelLocation": "Diffusersモデルの場所",
"formMessageDiffusersModelLocationDesc": "最低でも1つは入力してください。",
"formMessageDiffusersVAELocation": "VAEの場所s",
"formMessageDiffusersVAELocationDesc": "指定しない場合、InvokeAIは上記のモデルの場所にあるVAEファイルを探します。"
}
63
frontend/public/locales/options/ja.json
Normal file
@ -0,0 +1,63 @@
{
"images": "画像",
"steps": "ステップ数",
"cfgScale": "CFG Scale",
"width": "幅",
"height": "高さ",
"sampler": "Sampler",
"seed": "シード値",
"randomizeSeed": "ランダムなシード値",
"shuffle": "シャッフル",
"noiseThreshold": "Noise Threshold",
"perlinNoise": "Perlin Noise",
"variations": "Variations",
"variationAmount": "Variation Amount",
"seedWeights": "シード値の重み",
"faceRestoration": "顔の修復",
"restoreFaces": "顔の修復",
"type": "Type",
"strength": "強度",
"upscaling": "アップスケーリング",
"upscale": "アップスケール",
"upscaleImage": "画像をアップスケール",
"scale": "Scale",
"otherOptions": "その他のオプション",
"seamlessTiling": "Seamless Tiling",
"hiresOptim": "High Res Optimization",
"imageFit": "Fit Initial Image To Output Size",
"codeformerFidelity": "Fidelity",
"seamSize": "Seam Size",
"seamBlur": "Seam Blur",
"seamStrength": "Seam Strength",
"seamSteps": "Seam Steps",
"inpaintReplace": "Inpaint Replace",
"scaleBeforeProcessing": "処理前のスケール",
"scaledWidth": "幅のスケール",
"scaledHeight": "高さのスケール",
"infillMethod": "Infill Method",
"tileSize": "Tile Size",
"boundingBoxHeader": "バウンディングボックス",
"seamCorrectionHeader": "Seam Correction",
"infillScalingHeader": "Infill and Scaling",
"img2imgStrength": "Image To Imageの強度",
"toggleLoopback": "Toggle Loopback",
"invoke": "Invoke",
"cancel": "キャンセル",
"promptPlaceholder": "Type prompt here. [negative tokens], (upweight)++, (downweight)--, swap and blend are available (see docs)",
"sendTo": "転送",
"sendToImg2Img": "Image to Imageに転送",
"sendToUnifiedCanvas": "Unified Canvasに転送",
"copyImageToLink": "Copy Image To Link",
"downloadImage": "画像をダウンロード",
"openInViewer": "ビュワーを開く",
"closeViewer": "ビュワーを閉じる",
"usePrompt": "プロンプトを使用",
"useSeed": "シード値を使用",
"useAll": "すべてを使用",
"useInitImg": "Use Initial Image",
"info": "情報",
"deleteImage": "画像を削除",
"initialImage": "Inital Image",
"showOptionsPanel": "オプションパネルを表示"
}
14
frontend/public/locales/settings/ja.json
Normal file
@ -0,0 +1,14 @@
{
"models": "モデル",
"displayInProgress": "生成中の画像を表示する",
"saveSteps": "nステップごとに画像を保存",
"confirmOnDelete": "削除時に確認",
"displayHelpIcons": "ヘルプアイコンを表示",
"useCanvasBeta": "キャンバスレイアウト(Beta)を使用する",
"enableImageDebugging": "画像のデバッグを有効化",
"resetWebUI": "WebUIをリセット",
"resetWebUIDesc1": "WebUIのリセットは、画像と保存された設定のキャッシュをリセットするだけです。画像を削除するわけではありません。",
"resetWebUIDesc2": "もしギャラリーに画像が表示されないなど、何か問題が発生した場合はGitHubにissueを提出する前にリセットを試してください。",
"resetComplete": "WebUIはリセットされました。F5を押して再読み込みしてください。"
}
32
frontend/public/locales/toast/ja.json
Normal file
@ -0,0 +1,32 @@
{
"tempFoldersEmptied": "Temp Folder Emptied",
"uploadFailed": "アップロード失敗",
"uploadFailedMultipleImagesDesc": "一度にアップロードできる画像は1枚のみです。",
"uploadFailedUnableToLoadDesc": "ファイルを読み込むことができません。",
"downloadImageStarted": "画像ダウンロード開始",
"imageCopied": "画像をコピー",
"imageLinkCopied": "画像のURLをコピー",
"imageNotLoaded": "画像を読み込めません。",
"imageNotLoadedDesc": "Image To Imageに転送する画像が見つかりません。",
"imageSavedToGallery": "画像をギャラリーに保存する",
"canvasMerged": "Canvas Merged",
"sentToImageToImage": "Image To Imageに転送",
"sentToUnifiedCanvas": "Unified Canvasに転送",
"parametersSet": "Parameters Set",
"parametersNotSet": "Parameters Not Set",
"parametersNotSetDesc": "この画像にはメタデータがありません。",
"parametersFailed": "パラメータ読み込みの不具合",
"parametersFailedDesc": "initイメージを読み込めません。",
"seedSet": "Seed Set",
"seedNotSet": "Seed Not Set",
"seedNotSetDesc": "この画像のシード値が見つかりません。",
"promptSet": "Prompt Set",
"promptNotSet": "Prompt Not Set",
"promptNotSetDesc": "この画像のプロンプトが見つかりませんでした。",
"upscalingFailed": "アップスケーリング失敗",
"faceRestoreFailed": "顔の修復に失敗",
"metadataLoadFailed": "メタデータの読み込みに失敗。",
"initialImageSet": "Initial Image Set",
"initialImageNotSet": "Initial Image Not Set",
"initialImageNotSetDesc": "Could not load initial image"
}
16
frontend/public/locales/tooltip/ja.json
Normal file
@ -0,0 +1,16 @@
{
"feature": {
"prompt": "これはプロンプトフィールドです。プロンプトには生成オブジェクトや文法用語が含まれます。プロンプトにも重み(Tokenの重要度)を付けることができますが、CLIコマンドやパラメータは機能しません。",
"gallery": "ギャラリーは、出力先フォルダから生成物を表示します。設定はファイル内に保存され、コンテキストメニューからアクセスできます。.",
"other": "These options will enable alternative processing modes for Invoke. 'Seamless tiling' will create repeating patterns in the output. 'High resolution' is generation in two steps with img2img: use this setting when you want a larger and more coherent image without artifacts. It will take longer that usual txt2img.",
"seed": "シード値は、画像が形成される際の初期ノイズに影響します。以前の画像から既に存在するシードを使用することができます。ノイズしきい値は高いCFG値でのアーティファクトを軽減するために使用され、Perlinは生成中にPerlinノイズを追加します(0-10の範囲を試してみてください): どちらも出力にバリエーションを追加するのに役立ちます。",
"variations": "0.1から1.0の間の値で試し、付与されたシードに対する結果を変えてみてください。面白いバリュエーションは0.1〜0.3の間です。",
"upscale": "生成直後の画像をアップスケールするには、ESRGANを使用します。",
"faceCorrection": "GFPGANまたはCodeformerによる顔の修復: 画像内の顔を検出し不具合を修正するアルゴリズムです。高い値を設定すると画像がより変化し、より魅力的な顔になります。Codeformerは顔の修復を犠牲にして、元の画像をできる限り保持します。",
"imageToImage": "Image To Imageは任意の画像を初期値として読み込み、プロンプトとともに新しい画像を生成するために使用されます。値が高いほど結果画像はより変化します。0.0から1.0までの値が可能で、推奨範囲は0.25から0.75です。",
"boundingBox": "バウンディングボックスは、Text To ImageまたはImage To Imageの幅/高さの設定と同じです。ボックス内の領域のみが処理されます。",
"seamCorrection": "キャンバス上の生成された画像間に発生する可視可能な境界の処理を制御します。",
"infillAndScaling": "Manage infill methods (used on masked or erased areas of the canvas) and scaling (useful for small bounding box sizes)."
}
}
60
frontend/public/locales/unifiedcanvas/ja.json
Normal file
@ -0,0 +1,60 @@
{
"layer": "Layer",
"base": "Base",
"mask": "マスク",
"maskingOptions": "マスクのオプション",
"enableMask": "マスクを有効化",
"preserveMaskedArea": "マスク領域の保存",
"clearMask": "マスクを解除",
"brush": "ブラシ",
"eraser": "消しゴム",
"fillBoundingBox": "バウンディングボックスの塗りつぶし",
"eraseBoundingBox": "バウンディングボックスの消去",
"colorPicker": "カラーピッカー",
"brushOptions": "ブラシオプション",
"brushSize": "サイズ",
"move": "Move",
"resetView": "Reset View",
"mergeVisible": "Merge Visible",
"saveToGallery": "ギャラリーに保存",
"copyToClipboard": "クリップボードにコピー",
"downloadAsImage": "画像としてダウンロード",
"undo": "取り消し",
"redo": "やり直し",
"clearCanvas": "キャンバスを片付ける",
"canvasSettings": "キャンバスの設定",
"showIntermediates": "Show Intermediates",
"showGrid": "グリッドを表示",
"snapToGrid": "Snap to Grid",
"darkenOutsideSelection": "外周を暗くする",
"autoSaveToGallery": "ギャラリーに自動保存",
"saveBoxRegionOnly": "ボックス領域のみ保存",
"limitStrokesToBox": "Limit Strokes to Box",
"showCanvasDebugInfo": "キャンバスのデバッグ情報を表示",
"clearCanvasHistory": "キャンバスの履歴を削除",
"clearHistory": "履歴を削除",
"clearCanvasHistoryMessage": "履歴を消去すると現在のキャンバスは残りますが、取り消しややり直しの履歴は不可逆的に消去されます。",
"clearCanvasHistoryConfirm": "履歴を削除しますか?",
"emptyTempImageFolder": "Empty Temp Image Folde",
"emptyFolder": "空のフォルダ",
"emptyTempImagesFolderMessage": "一時フォルダを空にすると、Unified Canvasも完全にリセットされます。これには、すべての取り消し/やり直しの履歴、ステージング領域の画像、およびキャンバスのベースレイヤーが含まれます。",
"emptyTempImagesFolderConfirm": "一時フォルダを削除しますか?",
"activeLayer": "Active Layer",
"canvasScale": "Canvas Scale",
"boundingBox": "バウンディングボックス",
"scaledBoundingBox": "Scaled Bounding Box",
"boundingBoxPosition": "バウンディングボックスの位置",
"canvasDimensions": "キャンバスの大きさ",
"canvasPosition": "キャンバスの位置",
"cursorPosition": "カーソルの位置",
"previous": "前",
"next": "次",
"accept": "同意",
"showHide": "表示/非表示",
"discardAll": "すべて破棄",
"betaClear": "Clear",
"betaDarkenOutside": "Darken Outside",
"betaLimitToBox": "Limit To Box",
"betaPreserveMasked": "Preserve Masked"
}
@ -20,6 +20,7 @@ export default function LanguagePicker() {
    pl: t('common:langPolish'),
    zh_cn: t('common:langSimplifiedChinese'),
    es: t('common:langSpanish'),
    ja: t('common:langJapanese'),
  };

  const renderLanguagePicker = () => {
@ -10,8 +10,9 @@ echo Do you want to generate images using the
echo 1. command-line
echo 2. browser-based UI
echo 3. run textual inversion training
echo 4. open the developer console
echo 5. re-run the configure script to download new models
echo 4. merge models (diffusers type only)
echo 5. open the developer console
echo 6. re-run the configure script to download new models
set /P restore="Please enter 1, 2, 3, 4 or 5: [5] "
if not defined restore set restore=2
IF /I "%restore%" == "1" (
@ -24,6 +25,9 @@ IF /I "%restore%" == "1" (
    echo Starting textual inversion training..
    python .venv\Scripts\textual_inversion_fe.py --web %*
) ELSE IF /I "%restore%" == "4" (
    echo Starting model merging script..
    python .venv\Scripts\merge_models_fe.py --web %*
) ELSE IF /I "%restore%" == "5" (
    echo Developer Console
    echo Python command is:
    where python
@ -35,7 +39,7 @@ IF /I "%restore%" == "1" (
    echo *************************
    echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
    call cmd /k
) ELSE IF /I "%restore%" == "5" (
) ELSE IF /I "%restore%" == "6" (
    echo Running configure_invokeai.py...
    python .venv\Scripts\configure_invokeai.py --web %*
) ELSE (
@ -20,16 +20,18 @@ if [ "$0" != "bash" ]; then
    echo "1. command-line"
    echo "2. browser-based UI"
    echo "3. run textual inversion training"
    echo "4. open the developer console"
    echo "4. merge models (diffusers type only)"
    echo "5. re-run the configure script to download new models"
    echo "6. open the developer console"
    read -p "Please enter 1, 2, 3, 4 or 5: [1] " yn
    choice=${yn:='2'}
    case $choice in
        1 ) printf "\nStarting the InvokeAI command-line..\n"; .venv/bin/python .venv/bin/invoke.py $*;;
        2 ) printf "\nStarting the InvokeAI browser-based UI..\n"; .venv/bin/python .venv/bin/invoke.py --web $*;;
        3 ) printf "\nStarting Textual Inversion:\n"; .venv/bin/python .venv/bin/textual_inversion_fe.py $*;;
        4 ) printf "\nDeveloper Console:\n"; file_name=$(basename "${BASH_SOURCE[0]}"); bash --init-file "$file_name";;
        5 ) printf "\nRunning configure_invokeai.py:\n"; .venv/bin/python .venv/bin/configure_invokeai.py $*;;
        4 ) printf "\nMerging Models:\n"; .venv/bin/python .venv/bin/merge_models_fe.py $*;;
        5 ) printf "\nDeveloper Console:\n"; file_name=$(basename "${BASH_SOURCE[0]}"); bash --init-file "$file_name";;
        6 ) printf "\nRunning configure_invokeai.py:\n"; .venv/bin/python .venv/bin/configure_invokeai.py $*;;
        * ) echo "Invalid selection"; exit;;
    esac
else # in developer console
@ -146,7 +146,7 @@ class Generate:
            gfpgan=None,
            codeformer=None,
            esrgan=None,
            free_gpu_mem=False,
            free_gpu_mem: bool=False,
            safety_checker:bool=False,
            max_loaded_models:int=2,
            # these are deprecated; if present they override values in the conf file
@ -445,7 +445,11 @@ class Generate:
            self._set_sampler()

        # apply the concepts library to the prompt
        prompt = self.huggingface_concepts_library.replace_concepts_with_triggers(prompt, lambda concepts: self.load_huggingface_concepts(concepts))
        prompt = self.huggingface_concepts_library.replace_concepts_with_triggers(
            prompt,
            lambda concepts: self.load_huggingface_concepts(concepts),
            self.model.textual_inversion_manager.get_all_trigger_strings()
        )

        # bit of a hack to change the cached sampler's karras threshold to
        # whatever the user asked for
@ -460,10 +464,13 @@ class Generate:
        init_image = None
        mask_image = None

        try:
            if self.free_gpu_mem and self.model.cond_stage_model.device != self.model.device:
                self.model.cond_stage_model.device = self.model.device
                self.model.cond_stage_model.to(self.model.device)
        except AttributeError:
            print(">> Warning: '--free_gpu_mem' is not yet supported when generating image using model based on HuggingFace Diffuser.")
            pass

        try:
            uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
@ -531,6 +538,7 @@ class Generate:
                inpaint_height = inpaint_height,
                inpaint_width = inpaint_width,
                enable_image_debugging = enable_image_debugging,
                free_gpu_mem=self.free_gpu_mem,
            )

            if init_color:
@ -844,6 +852,7 @@ class Generate:
            model_data = cache.get_model(model_name)
        except Exception as e:
            print(f'** model {model_name} could not be loaded: {str(e)}')
            print(traceback.format_exc(), file=sys.stderr)
            if previous_model_name is None:
                raise e
            print(f'** trying to reload previous model')
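The second hunk above replaces an attribute assignment with a real `.to()` call. The distinction matters in PyTorch, and a standalone sketch (plain torch, not InvokeAI code) makes it concrete:

```python
# Why the hunk swaps `model.device = ...` for `model.to(...)`: assigning
# .device only sets a Python attribute on the module; .to() actually moves
# the parameters. Falls back to CPU if no CUDA device is available.
import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu'
layer = torch.nn.Linear(4, 4)

layer.device = torch.device(device)      # sets an attribute; weights do not move
layer = layer.to(device)                 # performs the actual transfer
print(next(layer.parameters()).device)   # now reports the target device
```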
|
@ -573,12 +573,12 @@ def import_model(model_path:str, gen, opt, completer):
|
||||
|
||||
if model_path.startswith(('http:','https:','ftp:')):
|
||||
model_name = import_ckpt_model(model_path, gen, opt, completer)
|
||||
elif os.path.exists(model_path) and model_path.endswith('.ckpt') and os.path.isfile(model_path):
|
||||
elif os.path.exists(model_path) and model_path.endswith(('.ckpt','.safetensors')) and os.path.isfile(model_path):
|
||||
model_name = import_ckpt_model(model_path, gen, opt, completer)
|
||||
elif re.match('^[\w.+-]+/[\w.+-]+$',model_path):
|
||||
model_name = import_diffuser_model(model_path, gen, opt, completer)
|
||||
elif os.path.isdir(model_path):
|
||||
model_name = import_diffuser_model(model_path, gen, opt, completer)
|
||||
model_name = import_diffuser_model(Path(model_path), gen, opt, completer)
|
||||
else:
|
||||
print(f'** {model_path} is neither the path to a .ckpt file nor a diffusers repository id. Can\'t import.')
|
||||
|
||||
@ -589,8 +589,7 @@ def import_model(model_path:str, gen, opt, completer):
|
||||
print('** model failed to load. Discarding configuration entry')
|
||||
gen.model_manager.del_model(model_name)
|
||||
return
|
||||
|
||||
if input('Make this the default model? [n] ') in ('y','Y'):
|
||||
if input('Make this the default model? [n] ').strip() in ('y','Y'):
|
||||
gen.model_manager.set_default_model(model_name)
|
||||
|
||||
gen.model_manager.commit(opt.conf)
|
||||
@ -607,10 +606,14 @@ def import_diffuser_model(path_or_repo:str, gen, opt, completer)->str:
|
||||
model_name=default_name,
|
||||
model_description=default_description
|
||||
)
|
||||
vae = None
|
||||
if input('Replace this model\'s VAE with "stabilityai/sd-vae-ft-mse"? [n] ').strip() in ('y','Y'):
|
||||
vae = dict(repo_id='stabilityai/sd-vae-ft-mse')
|
||||
|
||||
if not manager.import_diffuser_model(
|
||||
path_or_repo,
|
||||
model_name = model_name,
|
||||
vae = vae,
|
||||
description = model_description):
|
||||
print('** model failed to import')
|
||||
return None
|
||||
@ -627,18 +630,29 @@ def import_ckpt_model(path_or_url:str, gen, opt, completer)->str:
|
||||
model_description=default_description
|
||||
)
|
||||
config_file = None
|
||||
default = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
|
||||
|
||||
completer.complete_extensions(('.yaml','.yml'))
|
||||
completer.set_line('configs/stable-diffusion/v1-inference.yaml')
|
||||
completer.set_line(str(default))
|
||||
done = False
|
||||
while not done:
|
||||
config_file = input('Configuration file for this model: ').strip()
|
||||
done = os.path.exists(config_file)
|
||||
|
||||
completer.complete_extensions(('.ckpt','.safetensors'))
|
||||
vae = None
|
||||
default = Path(Globals.root,'models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt')
|
||||
completer.set_line(str(default))
|
||||
done = False
|
||||
while not done:
|
||||
vae = input('VAE file for this model (leave blank for none): ').strip() or None
|
||||
done = (not vae) or os.path.exists(vae)
|
||||
completer.complete_extensions(None)
|
||||
|
||||
if not manager.import_ckpt_model(
|
||||
path_or_url,
|
||||
config = config_file,
|
||||
vae = vae,
|
||||
model_name = model_name,
|
||||
model_description = model_description,
|
||||
commit_to_conf = opt.conf,
|
||||
@ -710,7 +724,7 @@ def optimize_model(model_name_or_path:str, gen, opt, completer):
|
||||
return
|
||||
|
||||
completer.update_models(gen.model_manager.list_models())
|
||||
if input(f'Load optimized model {model_name}? [y] ') not in ('n','N'):
|
||||
if input(f'Load optimized model {model_name}? [y] ').strip() not in ('n','N'):
|
||||
gen.set_model(model_name)
|
||||
|
||||
response = input(f'Delete the original .ckpt file at ({ckpt_path} ? [n] ')
|
||||
@ -726,7 +740,12 @@ def del_config(model_name:str, gen, opt, completer):
|
||||
if model_name not in gen.model_manager.config:
|
||||
print(f"** Unknown model {model_name}")
|
||||
return
|
||||
gen.model_manager.del_model(model_name)
|
||||
|
||||
if input(f'Remove {model_name} from the list of models known to InvokeAI? [y] ').strip().startswith(('n','N')):
|
||||
return
|
||||
|
||||
delete_completely = input('Completely remove the model file or directory from disk? [n] ').startswith(('y','Y'))
|
||||
gen.model_manager.del_model(model_name,delete_files=delete_completely)
|
||||
gen.model_manager.commit(opt.conf)
|
||||
print(f'** {model_name} deleted')
|
||||
completer.update_models(gen.model_manager.list_models())
|
||||
@ -1099,7 +1118,7 @@ def report_model_error(opt:Namespace, e:Exception):
|
||||
if yes_to_all is not None:
|
||||
sys.argv.append(yes_to_all)
|
||||
|
||||
import configure_invokeai
|
||||
import ldm.invoke.configure_invokeai as configure_invokeai
|
||||
configure_invokeai.main()
|
||||
print('** InvokeAI will now restart')
|
||||
sys.argv = previous_args
|
||||
|
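For reference, the dispatch order `import_model()` follows after this change can be condensed into a few checks. The sketch below is hedged: `classify()` is a hypothetical helper written for illustration, not part of the CLI.

```python
# Hedged sketch of the import_model() dispatch order after this change;
# classify() is a hypothetical helper, not a real CLI function.
import os
import re

def classify(model_path: str) -> str:
    if model_path.startswith(('http:', 'https:', 'ftp:')):
        return 'checkpoint URL'                 # downloaded, imported as ckpt
    if os.path.isfile(model_path) and model_path.endswith(('.ckpt', '.safetensors')):
        return 'local checkpoint file'          # .safetensors now accepted too
    if re.match(r'^[\w.+-]+/[\w.+-]+$', model_path):
        return 'diffusers repo id'
    if os.path.isdir(model_path):
        return 'local diffusers directory'
    return 'unknown - cannot import'

print(classify('stabilityai/stable-diffusion-2'))  # diffusers repo id
```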
@ -56,9 +56,11 @@ class CkptGenerator():
                 image_callback=None, step_callback=None, threshold=0.0, perlin=0.0,
                 safety_checker:dict=None,
                 attention_maps_callback = None,
                 free_gpu_mem: bool=False,
                 **kwargs):
        scope = choose_autocast(self.precision)
        self.safety_checker = safety_checker
        self.free_gpu_mem = free_gpu_mem
        attention_maps_images = []
        attention_maps_callback = lambda saver: attention_maps_images.append(saver.get_stacked_maps_image())
        make_image = self.get_make_image(
@ -21,7 +21,7 @@ import os
import re
import torch
from pathlib import Path
from ldm.invoke.globals import Globals
from ldm.invoke.globals import Globals, global_cache_dir
from safetensors.torch import load_file

try:
@ -637,7 +637,7 @@ def convert_ldm_bert_checkpoint(checkpoint, config):


def convert_ldm_clip_checkpoint(checkpoint):
    text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
    text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14",cache_dir=global_cache_dir('hub'))

    keys = list(checkpoint.keys())

@ -677,7 +677,8 @@ textenc_pattern = re.compile("|".join(protected.keys()))


def convert_paint_by_example_checkpoint(checkpoint):
    config = CLIPVisionConfig.from_pretrained("openai/clip-vit-large-patch14")
    cache_dir = global_cache_dir('hub')
    config = CLIPVisionConfig.from_pretrained("openai/clip-vit-large-patch14",cache_dir=cache_dir)
    model = PaintByExampleImageEncoder(config)

    keys = list(checkpoint.keys())
@ -744,7 +745,8 @@ def convert_paint_by_example_checkpoint(checkpoint):


def convert_open_clip_checkpoint(checkpoint):
    text_model = CLIPTextModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="text_encoder")
    cache_dir=global_cache_dir('hub')
    text_model = CLIPTextModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="text_encoder", cache_dir=cache_dir)

    keys = list(checkpoint.keys())

@ -795,6 +797,7 @@ def convert_ckpt_to_diffuser(checkpoint_path:str,
    ):

    checkpoint = load_file(checkpoint_path) if Path(checkpoint_path).suffix == '.safetensors' else torch.load(checkpoint_path)
    cache_dir = global_cache_dir('hub')

    # Sometimes models don't have the global_step item
    if "global_step" in checkpoint:
@ -904,7 +907,7 @@ def convert_ckpt_to_diffuser(checkpoint_path:str,

    if model_type == "FrozenOpenCLIPEmbedder":
        text_model = convert_open_clip_checkpoint(checkpoint)
        tokenizer = CLIPTokenizer.from_pretrained("stabilityai/stable-diffusion-2", subfolder="tokenizer")
        tokenizer = CLIPTokenizer.from_pretrained("stabilityai/stable-diffusion-2", subfolder="tokenizer",cache_dir=global_cache_dir('diffusers'))
        pipe = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_model,
@ -917,8 +920,8 @@ def convert_ckpt_to_diffuser(checkpoint_path:str,
        )
    elif model_type == "PaintByExample":
        vision_model = convert_paint_by_example_checkpoint(checkpoint)
        tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
        feature_extractor = AutoFeatureExtractor.from_pretrained("CompVis/stable-diffusion-safety-checker")
        tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14",cache_dir=cache_dir)
        feature_extractor = AutoFeatureExtractor.from_pretrained("CompVis/stable-diffusion-safety-checker",cache_dir=cache_dir)
        pipe = PaintByExamplePipeline(
            vae=vae,
            image_encoder=vision_model,
@ -929,9 +932,9 @@ def convert_ckpt_to_diffuser(checkpoint_path:str,
        )
    elif model_type in ['FrozenCLIPEmbedder','WeightedFrozenCLIPEmbedder']:
        text_model = convert_ldm_clip_checkpoint(checkpoint)
        tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
        safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
        feature_extractor = AutoFeatureExtractor.from_pretrained("CompVis/stable-diffusion-safety-checker")
        tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14",cache_dir=cache_dir)
        safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker",cache_dir=cache_dir)
        feature_extractor = AutoFeatureExtractor.from_pretrained("CompVis/stable-diffusion-safety-checker",cache_dir=cache_dir)
        pipe = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_model,
@ -944,7 +947,7 @@ def convert_ckpt_to_diffuser(checkpoint_path:str,
    else:
        text_config = create_ldm_bert_config(original_config)
        text_model = convert_ldm_bert_checkpoint(checkpoint, text_config)
        tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
        tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased",cache_dir=cache_dir)
        pipe = LDMTextToImagePipeline(vqvae=vae, bert=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler)

    pipe.save_pretrained(
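The recurring edit in this file is a single one-line pattern: pin every `from_pretrained()` download to InvokeAI's own cache rather than the default `~/.cache/huggingface` location. A minimal sketch of the pattern, assuming an importable InvokeAI tree:

```python
# Minimal sketch of the pattern applied throughout ckpt_to_diffuser.py:
# route from_pretrained() downloads through InvokeAI's cache directory.
# Assumes InvokeAI is installed so ldm.invoke.globals is importable.
from transformers import CLIPTokenizer
from ldm.invoke.globals import global_cache_dir

cache_dir = global_cache_dir('hub')   # resolves beneath Globals.root
tokenizer = CLIPTokenizer.from_pretrained(
    'openai/clip-vit-large-patch14',
    cache_dir=cache_dir,              # keeps the download inside the root
)
```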
@ -59,7 +59,7 @@ class HuggingFaceConceptsLibrary(object):
        be downloaded.
        '''
        if not concept_name in self.list_concepts():
            print(f'This concept is not known to the Hugging Face library. Generation will continue without the concept.')
            print(f'This concept is not a local embedding trigger, nor is it a HuggingFace concept. Generation will continue without the concept.')
            return None
        return self.get_concept_file(concept_name.lower(),'learned_embeds.bin')

@ -115,13 +115,19 @@ class HuggingFaceConceptsLibrary(object):
            return self.trigger_to_concept(match.group(1)) or f'<{match.group(1)}>'
        return self.match_trigger.sub(do_replace, prompt)

    def replace_concepts_with_triggers(self, prompt:str, load_concepts_callback: Callable[[list], any])->str:
    def replace_concepts_with_triggers(self,
                                       prompt:str,
                                       load_concepts_callback: Callable[[list], any],
                                       excluded_tokens:list[str])->str:
        '''
        Given a prompt string that contains `<concept_name>` tags, replace
        these tags with the appropriate trigger.

        If any `<concept_name>` tags are found, `load_concepts_callback()` is called with a list
        of `concepts_name` strings.

        `excluded_tokens` are any tokens that should not be replaced, typically because they
        are trigger tokens from a locally-loaded embedding.
        '''
        concepts = self.match_concept.findall(prompt)
        if not concepts:
@ -129,6 +135,8 @@ class HuggingFaceConceptsLibrary(object):
        load_concepts_callback(concepts)

        def do_replace(match)->str:
            if excluded_tokens and f'<{match.group(1)}>' in excluded_tokens:
                return f'<{match.group(1)}>'
            return self.concept_to_trigger(match.group(1)) or f'<{match.group(1)}>'
        return self.match_concept.sub(do_replace, prompt)
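The mechanics of the new `excluded_tokens` parameter are easy to see in isolation. Below is a standalone sketch (not the library API; the regex and mapping are assumptions chosen for illustration) of the replacement logic the hunk adds:

```python
# Standalone sketch of what replace_concepts_with_triggers() now does:
# <concept> tags are swapped for trigger strings, except tokens that are
# already triggers of a locally-loaded embedding. The tag regex and the
# concept-to-trigger mapping here are illustrative assumptions.
import re

match_concept = re.compile(r'<([\w\-]+)>')
concept_to_trigger = {'my-style': '<my-style-trigger>'}

def replace_concepts(prompt: str, excluded_tokens: list[str]) -> str:
    def do_replace(match: re.Match) -> str:
        token = f'<{match.group(1)}>'
        if token in excluded_tokens:              # local embedding trigger
            return token                          # leave it untouched
        return concept_to_trigger.get(match.group(1), token)
    return match_concept.sub(do_replace, prompt)

print(replace_concepts('a photo in <my-style>, plus <local-token>',
                       excluded_tokens=['<local-token>']))
```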
@ -747,7 +747,7 @@ def initialize_rootdir(root:str,yes_to_all:bool=False):

    safety_checker = '--nsfw_checker' if enable_safety_checker else '--no-nsfw_checker'

    for name in ('models','configs','embeddings'):
    for name in ('models','configs','embeddings','text-inversion-data','text-inversion-training-data'):
        os.makedirs(os.path.join(root,name), exist_ok=True)
    for src in (['configs']):
        dest = os.path.join(root,src)
@ -62,9 +62,11 @@ class Generator:
    def generate(self,prompt,init_image,width,height,sampler, iterations=1,seed=None,
                 image_callback=None, step_callback=None, threshold=0.0, perlin=0.0,
                 safety_checker:dict=None,
                 free_gpu_mem: bool=False,
                 **kwargs):
        scope = nullcontext
        self.safety_checker = safety_checker
        self.free_gpu_mem = free_gpu_mem
        attention_maps_images = []
        attention_maps_callback = lambda saver: attention_maps_images.append(saver.get_stacked_maps_image())
        make_image = self.get_make_image(
@ -29,6 +29,7 @@ else:

# Where to look for the initialization file
Globals.initfile = 'invokeai.init'
Globals.models_file = 'models.yaml'
Globals.models_dir = 'models'
Globals.config_dir = 'configs'
Globals.autoscan_dir = 'weights'
@ -49,6 +50,9 @@ Globals.disable_xformers = False
# whether we are forcing full precision
Globals.full_precision = False

def global_config_file()->Path:
    return Path(Globals.root, Globals.config_dir, Globals.models_file)

def global_config_dir()->Path:
    return Path(Globals.root, Globals.config_dir)
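These helpers simply compose paths beneath `Globals.root`. A small illustration of how they resolve (the root value here is hypothetical):

```python
# Illustration of how the new helpers compose; the root value is
# hypothetical, chosen only for the example output shown in the comments.
from ldm.invoke.globals import Globals, global_config_dir, global_config_file

Globals.root = '/home/user/invokeai'
print(global_config_dir())    # /home/user/invokeai/configs
print(global_config_file())   # /home/user/invokeai/configs/models.yaml
```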
62
ldm/invoke/merge_diffusers.py
Normal file
62
ldm/invoke/merge_diffusers.py
Normal file
@ -0,0 +1,62 @@
'''
ldm.invoke.merge_diffusers exports a single function call merge_diffusion_models()
used to merge 2-3 models together and create a new InvokeAI-registered diffusion model.
'''
import os
from typing import List

from diffusers import DiffusionPipeline

from ldm.invoke.globals import global_config_file, global_models_dir, global_cache_dir
from ldm.invoke.model_manager import ModelManager
from omegaconf import OmegaConf

def merge_diffusion_models(models:List[str],
                           merged_model_name:str,
                           alpha:float=0.5,
                           interp:str=None,
                           force:bool=False,
                           **kwargs):
    '''
    models - up to three models, designated by their InvokeAI models.yaml model name
    merged_model_name - name for the new merged model
    alpha - The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
            would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2
    interp - The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_difference" and None.
             Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_difference" is supported.
    force - Whether to ignore mismatch in model_config.json for the current models. Defaults to False.

    **kwargs - the default DiffusionPipeline.get_config_dict kwargs:
         cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map
    '''
    config_file = global_config_file()
    model_manager = ModelManager(OmegaConf.load(config_file))
    for mod in models:
        assert (mod in model_manager.model_names()), f'** Unknown model "{mod}"'
        assert (model_manager.model_info(mod).get('format',None) == 'diffusers'), f'** {mod} is not a diffusers model. It must be optimized before merging.'
    model_ids_or_paths = [model_manager.model_name_or_path(x) for x in models]

    pipe = DiffusionPipeline.from_pretrained(model_ids_or_paths[0],
                                             cache_dir=kwargs.get('cache_dir',global_cache_dir()),
                                             custom_pipeline='checkpoint_merger')
    merged_pipe = pipe.merge(pretrained_model_name_or_path_list=model_ids_or_paths,
                             alpha=alpha,
                             interp=interp,
                             force=force,
                             **kwargs)
    dump_path = global_models_dir() / 'merged_diffusers'
    os.makedirs(dump_path, exist_ok=True)
    dump_path = dump_path / merged_model_name
    merged_pipe.save_pretrained(
        dump_path,
        safe_serialization=True
    )
    model_manager.import_diffuser_model(
        dump_path,
        model_name = merged_model_name,
        description = f'Merge of models {", ".join(models)}'
    )
    print('REMINDER: When PR 2369 is merged, replace merge_diffusers.py line 56 with vae= argument to import_diffuser_model()')
    if vae := model_manager.config[models[0]].get('vae', None):
        print(f'>> Using configured VAE assigned to {models[0]}')
        model_manager.config[merged_model_name]['vae'] = vae

    model_manager.commit(config_file)
@ -18,7 +18,9 @@ import traceback
import warnings
import safetensors.torch
from pathlib import Path
from shutil import move, rmtree
from typing import Union, Any
from huggingface_hub import scan_cache_dir
from ldm.util import download_with_progress_bar

import torch
@ -35,9 +37,16 @@ from ldm.invoke.globals import Globals, global_models_dir, global_autoscan_dir,
from ldm.util import instantiate_from_config, ask_user

DEFAULT_MAX_MODELS=2
VAE_TO_REPO_ID = { # hack, see note in convert_and_import()
    'vae-ft-mse-840000-ema-pruned': 'stabilityai/sd-vae-ft-mse',
}

class ModelManager(object):
    def __init__(self, config:OmegaConf, device_type:str, precision:str, max_loaded_models=DEFAULT_MAX_MODELS):
    def __init__(self,
                 config:OmegaConf,
                 device_type:str='cpu',
                 precision:str='float16',
                 max_loaded_models=DEFAULT_MAX_MODELS):
        '''
        Initialize with the path to the models.yaml config file,
        the torch device type, and precision. The optional
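
The VAE_TO_REPO_ID table above is keyed by the stem of a legacy VAE filename. A small sketch of the lookup that convert_and_import() performs further down in this diff (the checkpoint path is hypothetical):

from pathlib import Path

VAE_TO_REPO_ID = {'vae-ft-mse-840000-ema-pruned': 'stabilityai/sd-vae-ft-mse'}

vae_ckpt_path = 'models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt'  # hypothetical entry
vae_basename = Path(vae_ckpt_path).stem           # 'vae-ft-mse-840000-ema-pruned'
diffusers_vae = VAE_TO_REPO_ID.get(vae_basename)  # 'stabilityai/sd-vae-ft-mse'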
@ -143,7 +152,7 @@ class ModelManager(object):
        Return true if this is a legacy (.ckpt) model
        '''
        info = self.model_info(model_name)
        if 'weights' in info and info['weights'].endswith('.ckpt'):
        if 'weights' in info and info['weights'].endswith(('.ckpt','.safetensors')):
            return True
        return False
@ -226,7 +235,7 @@ class ModelManager(object):
            line = f'\033[1m{line}\033[0m'
        print(line)

    def del_model(self, model_name:str) -> None:
    def del_model(self, model_name:str, delete_files:bool=False) -> None:
        '''
        Delete the named model.
        '''
@ -234,9 +243,25 @@ class ModelManager(object):
        if model_name not in omega:
            print(f'** Unknown model {model_name}')
            return
        # save these for use in deletion later
        conf = omega[model_name]
        repo_id = conf.get('repo_id',None)
        path = self._abs_path(conf.get('path',None))
        weights = self._abs_path(conf.get('weights',None))

        del omega[model_name]
        if model_name in self.stack:
            self.stack.remove(model_name)
        if delete_files:
            if weights:
                print(f'** deleting file {weights}')
                Path(weights).unlink(missing_ok=True)
            elif path:
                print(f'** deleting directory {path}')
                rmtree(path,ignore_errors=True)
            elif repo_id:
                print(f'** deleting the cached model directory for {repo_id}')
                self._delete_model_from_cache(repo_id)
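
A short sketch of the extended deletion API, assuming `manager` is an initialized ModelManager and the model name is hypothetical. With delete_files=True the method removes whichever artifact the config names: a weights file, a local diffusers directory, or the cached Hugging Face repo, in that order of precedence.

manager.del_model('my-old-model')                     # drop the registry entry only
manager.del_model('my-old-model', delete_files=True)  # also remove the files on disk
manager.commit('configs/models.yaml')                 # persist the edited config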
    def add_model(self, model_name:str, model_attributes:dict, clobber:bool=False) -> None:
        '''
@ -362,8 +387,14 @@ class ModelManager(object):
            vae = os.path.normpath(os.path.join(Globals.root,vae))
            if os.path.exists(vae):
                print(f'   | Loading VAE weights from: {vae}')
                vae_ckpt = None
                vae_dict = None
                if vae.endswith('.safetensors'):
                    vae_ckpt = safetensors.torch.load_file(vae)
                    vae_dict = {k: v for k, v in vae_ckpt.items() if k[0:4] != "loss"}
                else:
                    vae_ckpt = torch.load(vae, map_location="cpu")
                    vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
                    vae_dict = {k: v for k, v in vae_ckpt['state_dict'].items() if k[0:4] != "loss"}
                model.first_stage_model.load_state_dict(vae_dict, strict=False)
            else:
                print(f'   | VAE file {vae} not found. Skipping.')
@ -407,7 +438,7 @@ class ModelManager(object):
            safety_checker=None,
            local_files_only=not Globals.internet_available
        )
        if 'vae' in mconfig:
        if 'vae' in mconfig and mconfig['vae'] is not None:
            vae = self._load_vae(mconfig['vae'])
            pipeline_args.update(vae=vae)
        if not isinstance(name_or_path,Path):
@ -516,6 +547,7 @@ class ModelManager(object):
                              repo_or_path:Union[str,Path],
                              model_name:str=None,
                              description:str=None,
                              vae:dict=None,
                              commit_to_conf:Path=None,
                              )->bool:
        '''
@ -533,10 +565,11 @@ class ModelManager(object):
        description = description or f'imported diffusers model {model_name}'
        new_config = dict(
            description=description,
            vae=vae,
            format='diffusers',
        )
        if isinstance(repo_or_path,Path) and repo_or_path.exists():
            new_config.update(path=repo_or_path)
            new_config.update(path=str(repo_or_path))
        else:
            new_config.update(repo_id=repo_or_path)
@ -548,6 +581,7 @@ class ModelManager(object):
    def import_ckpt_model(self,
                          weights:Union[str,Path],
                          config:Union[str,Path]='configs/stable-diffusion/v1-inference.yaml',
                          vae:Union[str,Path]=None,
                          model_name:str=None,
                          model_description:str=None,
                          commit_to_conf:Path=None,
@ -558,6 +592,9 @@ class ModelManager(object):
        "weights" can be either a path-like object corresponding to a local .ckpt file
        or a http/https URL pointing to a remote model.

        "vae" is a Path or str object pointing to a ckpt or safetensors file to be used
        as the VAE for this model.

        "config" is the model config file to use with this ckpt file. It defaults to
        v1-inference.yaml. If a URL is provided, the config will be downloaded.
@ -584,6 +621,8 @@ class ModelManager(object):
            width=512,
            height=512
        )
        if vae:
            new_config['vae'] = vae
        self.add_model(model_name, new_config, True)
        if commit_to_conf:
            self.commit(commit_to_conf)
@ -623,7 +662,7 @@ class ModelManager(object):

    def convert_and_import(self,
                           ckpt_path:Path,
                           diffuser_path:Path,
                           diffusers_path:Path,
                           model_name=None,
                           model_description=None,
                           commit_to_conf:Path=None,
@ -635,46 +674,56 @@ class ModelManager(object):
        new_config = None
        from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffuser
        import transformers
        if diffuser_path.exists():
            print(f'ERROR: The path {str(diffuser_path)} already exists. Please move or remove it and try again.')
        if diffusers_path.exists():
            print(f'ERROR: The path {str(diffusers_path)} already exists. Please move or remove it and try again.')
            return

        model_name = model_name or diffuser_path.name
        model_name = model_name or diffusers_path.name
        model_description = model_description or f'Optimized version of {model_name}'
        print(f'>> {model_name}: optimizing (30-60s).')
        print(f'>> Optimizing {model_name} (30-60s)')
        try:
            verbosity = transformers.logging.get_verbosity()
            transformers.logging.set_verbosity_error()
            convert_ckpt_to_diffuser(ckpt_path, diffuser_path, extract_ema=True)
            convert_ckpt_to_diffuser(ckpt_path, diffusers_path, extract_ema=True)
            transformers.logging.set_verbosity(verbosity)
            print(f'>> Success. Optimized model is now located at {str(diffuser_path)}')
            print(f'>> Writing new config file entry for {model_name}...', end='')
            print(f'>> Success. Optimized model is now located at {str(diffusers_path)}')
            print(f'>> Writing new config file entry for {model_name}')
            new_config = dict(
                path=str(diffuser_path),
                path=str(diffusers_path),
                description=model_description,
                format='diffusers',
            )

            # HACK (LS): in the event that the original entry is using a custom ckpt VAE, we try to
            # map that VAE onto a diffuser VAE using a hard-coded dictionary.
            # I would prefer to do this differently: We load the ckpt model into memory, swap the
            # VAE in memory, and then pass that to convert_ckpt_to_diffuser() so that the swapped
            # VAE is built into the model. However, when I tried this I got obscure key errors.
            if model_name in self.config and (vae_ckpt_path := self.model_info(model_name)['vae']):
                vae_basename = Path(vae_ckpt_path).stem
                diffusers_vae = None
                if (diffusers_vae := VAE_TO_REPO_ID.get(vae_basename,None)):
                    print(f'>> {vae_basename} VAE corresponds to known {diffusers_vae} diffusers version')
                    new_config.update(
                        vae = {'repo_id': diffusers_vae}
                    )
                else:
                    print(f'** Custom VAE "{vae_basename}" found, but corresponding diffusers model unknown')
                    print(f'** Using "stabilityai/sd-vae-ft-mse"; If this isn\'t right, please edit the model config')
                    new_config.update(
                        vae = {'repo_id': 'stabilityai/sd-vae-ft-mse'}
                    )

            self.del_model(model_name)
            self.add_model(model_name, new_config, True)
            if commit_to_conf:
                self.commit(commit_to_conf)
            print('>> Conversion succeeded')
        except Exception as e:
            print(f'** Conversion failed: {str(e)}')
            traceback.print_exc()

        print('done.')
        return new_config
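
A minimal sketch of invoking the renamed method, with hypothetical paths; `manager` is again an initialized ModelManager:

from pathlib import Path

new_entry = manager.convert_and_import(
    ckpt_path=Path('models/ldm/stable-diffusion-v1/my-model.ckpt'),  # hypothetical legacy checkpoint
    diffusers_path=Path('models/diffusers/my-model'),                # must not exist yet
    commit_to_conf=Path('configs/models.yaml'),
)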
    def del_config(self, model_name:str, gen, opt, completer):
        current_model = gen.model_name
        if model_name == current_model:
            print("** Can't delete active model. !switch to another model first. **")
            return
        gen.model_manager.del_model(model_name)
        gen.model_manager.commit(opt.conf)
        print(f'** {model_name} deleted')
        completer.del_model(model_name)

    def search_models(self, search_folder):
        print(f'>> Finding Models In: {search_folder}')
        models_folder_ckpt = Path(search_folder).glob('**/*.ckpt')
@ -756,7 +805,6 @@ class ModelManager(object):

        print('** Legacy version <= 2.2.5 model directory layout detected. Reorganizing.')
        print('** This is a quick one-time operation.')
        from shutil import move, rmtree

        # transformer files get moved into the hub directory
        if cls._is_huggingface_hub_directory_present():
@ -972,6 +1020,27 @@ class ModelManager(object):

        return vae

    @staticmethod
    def _delete_model_from_cache(repo_id):
        cache_info = scan_cache_dir(global_cache_dir('diffusers'))

        # I'm sure there is a way to do this with comprehensions
        # but the code quickly became incomprehensible!
        hashes_to_delete = set()
        for repo in cache_info.repos:
            if repo.repo_id==repo_id:
                for revision in repo.revisions:
                    hashes_to_delete.add(revision.commit_hash)
        strategy = cache_info.delete_revisions(*hashes_to_delete)
        print(f'** deletion of this model is expected to free {strategy.expected_freed_size_str}')
        strategy.execute()

    @staticmethod
    def _abs_path(path:Union[str,Path])->Path:
        if path is None or Path(path).is_absolute():
            return path
        return Path(Globals.root,path).resolve()

    @staticmethod
    def _is_huggingface_hub_directory_present() -> bool:
        return os.getenv('HF_HOME') is not None or os.getenv('XDG_CACHE_HOME') is not None
@ -4,7 +4,6 @@
# and modified slightly by Lincoln Stein (@lstein) to work with InvokeAI

import argparse
from argparse import Namespace
import logging
import math
import os
@ -207,6 +206,12 @@ def parse_args():
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=Path,
@ -455,7 +460,8 @@ def do_textual_inversion_training(
    checkpointing_steps:int=500,
    resume_from_checkpoint:Path=None,
    enable_xformers_memory_efficient_attention:bool=False,
    root_dir:Path=None
    root_dir:Path=None,
    hub_model_id:str=None,
):
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != local_rank:
@ -521,7 +527,7 @@ def do_textual_inversion_training(

    # Load tokenizer
    if tokenizer_name:
        tokenizer = CLIPTokenizer.from_pretrained(tokenizer_name, cache_dir=global_cache_dir('transformers'))
        tokenizer = CLIPTokenizer.from_pretrained(tokenizer_name, **pipeline_args)
    else:
        tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="tokenizer", **pipeline_args)
@ -631,7 +637,7 @@ def do_textual_inversion_training(
        text_encoder, optimizer, train_dataloader, lr_scheduler
    )

    # For mixed precision training we cast the text_encoder and vae weights to half-precision
    # For mixed precision training we cast the unet and vae weights to half-precision
    # as these models are only used for inference, keeping weights in full precision is not required.
    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
@ -670,6 +676,7 @@ def do_textual_inversion_training(
    logger.info(f"  Total optimization steps = {max_train_steps}")
    global_step = 0
    first_epoch = 0
    resume_step = None

    # Potentially load in the weights and states from a previous save
    if resume_from_checkpoint:
@ -680,14 +687,21 @@ def do_textual_inversion_training(
            dirs = os.listdir(output_dir)
            dirs = [d for d in dirs if d.startswith("checkpoint")]
            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
            path = dirs[-1]
            path = dirs[-1] if len(dirs) > 0 else None

        if path is None:
            accelerator.print(
                f"Checkpoint '{resume_from_checkpoint}' does not exist. Starting a new training run."
            )
            resume_from_checkpoint = None
        else:
            accelerator.print(f"Resuming from checkpoint {path}")
            accelerator.load_state(os.path.join(output_dir, path))
            global_step = int(path.split("-")[1])

            resume_global_step = global_step * gradient_accumulation_steps
            first_epoch = resume_global_step // num_update_steps_per_epoch
            resume_step = resume_global_step % num_update_steps_per_epoch
            first_epoch = global_step // num_update_steps_per_epoch
            resume_step = resume_global_step % (num_update_steps_per_epoch * gradient_accumulation_steps)

    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(global_step, max_train_steps), disable=not accelerator.is_local_main_process)

@ -700,7 +714,7 @@ def do_textual_inversion_training(
        text_encoder.train()
        for step, batch in enumerate(train_dataloader):
            # Skip steps until we reach the resumed step
            if resume_from_checkpoint and epoch == first_epoch and step < resume_step:
            if resume_step and resume_from_checkpoint and epoch == first_epoch and step < resume_step:
                if step % gradient_accumulation_steps == 0:
                    progress_bar.update(1)
                continue
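
A worked example of the revised resume arithmetic, with illustrative numbers only:

global_step = 1000                  # parsed from a checkpoint named "checkpoint-1000"
gradient_accumulation_steps = 2
num_update_steps_per_epoch = 250    # optimizer updates per epoch

resume_global_step = global_step * gradient_accumulation_steps      # 2000 dataloader steps consumed
first_epoch = global_step // num_update_steps_per_epoch             # 1000 // 250 == 4
resume_step = resume_global_step % (num_update_steps_per_epoch
                                    * gradient_accumulation_steps)  # 2000 % 500 == 0
# training resumes at the start of epoch 4 with no leftover batches to skip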
@ -1,18 +1,16 @@
import math
import os.path
from functools import partial
from typing import Optional

import clip
import kornia
import torch
import torch.nn as nn
from functools import partial
import clip
from einops import rearrange, repeat
from einops import repeat
from transformers import CLIPTokenizer, CLIPTextModel
import kornia
from ldm.invoke.devices import choose_torch_device
from ldm.invoke.globals import Globals, global_cache_dir
#from ldm.modules.textual_inversion_manager import TextualInversionManager

from ldm.invoke.devices import choose_torch_device
from ldm.invoke.globals import global_cache_dir
from ldm.modules.x_transformer import (
    Encoder,
    TransformerWrapper,
@ -654,21 +652,22 @@ class WeightedFrozenCLIPEmbedder(FrozenCLIPEmbedder):
            per_token_weights += [weight] * len(this_fragment_token_ids)

        # leave room for bos/eos
        if len(all_token_ids) > self.max_length - 2:
            excess_token_count = len(all_token_ids) - self.max_length - 2
        max_token_count_without_bos_eos_markers = self.max_length - 2
        if len(all_token_ids) > max_token_count_without_bos_eos_markers:
            excess_token_count = len(all_token_ids) - max_token_count_without_bos_eos_markers
            # TODO build nice description string of how the truncation was applied
            # this should be done by calling self.tokenizer.convert_ids_to_tokens() then passing the result to
            # self.tokenizer.convert_tokens_to_string() for the token_ids on each side of the truncation limit.
            print(f">> Prompt is {excess_token_count} token(s) too long and has been truncated")
            all_token_ids = all_token_ids[0:self.max_length]
            per_token_weights = per_token_weights[0:self.max_length]
            all_token_ids = all_token_ids[0:max_token_count_without_bos_eos_markers]
            per_token_weights = per_token_weights[0:max_token_count_without_bos_eos_markers]

        # pad out to a 77-entry array: [eos_token, <prompt tokens>, eos_token, ..., eos_token]
        # pad out to a 77-entry array: [bos_token, <prompt tokens>, eos_token, pad_token…]
        # (77 = self.max_length)
        all_token_ids = [self.tokenizer.bos_token_id] + all_token_ids + [self.tokenizer.eos_token_id]
        per_token_weights = [1.0] + per_token_weights + [1.0]
        pad_length = self.max_length - len(all_token_ids)
        all_token_ids += [self.tokenizer.eos_token_id] * pad_length
        all_token_ids += [self.tokenizer.pad_token_id] * pad_length
        per_token_weights += [1.0] * pad_length

        all_token_ids_tensor = torch.tensor(all_token_ids, dtype=torch.long).to(self.device)
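
Concretely, the corrected padding produces the standard CLIP layout. An illustration with made-up token ids (real values come from the tokenizer; for CLIP the pad id commonly equals the eos id, which is why the old eos-padding mostly worked):

bos, eos, pad = 49406, 49407, 49407   # illustrative CLIP special-token ids
prompt_tokens = [320, 1125, 539]      # three hypothetical prompt token ids
max_length = 77

ids = [bos] + prompt_tokens + [eos]
ids += [pad] * (max_length - len(ids))
# -> [49406, 320, 1125, 539, 49407, 49407, ..., 49407], exactly 77 entries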
@ -3,8 +3,9 @@ import math
import torch
from transformers import CLIPTokenizer, CLIPTextModel

from ldm.modules.textual_inversion_manager import TextualInversionManager
from ldm.invoke.devices import torch_dtype
from ldm.modules.textual_inversion_manager import TextualInversionManager


class WeightedPromptFragmentsToEmbeddingsConverter():

@ -22,8 +23,8 @@ class WeightedPromptFragmentsToEmbeddingsConverter():
        return self.tokenizer.model_max_length

    def get_embeddings_for_weighted_prompt_fragments(self,
                 text: list[str],
                 fragment_weights: list[float],
                 text: list[list[str]],
                 fragment_weights: list[list[float]],
                 should_return_tokens: bool = False,
                 device='cpu'
                 ) -> torch.Tensor:

@ -198,12 +199,12 @@ class WeightedPromptFragmentsToEmbeddingsConverter():
        all_token_ids = all_token_ids[0:max_token_count_without_bos_eos_markers]
        per_token_weights = per_token_weights[0:max_token_count_without_bos_eos_markers]

        # pad out to a self.max_length-entry array: [eos_token, <prompt tokens>, eos_token, ..., eos_token]
        # pad out to a self.max_length-entry array: [bos_token, <prompt tokens>, eos_token, pad_token…]
        # (typically self.max_length == 77)
        all_token_ids = [self.tokenizer.bos_token_id] + all_token_ids + [self.tokenizer.eos_token_id]
        per_token_weights = [1.0] + per_token_weights + [1.0]
        pad_length = self.max_length - len(all_token_ids)
        all_token_ids += [self.tokenizer.eos_token_id] * pad_length
        all_token_ids += [self.tokenizer.pad_token_id] * pad_length
        per_token_weights += [1.0] * pad_length

        all_token_ids_tensor = torch.tensor(all_token_ids, dtype=torch.long, device=device)
@ -38,11 +38,15 @@ class TextualInversionManager():
            if concept_name in self.hf_concepts_library.concepts_loaded:
                continue
            trigger = self.hf_concepts_library.concept_to_trigger(concept_name)
            if self.has_textual_inversion_for_trigger_string(trigger):
            if self.has_textual_inversion_for_trigger_string(trigger) \
               or self.has_textual_inversion_for_trigger_string(concept_name) \
               or self.has_textual_inversion_for_trigger_string(f'<{concept_name}>'): # in case a token with literal angle brackets encountered
                print(f'>> Loaded local embedding for trigger {concept_name}')
                continue
            bin_file = self.hf_concepts_library.get_concept_model_path(concept_name)
            if not bin_file:
                continue
            print(f'>> Loaded remote embedding for trigger {concept_name}')
            self.load_textual_inversion(bin_file)
            self.hf_concepts_library.concepts_loaded[concept_name]=True
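
The broadened check means a local embedding now suppresses the remote download whether it was registered under the library trigger, the bare concept name, or an angle-bracketed variant. A sketch with a hypothetical concept name and manager instance:

concept_name = 'midjourney-style'   # hypothetical
trigger = manager.hf_concepts_library.concept_to_trigger(concept_name)
candidates = [trigger, concept_name, f'<{concept_name}>']
# any of these three matching a loaded trigger string skips the download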
@ -50,6 +54,8 @@ class TextualInversionManager():
        return [ti.trigger_string for ti in self.textual_inversions]

    def load_textual_inversion(self, ckpt_path, defer_injecting_tokens: bool=False):
        if str(ckpt_path).endswith('.DS_Store'):
            return
        try:
            scan_result = scan_file_path(ckpt_path)
            if scan_result.infected_files == 1:

@ -66,8 +72,9 @@ class TextualInversionManager():
                self._add_textual_inversion(embedding_info['name'],
                                            embedding_info['embedding'],
                                            defer_injecting_tokens=defer_injecting_tokens)
            except ValueError:
                print(f'   | ignoring incompatible embedding {embedding_info["name"]}')
            except ValueError as e:
                print(f'   | Ignoring incompatible embedding {embedding_info["name"]}')
                print(f'   | The error was {str(e)}')
        else:
            print(f'>> Failed to load embedding located at {ckpt_path}. Unsupported file.')

@ -151,7 +158,8 @@ class TextualInversionManager():
            try:
                self._inject_tokens_and_assign_embeddings(ti)
            except ValueError as e:
                print(f'   | ignoring incompatible embedding trigger {ti.trigger_string}')
                print(f'   | Ignoring incompatible embedding trigger {ti.trigger_string}')
                print(f'   | The error was {str(e)}')
                continue
            injected_token_ids.append(ti.trigger_token_id)
            injected_token_ids.extend(ti.pad_token_ids)
130 pyproject.toml Normal file
@ -0,0 +1,130 @@
[build-system]
requires = ["setuptools~=65.5", "pip~=22.3", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "InvokeAI"
description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
requires-python = ">=3.9, <3.11"
readme = { content-type = "text/markdown", file = "README.md" }
keywords = ["stable-diffusion", "AI"]
dynamic = ["version"]
license = { file = "LICENSE" }
authors = [{ name = "The InvokeAI Project", email = "lincoln.stein@gmail.com" }]
classifiers = [
  'Development Status :: 4 - Beta',
  'Environment :: GPU',
  'Environment :: GPU :: NVIDIA CUDA',
  'Environment :: MacOS X',
  'Intended Audience :: End Users/Desktop',
  'Intended Audience :: Developers',
  'License :: OSI Approved :: MIT License',
  'Operating System :: POSIX :: Linux',
  'Operating System :: MacOS',
  'Operating System :: Microsoft :: Windows',
  'Programming Language :: Python :: 3 :: Only',
  'Programming Language :: Python :: 3.8',
  'Programming Language :: Python :: 3.9',
  'Programming Language :: Python :: 3.10',
  'Topic :: Artistic Software',
  'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
  'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
  'Topic :: Multimedia :: Graphics',
  'Topic :: Scientific/Engineering :: Artificial Intelligence',
  'Topic :: Scientific/Engineering :: Image Processing',
]
dependencies = [
  "accelerate",
  "albumentations",
  "clip_anytorch",  # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
  "clipseg @ https://github.com/invoke-ai/clipseg/archive/relaxed-python-requirement.zip",  # is this still necessary with diffusers?
  "datasets",
  "diffusers[torch]~=0.11",
  "dnspython==2.2.1",
  "einops",
  "eventlet",
  "facexlib",
  "flask==2.1.3",
  "flask_cors==3.0.10",
  "flask_socketio==5.3.0",
  "flaskwebgui==1.0.3",
  "getpass_asterisk",
  "gfpgan==1.3.8",
  "huggingface-hub>=0.11.1",
  "imageio",
  "imageio-ffmpeg",
  "k-diffusion",  # replacing "k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip",
  "kornia",
  "npyscreen",
  "numpy~=1.23",
  "omegaconf",
  "opencv-python",
  "picklescan",
  "pillow",
  "pudb",
  "pypatchmatch",
  "pyreadline3",
  "pytorch-lightning==1.7.7",
  "realesrgan",
  "requests==2.25.1",
  "safetensors",
  "scikit-image>=0.19",
  "send2trash",
  "streamlit",
  "taming-transformers-rom1504",
  "test-tube>=0.7.5",
  "torch>=1.13.1",
  "torch-fidelity",
  "torchvision>=0.14.1",
  "torchmetrics",
  "transformers~=4.25",
  "windows-curses; sys_platform=='win32'",
]

[project.optional-dependencies]
"dist" = ["pip-tools", "pipdeptree", "twine"]
"docs" = [
  "mkdocs-material<9.0",
  "mkdocs-git-revision-date-localized-plugin",
  "mkdocs-redirects==1.2.0",
]
test = ["pytest>6.0.0", "pytest-cov"]

[project.scripts]
"configure_invokeai" = "ldm.invoke.configure_invokeai:main"
"dream" = "ldm.invoke:CLI.main"
"invoke" = "ldm.invoke:CLI.main"
"legacy_api" = "scripts:legacy_api.main"
"load_models" = "scripts:configure_invokeai.main"
"merge_embeddings" = "scripts:merge_embeddings.main"
"preload_models" = "ldm.invoke.configure_invokeai:main"

[project.urls]
"Homepage" = "https://invoke-ai.github.io/InvokeAI/"
"Documentation" = "https://invoke-ai.github.io/InvokeAI/"
"Source" = "https://github.com/invoke-ai/InvokeAI/"
"Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues"
"Discord" = "https://discord.gg/ZmtBAhwWhy"

[tool.setuptools.dynamic]
version = { attr = "ldm.invoke.__version__" }

[tool.setuptools.packages.find]
"where" = ["."]
"include" = ["assets", "backend*", "configs*", "frontend.dist*", "ldm*"]

[tool.setuptools.package-data]
"assets" = ["caution.png"]
"backend" = ["**.png"]
"configs" = ["*.example", "**/*.yaml", "*.txt"]
"frontend.dist" = ["**"]

[tool.setuptools.exclude-package-data]
configs = ["models.yaml"]

[tool.pytest.ini_options]
minversion = "6.0"
addopts = "-p pytest_cov --junitxml=junit/test-results.xml --cov=./ldm/ --cov=./backend --cov-branch"
python_files = ["test_*.py"]
pythonpath = [".venv/lib/python3.9", ".venv/lib/python3.10"]
testpaths = ["tests"]
2 scripts/load_models.py Normal file → Executable file
@ -5,7 +5,7 @@
# two machines must share a common .cache directory.

import warnings
import configure_invokeai
import ldm.invoke.configure_invokeai as configure_invokeai

if __name__ == '__main__':
    configure_invokeai.main()
92 scripts/merge_models.py Executable file
@ -0,0 +1,92 @@
#!/usr/bin/env python

import argparse
import os
import sys
import traceback
from pathlib import Path

from omegaconf import OmegaConf

from ldm.invoke.globals import (Globals, global_cache_dir, global_config_file,
                                global_set_root)
from ldm.invoke.model_manager import ModelManager

parser = argparse.ArgumentParser(description="InvokeAI diffusers model merging")
parser.add_argument(
    "--root_dir",
    "--root-dir",
    type=Path,
    default=Globals.root,
    help="Path to the invokeai runtime directory",
)
parser.add_argument(
    "--models",
    required=True,
    type=str,
    nargs="+",
    help="Two to three model names to be merged",
)
parser.add_argument(
    "--merged_model_name",
    "--destination",
    dest="merged_model_name",
    type=str,
    help="Name of the output model. If not specified, will be the concatenation of the input model names.",
)
parser.add_argument(
    "--alpha",
    type=float,
    default=0.5,
    help="The interpolation parameter, ranging from 0 to 1. It affects the ratio in which the checkpoints are merged. Higher values give more weight to the second and third models",
)
parser.add_argument(
    "--interpolation",
    dest="interp",
    type=str,
    choices=["weighted_sum", "sigmoid", "inv_sigmoid", "add_difference"],
    default="weighted_sum",
    help='Interpolation method to use. If three models are present, only "add_difference" will work.',
)
parser.add_argument(
    "--force",
    action="store_true",
    help="Try to merge models even if they are incompatible with each other",
)
parser.add_argument(
    "--clobber",
    "--overwrite",
    dest='clobber',
    action="store_true",
    help="Overwrite the merged model if --merged_model_name already exists",
)

args = parser.parse_args()
global_set_root(args.root_dir)

assert args.alpha >= 0 and args.alpha <= 1.0, "alpha must be between 0 and 1"
assert len(args.models) >= 2 and len(args.models) <= 3, "provide 2 or 3 models to merge"

if not args.merged_model_name:
    args.merged_model_name = "+".join(args.models)
    print(
        f'>> No --merged_model_name provided. Defaulting to "{args.merged_model_name}"'
    )

model_manager = ModelManager(OmegaConf.load(global_config_file()))
assert (args.clobber or args.merged_model_name not in model_manager.model_names()), f'A model named "{args.merged_model_name}" already exists. Use --clobber to overwrite.'

# It seems that the merge pipeline is not honoring cache_dir, so we set the
# HF_HOME environment variable here *before* we load diffusers.
cache_dir = str(global_cache_dir("diffusers"))
os.environ["HF_HOME"] = cache_dir
from ldm.invoke.merge_diffusers import merge_diffusion_models

try:
    merge_diffusion_models(**vars(args))
    print(f'>> Models merged into new model: "{args.merged_model_name}".')
except Exception as e:
    print(f"** An error occurred while merging the pipelines: {str(e)}")
    print("** DETAILS:")
    print(traceback.format_exc())
    sys.exit(-1)
87 scripts/merge_fe.py → scripts/merge_models_fe.py Normal file → Executable file
@ -3,11 +3,10 @@
import npyscreen
import os
import sys
import re
import shutil
import traceback
import argparse
from ldm.invoke.globals import Globals, global_set_root
from ldm.invoke.globals import Globals, global_set_root, global_cache_dir, global_config_file
from ldm.invoke.model_manager import ModelManager
from omegaconf import OmegaConf
from pathlib import Path
from typing import List

@ -30,6 +29,14 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
                      'inv_sigmoid',
                      'add_difference']

    def __init__(self, parentApp, name):
        self.parentApp = parentApp
        super().__init__(parentApp, name)

    @property
    def model_manager(self):
        return self.parentApp.model_manager

    def afterEditing(self):
        self.parentApp.setNextForm(None)

@ -83,6 +90,11 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
            lowest=0,
            value=0.5,
        )
        self.force = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name='Force merge of incompatible models',
            value=False,
        )
        self.merged_model_name = self.add_widget_intelligent(
            npyscreen.TitleText,
            name='Name for merged model',

@ -105,20 +117,51 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
            self.merge_method.value=0

    def on_ok(self):
        if self.validate_field_values():
        if self.validate_field_values() and self.check_for_overwrite():
            self.parentApp.setNextForm(None)
            self.editing = False
            self.parentApp.merge_arguments = self.marshall_arguments()
            npyscreen.notify('Starting the merge...')
            import ldm.invoke.merge_diffusers  # this keeps the message up while diffusers loads
        else:
            self.editing = True

    def ok_cancel(self):
    def on_cancel(self):
        sys.exit(0)

    def marshall_arguments(self)->dict:
        model_names = self.model_names
        models = [
            model_names[self.model1.value[0]],
            model_names[self.model2.value[0]],
        ]
        if self.model3.value[0] > 0:
            models.append(model_names[self.model3.value[0]-1])

        args = dict(
            models=models,
            alpha = self.alpha.value,
            interp = self.interpolations[self.merge_method.value[0]],
            force = self.force.value,
            merged_model_name = self.merged_model_name.value,
        )
        return args

    def check_for_overwrite(self)->bool:
        model_out = self.merged_model_name.value
        if model_out not in self.model_names:
            return True
        else:
            return npyscreen.notify_yes_no(f'The chosen merged model destination, {model_out}, is already in use. Overwrite?')

    def validate_field_values(self)->bool:
        bad_fields = []
        selected_models = set((self.model1.value[0],self.model2.value[0],self.model3.value[0]))
        if len(selected_models) < 3:
            bad_fields.append('Please select two or three DIFFERENT models to compare')
        model_names = self.model_names
        selected_models = set((model_names[self.model1.value[0]],model_names[self.model2.value[0]]))
        if self.model3.value[0] > 0:
            selected_models.add(model_names[self.model3.value[0]-1])
        if len(selected_models) < 2:
            bad_fields.append(f'Please select two or three DIFFERENT models to compare. You selected {selected_models}')
        if len(bad_fields) > 0:
            message = 'The following problems were detected and must be corrected:'
            for problem in bad_fields:

@ -129,13 +172,15 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
        return True

    def get_model_names(self)->List[str]:
        conf = OmegaConf.load(os.path.join(Globals.root,'configs/models.yaml'))
        model_names = [name for name in conf.keys() if conf[name].get('format',None)=='diffusers']
        model_names = [name for name in self.model_manager.model_names() if self.model_manager.model_info(name).get('format') == 'diffusers']
        print(model_names)
        return sorted(model_names)

class MyApplication(npyscreen.NPSAppManaged):
class Mergeapp(npyscreen.NPSAppManaged):
    def __init__(self):
        super().__init__()
        conf = OmegaConf.load(global_config_file())
        self.model_manager = ModelManager(conf,'cpu','float16')  # precision doesn't really matter here

    def onStart(self):
        npyscreen.setTheme(npyscreen.Themes.DefaultTheme)

@ -152,5 +197,21 @@ if __name__ == '__main__':
    args = parser.parse_args()
    global_set_root(args.root_dir)

    myapplication = MyApplication()
    myapplication.run()
    cache_dir = str(global_cache_dir('diffusers'))  # because it's not clear the merge pipeline is honoring cache_dir
    os.environ['HF_HOME'] = cache_dir

    mergeapp = Mergeapp()
    mergeapp.run()

    args = mergeapp.merge_arguments
    args.update(cache_dir = cache_dir)
    from ldm.invoke.merge_diffusers import merge_diffusion_models

    try:
        merge_diffusion_models(**args)
        print(f'>> Models merged into new model: "{args["merged_model_name"]}".')
    except Exception as e:
        print(f'** An error occurred while merging the pipelines: {str(e)}')
        print('** DETAILS:')
        print(traceback.format_exc())
        sys.exit(-1)
0 scripts/merge_embeddings.py → scripts/orig_scripts/merge_embeddings.py Normal file → Executable file
@ -5,7 +5,7 @@
# two machines must share a common .cache directory.

import warnings
import configure_invokeai
import ldm.invoke.configure_invokeai as configure_invokeai

if __name__ == '__main__':
    configure_invokeai.main()
@ -1,11 +1,11 @@
#!/usr/bin/env python

# Copyright 2023, Lincoln Stein @lstein
from ldm.invoke.globals import Globals, set_root
from ldm.invoke.globals import Globals, global_set_root
from ldm.invoke.textual_inversion_training import parse_args, do_textual_inversion_training

if __name__ == "__main__":
    args = parse_args()
    set_root(args.root_dir or Globals.root)
    global_set_root(args.root_dir or Globals.root)
    kwargs = vars(args)
    do_textual_inversion_training(**kwargs)
@ -6,14 +6,15 @@ import sys
import re
import shutil
import traceback
import curses
from ldm.invoke.globals import Globals, global_set_root
from omegaconf import OmegaConf
from pathlib import Path
from typing import List
import argparse

TRAINING_DATA = 'training-data'
TRAINING_DIR = 'text-inversion-training'
TRAINING_DATA = 'text-inversion-training-data'
TRAINING_DIR = 'text-inversion-output'
CONF_FILE = 'preferences.conf'

class textualInversionForm(npyscreen.FormMultiPageAction):

@ -43,6 +44,11 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
        except:
            pass

        self.add_widget_intelligent(
            npyscreen.FixedText,
            value='Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields, cursor arrows to make a selection, and space to toggle checkboxes.'
        )

        self.model = self.add_widget_intelligent(
            npyscreen.TitleSelectOne,
            name='Model Name:',

@ -82,18 +88,18 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
            max_height=4,
        )
        self.train_data_dir = self.add_widget_intelligent(
            npyscreen.TitleFilenameCombo,
            npyscreen.TitleFilename,
            name='Data Training Directory:',
            select_dir=True,
            must_exist=True,
            value=saved_args.get('train_data_dir',Path(Globals.root) / TRAINING_DATA / default_placeholder_token)
            must_exist=False,
            value=str(saved_args.get('train_data_dir',Path(Globals.root) / TRAINING_DATA / default_placeholder_token))
        )
        self.output_dir = self.add_widget_intelligent(
            npyscreen.TitleFilenameCombo,
            npyscreen.TitleFilename,
            name='Output Destination Directory:',
            select_dir=True,
            must_exist=False,
            value=saved_args.get('output_dir',Path(Globals.root) / TRAINING_DIR / default_placeholder_token)
            value=str(saved_args.get('output_dir',Path(Globals.root) / TRAINING_DIR / default_placeholder_token))
        )
        self.resolution = self.add_widget_intelligent(
            npyscreen.TitleSelectOne,

@ -182,8 +188,8 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
    def initializer_changed(self):
        placeholder = self.placeholder_token.value
        self.prompt_token.value = f'(Trigger by using <{placeholder}> in your prompts)'
        self.train_data_dir.value = Path(Globals.root) / TRAINING_DATA / placeholder
        self.output_dir.value = Path(Globals.root) / TRAINING_DIR / placeholder
        self.train_data_dir.value = str(Path(Globals.root) / TRAINING_DATA / placeholder)
        self.output_dir.value = str(Path(Globals.root) / TRAINING_DIR / placeholder)
        self.resume_from_checkpoint.value = Path(self.output_dir.value).exists()

    def on_ok(self):

@ -221,7 +227,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):

    def get_model_names(self)->(List[str],int):
        conf = OmegaConf.load(os.path.join(Globals.root,'configs/models.yaml'))
        model_names = list(conf.keys())
        model_names = [idx for idx in sorted(list(conf.keys())) if conf[idx].get('format',None)=='diffusers']
        defaults = [idx for idx in range(len(model_names)) if 'default' in conf[model_names[idx]]]
        return (model_names,defaults[0])

@ -288,7 +294,9 @@ def save_args(args:dict):
    '''
    Save the current argument values to an omegaconf file
    '''
    conf_file = Path(Globals.root) / TRAINING_DIR / CONF_FILE
    dest_dir = Path(Globals.root) / TRAINING_DIR
    os.makedirs(dest_dir, exist_ok=True)
    conf_file = dest_dir / CONF_FILE
    conf = OmegaConf.create(args)
    OmegaConf.save(config=conf, f=conf_file)
99 setup.py
@ -1,99 +0,0 @@
import sys
import os
import re
from setuptools import setup, find_packages

def list_files(directory):
    listing = list()
    for root, dirs, files in os.walk(directory,topdown=False):
        pair = (root,[os.path.join(root,f) for f in files])
        listing.append(pair)
    return listing


def get_version()->str:
    from ldm.invoke import __version__ as version
    return version

# The canonical version number is stored in the file ldm/invoke/_version.py
VERSION = get_version()
DESCRIPTION = ('An implementation of Stable Diffusion which provides various new features'
               ' and options to aid the image generation process')
LONG_DESCRIPTION = ('This version of Stable Diffusion features a slick WebGUI, an'
                    ' interactive command-line script that combines text2img and img2img'
                    ' functionality in a "dream bot" style interface, and multiple features'
                    ' and other enhancements.')
HOMEPAGE = 'https://github.com/invoke-ai/InvokeAI'
FRONTEND_FILES = list_files('frontend/dist')
FRONTEND_FILES.append(('assets',['assets/caution.png']))
print(FRONTEND_FILES)

REQUIREMENTS=[
    'accelerate',
    'albumentations',
    'diffusers',
    'eventlet',
    'flask_cors',
    'flask_socketio',
    'flaskwebgui',
    'getpass_asterisk',
    'imageio-ffmpeg',
    'pyreadline3',
    'realesrgan',
    'send2trash',
    'streamlit',
    'taming-transformers-rom1504',
    'test-tube',
    'torch-fidelity',
    'torch',
    'torchvision',
    'transformers',
    'picklescan',
    'clip',
    'clipseg',
    'gfpgan',
    'k-diffusion',
    'pypatchmatch',
]

setup(
    name='InvokeAI',
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    author='The InvokeAI Project',
    author_email='lincoln.stein@gmail.com',
    url=HOMEPAGE,
    license='MIT',
    packages=find_packages(exclude=['tests.*']),
    install_requires=REQUIREMENTS,
    dependency_links=['https://download.pytorch.org/whl/torch_stable.html'],
    python_requires='>=3.9, <4',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: GPU',
        'Environment :: GPU :: NVIDIA CUDA',
        'Environment :: MacOS X',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Artistic Software',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
        'Topic :: Multimedia :: Graphics',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Image Processing',
    ],
    scripts = ['scripts/invoke.py','scripts/configure_invokeai.py', 'scripts/sd-metadata.py',
               'scripts/preload_models.py', 'scripts/images2prompt.py','scripts/merge_embeddings.py',
               'scripts/textual_inversion_fe.py','scripts/textual_inversion.py'
               ],
    data_files=FRONTEND_FILES,
)