diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index b9b7053aba..ec4a105a9e 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -17,9 +17,9 @@ jobs: - aarch64 include: - arch: x86_64 - conda-env-file: environment.yml + conda-env-file: environment-lin-cuda.yml - arch: aarch64 - conda-env-file: environment-linux-aarch64.yml + conda-env-file: environment-lin-aarch64.yml runs-on: ubuntu-latest name: ${{ matrix.arch }} steps: diff --git a/.github/workflows/test-invoke-conda.yml b/.github/workflows/test-invoke-conda.yml index a144303cc3..8fc3b10fef 100644 --- a/.github/workflows/test-invoke-conda.yml +++ b/.github/workflows/test-invoke-conda.yml @@ -23,7 +23,7 @@ jobs: - macOS-12 include: - os: ubuntu-latest - environment-file: environment.yml + environment-file: environment-lin-cuda.yml default-shell: bash -l {0} - os: macOS-12 environment-file: environment-mac.yml @@ -49,6 +49,9 @@ jobs: - name: create models.yaml from example run: cp configs/models.yaml.example configs/models.yaml + - name: create environment.yml + run: cp environments-and-requirements/${{ matrix.environment-file }} environment.yml + - name: Use cached conda packages id: use-cached-conda-packages uses: actions/cache@v3 @@ -61,7 +64,7 @@ jobs: uses: conda-incubator/setup-miniconda@v2 with: activate-environment: ${{ env.CONDA_ENV_NAME }} - environment-file: ${{ matrix.environment-file }} + environment-file: environment.yml miniconda-version: latest - name: set test prompt to main branch validation diff --git a/1-click-installer/install.bat b/1-click-installer/install.bat index c8e68d7391..6dc644783c 100644 --- a/1-click-installer/install.bat +++ b/1-click-installer/install.bat @@ -81,7 +81,7 @@ call conda activate @rem create the environment call conda env remove -n invokeai -mklink environment.yml environments-and-requirements\environment-win-cuda.yml +copy environments-and-requirements\environment-win-cuda.yml 
environment.yml call conda env create if "%ERRORLEVEL%" NEQ "0" ( echo "" diff --git a/docker-build/Dockerfile b/docker-build/Dockerfile index 1de6f778bb..3f20e4b6f0 100644 --- a/docker-build/Dockerfile +++ b/docker-build/Dockerfile @@ -43,33 +43,42 @@ RUN apt-get update \ ARG invokeai_git=invoke-ai/InvokeAI ARG invokeai_branch=main ARG project_name=invokeai -RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git /${project_name} \ - && cp /${project_name}/configs/models.yaml.example /${project_name}/configs/models.yaml \ - && ln -s /data/models/v1-5-pruned-emaonly.ckpt /${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt \ - && ln -s /data/outputs/ /${project_name}/outputs +ARG conda_env_file=environment-lin-cuda.yml +RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git "/${project_name}" \ + && cp \ + "/${project_name}/configs/models.yaml.example" \ + "/${project_name}/configs/models.yaml" \ + && ln -sf \ + "/${project_name}/environments-and-requirements/${conda_env_file}" \ + "/${project_name}/environment.yml" \ + && ln -sf \ + /data/models/v1-5-pruned-emaonly.ckpt \ + "/${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt" \ + && ln -sf \ + /data/outputs/ \ + "/${project_name}/outputs" # set workdir -WORKDIR /${project_name} +WORKDIR "/${project_name}" # install conda env and preload models ARG conda_prefix=/opt/conda -ARG conda_env_file=environment.yml -COPY --from=get_miniconda ${conda_prefix} ${conda_prefix} -RUN source ${conda_prefix}/etc/profile.d/conda.sh \ +COPY --from=get_miniconda "${conda_prefix}" "${conda_prefix}" +RUN source "${conda_prefix}/etc/profile.d/conda.sh" \ && conda init bash \ && source ~/.bashrc \ && conda env create \ - --name ${project_name} \ - --file ${conda_env_file} \ + --name "${project_name}" \ && rm -Rf ~/.cache \ && conda clean -afy \ - && echo "conda activate ${project_name}" >> ~/.bashrc \ - && conda activate ${project_name} \ + && echo "conda 
activate ${project_name}" >> ~/.bashrc + +RUN source ~/.bashrc \ && python scripts/preload_models.py \ --no-interactive # Copy entrypoint and set env -ENV CONDA_PREFIX=${conda_prefix} -ENV PROJECT_NAME=${project_name} +ENV CONDA_PREFIX="${conda_prefix}" +ENV PROJECT_NAME="${project_name}" COPY docker-build/entrypoint.sh / ENTRYPOINT [ "/entrypoint.sh" ] diff --git a/docker-build/build.sh b/docker-build/build.sh index ebd1ca6a41..c7b94d7c0e 100755 --- a/docker-build/build.sh +++ b/docker-build/build.sh @@ -8,7 +8,7 @@ source ./docker-build/env.sh || echo "please run from repository root" || exit 1 invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}} invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda} -invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment.yml} +invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment-lin-cuda.yml} invokeai_git=${INVOKEAI_GIT:-invoke-ai/InvokeAI} invokeai_branch=${INVOKEAI_BRANCH:-main} huggingface_token=${HUGGINGFACE_TOKEN?} diff --git a/docs/features/WEBUIHOTKEYS.md b/docs/features/WEBUIHOTKEYS.md index 89432687dc..1f0af08dd6 100644 --- a/docs/features/WEBUIHOTKEYS.md +++ b/docs/features/WEBUIHOTKEYS.md @@ -2,7 +2,7 @@ title: WebUI Hotkey List --- -# **WebUI Hotkey List** +# :material-keyboard: **WebUI Hotkey List** ## General @@ -19,7 +19,7 @@ title: WebUI Hotkey List | ++ctrl+enter++ | Start processing | | ++shift+x++ | cancel Processing | | ++shift+d++ | Toggle Dark Mode | -| ` | Toggle console | +| ++"`"++ | Toggle console | ## Tabs @@ -48,10 +48,10 @@ title: WebUI Hotkey List | Setting | Hotkey | | ---------------------------- | --------------------- | -| [ | Decrease brush size | -| ] | Increase brush size | -| alt + [ | Decrease mask opacity | -| alt + ] | Increase mask opacity | +| ++"["++ | Decrease brush size | +| ++"]"++ | Increase brush size | +| ++alt+"["++ | Decrease mask opacity | +| ++alt+"]"++ | Increase mask opacity | | ++b++ | Select brush | | ++e++ | Select eraser 
| | ++ctrl+z++ | Undo brush stroke | diff --git a/docs/installation/INSTALL.md b/docs/installation/INSTALL.md index 1bfb5b147e..9cb65760bb 100644 --- a/docs/installation/INSTALL.md +++ b/docs/installation/INSTALL.md @@ -9,50 +9,49 @@ experience and preferences. 1. [1-click installer](INSTALL_1CLICK.md) - This is an automated shell script that will handle installation of - all dependencies for you, and is recommended for those who have - limited or no experience with the Python programming language, are - not currently interested in contributing to the project, and just want - the thing to install and run. In this version, you interact with the - web server and command-line clients through a shell script named - `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows), and perform - updates using `update.sh` and `update.bat`. + This is an automated shell script that will handle installation of + all dependencies for you, and is recommended for those who have + limited or no experience with the Python programming language, are + not currently interested in contributing to the project, and just want + the thing to install and run. In this version, you interact with the + web server and command-line clients through a shell script named + `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows), and perform + updates using `update.sh` and `update.bat`. 2. [Pre-compiled PIP installer](INSTALL_PCP.md) - This is a series of installer files for which all the requirements - for InvokeAI have been precompiled, thereby preventing the conflicts - that sometimes occur when an external library is changed unexpectedly. - It will leave you with an environment in which you interact directly - with the scripts for running the web and command line clients, and - you will update to new versions using standard developer commands. 
+ This is a series of installer files for which all the requirements + for InvokeAI have been precompiled, thereby preventing the conflicts + that sometimes occur when an external library is changed unexpectedly. + It will leave you with an environment in which you interact directly + with the scripts for running the web and command line clients, and + you will update to new versions using standard developer commands. - This method is recommended for users with a bit of experience using - the `git` and `pip` tools. + This method is recommended for users with a bit of experience using + the `git` and `pip` tools. -3. [Manual Installation](MANUAL_INSTALL.md) +3. [Manual Installation](INSTALL_MANUAL.md) - In this method you will manually run the commands needed to install - InvokeAI and its dependencies. We offer two recipes: one suited to - those who prefer the `conda` tool, and one suited to those who prefer - `pip` and Python virtual environments. + In this method you will manually run the commands needed to install + InvokeAI and its dependencies. We offer two recipes: one suited to + those who prefer the `conda` tool, and one suited to those who prefer + `pip` and Python virtual environments. + + This method is recommended for users who have previously used `conda` + or `pip` in the past, developers, and anyone who wishes to remain on + the cutting edge of future InvokeAI development and is willing to put + up with occasional glitches and breakage. - This method is recommended for users who have previously used `conda` - or `pip` in the past, developers, and anyone who wishes to remain on - the cutting edge of future InvokeAI development and is willing to put - up with occasional glitches and breakage. - 4. [Docker Installation](INSTALL_DOCKER.md) - We also offer a method for creating Docker containers containing - InvokeAI and its dependencies. 
This method is recommended for - individuals with experience with Docker containers and understand - the pluses and minuses of a container-based install. + We also offer a method for creating Docker containers containing + InvokeAI and its dependencies. This method is recommended for + individuals with experience with Docker containers and who understand + the pluses and minuses of a container-based install. 5. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md) - This method is suitable for running InvokeAI on a Google Colab - account. It is recommended for individuals who have previously - worked on the Colab and are comfortable with the Jupyter notebook - environment. - + This method is suitable for running InvokeAI on a Google Colab + account. It is recommended for individuals who have previously + worked on the Colab and are comfortable with the Jupyter notebook + environment. diff --git a/docs/installation/INSTALL_1CLICK.md b/docs/installation/INSTALL_1CLICK.md new file mode 100644 index 0000000000..30a9709f99 --- /dev/null +++ b/docs/installation/INSTALL_1CLICK.md @@ -0,0 +1,164 @@ +--- +title: The "One-Click" Installer +--- + +## Introduction + +The one-click installer is a shell script that attempts to automate +every step needed to install and run InvokeAI on a stock computer +running recent versions of Linux, MacOSX or Windows. + +Before you begin, make sure that you meet the [hardware +requirements](index.md#Hardware_Requirements) and have the appropriate +GPU drivers installed. In particular, if you are a Linux user with an +AMD GPU installed, you may need to install the [ROCm +driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html). + +Installation requires roughly 18G of free disk space to load the +libraries and recommended model weights files. + +## Walk through + +Though there are multiple steps, there really is only one click +involved to kick off the process. + +1. The 1-click installer is distributed in ZIP files. 
Download the one +that is appropriate for your operating system: + + !!! todo "Change the URLs after release" + + - [invokeAI-mac.zip](https://github.com/invoke-ai/InvokeAI/releases/download/2.1.3-rc1/invokeAI-mac.zip) + - [invokeAI-linux.zip](https://github.com/invoke-ai/InvokeAI/releases/download/2.1.3-rc1/invokeAI-linux.zip) + - [invokeAI-windows.zip](https://github.com/invoke-ai/InvokeAI/releases/download/2.1.3-rc1/invokeAI-windows.zip) + +2. Unpack the zip file into a directory that has at least 18G of free + space. Do *not* unpack into a directory that has an earlier version of + InvokeAI. + + This will create a new directory named "InvokeAI". This example + shows how this would look using the `unzip` command-line tool, + but you may use any graphical or command-line Zip extractor: + + ```bash + C:\Documents\Linco> unzip invokeAI-windows.zip + Archive: C: \Linco\Downloads\invokeAI-linux.zip + creating: invokeAI\ + inflating: invokeAI\install.bat + inflating: invokeAI\readme.txt + ``` + +3. If you are using a desktop GUI, double-click the installer file. + It will be named `install.bat` on Windows systems and `install.sh` + on Linux and Macintosh systems. + +4. Alternatively, form the command line, run the shell script or .bat + file: + + ```bash + C:\Documents\Linco> cd invokeAI + C:\Documents\Linco> install.bat + ``` + +5. Sit back and let the install script work. It will install various + binary requirements including Conda, Git and Python, then download + the current InvokeAI code and install it along with its + dependencies. + +6. After installation completes, the installer will launch a script + called `preload_models.py`, which will guide you through the + first-time process of selecting one or more Stable Diffusion model + weights files, downloading and configuring them. + + Note that the main Stable Diffusion weights file is protected by a + license agreement that you must agree to in order to use. 
The + script will list the steps you need to take to create an account on + the official site that hosts the weights files, accept the + agreement, and provide an access token that allows InvokeAI to + legally download and install the weights files. + + If you have already downloaded the weights file(s) for another + Stable Diffusion distribution, you may skip this step (by selecting + "skip" when prompted) and configure InvokeAI to use the + previously-downloaded files. The process for this is described in + [INSTALLING_MODELS.md]. + + 7. The script will now exit and you'll be ready to generate some + images. The invokeAI directory will contain numerous files. Look + for a shell script named `invoke.sh` (Linux/Mac) or `invoke.bat` + (Windows). Launch the script by double-clicking it or typing + its name at the command-line: + + ```bash + C:\Documents\Linco\invokeAI> cd invokeAI + C:\Documents\Linco\invokeAI> invoke.bat + ``` + + The `invoke.bat` (`invoke.sh`) script will give you the choice of + starting (1) the command-line interface, or (2) the web GUI. If you + start the latter, you can load the user interface by pointing your + browser at http://localhost:9090. + + The `invoke` script also offers you a third option labeled "open + the developer console". If you choose this option, you will be + dropped into a command-line interface in which you can run python + commands directly, access developer tools, and launch InvokeAI + with customized options. To do the latter, you would launch the + script `scripts/invoke.py` as shown in this example: + + ```bash + python scripts\invoke.py --web --max_load_models=3 \ + --model=waifu-1.3 --steps=30 --outdir=C:/Documents/AIPhotos + ``` + + These options are described in detail in the [Command-Line + Interface](../features/CLI.md) documentation. + +## Updating to newer versions + +This section describes how to update InvokeAI to new versions of the +software. 
+ +### Updating the stable version + +This distribution is changing rapidly, and we add new features on a +daily basis. To update to the latest released version (recommended), +run the `update.sh` (Linux/Mac) or `update.bat` (Windows) +scripts. This will fetch the latest release and re-run the +`preload_models` script to download any updated model files that may +be needed. You can also use this to add additional models that you did +not select at installation time. + +### Updating to the development version + +There may be times that there is a feature in the `development` branch +of InvokeAI that you'd like to take advantage of. Or perhaps there is +a branch that corrects an annoying bug. To do this, you will use the +developer's console. + +From within the invokeAI directory, run the command `invoke.sh` +(Linux/Mac) or `invoke.bat` (Windows) and select option (3) to open +the developer's console. Then run the following command to get the +`development branch`: + +```bash +git checkout development +git pull +conda env update +``` + +You can now close the developer console and run `invoke` as before. +If you get complaints about missing models, then you may need to do +the additional step of running `preload_models.py`. This happens +relatively infrequently. To do this, simply open up the developer's +console again and type `python scripts/preload_models.py`. + +## Troubleshooting + +If you run into problems during or after installation, the InvokeAI +team is available to help you. Either create an +[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub +site, or make a request for help on the "bugs-and-support" channel of +our [Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% +volunteer organization, but typically somebody will be available to +help you within 24 hours, and often much sooner. 
+ diff --git a/docs/installation/INSTALL_DOCKER.md b/docs/installation/INSTALL_DOCKER.md index 08d4c5a4d3..326ad39021 100644 --- a/docs/installation/INSTALL_DOCKER.md +++ b/docs/installation/INSTALL_DOCKER.md @@ -1,29 +1,28 @@ --- -Title: Docker +title: Docker --- # :fontawesome-brands-docker: Docker ## Before you begin -- For end users: Install InvokeAI locally using the instructions for - your OS. +- For end users: Install InvokeAI locally using the instructions for your OS. - For developers: For container-related development tasks or for enabling easy deployment to other environments (on-premises or cloud), follow these instructions. For general use, install locally to leverage your machine's GPU. ## Why containers? -They provide a flexible, reliable way to build and deploy InvokeAI. -You'll also use a Docker volume to store the largest model files and image -outputs as a first step in decoupling storage and compute. Future enhancements -can do this for other assets. See [Processes](https://12factor.net/processes) -under the Twelve-Factor App methodology for details on why running applications -in such a stateless fashion is important. +They provide a flexible, reliable way to build and deploy InvokeAI. You'll also +use a Docker volume to store the largest model files and image outputs as a +first step in decoupling storage and compute. Future enhancements can do this +for other assets. See [Processes](https://12factor.net/processes) under the +Twelve-Factor App methodology for details on why running applications in such a +stateless fashion is important. You can specify the target platform when building the image and running the -container. You'll also need to specify the InvokeAI requirements file -that matches the container's OS and the architecture it will run on. +container. You'll also need to specify the InvokeAI requirements file that +matches the container's OS and the architecture it will run on. 
Developers on Apple silicon (M1/M2): You [can't access your GPU cores from Docker containers](https://github.com/pytorch/pytorch/issues/81224) @@ -65,13 +64,14 @@ created in the last step. Some Suggestions of variables you may want to change besides the Token: -| Environment-Variable | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------------ | -| `HUGGINGFACE_TOKEN="hg_aewirhghlawrgkjbarug2"` | This is the only required variable, without you can't get the checkpoint | -| `ARCH=aarch64` | if you are using a ARM based CPU | -| `INVOKEAI_TAG=yourname/invokeai:latest` | the Container Repository / Tag which will be used | -| `INVOKEAI_CONDA_ENV_FILE=environment-linux-aarch64.yml` | since environment.yml wouldn't work with aarch | -| `INVOKEAI_GIT="-b branchname https://github.com/username/reponame"` | if you want to use your own fork | +| Environment-Variable | Default value | Description | +| ------------------------- | ----------------------------- | ---------------------------------------------------------------------------- | +| `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable, without you can't get the checkpoint | +| `ARCH` | x86_64 | if you are using a ARM based CPU | +| `INVOKEAI_TAG` | invokeai-x86_64 | the Container Repository / Tag which will be used | +| `INVOKEAI_CONDA_ENV_FILE` | environment-lin-cuda.yml | since environment.yml wouldn't work with aarch | +| `INVOKEAI_GIT` | invoke-ai/InvokeAI | the repository to use | +| `INVOKEAI_BRANCH` | main | the branch to checkout | #### Build the Image @@ -79,25 +79,41 @@ I provided a build script, which is located in `docker-build/build.sh` but still needs to be executed from the Repository root. 
```bash -docker-build/build.sh +./docker-build/build.sh ``` The build Script not only builds the container, but also creates the docker -volume if not existing yet, or if empty it will just download the models. When -it is done you can run the container via the run script +volume if not existing yet, or if empty it will just download the models. + +#### Run the Container + +After the build process is done, you can run the container via the provided +`docker-build/run.sh` script ```bash -docker-build/run.sh +./docker-build/run.sh ``` When used without arguments, the container will start the website and provide you the link to open it. But if you want to use some other parameters you can also do so. +!!! example + + ```bash + docker-build/run.sh --from_file tests/validate_pr_prompt.txt + ``` + + The output folder is located on the volume which is also used to store the model. + + Find out more about available CLI-Parameter at [features/CLI.md](../features/CLI.md) + +--- + !!! warning "Deprecated" - From here on it is the rest of the previous Docker-Docs, which will still - provide usefull informations for one or the other. + From here on you will find the rest of the previous Docker-Docs, which will still + provide some useful information. ## Usage (time to have fun) diff --git a/docs/installation/INSTALL_JUPYTER.md b/docs/installation/INSTALL_JUPYTER.md new file mode 100644 index 0000000000..aa8efd6630 --- /dev/null +++ b/docs/installation/INSTALL_JUPYTER.md @@ -0,0 +1,28 @@ +--- +title: Running InvokeAI on Google Colab using a Jupyter Notebook +--- + +# THIS NEEDS TO BE FLESHED OUT + +## Introduction + +We have a [Jupyter +notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb) +with cell-by-cell installation steps. 
It will download the code in +this repo as one of the steps, so instead of cloning this repo, simply +download the notebook from the link above and load it up in VSCode +(with the appropriate extensions installed)/Jupyter/JupyterLab and +start running the cells one-by-one. + +Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehand. + + +## Walkthrough + +## Updating to newer versions + +### Updating the stable version + +### Updating to the development version + +## Troubleshooting \ No newline at end of file diff --git a/docs/installation/MANUAL_INSTALL.md b/docs/installation/INSTALL_MANUAL.md similarity index 89% rename from docs/installation/MANUAL_INSTALL.md rename to docs/installation/INSTALL_MANUAL.md index 4e0089a9ca..f0c8a809aa 100644 --- a/docs/installation/MANUAL_INSTALL.md +++ b/docs/installation/INSTALL_MANUAL.md @@ -80,20 +80,17 @@ download installers from the following URLs !!! todo "Macintosh and Linux" - ```bash - ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml - ``` + ```bash + ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml + ``` - Replace `xxx` and `yyy` with the appropriate OS and GPU codes. + Replace `xxx` and `yyy` with the appropriate OS and GPU codes. - !!! todo "Windows" + !!! todo "Windows requires admin privileges to make links, so we use the copy (cp) command" - ```bash - mklink environment.yml environments-and-requirements\environment-win-cuda.yml - ``` - - Note that the order of arguments is reversed between the Linux/Mac and Windows - commands! + ```bash + cp environments-and-requirements\environment-win-cuda.yml environment.yml + ``` When this is done, confirm that a file `environment.yml` has been created in the InvokeAI root directory and that it points to the correct file in the @@ -136,6 +133,12 @@ download installers from the following URLs provide an access token that allows InvokeAI to legally download and install the weights files. 
+ If you have already downloaded the weights file(s) for another + Stable Diffusion distribution, you may skip this step (by selecting + "skip" when prompted) and configure InvokeAI to use the + previously-downloaded files. The process for this is described in + [INSTALLING_MODELS.md]. + If you get an error message about a module not being installed, check that the `invokeai` environment is active and if not, repeat step 5. @@ -191,8 +194,8 @@ prompting you to download the big Stable Diffusion weights files. To install InvokeAI with only the PIP package manager, please follow these steps: -1. Make sure you are using Python 3.9 or higher. Some InvokeAI - features require this: +1. Make sure you are using Python 3.9 or higher. The rest of the install + procedure depends on this: ```bash python -V @@ -233,17 +236,17 @@ operating system. !!! todo "Macintosh and Linux" - ```bash - ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt - ``` + ```bash + ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt + ``` Replace `xxx` and `yyy` with the appropriate OS and GPU codes. - !!! todo "Windows" + !!! todo "Windows requires admin privileges to make links, so we use the copy (cp) command instead" - ```bash - mklink requirements.txt environments-and-requirements\requirements-lin-win-colab-cuda.txt - ``` + ```bash + cp environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt + ``` Note that the order of arguments is reversed between the Linux/Mac and Windows commands! @@ -262,7 +265,7 @@ operating system. this: ```bash - pip install -r requirements.txt + pip install --prefer-binary -r requirements.txt ``` ## Troubleshooting @@ -277,8 +280,10 @@ Here are some common issues and their suggested solutions. incompatibility. While we have tried to minimize these, over time packages get updated and sometimes introduce incompatibilities. 
- We suggest that you search [Issues](https://github.com/invoke-ai/InvokeAI/issues) or the - Bug Report and Support channel of the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy). + We suggest that you search + [Issues](https://github.com/invoke-ai/InvokeAI/issues) or the + "bugs-and-support" channel of the [InvokeAI + Discord](https://discord.gg/ZmtBAhwWhy). You may also try to install the broken packages manually using PIP. To do this, activate the `invokeai` environment, and run `pip install` with the name and version of the diff --git a/docs/installation/INSTALL_PCP.md b/docs/installation/INSTALL_PCP.md new file mode 100644 index 0000000000..e321237e1c --- /dev/null +++ b/docs/installation/INSTALL_PCP.md @@ -0,0 +1,17 @@ +--- +title: Installing InvokeAI with the Pre-Compiled PIP Installer +--- + +# THIS NEEDS TO BE FLESHED OUT + +## Introduction + +## Walkthrough + +## Updating to newer versions + +### Updating the stable version + +### Updating to the development version + +## Troubleshooting \ No newline at end of file diff --git a/docs/installation/INSTALL_LINUX.md b/docs/installation/older_docs_to_be_removed/INSTALL_LINUX.md similarity index 100% rename from docs/installation/INSTALL_LINUX.md rename to docs/installation/older_docs_to_be_removed/INSTALL_LINUX.md diff --git a/docs/installation/INSTALL_MAC.md b/docs/installation/older_docs_to_be_removed/INSTALL_MAC.md similarity index 100% rename from docs/installation/INSTALL_MAC.md rename to docs/installation/older_docs_to_be_removed/INSTALL_MAC.md diff --git a/docs/installation/INSTALL_WINDOWS.md b/docs/installation/older_docs_to_be_removed/INSTALL_WINDOWS.md similarity index 100% rename from docs/installation/INSTALL_WINDOWS.md rename to docs/installation/older_docs_to_be_removed/INSTALL_WINDOWS.md diff --git a/environments-and-requirements/environment-mac.yml b/environments-and-requirements/environment-mac.yml index 21d133c572..99182476dd 100644 --- a/environments-and-requirements/environment-mac.yml 
+++ b/environments-and-requirements/environment-mac.yml @@ -29,7 +29,6 @@ dependencies: - pytorch-lightning=1.7.7 - scipy=1.9.3 - streamlit=1.12.2 - - taming-transformers-rom1504 - sympy=1.10.1 - tensorboard=2.10.0 - torchmetrics=0.10.1 @@ -46,6 +45,7 @@ dependencies: - getpass_asterisk - dependency_injector==4.40.0 - realesrgan==0.2.5.0 + - taming-transformers-rom1504 - test-tube==0.7.5 - git+https://github.com/openai/CLIP.git@main#egg=clip - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion diff --git a/update.sh b/update.sh index e969826359..d634324760 100755 --- a/update.sh +++ b/update.sh @@ -22,3 +22,5 @@ case "${OS_NAME}" in esac python scripts/preload_models.py + +