Merge branch 'main' into dev/diffusers

Kevin Turner 2022-12-07 11:18:44 -08:00 committed by GitHub
commit 0390e6740d
3 changed files with 52 additions and 20 deletions


@@ -4,9 +4,12 @@ on:
push:
branches:
- main
- development
tags:
- v*
# we will NOT push the image on pull requests, only test buildability.
pull_request:
branches:
- main
permissions:
contents: read
@@ -21,36 +24,56 @@ jobs:
strategy:
fail-fast: false
matrix:
# only x86_64 for now. aarch64+cuda isn't really a thing yet
arch:
- x86_64
# requires resolving a patchmatch issue
# - aarch64
runs-on: ubuntu-latest
name: ${{ matrix.arch }}
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
if: matrix.arch == 'aarch64'
- name: Docker meta
id: meta
uses: docker/metadata-action@v4
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
# see https://github.com/docker/metadata-action
# will push the following tags:
# :edge
# :main (+ any other branches enabled in the workflow)
# :<tag>
# :1.2.3 (for semver tags)
# :1.2 (for semver tags)
# :<sha>
tags: |
type=edge,branch=main
type=ref,event=branch
type=ref,event=tag
type=ref,event=pr
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=sha
# suffix image tags with architecture
flavor: |
latest=auto
suffix=-${{ matrix.arch }},latest=true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
# - if: github.event_name != 'pull_request'
# name: Docker login
# uses: docker/login-action@v2
# with:
# registry: ghcr.io
# username: ${{ github.actor }}
# password: ${{ secrets.GITHUB_TOKEN }}
# do not login to container registry on PRs
- if: github.event_name != 'pull_request'
name: Docker login
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push cloud image
uses: docker/build-push-action@v3
@@ -58,7 +81,7 @@ jobs:
context: .
file: docker-build/Dockerfile.cloud
platforms: linux/${{ matrix.arch }}
# push: ${{ github.event_name != 'pull_request' }}
push: false
# do not push the image on PRs
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
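To make the tag comments in the metadata step above concrete, here is a small illustrative Python sketch (not part of this commit) that approximates the image tags the docker/metadata-action configuration would generate for a push to main, with the per-architecture suffix applied. The registry, image name, and commit SHA below are assumed placeholders.

```python
# Illustrative only: approximates the tag names produced by the
# docker/metadata-action config above for a branch push event.
registry = "ghcr.io"
image_name = "invoke-ai/invokeai"   # assumed value of env.IMAGE_NAME
arch = "x86_64"
short_sha = "0390e67"               # placeholder commit SHA

def tags_for_branch_push(branch: str) -> list[str]:
    """Approximate tags for a branch push (semver tags only apply to v* tag pushes)."""
    base = f"{registry}/{image_name}"
    suffix = f"-{arch}"
    names = []
    if branch == "main":
        names.append(f"edge{suffix}")             # type=edge,branch=main
    names.append(f"{branch}{suffix}")             # type=ref,event=branch
    names.append(f"sha-{short_sha}{suffix}")      # type=sha
    return [f"{base}:{name}" for name in names]

print("\n".join(tags_for_branch_push("main")))
# ghcr.io/invoke-ai/invokeai:edge-x86_64
# ghcr.io/invoke-ai/invokeai:main-x86_64
# ghcr.io/invoke-ai/invokeai:sha-0390e67-x86_64
```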


@@ -2,12 +2,10 @@
title: Running InvokeAI on Google Colab using a Jupyter Notebook
---
# THIS DOCUMENTATION IS UNFINISHED - VOLUNTEERS GRATEFULLY ACCEPTED
## Introduction
We have a [Jupyter
notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb)
notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
with cell-by-cell installation steps. It will download the code in
this repo as one of the steps, so instead of cloning this repo, simply
download the notebook from the link above and load it up in VSCode
@@ -16,10 +14,19 @@ start running the cells one-by-one.
!!! Note "you will need NVIDIA drivers, Python 3.10, and Git installed beforehand"
## Walkthrough
## Running Online On Google Colaboratory
[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
## Updating to newer versions
## Running Locally (Cloning)
### Updating the stable version
1. Install the Jupyter Notebook python library (one-time):
pip install jupyter
## Troubleshooting
2. Clone the InvokeAI repository:
git clone https://github.com/invoke-ai/InvokeAI.git
cd InvokeAI
3. Create a virtual environment using conda:
conda create -n invoke jupyter
4. Activate the environment and start the Jupyter notebook:
conda activate invoke
jupyter notebook


@@ -266,7 +266,9 @@ class ModelCache(object):
model_hash = self._cached_sha256(weights, weight_bytes)
sd = torch.load(io.BytesIO(weight_bytes), map_location='cpu')
del weight_bytes
sd = sd['state_dict']
# merged models from auto11 merge board are flat for some reason
if 'state_dict' in sd:
    sd = sd['state_dict']
model = instantiate_from_config(omega_config.model)
model.load_state_dict(sd, strict=False)
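For reference, a minimal standalone sketch (not the actual ModelCache code) of the pattern this last hunk introduces: some merged checkpoints store their weights at the top level rather than under a 'state_dict' wrapper, so the wrapper key is only unwrapped when it is actually present.

```python
import io
import torch

def load_state_dict(weight_bytes: bytes) -> dict:
    # Load the checkpoint from an in-memory buffer onto the CPU.
    sd = torch.load(io.BytesIO(weight_bytes), map_location='cpu')
    # Merged checkpoints (e.g. from the AUTOMATIC1111 merge tab) can be flat;
    # only descend into 'state_dict' if the wrapper key exists.
    if 'state_dict' in sd:
        sd = sd['state_dict']
    return sd
```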