Merge branch 'main' into req-xformers

Authored by Lincoln Stein on 2023-02-01 10:29:49 -05:00; committed by GitHub
commit 3e98b50b62
7 changed files with 24 additions and 1 deletion

View File

@@ -21,6 +21,7 @@ env:
 jobs:
   docker:
+    if: github.event.pull_request.draft == false
     strategy:
       fail-fast: false
       matrix:

View File

@@ -8,6 +8,7 @@ on:
 jobs:
   docker:
+    if: github.event.pull_request.draft == false
     strategy:
       fail-fast: false
       matrix:

View File

@@ -14,6 +14,7 @@ defaults:
 jobs:
   lint-frontend:
+    if: github.event.pull_request.draft == false
     runs-on: ubuntu-22.04
     steps:
       - name: Setup Node 18

View File

@@ -7,6 +7,7 @@ on:
 jobs:
   mkdocs-material:
+    if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
     steps:
       - name: checkout sources

View File

@@ -9,6 +9,7 @@ on:
 jobs:
   pyflakes:
     name: runner / pyflakes
+    if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
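
Each of the five workflow hunks above adds the same one-line guard, so jobs are skipped while a pull request is still a draft. A minimal standalone workflow illustrating where the guard sits and what it does; the workflow name, trigger types, and steps here are assumptions for illustration, not part of this commit:

    # hypothetical workflow for illustration only
    name: example-ci

    on:
      pull_request:
        types: [opened, synchronize, reopened, ready_for_review]

    jobs:
      build:
        # the guard added to each workflow in this commit: skip the job while the PR is a draft
        if: github.event.pull_request.draft == false
        runs-on: ubuntu-latest
        steps:
          - uses: actions/checkout@v2
          - run: echo "runs only for non-draft pull requests"

Listing ready_for_review among the pull_request trigger types is the usual companion to this guard, so jobs that were skipped while the PR was a draft run once it is marked ready for review.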

View File

@@ -20,6 +20,7 @@ import torch
 import transformers
 from PIL import Image, ImageOps
 from diffusers.pipeline_utils import DiffusionPipeline
+from diffusers.utils.import_utils import is_xformers_available
 from omegaconf import OmegaConf
 from pytorch_lightning import seed_everything, logging
@@ -203,6 +204,14 @@ class Generate:
             self.precision = choose_precision(self.device)
         Globals.full_precision = self.precision=='float32'
+        if is_xformers_available():
+            if not Globals.disable_xformers:
+                print('>> xformers memory-efficient attention is available and enabled')
+            else:
+                print('>> xformers memory-efficient attention is available but disabled')
+        else:
+            print('>> xformers not installed')
         # model caching system for fast switching
         self.model_manager = ModelManager(mconfig,self.device,self.precision,max_loaded_models=max_loaded_models)
         # don't accept invalid models
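
The block above only reports whether xformers is importable and whether it has been disabled via Globals.disable_xformers; switching the attention implementation happens elsewhere in the pipeline setup. A minimal sketch of how the same availability check is typically paired with diffusers' memory-efficient attention toggle; the model id, device, and placement of the enable call are assumptions for illustration, not code from this commit:

    import torch
    from diffusers import StableDiffusionPipeline
    from diffusers.utils.import_utils import is_xformers_available

    # Hypothetical standalone example; InvokeAI wires this up through its own
    # pipeline classes rather than a bare StableDiffusionPipeline.
    pipe = StableDiffusionPipeline.from_pretrained(
        'runwayml/stable-diffusion-v1-5',  # illustrative model id
        torch_dtype=torch.float16,
    ).to('cuda')

    if is_xformers_available():
        # swap standard attention for xformers' memory-efficient kernels
        pipe.enable_xformers_memory_efficient_attention()
    else:
        print('>> xformers not installed; using default attention')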

View File

@@ -597,11 +597,20 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         output = InvokeAIStableDiffusionPipelineOutput(images=image, nsfw_content_detected=[], attention_map_saver=result_attention_maps)
         return self.check_for_safety(output, dtype=conditioning_data.dtype)
-    def non_noised_latents_from_image(self, init_image, *, device, dtype):
+    def non_noised_latents_from_image(self, init_image, *, device: torch.device, dtype):
         init_image = init_image.to(device=device, dtype=dtype)
         with torch.inference_mode():
+            if device.type == 'mps':
+                # workaround for torch MPS bug that has been fixed in https://github.com/kulinseth/pytorch/pull/222
+                # TODO remove this workaround once kulinseth#222 is merged to pytorch mainline
+                self.vae.to('cpu')
+                init_image = init_image.to('cpu')
             init_latent_dist = self.vae.encode(init_image).latent_dist
             init_latents = init_latent_dist.sample().to(dtype=dtype) # FIXME: uses torch.randn. make reproducible!
+            if device.type == 'mps':
+                self.vae.to(device)
+                init_latents = init_latents.to(device)
             init_latents = 0.18215 * init_latents
             return init_latents
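
The MPS branches above work around a PyTorch bug by running the VAE encode on the CPU and moving the result back to the original device afterwards. A condensed sketch of that round-trip pattern in isolation; the encode_image helper and the bare AutoencoderKL usage are illustrative assumptions, not part of this commit:

    import torch
    from diffusers import AutoencoderKL

    def encode_image(vae: AutoencoderKL, image: torch.Tensor, device: torch.device, dtype: torch.dtype) -> torch.Tensor:
        """Encode an image tensor to scaled VAE latents, detouring through the CPU on MPS."""
        image = image.to(device=device, dtype=dtype)
        with torch.inference_mode():
            if device.type == 'mps':
                # same workaround as the diff above: run the encode on CPU until the upstream MPS fix lands
                vae.to('cpu')
                image = image.to('cpu')
            latents = vae.encode(image).latent_dist.sample().to(dtype=dtype)
            if device.type == 'mps':
                vae.to(device)
                latents = latents.to(device)
        # 0.18215 is the Stable Diffusion VAE scaling factor, as in the diff above
        return 0.18215 * latents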