Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00

Merge branch 'main' into req-xformers

Commit 3e98b50b62
.github/workflows/build-cloud-img.yml (vendored, 1 addition)

@@ -21,6 +21,7 @@ env:
 
 jobs:
   docker:
+    if: github.event.pull_request.draft == false
     strategy:
       fail-fast: false
       matrix:

.github/workflows/build-container.yml (vendored, 1 addition)

@@ -8,6 +8,7 @@ on:
 
 jobs:
   docker:
+    if: github.event.pull_request.draft == false
     strategy:
       fail-fast: false
       matrix:

.github/workflows/lint-frontend.yml (vendored, 1 addition)

@@ -14,6 +14,7 @@ defaults:
 
 jobs:
   lint-frontend:
+    if: github.event.pull_request.draft == false
    runs-on: ubuntu-22.04
     steps:
       - name: Setup Node 18

.github/workflows/mkdocs-material.yml (vendored, 1 addition)

@@ -7,6 +7,7 @@ on:
 
 jobs:
   mkdocs-material:
+    if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
     steps:
       - name: checkout sources

.github/workflows/pyflakes.yml (vendored, 1 addition)

@@ -9,6 +9,7 @@ on:
 jobs:
   pyflakes:
     name: runner / pyflakes
+    if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
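
All five workflow edits add the same one-line guard. `github.event.pull_request.draft` is populated by GitHub Actions on pull-request events, so `if: github.event.pull_request.draft == false` makes each job skip while the PR is still a draft and run only once it is marked ready for review.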

@@ -20,6 +20,7 @@ import torch
 import transformers
 from PIL import Image, ImageOps
 from diffusers.pipeline_utils import DiffusionPipeline
+from diffusers.utils.import_utils import is_xformers_available
 from omegaconf import OmegaConf
 from pytorch_lightning import seed_everything, logging
 
@@ -203,6 +204,14 @@ class Generate:
         self.precision = choose_precision(self.device)
         Globals.full_precision = self.precision=='float32'
 
+        if is_xformers_available():
+            if not Globals.disable_xformers:
+                print('>> xformers memory-efficient attention is available and enabled')
+            else:
+                print('>> xformers memory-efficient attention is available but disabled')
+        else:
+            print('>> xformers not installed')
+
         # model caching system for fast switching
         self.model_manager = ModelManager(mconfig,self.device,self.precision,max_loaded_models=max_loaded_models)
         # don't accept invalid models
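
A minimal standalone sketch of the availability check added above: `is_xformers_available()` is the real diffusers helper imported in the first hunk, while `disable_xformers` here is a hypothetical stand-in for InvokeAI's `Globals.disable_xformers` flag.

from diffusers.utils.import_utils import is_xformers_available

disable_xformers = False  # hypothetical stand-in for Globals.disable_xformers

if is_xformers_available():
    if not disable_xformers:
        print('>> xformers memory-efficient attention is available and enabled')
        # a diffusers pipeline would typically switch the optimization on via
        # pipe.enable_xformers_memory_efficient_attention()
    else:
        print('>> xformers memory-efficient attention is available but disabled')
else:
    print('>> xformers not installed')

Note the check itself only reports status; actually enabling the optimization on a loaded pipeline is a separate call, presumably handled elsewhere in this PR.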

@@ -597,11 +597,20 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         output = InvokeAIStableDiffusionPipelineOutput(images=image, nsfw_content_detected=[], attention_map_saver=result_attention_maps)
         return self.check_for_safety(output, dtype=conditioning_data.dtype)
 
-    def non_noised_latents_from_image(self, init_image, *, device, dtype):
+    def non_noised_latents_from_image(self, init_image, *, device: torch.device, dtype):
         init_image = init_image.to(device=device, dtype=dtype)
         with torch.inference_mode():
+            if device.type == 'mps':
+                # workaround for torch MPS bug that has been fixed in https://github.com/kulinseth/pytorch/pull/222
+                # TODO remove this workaround once kulinseth#222 is merged to pytorch mainline
+                self.vae.to('cpu')
+                init_image = init_image.to('cpu')
             init_latent_dist = self.vae.encode(init_image).latent_dist
             init_latents = init_latent_dist.sample().to(dtype=dtype) # FIXME: uses torch.randn. make reproducible!
+            if device.type == 'mps':
+                self.vae.to(device)
+                init_latents = init_latents.to(device)
+
         init_latents = 0.18215 * init_latents
         return init_latents
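
The MPS hunk above follows a general pattern: run an operation the MPS backend mishandles on CPU, then move the module and its output back to the accelerator. A self-contained sketch under stated assumptions: `vae` is any AutoencoderKL-style module whose `encode()` returns an object with a `latent_dist`, and 0.18215 is the Stable Diffusion v1 VAE scaling factor seen in the hunk.

import torch

def encode_to_latents(vae, image: torch.Tensor, device: torch.device, dtype=torch.float32):
    # Mirror of the workaround above: encode on CPU when targeting MPS
    # (torch MPS bug, fixed upstream in kulinseth/pytorch#222), then
    # move both the VAE and the sampled latents back to the device.
    image = image.to(device=device, dtype=dtype)
    if device.type == 'mps':
        vae.to('cpu')
        image = image.to('cpu')
    with torch.inference_mode():
        latents = vae.encode(image).latent_dist.sample().to(dtype=dtype)
    if device.type == 'mps':
        vae.to(device)
        latents = latents.to(device)
    return 0.18215 * latents  # SD v1 VAE scaling factor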