🚧 post-rebase repair
commit 494936a8d2 (parent adaa1c7c3e)

.github/workflows/test-invoke-conda.yml (vendored): 4 changed lines
@@ -86,14 +86,14 @@ jobs:
         if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
         run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV

-      - name: run preload_models.py
+      - name: run configure_invokeai.py
         id: run-preload-models
         run: |
           if [ "${HAVE_SECRETS}" == true ] ; then
             mkdir -p ~/.huggingface
             echo -n '${{ secrets.HUGGINGFACE_TOKEN }}' > ~/.huggingface/token
           fi
-          python scripts/preload_models.py \
+          python scripts/configure_invokeai.py \
             --no-interactive --yes \
             --full-precision # can't use fp16 weights without a GPU

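This hunk tracks the rename of scripts/preload_models.py to scripts/configure_invokeai.py. For reference, a minimal sketch of the equivalent invocation from Python, with the script path and flags taken verbatim from the hunk above (running it assumes an InvokeAI source checkout as the working directory):

    import subprocess
    import sys

    # Same call the CI step makes: configure non-interactively with
    # 32-bit weights, since the CI runner has no GPU for fp16.
    subprocess.run(
        [
            sys.executable, 'scripts/configure_invokeai.py',
            '--no-interactive', '--yes',
            '--full-precision',
        ],
        check=True,
    )
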
@@ -1,33 +1,31 @@
-import eventlet
+import base64
 import glob
+import io
+import json
+import math
+import mimetypes
 import os
 import shutil
-import mimetypes
 import traceback
-import math
-import io
-import base64
-import os
-import json
+from threading import Event
+from uuid import uuid4

-from werkzeug.utils import secure_filename
+import eventlet
+from PIL import Image
+from PIL.Image import Image as ImageType
 from flask import Flask, redirect, send_from_directory, request, make_response
 from flask_socketio import SocketIO
-from PIL import Image, ImageOps
-from PIL.Image import Image as ImageType
-from uuid import uuid4
-from threading import Event
+from werkzeug.utils import secure_filename

-from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
-from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState
-from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
-from ldm.invoke.prompt_parser import split_weighted_subprompts
-from ldm.invoke.generator.inpaint import infill_methods
-
-from backend.modules.parameters import parameters_to_command
 from backend.modules.get_canvas_generation_mode import (
     get_canvas_generation_mode,
 )
+from backend.modules.parameters import parameters_to_command
+from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
+from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState
+from ldm.invoke.generator.inpaint import infill_methods
+from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
+from ldm.invoke.prompt_parser import split_weighted_subprompts

 # Loading Arguments
 opt = Args()

@@ -40,15 +40,6 @@ dependencies:
     - torch-fidelity==0.3.0
     - torchmetrics==0.7.0
     - transformers==4.21.3
-    - diffusers~=0.7
-    - torchmetrics==0.7.0
-    - flask==2.1.3
-    - flask_socketio==5.3.0
-    - flask_cors==3.0.10
-    - dependency_injector==4.40.0
-    - eventlet
-    - getpass_asterisk
-    - kornia==0.6.0
     - git+https://github.com/openai/CLIP.git@main#egg=clip
     - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
     - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg

@@ -9,9 +9,10 @@ import os.path as osp
 import random
 import traceback

+import cv2
 import numpy as np
 import torch
-from PIL import Image, ImageFilter
+from PIL import Image, ImageFilter, ImageChops
 from diffusers import DiffusionPipeline
 from einops import rearrange
 from pytorch_lightning import seed_everything

@@ -169,7 +170,7 @@ class Generator:
         # Blur the mask out (into init image) by specified amount
         if mask_blur_radius > 0:
             nm = np.asarray(pil_init_mask, dtype=np.uint8)
-            nmd = cv.erode(nm, kernel=np.ones((3,3), dtype=np.uint8), iterations=int(mask_blur_radius / 2))
+            nmd = cv2.erode(nm, kernel=np.ones((3,3), dtype=np.uint8), iterations=int(mask_blur_radius / 2))
             pmd = Image.fromarray(nmd, mode='L')
             blurred_init_mask = pmd.filter(ImageFilter.BoxBlur(mask_blur_radius))
         else:

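This hunk fixes a NameError: the module is imported as cv2 (added in the import hunk above), but the call used cv. A self-contained sketch of the erode-then-blur technique this line implements, with a synthetic mask standing in for pil_init_mask (the mask size and blur radius here are illustrative):

    import cv2
    import numpy as np
    from PIL import Image, ImageFilter

    mask_blur_radius = 8

    # Synthetic stand-in for pil_init_mask: a white square on black.
    nm = np.zeros((64, 64), dtype=np.uint8)
    nm[16:48, 16:48] = 255

    # Erode first so the subsequent blur bleeds inward (into the
    # init-image region) rather than outward past the mask edge.
    nmd = cv2.erode(nm, kernel=np.ones((3, 3), dtype=np.uint8),
                    iterations=int(mask_blur_radius / 2))
    pmd = Image.fromarray(nmd, mode='L')
    blurred_init_mask = pmd.filter(ImageFilter.BoxBlur(mask_blur_radius))
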
@@ -181,8 +182,6 @@ class Generator:
         matched_result.paste(init_image, (0,0), mask = multiplied_blurred_init_mask)
         return matched_result

-
-
     def sample_to_lowres_estimated_image(self,samples):
         # origingally adapted from code by @erucipe and @keturn here:
         # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7

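For context, sample_to_lowres_estimated_image approximates an RGB preview directly from the 4-channel latents with a fixed linear map, skipping the expensive VAE decode. A hedged sketch of the idea; the coefficient matrix is the commonly cited SD v1 approximation from the thread linked above, and the output scaling is simplified, so treat both as illustrative rather than this repo's exact code:

    import torch

    def latents_to_rgb_estimate(samples: torch.Tensor) -> torch.Tensor:
        # samples: (4, H/8, W/8) latents; returns (3, H/8, W/8) in [0, 1].
        latent_rgb_factors = torch.tensor([
            #    R       G       B
            [ 0.298,  0.207,  0.208],  # latent channel 0
            [ 0.187,  0.286,  0.173],  # latent channel 1
            [-0.158,  0.189,  0.264],  # latent channel 2
            [-0.184, -0.271, -0.473],  # latent channel 3
        ], dtype=samples.dtype, device=samples.device)
        rgb = torch.einsum('chw,cr->rhw', samples, latent_rgb_factors)
        return ((rgb + 1.0) / 2.0).clamp(0.0, 1.0)
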
@@ -21,9 +21,6 @@ from typing import Union

 import torch
 import transformers
-import textwrap
-import contextlib
-from typing import Union
 from omegaconf import OmegaConf
 from omegaconf.errors import ConfigAttributeError
 from picklescan.scanner import scan_file_path

@@ -99,7 +96,7 @@ class ModelCache(object):
             assert self.current_model,'** FATAL: no current model to restore to'
             print(f'** restoring {self.current_model}')
             self.get_model(self.current_model)
-            return None
+            return

         self.current_model = model_name
         self._push_newest_model(model_name)

@@ -219,7 +216,7 @@ class ModelCache(object):
         if model_format == 'ckpt':
             weights = mconfig.weights
             print(f'>> Loading {model_name} from {weights}')
-            model, width, height, model_hash = self._load_ckpt_model(mconfig)
+            model, width, height, model_hash = self._load_ckpt_model(model_name, mconfig)
         elif model_format == 'diffusers':
             model, width, height, model_hash = self._load_diffusers_model(mconfig)
         else:

@@ -237,10 +234,10 @@ class ModelCache(object):
         )
         return model, width, height, model_hash

-    def _load_ckpt_model(self, mconfig):
+    def _load_ckpt_model(self, model_name, mconfig):
         config = mconfig.config
         weights = mconfig.weights
-        vae = mconfig.get('vae', None)
+        vae = mconfig.get('vae')
         width = mconfig.width
         height = mconfig.height

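Besides threading model_name through to _load_ckpt_model, this hunk simplifies mconfig.get('vae', None) to mconfig.get('vae'): OmegaConf's .get() already returns None for a missing key. A minimal check, with placeholder values for the stanza keys the method reads:

    from omegaconf import OmegaConf

    # Placeholder model stanza with the keys _load_ckpt_model reads.
    mconfig = OmegaConf.create({
        'config': 'configs/stable-diffusion/v1-inference.yaml',  # placeholder
        'weights': 'models/ldm/stable-diffusion-v1/model.ckpt',  # placeholder
        'width': 512,
        'height': 512,
    })
    assert mconfig.get('vae') is None  # .get() defaults to None
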
@@ -249,9 +246,21 @@ class ModelCache(object):
         if not os.path.isabs(weights):
             weights = os.path.normpath(os.path.join(Globals.root,weights))
         # scan model
-        self._scan_model(model_name, weights)
+        self.scan_model(model_name, weights)

-        c = OmegaConf.load(config)
+        print(f'>> Loading {model_name} from {weights}')
+
+        # for usage statistics
+        if self._has_cuda():
+            torch.cuda.reset_peak_memory_stats()
+            torch.cuda.empty_cache()
+
+        tic = time.time()
+
+        # this does the work
+        if not os.path.isabs(config):
+            config = os.path.join(Globals.root,config)
+        omega_config = OmegaConf.load(config)
         with open(weights,'rb') as f:
             weight_bytes = f.read()
         model_hash = self._cached_sha256(weights, weight_bytes)

@@ -289,6 +298,18 @@ class ModelCache(object):
             if isinstance(module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
                 module._orig_padding_mode = module.padding_mode

+        # usage statistics
+        toc = time.time()
+        print(f'>> Model loaded in', '%4.2fs' % (toc - tic))
+
+        if self._has_cuda():
+            print(
+                '>> Max VRAM used to load the model:',
+                '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9),
+                '\n>> Current VRAM usage:'
+                '%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
+            )
+
         return model, width, height, model_hash

     def _load_diffusers_model(self, mconfig):

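Together with the tic = time.time() setup in the previous hunk, this is a standard CUDA bookkeeping pattern: reset the peak counter and empty the cache before the load so the numbers reflect only this model, then report elapsed time and VRAM afterwards. A standalone sketch of the same pattern around a hypothetical load_fn:

    import time
    import torch

    def timed_load(load_fn):
        # Reset peak stats so max_memory_allocated() covers only this load.
        if torch.cuda.is_available():
            torch.cuda.reset_peak_memory_stats()
            torch.cuda.empty_cache()
        tic = time.time()

        model = load_fn()  # hypothetical loader standing in for the ckpt load

        toc = time.time()
        print('>> Model loaded in', '%4.2fs' % (toc - tic))
        if torch.cuda.is_available():
            print('>> Max VRAM used to load the model:',
                  '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9))
            print('>> Current VRAM usage:',
                  '%4.2fG' % (torch.cuda.memory_allocated() / 1e9))
        return model
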
@@ -308,6 +329,8 @@ class ModelCache(object):

         print(f'>> Loading diffusers model from {name_or_path}')

+        # TODO: scan weights maybe?
+
         if self.precision == 'float16':
             print(' | Using faster float16 precision')
             pipeline_args.update(revision="fp16", torch_dtype=torch.float16)

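The float16 branch relies on the diffusers convention of that era: half-precision weights are published under an fp16 revision of the Hub repo. A hedged sketch of the from_pretrained call this feeds into; the repo id is illustrative rather than from the diff, and newer diffusers releases use variant='fp16' instead of a revision:

    import torch
    from diffusers import DiffusionPipeline

    pipeline_args = {}
    precision = 'float16'
    if precision == 'float16':
        # fp16 weights live under the 'fp16' branch of the model repo.
        pipeline_args.update(revision='fp16', torch_dtype=torch.float16)

    # Illustrative repo id; the real name_or_path comes from the model config.
    pipe = DiffusionPipeline.from_pretrained(
        'runwayml/stable-diffusion-v1-5', **pipeline_args)
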
@@ -342,7 +365,7 @@ class ModelCache(object):
         else:
             raise ValueError("Model config must specify either repo_name or path.")

-    def offload_model(self, model_name:str):
+    def offload_model(self, model_name:str) -> None:
         '''
         Offload the indicated model to CPU. Will call
         _make_cache_room() to free space if needed.

|
@ -34,6 +34,12 @@ warnings.filterwarnings('ignore')
|
|||||||
import torch
|
import torch
|
||||||
transformers.logging.set_verbosity_error()
|
transformers.logging.set_verbosity_error()
|
||||||
|
|
||||||
|
try:
|
||||||
|
from ldm.invoke.model_cache import ModelCache
|
||||||
|
except ImportError:
|
||||||
|
sys.path.append('.')
|
||||||
|
from ldm.invoke.model_cache import ModelCache
|
||||||
|
|
||||||
#--------------------------globals-----------------------
|
#--------------------------globals-----------------------
|
||||||
Model_dir = 'models'
|
Model_dir = 'models'
|
||||||
Weights_dir = 'ldm/stable-diffusion-v1/'
|
Weights_dir = 'ldm/stable-diffusion-v1/'
|
||||||
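The new try/except lets the script run both as an installed package and straight from a source checkout: on ImportError it appends the working directory to sys.path and retries. The pattern in isolation (it assumes the script is launched from the repo root):

    import sys

    try:
        from ldm.invoke.model_cache import ModelCache
    except ImportError:
        # Fallback for running from a source checkout: make the
        # current directory (assumed to be the repo root) importable.
        sys.path.append('.')
        from ldm.invoke.model_cache import ModelCache
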
@@ -267,6 +273,19 @@ def download_weight_datasets(models:dict, access_token:str):
     print(f'Successfully installed {keys}')
     return successful

+#---------------------------------------------
+def is_huggingface_authenticated():
+    # huggingface_hub 0.10 API isn't great for this, it could be OSError, ValueError,
+    # maybe other things, not all end-user-friendly.
+    # noinspection PyBroadException
+    try:
+        response = hf_whoami()
+        if response.get('id') is not None:
+            return True
+    except Exception:
+        pass
+    return False
+
 #---------------------------------------------
 def hf_download_with_resume(repo_id:str, model_dir:str, model_name:str, access_token:str=None)->bool:
     model_dest = os.path.join(model_dir, model_name)

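A hedged usage sketch of the new helper: hf_whoami is presumably this script's alias for huggingface_hub's whoami (the alias name is assumed here, and the call only succeeds when a token is cached locally):

    from huggingface_hub import whoami as hf_whoami

    def is_huggingface_authenticated() -> bool:
        # whoami() can raise OSError, ValueError, and more on a missing
        # or invalid token, hence the deliberately broad except.
        try:
            response = hf_whoami()
            if response.get('id') is not None:
                return True
        except Exception:
            pass
        return False

    if is_huggingface_authenticated():
        print('HuggingFace token found; gated model downloads should work')
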
@@ -749,6 +768,12 @@ def main():
                         action=argparse.BooleanOptionalAction,
                         default=True,
                         help='run in interactive mode (default)')
+    parser.add_argument('--full-precision',
+                        dest='full_precision',
+                        action=argparse.BooleanOptionalAction,
+                        type=bool,
+                        default=False,
+                        help='use 32-bit weights instead of faster 16-bit weights')
     parser.add_argument('--yes','-y',
                         dest='yes_to_all',
                         action='store_true',

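argparse.BooleanOptionalAction (Python 3.9+) generates a paired negative flag automatically, so the new option accepts both --full-precision and --no-full-precision. A standalone sketch:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--full-precision',
                        dest='full_precision',
                        action=argparse.BooleanOptionalAction,
                        type=bool,
                        default=False,
                        help='use 32-bit weights instead of faster 16-bit weights')

    assert parser.parse_args(['--full-precision']).full_precision is True
    assert parser.parse_args(['--no-full-precision']).full_precision is False
    assert parser.parse_args([]).full_precision is False  # default
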