🚧 post-rebase repair

parent adaa1c7c3e
commit 494936a8d2
.github/workflows/test-invoke-conda.yml (vendored, 4 changes)
@@ -86,14 +86,14 @@ jobs:
         if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
         run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV
 
-      - name: run preload_models.py
+      - name: run configure_invokeai.py
         id: run-preload-models
         run: |
           if [ "${HAVE_SECRETS}" == true ] ; then
             mkdir -p ~/.huggingface
             echo -n '${{ secrets.HUGGINGFACE_TOKEN }}' > ~/.huggingface/token
           fi
-          python scripts/preload_models.py \
+          python scripts/configure_invokeai.py \
            --no-interactive --yes \
            --full-precision # can't use fp16 weights without a GPU
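Note: the only changes in this hunk are the step name and the script path, following the rename of preload_models.py to configure_invokeai.py. As a local illustration of what the step does (a sketch, not part of the commit; the HUGGINGFACE_TOKEN environment variable stands in for the repository secret):

    import os
    import subprocess

    token = os.environ.get("HUGGINGFACE_TOKEN")
    if token:
        hf_dir = os.path.expanduser("~/.huggingface")
        os.makedirs(hf_dir, exist_ok=True)
        # the workflow uses `echo -n`, i.e. no trailing newline
        with open(os.path.join(hf_dir, "token"), "w") as f:
            f.write(token)

    subprocess.run(
        ["python", "scripts/configure_invokeai.py",
         "--no-interactive", "--yes",
         "--full-precision"],  # fp16 weights need a GPU, so CI stays at fp32
        check=True,
    )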
backend/invoke_ai_web_server.py

@@ -1,33 +1,31 @@
-import eventlet
+import base64
 import glob
+import io
+import json
+import math
+import mimetypes
+import os
 import shutil
-import mimetypes
 import traceback
-import math
-import io
-import base64
-import os
-import json
 from threading import Event
 from uuid import uuid4
 
-from werkzeug.utils import secure_filename
+import eventlet
+from PIL import Image
+from PIL.Image import Image as ImageType
 from flask import Flask, redirect, send_from_directory, request, make_response
 from flask_socketio import SocketIO
-from PIL import Image, ImageOps
-from PIL.Image import Image as ImageType
-from uuid import uuid4
-from threading import Event
-from werkzeug.utils import secure_filename
+from werkzeug.utils import secure_filename
 
-from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
-from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState
-from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
-from ldm.invoke.prompt_parser import split_weighted_subprompts
-from ldm.invoke.generator.inpaint import infill_methods
-
-from backend.modules.parameters import parameters_to_command
 from backend.modules.get_canvas_generation_mode import (
     get_canvas_generation_mode,
 )
+from backend.modules.parameters import parameters_to_command
+from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
+from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState
+from ldm.invoke.generator.inpaint import infill_methods
+from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
+from ldm.invoke.prompt_parser import split_weighted_subprompts
 
 # Loading Arguments
 opt = Args()
@@ -40,15 +40,6 @@ dependencies:
     - torch-fidelity==0.3.0
-    - torchmetrics==0.7.0
     - transformers==4.21.3
+    - diffusers~=0.7
     - torchmetrics==0.7.0
-    - flask==2.1.3
-    - flask_socketio==5.3.0
-    - flask_cors==3.0.10
-    - dependency_injector==4.40.0
-    - eventlet
-    - getpass_asterisk
-    - kornia==0.6.0
-    - git+https://github.com/openai/CLIP.git@main#egg=clip
-    - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
-    - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
ldm/invoke/generator/base.py

@@ -9,9 +9,10 @@ import os.path as osp
 import random
 import traceback
 
 import cv2
 import numpy as np
 import torch
-from PIL import Image, ImageFilter
+from PIL import Image, ImageFilter, ImageChops
+from diffusers import DiffusionPipeline
 from einops import rearrange
 from pytorch_lightning import seed_everything

@@ -169,7 +170,7 @@ class Generator:
         # Blur the mask out (into init image) by specified amount
         if mask_blur_radius > 0:
             nm = np.asarray(pil_init_mask, dtype=np.uint8)
-            nmd = cv.erode(nm, kernel=np.ones((3,3), dtype=np.uint8), iterations=int(mask_blur_radius / 2))
+            nmd = cv2.erode(nm, kernel=np.ones((3,3), dtype=np.uint8), iterations=int(mask_blur_radius / 2))
             pmd = Image.fromarray(nmd, mode='L')
             blurred_init_mask = pmd.filter(ImageFilter.BoxBlur(mask_blur_radius))
         else:

@@ -181,8 +182,6 @@ class Generator:
         matched_result.paste(init_image, (0,0), mask = multiplied_blurred_init_mask)
         return matched_result
 
-
-
     def sample_to_lowres_estimated_image(self,samples):
         # origingally adapted from code by @erucipe and @keturn here:
         # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7
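Note: the middle hunk fixes a NameError: cv.erode referenced a module that was never imported, while the file imports cv2. The surrounding code feathers the inpainting seam by eroding the mask and then box-blurring it so the pasted init image blends into the result. A standalone sketch of that step (the 64x64 all-white mask and the radius are made-up stand-ins):

    import cv2
    import numpy as np
    from PIL import Image, ImageFilter

    pil_init_mask = Image.new("L", (64, 64), 255)  # stand-in init mask
    mask_blur_radius = 8

    nm = np.asarray(pil_init_mask, dtype=np.uint8)
    # erode first so the subsequent blur feathers inward from the mask edge
    nmd = cv2.erode(nm, kernel=np.ones((3, 3), dtype=np.uint8),
                    iterations=int(mask_blur_radius / 2))
    pmd = Image.fromarray(nmd, mode='L')
    blurred_init_mask = pmd.filter(ImageFilter.BoxBlur(mask_blur_radius))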
ldm/invoke/model_cache.py

@@ -21,9 +21,6 @@ from typing import Union
 
 import torch
 import transformers
-import textwrap
-import contextlib
-from typing import Union
 from omegaconf import OmegaConf
 from omegaconf.errors import ConfigAttributeError
 from picklescan.scanner import scan_file_path
@@ -99,7 +96,7 @@ class ModelCache(object):
             assert self.current_model,'** FATAL: no current model to restore to'
             print(f'** restoring {self.current_model}')
             self.get_model(self.current_model)
-            return None
+            return
 
         self.current_model = model_name
         self._push_newest_model(model_name)
@@ -219,7 +216,7 @@ class ModelCache(object):
         if model_format == 'ckpt':
             weights = mconfig.weights
             print(f'>> Loading {model_name} from {weights}')
-            model, width, height, model_hash = self._load_ckpt_model(mconfig)
+            model, width, height, model_hash = self._load_ckpt_model(model_name, mconfig)
         elif model_format == 'diffusers':
             model, width, height, model_hash = self._load_diffusers_model(mconfig)
         else:
@@ -237,10 +234,10 @@ class ModelCache(object):
         )
         return model, width, height, model_hash
 
-    def _load_ckpt_model(self, mconfig):
+    def _load_ckpt_model(self, model_name, mconfig):
         config = mconfig.config
         weights = mconfig.weights
-        vae = mconfig.get('vae', None)
+        vae = mconfig.get('vae')
         width = mconfig.width
         height = mconfig.height
 
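Note: the vae = mconfig.get('vae') simplification is behavior-preserving, since omegaconf's .get() already defaults to None. A quick check (the config contents here are made up):

    from omegaconf import OmegaConf

    mconfig = OmegaConf.create({"config": "v1-inference.yaml", "weights": "model.ckpt"})
    assert mconfig.get("vae") is None
    assert mconfig.get("vae") == mconfig.get("vae", None)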
@@ -249,10 +246,22 @@ class ModelCache(object):
         if not os.path.isabs(weights):
             weights = os.path.normpath(os.path.join(Globals.root,weights))
         # scan model
-        self._scan_model(model_name, weights)
+        self.scan_model(model_name, weights)
 
-        c = OmegaConf.load(config)
-        with open(weights, 'rb') as f:
+        print(f'>> Loading {model_name} from {weights}')
+
+        # for usage statistics
+        if self._has_cuda():
+            torch.cuda.reset_peak_memory_stats()
+            torch.cuda.empty_cache()
+
+        tic = time.time()
+
+        # this does the work
+        if not os.path.isabs(config):
+            config = os.path.join(Globals.root,config)
+        omega_config = OmegaConf.load(config)
+        with open(weights,'rb') as f:
             weight_bytes = f.read()
         model_hash = self._cached_sha256(weights, weight_bytes)
         sd = torch.load(io.BytesIO(weight_bytes), map_location='cpu')
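Note: the rewritten load path reads the checkpoint once, hashes the bytes for the cache key, and deserializes from the same in-memory buffer so the file is not read twice. A minimal sketch of the pattern, with plain hashlib standing in for the cache-backed _cached_sha256 helper:

    import hashlib
    import io

    import torch

    def load_checkpoint_with_hash(weights_path: str):
        with open(weights_path, "rb") as f:
            weight_bytes = f.read()
        model_hash = hashlib.sha256(weight_bytes).hexdigest()  # stand-in for _cached_sha256
        sd = torch.load(io.BytesIO(weight_bytes), map_location="cpu")
        return sd, model_hash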
@@ -289,6 +298,18 @@ class ModelCache(object):
             if isinstance(module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
                 module._orig_padding_mode = module.padding_mode
 
+        # usage statistics
+        toc = time.time()
+        print(f'>> Model loaded in', '%4.2fs' % (toc - tic))
+
+        if self._has_cuda():
+            print(
+                '>> Max VRAM used to load the model:',
+                '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9),
+                '\n>> Current VRAM usage:'
+                '%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
+            )
+
         return model, width, height, model_hash
 
     def _load_diffusers_model(self, mconfig):
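Note: the usage-statistics block pairs torch.cuda.reset_peak_memory_stats() before the load with torch.cuda.max_memory_allocated() after it, so the report reflects only this load. The same torch.cuda calls, packaged as a reusable context manager (a sketch, not part of the commit):

    import time
    from contextlib import contextmanager

    import torch

    @contextmanager
    def load_stats(label="model"):
        if torch.cuda.is_available():
            torch.cuda.reset_peak_memory_stats()
            torch.cuda.empty_cache()
        tic = time.time()
        yield
        print(f'>> {label} loaded in', '%4.2fs' % (time.time() - tic))
        if torch.cuda.is_available():
            print('>> Max VRAM used to load the model:',
                  '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9))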
@@ -308,6 +329,8 @@ class ModelCache(object):
 
         print(f'>> Loading diffusers model from {name_or_path}')
 
+        # TODO: scan weights maybe?
+
         if self.precision == 'float16':
             print(' | Using faster float16 precision')
             pipeline_args.update(revision="fp16", torch_dtype=torch.float16)
@@ -342,7 +365,7 @@ class ModelCache(object):
         else:
             raise ValueError("Model config must specify either repo_name or path.")
 
-    def offload_model(self, model_name:str):
+    def offload_model(self, model_name:str) -> None:
         '''
         Offload the indicated model to CPU. Will call
         _make_cache_room() to free space if needed.
scripts/configure_invokeai.py

@@ -34,6 +34,12 @@ warnings.filterwarnings('ignore')
 import torch
 transformers.logging.set_verbosity_error()
 
+try:
+    from ldm.invoke.model_cache import ModelCache
+except ImportError:
+    sys.path.append('.')
+    from ldm.invoke.model_cache import ModelCache
+
 #--------------------------globals-----------------------
 Model_dir = 'models'
 Weights_dir = 'ldm/stable-diffusion-v1/'
@@ -267,6 +273,19 @@ def download_weight_datasets(models:dict, access_token:str):
     print(f'Successfully installed {keys}')
     return successful
 
+#---------------------------------------------
+def is_huggingface_authenticated():
+    # huggingface_hub 0.10 API isn't great for this, it could be OSError, ValueError,
+    # maybe other things, not all end-user-friendly.
+    # noinspection PyBroadException
+    try:
+        response = hf_whoami()
+        if response.get('id') is not None:
+            return True
+    except Exception:
+        pass
+    return False
+
 #---------------------------------------------
 def hf_download_with_resume(repo_id:str, model_dir:str, model_name:str, access_token:str=None)->bool:
     model_dest = os.path.join(model_dir, model_name)
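Note: is_huggingface_authenticated() deliberately catches everything, because (as its comment says) the huggingface_hub 0.10 API can raise OSError, ValueError, and other types when no token is present. Assuming hf_whoami is an alias for huggingface_hub.whoami, a hypothetical caller looks like:

    if is_huggingface_authenticated():
        print("Hugging Face token accepted; authenticated downloads will work.")
    else:
        print("No valid Hugging Face token found.")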
@@ -749,6 +768,12 @@ def main():
                         action=argparse.BooleanOptionalAction,
                         default=True,
                         help='run in interactive mode (default)')
+    parser.add_argument('--full-precision',
+                        dest='full_precision',
+                        action=argparse.BooleanOptionalAction,
+                        type=bool,
+                        default=False,
+                        help='use 32-bit weights instead of faster 16-bit weights')
     parser.add_argument('--yes','-y',
                         dest='yes_to_all',
                         action='store_true',
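Note: argparse.BooleanOptionalAction (Python 3.9+) generates a paired --no-full-precision switch automatically; the type=bool argument is redundant next to it but harmless, since the action takes no value to convert. A self-contained demo of the flag added above:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--full-precision',
                        dest='full_precision',
                        action=argparse.BooleanOptionalAction,
                        default=False,
                        help='use 32-bit weights instead of faster 16-bit weights')

    print(parser.parse_args([]))                       # Namespace(full_precision=False)
    print(parser.parse_args(['--full-precision']))     # Namespace(full_precision=True)
    print(parser.parse_args(['--no-full-precision']))  # Namespace(full_precision=False)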