#!/usr/bin/env python3
# Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
# Before running stable-diffusion on an internet-isolated machine,
# run this script from one with internet connectivity. The
# two machines must share a common .cache directory.
#
# Coauthor: Kevin Turner http://github.com/keturn
#
print('Loading Python libraries...\n')
import argparse
import sys
import os
import warnings
from urllib import request
from tqdm import tqdm
from omegaconf import OmegaConf
from huggingface_hub import HfFolder, hf_hub_url
from pathlib import Path
from getpass_asterisk import getpass_asterisk
import traceback
import requests
import clip
import transformers
import torch

transformers.logging.set_verbosity_error()

warnings.filterwarnings('ignore')
#warnings.simplefilter('ignore')
#warnings.filterwarnings('ignore',category=DeprecationWarning)
#warnings.filterwarnings('ignore',category=UserWarning)

#--------------------------globals--
Model_dir = './models/ldm/stable-diffusion-v1/'
Default_config_file = './configs/models.yaml'
SD_Configs = './configs/stable-diffusion'
Datasets = {
    'stable-diffusion-1.5': {
        'description': 'The newest Stable Diffusion version 1.5 weight file (4.27 GB)',
        'repo_id': 'runwayml/stable-diffusion-v1-5',
        'config': 'v1-inference.yaml',
        'file': 'v1-5-pruned-emaonly.ckpt',
        'recommended': True,
        'width': 512,
        'height': 512,
    },
    'inpainting-1.5': {
        'description': 'RunwayML SD 1.5 model optimized for inpainting (4.27 GB)',
        'repo_id': 'runwayml/stable-diffusion-inpainting',
        'config': 'v1-inpainting-inference.yaml',
        'file': 'sd-v1-5-inpainting.ckpt',
        'recommended': True,
        'width': 512,
        'height': 512,
    },
    'stable-diffusion-1.4': {
        'description': 'The original Stable Diffusion version 1.4 weight file (4.27 GB)',
        'repo_id': 'CompVis/stable-diffusion-v-1-4-original',
        'config': 'v1-inference.yaml',
        'file': 'sd-v1-4.ckpt',
        'recommended': False,
        'width': 512,
        'height': 512,
    },
    'waifu-diffusion-1.3': {
        'description': 'Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)',
        'repo_id': 'hakurei/waifu-diffusion-v1-3',
        'config': 'v1-inference.yaml',
        'file': 'model-epoch09-float32.ckpt',
        'recommended': False,
        'width': 512,
        'height': 512,
    },
    'ft-mse-improved-autoencoder-840000': {
        'description': 'StabilityAI improved autoencoder fine-tuned for human faces (recommended; 335 MB)',
        'repo_id': 'stabilityai/sd-vae-ft-mse-original',
        'config': 'VAE',
        'file': 'vae-ft-mse-840000-ema-pruned.ckpt',
        'recommended': True,
        'width': 512,
        'height': 512,
    },
}

Config_preamble = '''# This file describes the alternative machine learning models
# available to the InvokeAI script.
#
# To add a new model, follow the examples below. Each
# model requires a model config file, a weights file,
# and the width and height of the images it
# was trained on.
'''
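
# For orientation, a stanza in the generated models.yaml ends up looking roughly like the
# sketch below (an illustrative example, not output copied from a real run; see
# new_config_file_contents() for the exact keys written):
#
#   stable-diffusion-1.5:
#       description: The newest Stable Diffusion version 1.5 weight file (4.27 GB)
#       weights: ./models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
#       config: ./configs/stable-diffusion/v1-inference.yaml
#       width: 512
#       height: 512
#       vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
#       default: true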


#---------------------------------------------
def introduction():
    print(
        '''Welcome to InvokeAI. This script will help download the Stable Diffusion weight files
and other large models that are needed for text-to-image generation. At any point you may interrupt
this program and resume later.\n'''
    )


#--------------------------------------------
def postscript():
    print(
        '''\n** Model Installation Successful **\nYou're all set! You may now launch InvokeAI using one of these two commands:

Web version:

    python scripts/invoke.py --web  (connect to http://localhost:9090)

Command-line version:

    python scripts/invoke.py

Have fun!
'''
    )


#---------------------------------------------
def yes_or_no(prompt:str, default_yes=True):
    default = "y" if default_yes else 'n'
    response = input(f'{prompt} [{default}] ') or default
    if default_yes:
        return response[0] not in ('n','N')
    else:
        return response[0] in ('y','Y')
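
# Example: yes_or_no('Ok to download?') returns True for an empty reply or any reply not
# starting with n/N; with default_yes=False only replies starting with y/Y count as yes.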


#---------------------------------------------
def user_wants_to_download_weights()->str:
    '''
    Returns one of "skip", "recommended" or "customized"
    '''
    print('''You can download and configure the weights files manually or let this
script do it for you. Manual installation is described at:

https://github.com/invoke-ai/InvokeAI/blob/main/docs/installation/INSTALLING_MODELS.md

You may download the recommended models (about 10GB total), select a customized set, or
completely skip this step.
'''
    )
    selection = None
    while selection is None:
        choice = input('Download <r>ecommended models, <c>ustomize the list, or <s>kip this step? [r]: ')
        if choice.startswith(('r','R')) or len(choice)==0:
            selection = 'recommended'
        elif choice.startswith(('c','C')):
            selection = 'customized'
        elif choice.startswith(('s','S')):
            selection = 'skip'
    return selection


#---------------------------------------------
def select_datasets(action:str):
    done = False
    while not done:
        datasets = dict()
        dflt = None  # the first model selected will be the default; TODO let user change
        counter = 1

        if action == 'customized':
            print('''
Choose the weight file(s) you wish to download. Before downloading you
will be given the option to view and change your selections.
'''
            )
            for ds in Datasets.keys():
                recommended = '(recommended)' if Datasets[ds]['recommended'] else ''
                print(f'[{counter}] {ds}:\n    {Datasets[ds]["description"]} {recommended}')
                if yes_or_no('    Download?',default_yes=Datasets[ds]['recommended']):
                    datasets[ds]=counter
                    counter += 1
        else:
            for ds in Datasets.keys():
                if Datasets[ds]['recommended']:
                    datasets[ds]=counter
                    counter += 1

        print('The following weight files will be downloaded:')
        for ds in datasets:
            dflt = '*' if dflt is None else ''
            print(f'   [{datasets[ds]}] {ds}{dflt}')
        print("*default")
        ok_to_download = yes_or_no('Ok to download?')
        if not ok_to_download:
            if yes_or_no('Change your selection?'):
                action = 'customized'
                pass
            else:
                done = True
        else:
            done = True
    return datasets if ok_to_download else None
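
# select_datasets() returns a dict mapping each chosen model name to its menu number, or
# None if the user declined the final "Ok to download?" confirmation.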


#-------------------------------Authenticate against Hugging Face
def authenticate():
    print('''
To download the Stable Diffusion weight files from the official Hugging Face
repository, you need to read and accept the CreativeML Responsible AI license.

This involves a few easy steps.

1. If you have not already done so, create an account on Hugging Face's web site
   using the "Sign Up" button:

   https://huggingface.co/join

   You will need to verify your email address as part of the HuggingFace
   registration process.

2. Log into your Hugging Face account:

   https://huggingface.co/login

3. Accept the license terms located here:

   https://huggingface.co/runwayml/stable-diffusion-v1-5

   and here:

   https://huggingface.co/runwayml/stable-diffusion-inpainting

   (Yes, you have to accept two slightly different license agreements)
'''
    )
    input('Press <enter> when you are ready to continue:')
    print('Fetching Hugging Face token from cache...',end='')
    access_token = HfFolder.get_token()
    if access_token is not None:
        print('found')

    if access_token is None:
        print('not found')
        print('''
4. Thank you! The last step is to enter your HuggingFace access token so that
   this script is authorized to initiate the download. Go to the access tokens
   page of your Hugging Face account and create a token by clicking the
   "New token" button:

   https://huggingface.co/settings/tokens

   (You can enter anything you like in the token creation field marked "Name".
   "Role" should be "read").

   Now copy the token to your clipboard and paste it here: '''
        )
        access_token = getpass_asterisk.getpass_asterisk()
    return access_token
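
# Note: HfFolder.get_token() reads the token that `huggingface-cli login` (or a previous
# run of this script) cached locally, typically under ~/.huggingface/token.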


#---------------------------------------------
# look for legacy model.ckpt in models directory and offer to
# normalize its name
def migrate_models_ckpt():
    if not os.path.exists(os.path.join(Model_dir,'model.ckpt')):
        return
    new_name = Datasets['stable-diffusion-1.4']['file']
    print('You seem to have the Stable Diffusion v1.4 "model.ckpt" already installed.')
    rename = yes_or_no(f'Ok to rename it to "{new_name}" for future reference?')
    if rename:
        print(f'model.ckpt => {new_name}')
        os.rename(os.path.join(Model_dir,'model.ckpt'),os.path.join(Model_dir,new_name))


#---------------------------------------------
def download_weight_datasets(models:dict, access_token:str):
    migrate_models_ckpt()
    successful = dict()
    for mod in models.keys():
        repo_id = Datasets[mod]['repo_id']
        filename = Datasets[mod]['file']
        success = download_with_resume(
            repo_id=repo_id,
            model_name=filename,
            access_token=access_token
        )
        if success:
            successful[mod] = True
    if len(successful) < len(models):
        print('\n\n** There were errors downloading one or more files. **')
        print('Please double-check your license agreements, and your access token.')
        HfFolder.delete_token()
        print('Press any key to try again. Type ^C to quit.\n')
        input()
        return None

    HfFolder.save_token(access_token)
    keys = ', '.join(successful.keys())
    print(f'Successfully installed {keys}')
    return successful
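
# On any failure this returns None after deleting the cached token and pausing for input;
# on success the token is saved via HfFolder.save_token() for future runs.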


#---------------------------------------------
def download_with_resume(repo_id:str, model_name:str, access_token:str)->bool:
    model_dest = os.path.join(Model_dir, model_name)
    os.makedirs(os.path.dirname(model_dest), exist_ok=True)
    url = hf_hub_url(repo_id, model_name)

    header = {"Authorization": f'Bearer {access_token}'}
    open_mode = 'wb'
    exist_size = 0

    if os.path.exists(model_dest):
        exist_size = os.path.getsize(model_dest)
        header['Range'] = f'bytes={exist_size}-'
        open_mode = 'ab'

    resp = requests.get(url, headers=header, stream=True)
    total = int(resp.headers.get('content-length', 0))

    if resp.status_code==416:  # "range not satisfiable", which means nothing to return
        print(f'* {model_name}: complete file found. Skipping.')
        return True
    elif resp.status_code not in (200, 206):  # 206 is the expected status when resuming a partial file
        print(f'** An error occurred during downloading {model_name}: {resp.reason}')
    elif exist_size > 0:
        print(f'* {model_name}: partial file found. Resuming...')
    else:
        print(f'* {model_name}: Downloading...')

    try:
        if total < 2000:
            print(f'*** ERROR DOWNLOADING {model_name}: {resp.text}')
            return False

        with open(model_dest, open_mode) as file, tqdm(
                desc=model_name,
                initial=exist_size,
                total=total+exist_size,
                unit='iB',
                unit_scale=True,
                unit_divisor=1000,
        ) as bar:
            for data in resp.iter_content(chunk_size=1024):
                size = file.write(data)
                bar.update(size)
    except Exception as e:
        print(f'An error occurred while downloading {model_name}: {str(e)}')
        return False
    return True
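
# A resumed download sends a Range header ("bytes=<already_downloaded>-"); a server that
# honors Range answers 206 with the remaining bytes, or 416 when the local file is
# already complete. Illustrative call (names taken from the Datasets table above):
#   download_with_resume(repo_id='runwayml/stable-diffusion-v1-5',
#                        model_name='v1-5-pruned-emaonly.ckpt',
#                        access_token=access_token)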


#---------------------------------------------
def update_config_file(successfully_downloaded:dict,opt:dict):
    Config_file = opt.config_file or Default_config_file

    yaml = new_config_file_contents(successfully_downloaded,Config_file)

    try:
        if os.path.exists(Config_file):
            print(f'** {Config_file} exists. Renaming to {Config_file}.orig')
            os.rename(Config_file,f'{Config_file}.orig')
        tmpfile = os.path.join(os.path.dirname(Config_file),'new_config.tmp')
        with open(tmpfile, 'w') as outfile:
            outfile.write(Config_preamble)
            outfile.write(yaml)
        os.rename(tmpfile,Config_file)

    except Exception as e:
        print(f'**Error creating config file {Config_file}: {str(e)} **')
        return

    print(f'Successfully created new configuration file {Config_file}')


#---------------------------------------------
def new_config_file_contents(successfully_downloaded:dict, Config_file:str)->str:
    if os.path.exists(Config_file):
        conf = OmegaConf.load(Config_file)
    else:
        conf = OmegaConf.create()

    # find the VAE file, if there is one
    vae = None
    default_selected = False

    for model in successfully_downloaded:
        if Datasets[model]['config'] == 'VAE':
            vae = Datasets[model]['file']

    for model in successfully_downloaded:
        if Datasets[model]['config'] == 'VAE':  # skip VAE entries
            continue
        stanza = conf[model] if model in conf else { }

        stanza['description'] = Datasets[model]['description']
        stanza['weights'] = os.path.join(Model_dir,Datasets[model]['file'])
        stanza['config'] = os.path.join(SD_Configs, Datasets[model]['config'])
        stanza['width'] = Datasets[model]['width']
        stanza['height'] = Datasets[model]['height']
        stanza.pop('default',None)  # this will be set later
        if vae:
            stanza['vae'] = os.path.join(Model_dir,vae)
        # BUG - the first stanza is always the default. User should select.
        if not default_selected:
            stanza['default'] = True
            default_selected = True
        conf[model] = stanza
    return OmegaConf.to_yaml(conf)


#---------------------------------------------
# this will preload the Bert tokenizer files
def download_bert():
    print('Installing bert tokenizer (ignore deprecation errors)...', end='')
    sys.stdout.flush()
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=DeprecationWarning)
        from transformers import BertTokenizerFast, AutoFeatureExtractor
        tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
        print('...success')


#---------------------------------------------
# this will download requirements for Kornia
def download_kornia():
    print('Installing Kornia requirements (ignore deprecation errors)...', end='')
    sys.stdout.flush()
    import kornia
    print('...success')


#---------------------------------------------
def download_clip():
    print('Loading CLIP model...',end='')
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=DeprecationWarning)
        from transformers import CLIPTokenizer, CLIPTextModel
    sys.stdout.flush()
    version = 'openai/clip-vit-large-patch14'
    tokenizer = CLIPTokenizer.from_pretrained(version)
    transformer = CLIPTextModel.from_pretrained(version)
    print('...success')
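
# Both from_pretrained() calls download into the local Hugging Face cache, which is the
# shared .cache directory mentioned at the top of this file; copying that cache to the
# internet-isolated machine lets later runs work offline.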


#---------------------------------------------
def download_gfpgan():
    print('Installing models from RealESRGAN and facexlib...',end='')
    try:
        from realesrgan import RealESRGANer
        from realesrgan.archs.srvgg_arch import SRVGGNetCompact
        from facexlib.utils.face_restoration_helper import FaceRestoreHelper

        RealESRGANer(
            scale=4,
            model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth',
            model=SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
        )

        FaceRestoreHelper(1, det_model='retinaface_resnet50')
        print('...success')
    except Exception:
        print('Error loading ESRGAN:')
        print(traceback.format_exc())

    print('Loading models from GFPGAN')
    for model in (
            [
                'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth',
                'src/gfpgan/experiments/pretrained_models/GFPGANv1.4.pth'
            ],
            [
                'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth',
                './gfpgan/weights/detection_Resnet50_Final.pth'
            ],
            [
                'https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth',
                './gfpgan/weights/parsing_parsenet.pth'
            ],
    ):
        model_url,model_dest = model
        try:
            if not os.path.exists(model_dest):
                print(f'Downloading gfpgan model file {model_url}...',end='')
                os.makedirs(os.path.dirname(model_dest), exist_ok=True)
                request.urlretrieve(model_url,model_dest)
                print('...success')
        except Exception:
            print('Error loading GFPGAN:')
            print(traceback.format_exc())
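
# The loop in download_gfpgan() iterates over [source_url, destination_path] pairs; files
# that already exist on disk are left untouched, so re-running this script only fetches
# whatever is missing.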


#---------------------------------------------
def download_codeformer():
    print('Installing CodeFormer model file...',end='')
    try:
        model_url  = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'
        model_dest = 'ldm/invoke/restoration/codeformer/weights/codeformer.pth'
        if not os.path.exists(model_dest):
            print('Downloading codeformer model file...')
            os.makedirs(os.path.dirname(model_dest), exist_ok=True)
            request.urlretrieve(model_url,model_dest)
    except Exception:
        print('Error loading CodeFormer:')
        print(traceback.format_exc())
    print('...success')


#---------------------------------------------
def download_clipseg():
    print('Installing clipseg model for text-based masking...',end='')
    import zipfile
    try:
        model_url   = 'https://owncloud.gwdg.de/index.php/s/ioHbRzFx6th32hn/download'
        model_dest  = 'src/clipseg/clipseg_weights.zip'
        weights_dir = 'src/clipseg/weights'
        if not os.path.exists(weights_dir):
            os.makedirs(os.path.dirname(model_dest), exist_ok=True)
        if not os.path.exists('src/clipseg/weights/rd64-uni-refined.pth'):
            request.urlretrieve(model_url,model_dest)
            with zipfile.ZipFile(model_dest,'r') as zip:
                zip.extractall('src/clipseg')
            os.rename('src/clipseg/clipseg_weights','src/clipseg/weights')
            os.remove(model_dest)
            from clipseg_models.clipseg import CLIPDensePredT
            model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, )
            model.eval()
            model.load_state_dict(
                torch.load(
                    'src/clipseg/weights/rd64-uni-refined.pth',
                    map_location=torch.device('cpu')
                ),
                strict=False,
            )
    except Exception:
        print('Error installing clipseg model:')
        print(traceback.format_exc())
    print('...success')
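
# Loading rd64-uni-refined.pth into CLIPDensePredT here serves as a quick check that the
# freshly unpacked weights are readable; the model object itself is discarded afterwards.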


#-------------------------------------
def download_safety_checker():
    print('Installing safety model for NSFW content detection...',end='')
    try:
        from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
        from transformers import AutoFeatureExtractor
    except ModuleNotFoundError:
        print('Error installing safety checker model:')
        print(traceback.format_exc())
        return
    safety_model_id = "CompVis/stable-diffusion-safety-checker"
    safety_feature_extractor = AutoFeatureExtractor.from_pretrained(safety_model_id)
    safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id)
    print('...success')
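
# As with the CLIP models above, these from_pretrained() calls simply populate the local
# Hugging Face cache so the NSFW checker can be instantiated offline later.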


#-------------------------------------
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='InvokeAI model downloader')
    parser.add_argument('--interactive',
                        dest='interactive',
                        action=argparse.BooleanOptionalAction,
                        default=True,
                        help='run in interactive mode (default)')
    parser.add_argument('--config_file',
                        '-c',
                        dest='config_file',
                        type=str,
                        default='./configs/models.yaml',
                        help='path to configuration file to create')
    opt = parser.parse_args()
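
    # Typical invocations (assuming this script is saved as scripts/preload_models.py):
    #   python scripts/preload_models.py                    # interactive weight selection
    #   python scripts/preload_models.py --no-interactive   # support models only, no prompts
    #   python scripts/preload_models.py -c my_models.yaml  # write the config elsewhere
    # argparse.BooleanOptionalAction automatically provides the --no-interactive form.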

    try:
        if opt.interactive:
            introduction()
            print('** WEIGHT SELECTION **')
            choice = user_wants_to_download_weights()
            if choice != 'skip':
                models = select_datasets(choice)
                if models is None:
                    if yes_or_no('Quit?',default_yes=False):
                        sys.exit(0)
                print('** LICENSE AGREEMENT FOR WEIGHT FILES **')
                access_token = authenticate()
                print('\n** DOWNLOADING WEIGHTS **')
                successfully_downloaded = download_weight_datasets(models, access_token)
                update_config_file(successfully_downloaded,opt)
        print('\n** DOWNLOADING SUPPORT MODELS **')
        download_bert()
        download_kornia()
        download_clip()
        download_gfpgan()
        download_codeformer()
        download_clipseg()
        download_safety_checker()
        postscript()
    except KeyboardInterrupt:
        print('\nGoodbye! Come back soon.')
    except Exception as e:
        print(f'\nA problem occurred during download.\nThe error was: "{str(e)}"')