Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00.
move all models into subdirectories of ./models

- This required an update to the invoke-ai fork of GFPGAN.
- Simultaneously reverted the consolidation of the environment and requirements files, as their presence in a subdirectory triggered setup.py to try to install it as a sub-package.
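For orientation, here is a sketch of the layout the hunks below move these auxiliary model files to, reconstructed from the paths in this diff (other contents of ./models are omitted):

```
models/
├── gfpgan/
│   ├── GFPGANv1.4.pth
│   └── weights/
│       ├── detection_Resnet50_Final.pth
│       └── parsing_parsenet.pth
└── clipseg/
    └── clipseg_weights/
        ├── rd64-uni.pth
        └── rd64-uni-refined.pth
```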
parent abd6407394
commit 8cd5d95b8a
@@ -70,7 +70,7 @@ Some Suggestions of variables you may want to change besides the Token:
 | `HUGGINGFACE_TOKEN="hg_aewirhghlawrgkjbarug2"` | This is the only required variable, without you can't get the checkpoint |
 | `ARCH=aarch64` | if you are using a ARM based CPU |
 | `INVOKEAI_TAG=yourname/invokeai:latest` | the Container Repository / Tag which will be used |
-| `INVOKEAI_CONDA_ENV_FILE=environments/environment-linux-aarch64.yml`| since environment.yml wouldn't work with aarch |
+| `INVOKEAI_CONDA_ENV_FILE=environment-linux-aarch64.yml` | since environment.yml wouldn't work with aarch |
 | `INVOKEAI_GIT="-b branchname https://github.com/username/reponame"` | if you want to use your own fork |
 
 #### Build the Image
@@ -48,7 +48,7 @@ title: Manual Installation, Linux
 
 ```bash
 (base) rm -rf src # (this is a precaution in case there is already a src directory)
-(base) ~/InvokeAI$ conda env create -f environments/environment-cuda.yml
+(base) ~/InvokeAI$ conda env create -f environment-cuda.yml
 (base) ~/InvokeAI$ conda activate invokeai
 (invokeai) ~/InvokeAI$
 ```
@@ -57,7 +57,7 @@ title: Manual Installation, Linux
 
 ```bash
 (base) rm -rf src # (this is a precaution in case there is already a src directory)
-(base) ~/InvokeAI$ conda env create -f environments/environment-rocm.yml
+(base) ~/InvokeAI$ conda env create -f environment-AMD.yml
 (base) ~/InvokeAI$ conda activate invokeai
 (invokeai) ~/InvokeAI$
 ```
@@ -45,7 +45,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehand
 
 ```bash
 rmdir src # (this is a precaution in case there is already a src directory)
-conda env create -f environments/environment-cuda.yml
+conda env create -f environment-cuda.yml
 conda activate invokeai
 (invokeai)>
 ```
@@ -54,7 +54,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehand
 
 ```bash
 rmdir src # (this is a precaution in case there is already a src directory)
-conda env create -f environments/environment-rocm.yml
+conda env create -f environment-AMD.yml
 conda activate invokeai
 (invokeai)>
 ```
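All four documentation hunks above drop the `environments/` prefix, since the YAML files move back to the repository root. A quick hypothetical sanity check (not part of the commit) that the files are where the docs now point:

```python
import os

# Hypothetical check: the environment files the docs now reference should
# sit at the repository root after this commit.
for env_file in ('environment-cuda.yml',
                 'environment-AMD.yml',
                 'environment-linux-aarch64.yml'):
    status = 'found' if os.path.exists(env_file) else 'missing'
    print(f'{env_file}: {status}')
```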
@@ -500,13 +500,13 @@ class Args(object):
         postprocessing_group.add_argument(
             '--gfpgan_model_path',
             type=str,
-            default='experiments/pretrained_models/GFPGANv1.4.pth',
+            default='./GFPGANv1.4.pth',
             help='Indicates the path to the GFPGAN model, relative to --gfpgan_dir.',
         )
         postprocessing_group.add_argument(
             '--gfpgan_dir',
             type=str,
-            default='./src/gfpgan',
+            default='./models/gfpgan',
             help='Indicates the directory containing the GFPGAN code.',
         )
         web_server_group.add_argument(
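A quick illustration (not part of the commit) of how the two new defaults combine; the GFPGAN class further down performs the same join:

```python
import os

# '--gfpgan_model_path' is documented as relative to '--gfpgan_dir', so the
# new defaults resolve to a file inside ./models/gfpgan:
model_path = os.path.join('./models/gfpgan', './GFPGANv1.4.pth')
print(os.path.normpath(model_path))  # -> models/gfpgan/GFPGANv1.4.pth
```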
@@ -47,7 +47,6 @@ def get_uc_and_c_and_ec(prompt_string_uncleaned, model, log_tokens=False, skip_n
     parsed_prompt = pp.parse_conjunction(prompt_string_cleaned).prompts[0]
 
     parsed_negative_prompt: FlattenedPrompt = pp.parse_conjunction(unconditioned_words).prompts[0]
-    print(f">> Parsed prompt to {parsed_prompt}")
 
     conditioning = None
     cac_args:CrossAttentionControl.Arguments = None
@@ -10,8 +10,9 @@ from PIL import Image
 class GFPGAN():
     def __init__(
         self,
-        gfpgan_dir='src/gfpgan',
-        gfpgan_model_path='experiments/pretrained_models/GFPGANv1.4.pth') -> None:
+        gfpgan_dir='models/gfpgan',
+        gfpgan_model_path='GFPGANv1.4.pth'
+    ) -> None:
 
         self.model_path = os.path.join(gfpgan_dir, gfpgan_model_path)
         self.gfpgan_model_exists = os.path.isfile(self.model_path)
@@ -74,6 +75,7 @@ class GFPGAN():
             image = image.resize(res.size)
             res = Image.blend(image, res, strength)
 
+
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
         self.gfpgan = None
@@ -29,14 +29,14 @@ work fine.
 
 import torch
 import numpy as np
-from clipseg_models.clipseg import CLIPDensePredT
+from clipseg.clipseg import CLIPDensePredT
 from einops import rearrange, repeat
 from PIL import Image, ImageOps
 from torchvision import transforms
 
 CLIP_VERSION = 'ViT-B/16'
-CLIPSEG_WEIGHTS = 'src/clipseg/weights/rd64-uni.pth'
-CLIPSEG_WEIGHTS_REFINED = 'src/clipseg/weights/rd64-uni-refined.pth'
+CLIPSEG_WEIGHTS = 'models/clipseg/clipseg_weights/rd64-uni.pth'
+CLIPSEG_WEIGHTS_REFINED = 'models/clipseg/clipseg_weights/rd64-uni-refined.pth'
 CLIPSEG_SIZE = 352
 
 class SegmentedGrayscale(object):
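A minimal sketch (not part of the commit) showing the renamed import and relocated weights in use; it mirrors the load that download_clipseg() performs further down:

```python
import torch
from clipseg.clipseg import CLIPDensePredT  # import path as renamed above

CLIP_VERSION = 'ViT-B/16'
CLIPSEG_WEIGHTS_REFINED = 'models/clipseg/clipseg_weights/rd64-uni-refined.pth'

model = CLIPDensePredT(version=CLIP_VERSION, reduce_dim=64)
model.eval()
model.load_state_dict(
    torch.load(CLIPSEG_WEIGHTS_REFINED, map_location=torch.device('cpu')),
    strict=False,
)
```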
@@ -415,7 +415,7 @@ def download_kornia():
 
 #---------------------------------------------
 def download_clip():
-    print('Loading CLIP model...',end='')
+    print('Loading CLIP model (ignore deprecation errors)...',end='')
     sys.stdout.flush()
     version = 'openai/clip-vit-large-patch14'
     tokenizer = CLIPTokenizer.from_pretrained(version)
@@ -424,7 +424,7 @@ def download_clip():
 
 #---------------------------------------------
 def download_gfpgan():
-    print('Installing models from RealESRGAN and facexlib...',end='')
+    print('Installing models from RealESRGAN and facexlib (ignore deprecation errors)...',end='')
     try:
         from realesrgan import RealESRGANer
         from realesrgan.archs.srvgg_arch import SRVGGNetCompact
@@ -442,19 +442,19 @@ def download_gfpgan():
         print('Error loading ESRGAN:')
         print(traceback.format_exc())
 
-    print('Loading models from GFPGAN')
+    print('Loading models from GFPGAN...',end='')
     for model in (
         [
             'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth',
-            'src/gfpgan/experiments/pretrained_models/GFPGANv1.4.pth'
+            'models/gfpgan/GFPGANv1.4.pth'
         ],
         [
             'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth',
-            './gfpgan/weights/detection_Resnet50_Final.pth'
+            'models/gfpgan/weights/detection_Resnet50_Final.pth'
         ],
         [
             'https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth',
-            './gfpgan/weights/parsing_parsenet.pth'
+            'models/gfpgan/weights/parsing_parsenet.pth'
         ],
     ):
         model_url,model_dest = model
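The loop body is cut off by the hunk, so what happens to each (model_url, model_dest) pair is not shown. As a hedged sketch, the conventional pattern — and the one download_clipseg() below uses for its own file — would be roughly:

```python
import os
from urllib import request

# One (url, dest) pair from the list above, for illustration:
models = (
    ['https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth',
     'models/gfpgan/GFPGANv1.4.pth'],
)

# Hypothetical reconstruction of the truncated loop body: fetch each model
# to its destination under ./models if it is not already present.
for model_url, model_dest in models:
    if not os.path.exists(model_dest):
        os.makedirs(os.path.dirname(model_dest), exist_ok=True)
        request.urlretrieve(model_url, model_dest)
```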
@@ -489,22 +489,23 @@ def download_clipseg():
     import zipfile
     try:
         model_url = 'https://owncloud.gwdg.de/index.php/s/ioHbRzFx6th32hn/download'
-        model_dest = 'src/clipseg/clipseg_weights.zip'
-        weights_dir = 'src/clipseg/weights'
-        if not os.path.exists(weights_dir):
+        model_dest = 'models/clipseg/clipseg_weights'
+        weights_zip = 'models/clipseg/weights.zip'
+
+        if not os.path.exists(model_dest):
             os.makedirs(os.path.dirname(model_dest), exist_ok=True)
-        if not os.path.exists('src/clipseg/weights/rd64-uni-refined.pth'):
-            request.urlretrieve(model_url,model_dest)
-            with zipfile.ZipFile(model_dest,'r') as zip:
-                zip.extractall('src/clipseg')
-                os.rename('src/clipseg/clipseg_weights','src/clipseg/weights')
-            os.remove(model_dest)
-        from clipseg_models.clipseg import CLIPDensePredT
+        if not os.path.exists(f'{model_dest}/rd64-uni-refined.pth'):
+            request.urlretrieve(model_url,weights_zip)
+            with zipfile.ZipFile(weights_zip,'r') as zip:
+                zip.extractall('models/clipseg')
+            os.remove(weights_zip)
+
+        from clipseg.clipseg import CLIPDensePredT
         model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, )
         model.eval()
         model.load_state_dict(
             torch.load(
-                'src/clipseg/weights/rd64-uni-refined.pth',
+                'models/clipseg/clipseg_weights/rd64-uni-refined.pth',
                 map_location=torch.device('cpu')
             ),
             strict=False,
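Design note on the clipseg hunk: the downloaded archive unpacks to a top-level clipseg_weights/ folder (which is why the old code had to os.rename it into place), so extracting straight into models/clipseg already lands the weights at model_dest; the rename goes away and the zip is simply deleted afterwards.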
setup.py (3 changes)
@@ -3,7 +3,7 @@ from setuptools import setup, find_packages
 setup(
     name='invoke-ai',
     version='2.1.3',
-    description='',
+    description='InvokeAI text to image generation toolkit',
     packages=find_packages(),
     install_requires=[
         'torch',
@@ -11,3 +11,4 @@ setup(
         'tqdm',
     ],
 )
+