Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
fix clipseg model loading
- This fixes the clipseg loading code so that it looks in the root directory for the model.
- It also adds several __init__.py files needed to allow InvokeAI to be installed without the -e (editable) flag. This lets you delete the source code directory after installation.
parent a4204abfce
commit 2ec9792f50
.gitignore (vendored): 3 changes
@@ -235,3 +235,6 @@ update.sh
 
 # this may be present if the user created a venv
 invokeai
+
+# no longer stored in source directory
+models
ldm/__init__.py: 0 changes (new file)
ldm/invoke/__init__.py: 0 changes (new file)
ldm/invoke/txt2mask.py
@@ -29,10 +29,12 @@ work fine.
 
 import torch
 import numpy as np
+import os
 from clipseg.clipseg import CLIPDensePredT
 from einops import rearrange, repeat
 from PIL import Image, ImageOps
 from torchvision import transforms
+from ldm.invoke.globals import Globals
 
 CLIP_VERSION = 'ViT-B/16'
 CLIPSEG_WEIGHTS = 'models/clipseg/clipseg_weights/rd64-uni.pth'
@@ -80,7 +82,11 @@ class Txt2Mask(object):
         self.model.eval()
         # initially we keep everything in cpu to conserve space
         self.model.to('cpu')
-        self.model.load_state_dict(torch.load(CLIPSEG_WEIGHTS_REFINED if refined else CLIPSEG_WEIGHTS, map_location=torch.device('cpu')), strict=False)
+        self.model.load_state_dict(torch.load(os.path.join(Globals.root,CLIPSEG_WEIGHTS_REFINED)
+                                              if refined
+                                              else os.path.join(Globals.root,CLIPSEG_WEIGHTS),
+                                              map_location=torch.device('cpu')), strict=False
+                                              )
 
     @torch.no_grad()
     def segment(self, image, prompt:str) -> SegmentedGrayscale:
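Before this change, the CLIPSEG_WEIGHTS constants were passed to torch.load() as-is, so the weights were only found when the process ran from inside the source checkout. The new code joins them to Globals.root, the runtime root of the InvokeAI installation, which is what allows the source directory to be deleted after installation. A minimal sketch of the pattern, using a hypothetical resolve_weights() helper (the refined-weights filename is an assumption; only the constant's name appears in the diff):

    import os

    # Relative locations, as declared at the top of txt2mask.py
    CLIPSEG_WEIGHTS = 'models/clipseg/clipseg_weights/rd64-uni.pth'
    # assumed value -- only the name CLIPSEG_WEIGHTS_REFINED appears in the diff
    CLIPSEG_WEIGHTS_REFINED = 'models/clipseg/clipseg_weights/rd64-uni-refined.pth'

    def resolve_weights(root: str, refined: bool = False) -> str:
        """Hypothetical helper: anchor the clipseg weight file to the
        InvokeAI root directory instead of the source checkout."""
        relative = CLIPSEG_WEIGHTS_REFINED if refined else CLIPSEG_WEIGHTS
        return os.path.join(root, relative)

    # e.g. resolve_weights('/home/user/invokeai')
    #   -> '/home/user/invokeai/models/clipseg/clipseg_weights/rd64-uni.pth'

As before, map_location=torch.device('cpu') keeps the initial load on the CPU (matching the comment in the hunk), and strict=False tolerates checkpoint keys that the model does not define.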
ldm/models/__init__.py: 0 changes (new file)
ldm/modules/__init__.py: 0 changes (new file)
setup.py: 2 changes
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name='invoke-ai',
-    version='2.1.3',
+    version='2.1.4',
     description='InvokeAI text to image generation toolkit',
     packages=find_packages(),
     install_requires=[
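The only functional change here is the version bump to 2.1.4, but note find_packages(): setuptools discovers only directories that contain an __init__.py, which is why the empty __init__.py files added above are required for a plain (non-editable) pip install. A quick sanity check, run from the repository root (the printed list is illustrative):

    from setuptools import find_packages

    # Without ldm/__init__.py and the other new __init__.py files,
    # find_packages() would skip those directories, `pip install .`
    # (without -e) would ship an incomplete package, and imports such as
    # `from ldm.invoke.globals import Globals` would break once the
    # source tree was deleted.
    print(find_packages())
    # e.g. ['ldm', 'ldm.invoke', 'ldm.models', 'ldm.modules', ...]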