Fix clipseg model loading

- This fixes the clipseg loading code so that it looks in the root directory
  for the model.

- It also adds several __init__.py files needed to allow InvokeAI to be
  installed without the -e (editable) flag. This lets you delete the
  source code directory after installation.
This commit is contained in:
Lincoln Stein 2022-11-15 19:12:47 +00:00
parent a4204abfce
commit 2ec9792f50
7 changed files with 11 additions and 2 deletions

3
.gitignore vendored
View File

@@ -235,3 +235,6 @@ update.sh
# this may be present if the user created a venv
invokeai
# no longer stored in source directory
models

0
ldm/__init__.py Normal file
View File

0
ldm/invoke/__init__.py Normal file
View File

View File

@@ -29,10 +29,12 @@ work fine.
import torch
import numpy as np
import os
from clipseg.clipseg import CLIPDensePredT
from einops import rearrange, repeat
from PIL import Image, ImageOps
from torchvision import transforms
from ldm.invoke.globals import Globals
CLIP_VERSION = 'ViT-B/16'
CLIPSEG_WEIGHTS = 'models/clipseg/clipseg_weights/rd64-uni.pth'
@@ -80,7 +82,11 @@ class Txt2Mask(object):
self.model.eval()
# initially we keep everything in cpu to conserve space
self.model.to('cpu')
self.model.load_state_dict(torch.load(CLIPSEG_WEIGHTS_REFINED if refined else CLIPSEG_WEIGHTS, map_location=torch.device('cpu')), strict=False)
self.model.load_state_dict(torch.load(os.path.join(Globals.root,CLIPSEG_WEIGHTS_REFINED)
if refined
else os.path.join(Globals.root,CLIPSEG_WEIGHTS),
map_location=torch.device('cpu')), strict=False
)
@torch.no_grad()
def segment(self, image, prompt:str) -> SegmentedGrayscale:

0
ldm/models/__init__.py Normal file
View File

0
ldm/modules/__init__.py Normal file
View File

View File

@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
setup(
name='invoke-ai',
version='2.1.3',
version='2.1.4',
description='InvokeAI text to image generation toolkit',
packages=find_packages(),
install_requires=[