diff --git a/environment.yml b/environment.yml
index 72468067e4..820f940608 100644
--- a/environment.yml
+++ b/environment.yml
@@ -37,5 +37,5 @@ dependencies:
   - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
   - -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
   - -e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
-  - -e git+https://github.com/invoke-ai/clipseg.git#egg=clipseg
+  - -e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
   - -e .
diff --git a/ldm/invoke/txt2mask.py b/ldm/invoke/txt2mask.py
index 01d93546e3..bd4269f7e1 100644
--- a/ldm/invoke/txt2mask.py
+++ b/ldm/invoke/txt2mask.py
@@ -29,7 +29,7 @@ work fine.
 import torch
 import numpy as np
-from models.clipseg import CLIPDensePredT
+from clipseg_models.clipseg import CLIPDensePredT
 from einops import rearrange, repeat
 from PIL import Image
 from torchvision import transforms
diff --git a/scripts/preload_models.py b/scripts/preload_models.py
index 97b79e1845..560fcca825 100644
--- a/scripts/preload_models.py
+++ b/scripts/preload_models.py
@@ -117,7 +117,7 @@ try:
     with zipfile.ZipFile(model_dest,'r') as zip:
         zip.extractall('src/clipseg')
     os.rename('src/clipseg/clipseg_weights','src/clipseg/weights')
-    from models.clipseg import CLIPDensePredT
+    from clipseg_models.clipseg import CLIPDensePredT
     model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, )
     model.eval()
     model.load_state_dict(