diff --git a/ldm/invoke/txt2mask.py b/ldm/invoke/txt2mask.py
index 01d93546e3..f9a731d1f6 100644
--- a/ldm/invoke/txt2mask.py
+++ b/ldm/invoke/txt2mask.py
@@ -29,7 +29,7 @@ work fine.
 
 import torch
 import numpy as np
-from models.clipseg import CLIPDensePredT
+from clipseg import CLIPDensePredT
 from einops import rearrange, repeat
 from PIL import Image
 from torchvision import transforms
diff --git a/scripts/preload_models.py b/scripts/preload_models.py
index b23bec11f3..2da39c5680 100644
--- a/scripts/preload_models.py
+++ b/scripts/preload_models.py
@@ -117,7 +117,7 @@ try:
         with zipfile.ZipFile(model_dest,'r') as zip:
             zip.extractall('src/clipseg')
             os.rename('src/clipseg/clipseg_weights','src/clipseg/weights')
-        from models.clipseg import CLIPDensePredT
+        from clipseg import CLIPDensePredT
         model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, )
         model.eval()
         model.load_state_dict(
@@ -126,6 +126,7 @@ try:
                 map_location=torch.device('cpu'),
                 strict=False,
             )
+        )
 except Exception:
     print('Error installing clipseg model:')
     print(traceback.format_exc())