fix clipseg loading problems
- The directory "models" in the main InvokeAI directory was conflicting with loading "models.clipseg". To fix this, I have renamed models.clipseg to clipseg_models.clipseg and applied the change on the 'models-rename' branch of invoke-ai's fork of clipseg.
This commit is contained in:
parent 027990928e
commit 2ca4242f5f
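
For context on the name clash described in the commit message, here is a minimal diagnostic sketch (not part of the commit). It assumes clipseg has been installed from the models-rename branch of invoke-ai/clipseg, so the renamed clipseg_models package is available:

import importlib.util

# With InvokeAI's top-level "models/" directory on sys.path, the name
# "models" resolves to that directory, which is why the old
# "from models.clipseg import CLIPDensePredT" import breaks.
spec = importlib.util.find_spec("models")
if spec is not None:
    print("'models' currently resolves to:", spec.origin or spec.submodule_search_locations)

# The renamed package sidesteps the collision entirely
# (assumes invoke-ai/clipseg@models-rename is installed).
from clipseg_models.clipseg import CLIPDensePredT
print("clipseg import OK:", CLIPDensePredT.__name__)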
@@ -37,5 +37,5 @@ dependencies:
 - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
 - -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
 - -e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
-- -e git+https://github.com/invoke-ai/clipseg.git#egg=clipseg
+- -e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
 - -e .
@@ -29,7 +29,7 @@ work fine.
 
 import torch
 import numpy as np
-from models.clipseg import CLIPDensePredT
+from clipseg_models.clipseg import CLIPDensePredT
 from einops import rearrange, repeat
 from PIL import Image
 from torchvision import transforms
@@ -117,7 +117,7 @@ try:
 with zipfile.ZipFile(model_dest,'r') as zip:
     zip.extractall('src/clipseg')
     os.rename('src/clipseg/clipseg_weights','src/clipseg/weights')
-from models.clipseg import CLIPDensePredT
+from clipseg_models.clipseg import CLIPDensePredT
 model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, )
 model.eval()
 model.load_state_dict(
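
After this commit, the preload step constructs and loads the segmentation model through the renamed package, as in the hunk above. A minimal standalone sketch of that pattern follows; the weights path and the strict=False flag are assumptions for illustration, since the diff truncates the actual load_state_dict() call:

import torch
from clipseg_models.clipseg import CLIPDensePredT  # renamed package from the models-rename branch

# Same construction as in the hunk above: ViT-B/16 backbone, 64-dim reduction.
model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, )
model.eval()

# Hypothetical weights path; the real path is not shown in this diff.
weights = 'src/clipseg/weights/rd64-uni-refined.pth'
model.load_state_dict(
    torch.load(weights, map_location='cpu'),
    strict=False,  # assumption: clipseg weights are typically loaded non-strictly
)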