Merge branch 'main' into mps-fp16-fixes

commit f610045a14
Lincoln Stein 2023-07-05 21:01:48 -04:00 committed by GitHub
3 changed files with 15 additions and 16 deletions

.gitignore (vendored, 2 changes)

@@ -201,8 +201,6 @@ checkpoints
 # If it's a Mac
 .DS_Store
-invokeai/frontend/web/dist/*
 # Let the frontend manage its own gitignore
 !invokeai/frontend/web/*

@@ -228,6 +228,7 @@ class MigrateTo3(object):
         self._migrate_pretrained(CLIPTextModel,
                                  repo_id = repo_id,
                                  dest = target_dir / 'clip-vit-large-patch14',
+                                 force = True,
                                  **kwargs)
         # sd-2
@@ -291,21 +292,21 @@ class MigrateTo3(object):
     def _model_probe_to_path(self, info: ModelProbeInfo)->Path:
         return Path(self.dest_models, info.base_type.value, info.model_type.value)

-    def _migrate_pretrained(self, model_class, repo_id: str, dest: Path, **kwargs):
-        if dest.exists():
+    def _migrate_pretrained(self, model_class, repo_id: str, dest: Path, force:bool=False, **kwargs):
+        if dest.exists() and not force:
             logger.info(f'Skipping existing {dest}')
             return
         model = model_class.from_pretrained(repo_id, **kwargs)
-        self._save_pretrained(model, dest)
+        self._save_pretrained(model, dest, overwrite=force)

-    def _save_pretrained(self, model, dest: Path):
-        if dest.exists():
-            logger.info(f'Skipping existing {dest}')
-            return
+    def _save_pretrained(self, model, dest: Path, overwrite: bool=False):
         model_name = dest.name
-        download_path = dest.with_name(f'{model_name}.downloading')
-        model.save_pretrained(download_path, safe_serialization=True)
-        download_path.replace(dest)
+        if overwrite:
+            model.save_pretrained(dest, safe_serialization=True)
+        else:
+            download_path = dest.with_name(f'{model_name}.downloading')
+            model.save_pretrained(download_path, safe_serialization=True)
+            download_path.replace(dest)

     def _download_vae(self, repo_id: str, subfolder:str=None)->Path:
         vae = AutoencoderKL.from_pretrained(repo_id, cache_dir=self.root_directory / 'models/hub', subfolder=subfolder)
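
The rewritten save path deserves a note: _migrate_pretrained now forwards its new force flag to _save_pretrained as overwrite, and only the non-forced branch keeps the crash-safe idiom of serializing to a '.downloading' sibling and then renaming it into place. A minimal standalone sketch of that idiom, where save_fn is a hypothetical stand-in for model.save_pretrained and not part of this commit:

    from pathlib import Path
    from typing import Callable

    def save_with_overwrite(dest: Path, save_fn: Callable[[Path], None],
                            overwrite: bool = False) -> None:
        # Forced saves write straight to the final location.
        if overwrite:
            save_fn(dest)
            return
        # Otherwise serialize to '<name>.downloading' first, so a crash
        # mid-write never leaves a half-written directory at dest ...
        download_path = dest.with_name(f'{dest.name}.downloading')
        save_fn(download_path)
        # ... then rename into place; Path.replace (os.replace) is atomic on
        # POSIX when source and destination are on the same filesystem.
        download_path.replace(dest)

    # Usage mirroring the diff:
    # save_with_overwrite(dest,
    #                     lambda p: model.save_pretrained(p, safe_serialization=True),
    #                     overwrite=force)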
@@ -573,8 +574,10 @@ script, which will perform a full upgrade in place."""
     dest_directory = args.dest_directory
     assert dest_directory.is_dir(), f"{dest_directory} is not a valid directory"
-    assert (dest_directory / 'models').is_dir(), f"{dest_directory} does not contain a 'models' subdirectory"
-    assert (dest_directory / 'invokeai.yaml').exists(), f"{dest_directory} does not contain an InvokeAI init file."
+    # TODO: revisit
+    # assert (dest_directory / 'models').is_dir(), f"{dest_directory} does not contain a 'models' subdirectory"
+    # assert (dest_directory / 'invokeai.yaml').exists(), f"{dest_directory} does not contain an InvokeAI init file."

     do_migrate(root_directory,dest_directory)
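
The destination-directory checks are commented out rather than deleted, with a TODO to revisit. One possible follow-up, sketched here as an assumption and not something this commit does, is to downgrade the hard asserts to warnings so migration can proceed against a bare destination while still flagging an unusual layout:

    import logging
    from pathlib import Path

    logger = logging.getLogger(__name__)

    def check_destination(dest_directory: Path) -> None:
        # Warn, rather than abort, when the expected layout is missing.
        if not (dest_directory / 'models').is_dir():
            logger.warning(f"{dest_directory} does not contain a 'models' subdirectory")
        if not (dest_directory / 'invokeai.yaml').exists():
            logger.warning(f"{dest_directory} does not contain an InvokeAI init file.")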

@@ -100,8 +100,6 @@ class ModelCache(object):
         :param sha_chunksize: Chunksize to use when calculating sha256 model hash
         '''
         #max_cache_size = 9999
-        execution_device = torch.device('cuda')
         self.model_infos: Dict[str, ModelBase] = dict()
         self.lazy_offloading = lazy_offloading
         #self.sequential_offload: bool=sequential_offload
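
Removing the stray execution_device = torch.device('cuda') matters for this branch: the assignment appears to have clobbered whatever execution device the caller configured and assumed CUDA, which does not exist on the Apple-silicon (MPS) machines the mps-fp16-fixes branch targets. For context, a hedged sketch of portable device selection; this helper is illustrative and not part of ModelCache:

    import torch

    def pick_execution_device() -> torch.device:
        # Prefer CUDA, fall back to Apple's Metal (MPS) backend, then CPU.
        if torch.cuda.is_available():
            return torch.device('cuda')
        if torch.backends.mps.is_available():
            return torch.device('mps')
        return torch.device('cpu')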