mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
attempt to address memory issues when loading ckpt models (#2128)
- A couple of users have reported that switching back and forth between ckpt models is causing a "GPU out of memory" crash. Traceback suggests there is actually a CPU RAM issue. - This speculative test simply performs a round of garbage collection before the point where the crash occurs.
This commit is contained in:
parent
4cc60669c1
commit
9d103ef030
@@ -242,6 +242,9 @@ class ModelCache(object):
        # merged models from auto11 merge board are flat for some reason
        if 'state_dict' in sd:
            sd = sd['state_dict']

        print(f' | Forcing garbage collection prior to loading new model')
        gc.collect()
        model = instantiate_from_config(omega_config.model)
        model.load_state_dict(sd, strict=False)
Loading…
Reference in New Issue
Block a user