Mirror of https://github.com/invoke-ai/InvokeAI
final fixups to memory_cache

- fixed backwards calculation of minimum available memory
- only execute m.padding adjustment code once upon load
parent aa6aa68753
commit 1c102c71fc
```diff
@@ -679,11 +679,6 @@ class Generate:
             )
 
         self._set_sampler()
-
-        for m in self.model.modules():
-            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
-                m._orig_padding_mode = m.padding_mode
-
         self.model_name = model_name
         return self.model
 
```
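(The loop removed above is not gone: the `@@ -172,6 +172,10` hunk below re-adds it inside ModelCache's model-loading path, so `_orig_padding_mode` is cached once when a model is loaded rather than every time Generate switches models.)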
```diff
@@ -122,7 +122,7 @@ class ModelCache(object):
 
     def _check_memory(self):
         avail_memory = psutil.virtual_memory()[1]
-        if avail_memory + AVG_MODEL_SIZE < self.min_avail_mem:
+        if AVG_MODEL_SIZE + self.min_avail_mem > avail_memory:
             least_recent_model = self._pop_oldest_model()
             if least_recent_model is not None:
                 del self.models[least_recent_model]
```
```diff
@@ -172,6 +172,10 @@ class ModelCache(object):
         model.cond_stage_model.device = self.device
         model.eval()
 
+        for m in model.modules():
+            if isinstance(m, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
+                m._orig_padding_mode = m.padding_mode
+
         # usage statistics
         toc = time.time()
         print(f'>> Model loaded in', '%4.2fs' % (toc - tic))
```
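Caching the factory padding mode at load time gives later code something to restore after temporarily switching convolutions to circular padding, the usual trick behind seamless tiling. A hypothetical sketch of that toggle/restore pattern (`set_seamless` is an illustrative name, not the repo's API):

```python
import torch.nn as nn

def set_seamless(model: nn.Module, seamless: bool) -> None:
    # Hypothetical helper: flip convolutions to circular padding for
    # seamless tiling, or restore the mode cached at model-load time.
    # ConvTranspose2d only supports 'zeros' padding in PyTorch, so only
    # plain Conv2d layers are toggled here.
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            m.padding_mode = 'circular' if seamless else m._orig_padding_mode
```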
|