Mirror of https://github.com/invoke-ai/InvokeAI
Remove hardcoded cuda device in model manager init (#3624)
There was a line in model_manager.py in which the GPU device was hardcoded to "cuda". This has now been removed.
This commit is contained in: bbfb5bb1d4
@@ -100,8 +100,6 @@ class ModelCache(object):
         :param sha_chunksize: Chunksize to use when calculating sha256 model hash
         '''
-        #max_cache_size = 9999
-        execution_device = torch.device('cuda')
 
         self.model_infos: Dict[str, ModelBase] = dict()
         self.lazy_offloading = lazy_offloading
         #self.sequential_offload: bool=sequential_offload
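The effect of the change is that the execution device is no longer pinned to CUDA inside the constructor body; it can instead be supplied by the caller or resolved at runtime. The sketch below illustrates that pattern. It is only an illustration under assumptions: choose_execution_device is a hypothetical helper, and the ModelCache(execution_device=...) call is shown as a commented example rather than the exact InvokeAI API.

from typing import Optional

import torch


def choose_execution_device(preferred: Optional[str] = None) -> torch.device:
    """Pick a torch device, falling back to CPU when CUDA is unavailable."""
    if preferred is not None:
        return torch.device(preferred)
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")


# Hypothetical usage: pass the resolved device into the cache instead of
# relying on a hardcoded torch.device('cuda') inside __init__.
# cache = ModelCache(execution_device=choose_execution_device())

With this approach the device choice lives with the caller (or a config layer), so machines without CUDA are not forced onto a device they do not have.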