diff --git a/ldm/invoke/model_cache.py b/ldm/invoke/model_cache.py
index 7d1654718a..f937b7eae5 100644
--- a/ldm/invoke/model_cache.py
+++ b/ldm/invoke/model_cache.py
@@ -227,7 +227,9 @@ class ModelCache(object):
         model_hash = self._cached_sha256(weights,weight_bytes)
         sd = torch.load(io.BytesIO(weight_bytes), map_location='cpu')
         del weight_bytes
-        sd = sd['state_dict']
+        # merged models from auto11 merge board are flat for some reason
+        if 'state_dict' in sd:
+            sd = sd['state_dict']
         model = instantiate_from_config(omega_config.model)
         model.load_state_dict(sd, strict=False)
 