Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)

Merge branch 'main' into feat/clip_skip
Commit bc5371eeee
@@ -430,13 +430,13 @@ to allow InvokeAI to download restricted styles & subjects from the "Concept Lib
             max_height=len(PRECISION_CHOICES) + 1,
             scroll_exit=True,
         )
-        self.max_loaded_models = self.add_widget_intelligent(
+        self.max_cache_size = self.add_widget_intelligent(
             IntTitleSlider,
-            name="Number of models to cache in CPU memory (each will use 2-4 GB!)",
-            value=old_opts.max_loaded_models,
-            out_of=10,
-            lowest=1,
-            begin_entry_at=4,
+            name="Size of the RAM cache used for fast model switching (GB)",
+            value=old_opts.max_cache_size,
+            out_of=20,
+            lowest=3,
+            begin_entry_at=6,
             scroll_exit=True,
         )
         self.nextrely += 1
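This hunk and the next two replace the count-based setting (max_loaded_models, number of models held in CPU RAM) with a size-based one (max_cache_size, in GB). A minimal sketch of how an existing count could be translated into the new GB value, assuming the 2-4 GB-per-model figure quoted in the old widget label; the helper name and the 2.5 GB midpoint are illustrative, not part of the commit:

    def estimate_cache_size_gb(max_loaded_models: int, gb_per_model: float = 2.5) -> float:
        """Rough translation of the old count-based setting into the new GB-based one."""
        # Clamp to the range exposed by the new slider (lowest=3, out_of=20).
        return min(max(max_loaded_models * gb_per_model, 3.0), 20.0)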
@@ -539,7 +539,7 @@ https://huggingface.co/spaces/CompVis/stable-diffusion-license
             "outdir",
             "nsfw_checker",
             "free_gpu_mem",
-            "max_loaded_models",
+            "max_cache_size",
             "xformers_enabled",
             "always_use_cpu",
         ]:
@@ -555,9 +555,6 @@ https://huggingface.co/spaces/CompVis/stable-diffusion-license
         new_opts.license_acceptance = self.license_acceptance.value
         new_opts.precision = PRECISION_CHOICES[self.precision.value[0]]
-
-        # widget library workaround to make max_loaded_models an int rather than a float
-        new_opts.max_loaded_models = int(new_opts.max_loaded_models)

         return new_opts
@@ -193,7 +193,10 @@ class ModelInstall(object):
             models_installed.update(self._install_path(path))

         # folders style or similar
-        elif path.is_dir() and any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]):
+        elif path.is_dir() and any([(path/x).exists() for x in \
+                                    {'config.json','model_index.json','learned_embeds.bin','pytorch_lora_weights.bin'}
+                                    ]
+                                   ):
             models_installed.update(self._install_path(path))

         # recursive scan
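The widened check above treats a folder containing pytorch_lora_weights.bin as an installable model, in addition to the diffusers and textual-inversion signatures it already knew. A minimal standalone sketch of the same signature test; the helper name is illustrative, the filenames are the ones in the hunk:

    from pathlib import Path

    MODEL_FOLDER_SIGNATURES = {
        'config.json',                # single diffusers component
        'model_index.json',           # full diffusers pipeline
        'learned_embeds.bin',         # textual inversion embedding
        'pytorch_lora_weights.bin',   # diffusers-format LoRA (newly recognized)
    }

    def looks_like_model_folder(path: Path) -> bool:
        """True if the directory contains any of the known model signature files."""
        return path.is_dir() and any((path / name).exists() for name in MODEL_FOLDER_SIGNATURES)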
@@ -8,7 +8,7 @@ The cache returns context manager generators designed to load the
 model into the GPU within the context, and unload outside the
 context. Use like this:

-   cache = ModelCache(max_models_cached=6)
+   cache = ModelCache(max_cache_size=7.5)
    with cache.get_model('runwayml/stable-diffusion-1-5') as SD1,
         cache.get_model('stabilityai/stable-diffusion-2') as SD2:
        do_something_in_GPU(SD1,SD2)
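The docstring above already sketches the intended use; the same example written as valid Python, with the line continuation the multi-model `with` statement needs (the model names and do_something_in_GPU are the placeholders from the docstring):

    cache = ModelCache(max_cache_size=7.5)   # RAM budget in GB rather than a model count
    with cache.get_model('runwayml/stable-diffusion-1-5') as SD1, \
         cache.get_model('stabilityai/stable-diffusion-2') as SD2:
        # both models are loaded onto the execution device inside the block
        do_something_in_GPU(SD1, SD2)
    # outside the block the models are unloaded back to the cache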
@@ -91,7 +91,7 @@ class ModelCache(object):
         logger: types.ModuleType = logger
     ):
         '''
-        :param max_models: Maximum number of models to cache in CPU RAM [4]
+        :param max_cache_size: Maximum size of the RAM cache [6.0 GB]
         :param execution_device: Torch device to load active model into [torch.device('cuda')]
         :param storage_device: Torch device to save inactive model in [torch.device('cpu')]
         :param precision: Precision for loaded models [torch.float16]
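For context, a sketch of constructing the cache with the parameters documented above; the keyword names are taken from the :param entries shown here, and the full constructor signature may differ:

    import torch

    cache = ModelCache(
        max_cache_size=6.0,                     # GB of CPU RAM reserved for cached models
        execution_device=torch.device('cuda'),  # device the active model is loaded onto
        storage_device=torch.device('cpu'),     # device inactive models are parked on
        precision=torch.float16,
    )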
@@ -126,16 +126,6 @@ class ModelCache(object):
             key += f":{submodel_type}"
         return key

-    #def get_model(
-    #    self,
-    #    repo_id_or_path: Union[str, Path],
-    #    model_type: ModelType = ModelType.Diffusers,
-    #    subfolder: Path = None,
-    #    submodel: ModelType = None,
-    #    revision: str = None,
-    #    attach_model_part: Tuple[ModelType, str] = (None, None),
-    #    gpu_load: bool = True,
-    #) -> ModelLocker: # ?? what does it return
     def _get_model_info(
         self,
         model_path: str,
@@ -785,7 +785,7 @@ class ModelManager(object):
                 if path in known_paths or path.parent in scanned_dirs:
                     scanned_dirs.add(path)
                     continue
-                if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]):
+                if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin','pytorch_lora_weights.bin'}]):
                     new_models_found.update(installer.heuristic_import(path))
                     scanned_dirs.add(path)
@@ -794,7 +794,8 @@ class ModelManager(object):
                 if path in known_paths or path.parent in scanned_dirs:
                     continue
                 if path.suffix in {'.ckpt','.bin','.pth','.safetensors','.pt'}:
-                    new_models_found.update(installer.heuristic_import(path))
+                    import_result = installer.heuristic_import(path)
+                    new_models_found.update(import_result)

         self.logger.info(f'Scanned {items_scanned} files and directories, imported {len(new_models_found)} models')
         installed.update(new_models_found)
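The change above only splits the heuristic_import call so its result is bound to a local before being merged; the surrounding loop selects candidate files by extension. A minimal standalone sketch of that extension filter; the helper name is illustrative, the suffix set is the one shown in the hunk:

    from pathlib import Path

    CHECKPOINT_SUFFIXES = {'.ckpt', '.bin', '.pth', '.safetensors', '.pt'}

    def find_checkpoint_files(root: Path):
        """Yield files under root whose extension marks them as possible model checkpoints."""
        for path in root.rglob('*'):
            if path.is_file() and path.suffix in CHECKPOINT_SUFFIXES:
                yield path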
@@ -78,7 +78,6 @@ class ModelProbe(object):
             format_type = 'diffusers' if model_path.is_dir() else 'checkpoint'
         else:
             format_type = 'diffusers' if isinstance(model,(ConfigMixin,ModelMixin)) else 'checkpoint'
-
         model_info = None
         try:
             model_type = cls.get_model_type_from_folder(model_path, model) \
@@ -105,7 +104,7 @@ class ModelProbe(object):
                 ) else 512,
             )
         except Exception:
-            return None
+            raise

         return model_info
@@ -127,6 +126,8 @@ class ModelProbe(object):
             return ModelType.Vae
         elif any(key.startswith(v) for v in {"lora_te_", "lora_unet_"}):
             return ModelType.Lora
+        elif any(key.endswith(v) for v in {"to_k_lora.up.weight", "to_q_lora.down.weight"}):
+            return ModelType.Lora
         elif any(key.startswith(v) for v in {"control_model", "input_blocks"}):
             return ModelType.ControlNet
         elif key in {"emb_params", "string_to_param"}:
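The added branch recognizes diffusers-style LoRA weights by key suffix, alongside the existing kohya-style lora_te_/lora_unet_ prefix check. A minimal sketch of this kind of state-dict key heuristic; the function name and loop structure are illustrative, the key patterns are the ones in the hunk:

    def guess_checkpoint_kind(state_dict: dict) -> str:
        """Roughly classify a checkpoint by inspecting its key names."""
        for key in state_dict:
            if key.startswith(('lora_te_', 'lora_unet_')):
                return 'lora'          # kohya-style LoRA
            if key.endswith(('to_k_lora.up.weight', 'to_q_lora.down.weight')):
                return 'lora'          # diffusers-style LoRA
            if key.startswith(('control_model', 'input_blocks')):
                return 'controlnet'
            if key in ('emb_params', 'string_to_param'):
                return 'textual_inversion'
        raise ValueError('Unable to determine model type')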
@@ -137,7 +138,7 @@ class ModelProbe(object):
         if len(ckpt) < 10 and all(isinstance(v, torch.Tensor) for v in ckpt.values()):
             return ModelType.TextualInversion

-        raise ValueError("Unable to determine model type")
+        raise ValueError(f"Unable to determine model type for {model_path}")

     @classmethod
     def get_model_type_from_folder(cls, folder_path: Path, model: ModelMixin)->ModelType:
@@ -167,7 +168,7 @@ class ModelProbe(object):
             return type

         # give up
-        raise ValueError("Unable to determine model type")
+        raise ValueError("Unable to determine model type for {folder_path}")

     @classmethod
     def _scan_and_load_checkpoint(cls,model_path: Path)->dict:
@@ -678,9 +678,8 @@ def select_and_download_models(opt: Namespace):

     # this is where the TUI is called
     else:
-        # needed because the torch library is loaded, even though we don't use it
-        # currently commented out because it has started generating errors (?)
-        # torch.multiprocessing.set_start_method("spawn")
+        # needed to support the probe() method running under a subprocess
+        torch.multiprocessing.set_start_method("spawn")

         # the third argument is needed in the Windows 11 environment in
         # order to launch and resize a console window running this program
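The re-enabled call above sets the multiprocessing start method so that the probe() step can run in a subprocess after torch has already been imported in the parent. A minimal sketch of the usual pattern, set once in the program entry point before any worker is created; the worker function and path are placeholders:

    import torch.multiprocessing as mp

    def probe_worker(path: str) -> None:
        # Placeholder for work that needs a fresh interpreter (e.g. probing a model).
        print(f"probing {path}")

    if __name__ == '__main__':
        # 'spawn' starts children with a clean interpreter instead of forking,
        # which avoids problems when the parent has already initialized torch/CUDA.
        mp.set_start_method('spawn')
        worker = mp.Process(target=probe_worker, args=('/path/to/model',))
        worker.start()
        worker.join()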
@@ -1,6 +1,5 @@
 import { log } from 'app/logging/useLogger';
 import { appSocketConnected, socketConnected } from 'services/events/actions';
-import { receivedPageOfImages } from 'services/api/thunks/image';
 import { receivedOpenAPISchema } from 'services/api/thunks/schema';
 import { startAppListening } from '../..';
@@ -14,19 +13,10 @@ export const addSocketConnectedEventListener = () => {

       moduleLog.debug({ timestamp }, 'Connected');

-      const { nodes, config, gallery } = getState();
+      const { nodes, config } = getState();

       const { disabledTabs } = config;

-      if (!gallery.ids.length) {
-        dispatch(
-          receivedPageOfImages({
-            categories: ['general'],
-            is_intermediate: false,
-          })
-        );
-      }
-
       if (!nodes.schema && !disabledTabs.includes('nodes')) {
         dispatch(receivedOpenAPISchema());
       }
@@ -6,10 +6,15 @@ import { validateSeedWeights } from 'common/util/seedWeightPairs';
 import { generationSelector } from 'features/parameters/store/generationSelectors';
 import { systemSelector } from 'features/system/store/systemSelectors';
 import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
+import {
+  modelsApi,
+  useGetMainModelsQuery,
+} from '../../services/api/endpoints/models';

 const readinessSelector = createSelector(
   [stateSelector, activeTabNameSelector],
-  ({ generation, system, batch }, activeTabName) => {
+  (state, activeTabName) => {
+    const { generation, system, batch } = state;
     const { shouldGenerateVariations, seedWeights, initialImage, seed } =
       generation;
@@ -32,6 +37,13 @@ const readinessSelector = createSelector(
       reasonsWhyNotReady.push('No initial image selected');
     }

+    const { isSuccess: mainModelsSuccessfullyLoaded } =
+      modelsApi.endpoints.getMainModels.select()(state);
+    if (!mainModelsSuccessfullyLoaded) {
+      isReady = false;
+      reasonsWhyNotReady.push('Models are not loaded');
+    }
+
     // TODO: job queue
     // Cannot generate if already processing an image
     if (isProcessing) {
@@ -182,6 +182,15 @@ const ImageGalleryContent = () => {
     return () => osInstance()?.destroy();
   }, [scroller, initialize, osInstance]);

+  useEffect(() => {
+    dispatch(
+      receivedPageOfImages({
+        categories: ['general'],
+        is_intermediate: false,
+      })
+    );
+  }, [dispatch]);
+
   const handleClickImagesCategory = useCallback(() => {
     dispatch(imageCategoriesChanged(IMAGE_CATEGORIES));
     dispatch(setGalleryView('images'));