mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
Merge branch 'main' into feat/clip_skip
commit bc5371eeee
@@ -430,13 +430,13 @@ to allow InvokeAI to download restricted styles & subjects from the "Concept Lib
             max_height=len(PRECISION_CHOICES) + 1,
             scroll_exit=True,
         )
-        self.max_loaded_models = self.add_widget_intelligent(
+        self.max_cache_size = self.add_widget_intelligent(
             IntTitleSlider,
-            name="Number of models to cache in CPU memory (each will use 2-4 GB!)",
-            value=old_opts.max_loaded_models,
-            out_of=10,
-            lowest=1,
-            begin_entry_at=4,
+            name="Size of the RAM cache used for fast model switching (GB)",
+            value=old_opts.max_cache_size,
+            out_of=20,
+            lowest=3,
+            begin_entry_at=6,
             scroll_exit=True,
         )
         self.nextrely += 1
@@ -539,7 +539,7 @@ https://huggingface.co/spaces/CompVis/stable-diffusion-license
             "outdir",
             "nsfw_checker",
             "free_gpu_mem",
-            "max_loaded_models",
+            "max_cache_size",
             "xformers_enabled",
             "always_use_cpu",
         ]:
@@ -555,9 +555,6 @@ https://huggingface.co/spaces/CompVis/stable-diffusion-license
         new_opts.license_acceptance = self.license_acceptance.value
         new_opts.precision = PRECISION_CHOICES[self.precision.value[0]]
-
-        # widget library workaround to make max_loaded_models an int rather than a float
-        new_opts.max_loaded_models = int(new_opts.max_loaded_models)

         return new_opts

@@ -193,7 +193,10 @@ class ModelInstall(object):
                 models_installed.update(self._install_path(path))

             # folders style or similar
-            elif path.is_dir() and any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]):
+            elif path.is_dir() and any([(path/x).exists() for x in \
+                                        {'config.json','model_index.json','learned_embeds.bin','pytorch_lora_weights.bin'}
+                                        ]
+                                       ):
                 models_installed.update(self._install_path(path))

             # recursive scan
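
The hunk above (and the matching test in ModelManager further down) widens the set of marker files that identify a directory as an importable model, adding pytorch_lora_weights.bin for diffusers-format LoRAs. As a standalone sketch of the test; the set name and function are invented here for illustration:

    from pathlib import Path

    # Marker files taken from the condition above; any one of them marks a
    # directory as a model folder.
    MODEL_FOLDER_MARKERS = {
        'config.json',                # single diffusers component
        'model_index.json',           # full diffusers pipeline
        'learned_embeds.bin',         # textual inversion embedding
        'pytorch_lora_weights.bin',   # diffusers-format LoRA (newly recognized)
    }

    def looks_like_model_folder(path: Path) -> bool:
        return path.is_dir() and any((path / name).exists() for name in MODEL_FOLDER_MARKERS)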
@@ -8,7 +8,7 @@ The cache returns context manager generators designed to load the
 model into the GPU within the context, and unload outside the
 context. Use like this:

-   cache = ModelCache(max_models_cached=6)
+   cache = ModelCache(max_cache_size=7.5)
    with cache.get_model('runwayml/stable-diffusion-1-5') as SD1,
         cache.get_model('stabilityai/stable-diffusion-2') as SD2:
        do_something_in_GPU(SD1,SD2)
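
For context on the API change above, here is a minimal sketch of a RAM cache that budgets by size in GB (the new max_cache_size) instead of by model count (the old max_models_cached), with least-recently-used eviction. The class, the loader callback, and the internals are assumptions for illustration, not InvokeAI's actual ModelCache implementation:

    # Sketch only: a GB-budgeted LRU model cache with the context-manager
    # shape shown in the docstring above. `loader` is a hypothetical callback
    # returning (model, size_gb).
    from collections import OrderedDict
    from contextlib import contextmanager
    from typing import Any, Callable, Tuple

    class SketchModelCache:
        def __init__(self, loader: Callable[[str], Tuple[Any, float]], max_cache_size: float = 6.0):
            self.loader = loader
            self.max_cache_size = max_cache_size    # RAM budget in GB
            self._lru: OrderedDict = OrderedDict()  # key -> (model, size_gb)
            self._used_gb = 0.0

        @contextmanager
        def get_model(self, key: str):
            model, _ = self._fetch(key)
            # The real cache would move the model to the execution (GPU)
            # device here and back to the storage (CPU) device on exit.
            try:
                yield model
            finally:
                pass

        def _fetch(self, key: str) -> Tuple[Any, float]:
            if key in self._lru:
                self._lru.move_to_end(key)          # refresh LRU position
                return self._lru[key]
            model, size_gb = self.loader(key)
            # Evict least-recently-used entries until the new model fits.
            while self._used_gb + size_gb > self.max_cache_size and self._lru:
                _, (_, old_gb) = self._lru.popitem(last=False)
                self._used_gb -= old_gb
            self._lru[key] = (model, size_gb)
            self._used_gb += size_gb
            return model, size_gb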
@@ -91,7 +91,7 @@ class ModelCache(object):
             logger: types.ModuleType = logger
         ):
             '''
-            :param max_models: Maximum number of models to cache in CPU RAM [4]
+            :param max_cache_size: Maximum size of the RAM cache [6.0 GB]
             :param execution_device: Torch device to load active model into [torch.device('cuda')]
             :param storage_device: Torch device to save inactive model in [torch.device('cpu')]
             :param precision: Precision for loaded models [torch.float16]
@@ -126,16 +126,6 @@ class ModelCache(object):
             key += f":{submodel_type}"
         return key

-    #def get_model(
-    #    self,
-    #    repo_id_or_path: Union[str, Path],
-    #    model_type: ModelType = ModelType.Diffusers,
-    #    subfolder: Path = None,
-    #    submodel: ModelType = None,
-    #    revision: str = None,
-    #    attach_model_part: Tuple[ModelType, str] = (None, None),
-    #    gpu_load: bool = True,
-    #) -> ModelLocker: # ?? what does it return
     def _get_model_info(
         self,
         model_path: str,
@@ -785,7 +785,7 @@ class ModelManager(object):
                 if path in known_paths or path.parent in scanned_dirs:
                     scanned_dirs.add(path)
                     continue
-                if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]):
+                if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin','pytorch_lora_weights.bin'}]):
                     new_models_found.update(installer.heuristic_import(path))
                     scanned_dirs.add(path)
@@ -794,7 +794,8 @@ class ModelManager(object):
                 if path in known_paths or path.parent in scanned_dirs:
                     continue
                 if path.suffix in {'.ckpt','.bin','.pth','.safetensors','.pt'}:
-                    new_models_found.update(installer.heuristic_import(path))
+                    import_result = installer.heuristic_import(path)
+                    new_models_found.update(import_result)

         self.logger.info(f'Scanned {items_scanned} files and directories, imported {len(new_models_found)} models')
         installed.update(new_models_found)
@@ -78,7 +78,6 @@ class ModelProbe(object):
             format_type = 'diffusers' if model_path.is_dir() else 'checkpoint'
         else:
             format_type = 'diffusers' if isinstance(model,(ConfigMixin,ModelMixin)) else 'checkpoint'
-
         model_info = None
         try:
             model_type = cls.get_model_type_from_folder(model_path, model) \
@@ -105,7 +104,7 @@ class ModelProbe(object):
                 ) else 512,
             )
         except Exception:
-            return None
+            raise

         return model_info
@@ -127,6 +126,8 @@ class ModelProbe(object):
             return ModelType.Vae
         elif any(key.startswith(v) for v in {"lora_te_", "lora_unet_"}):
             return ModelType.Lora
+        elif any(key.endswith(v) for v in {"to_k_lora.up.weight", "to_q_lora.down.weight"}):
+            return ModelType.Lora
         elif any(key.startswith(v) for v in {"control_model", "input_blocks"}):
             return ModelType.ControlNet
         elif key in {"emb_params", "string_to_param"}:
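
The added elif recognizes diffusers-style LoRA checkpoints by key suffix, alongside the existing kohya-style key-prefix test. A self-contained sketch of this kind of state-dict probing; the function name and the returned strings are invented for illustration and are not the ModelProbe API:

    import torch

    def guess_checkpoint_kind(ckpt: dict) -> str:
        for key in ckpt:
            if any(key.startswith(p) for p in ("lora_te_", "lora_unet_")):
                return "lora"              # kohya-style LoRA key prefixes
            if any(key.endswith(s) for s in ("to_k_lora.up.weight", "to_q_lora.down.weight")):
                return "lora"              # diffusers-style LoRA key suffixes
            if any(key.startswith(p) for p in ("control_model", "input_blocks")):
                return "controlnet"
            if key in ("emb_params", "string_to_param"):
                return "textual_inversion"
        # small checkpoints whose values are all tensors are likely embeddings
        if len(ckpt) < 10 and all(isinstance(v, torch.Tensor) for v in ckpt.values()):
            return "textual_inversion"
        raise ValueError("unable to determine checkpoint kind")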
@@ -137,7 +138,7 @@ class ModelProbe(object):
         if len(ckpt) < 10 and all(isinstance(v, torch.Tensor) for v in ckpt.values()):
             return ModelType.TextualInversion

-        raise ValueError("Unable to determine model type")
+        raise ValueError(f"Unable to determine model type for {model_path}")

     @classmethod
     def get_model_type_from_folder(cls, folder_path: Path, model: ModelMixin)->ModelType:
@@ -167,7 +168,7 @@ class ModelProbe(object):
             return type

         # give up
-        raise ValueError("Unable to determine model type")
+        raise ValueError(f"Unable to determine model type for {folder_path}")

     @classmethod
     def _scan_and_load_checkpoint(cls,model_path: Path)->dict:
@@ -678,9 +678,8 @@ def select_and_download_models(opt: Namespace):

     # this is where the TUI is called
     else:
-        # needed because the torch library is loaded, even though we don't use it
-        # currently commented out because it has started generating errors (?)
-        # torch.multiprocessing.set_start_method("spawn")
+        # needed to support the probe() method running under a subprocess
+        torch.multiprocessing.set_start_method("spawn")

         # the third argument is needed in the Windows 11 environment in
         # order to launch and resize a console window running this program
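
The restored call above sets the multiprocessing start method so that the probe() method can run under a subprocess. As a hedged illustration (the worker function and path are made up): "spawn" starts each worker in a fresh interpreter rather than fork()ing the parent, which avoids inheriting CUDA state that a forked child cannot safely reuse, and set_start_method raises RuntimeError if called a second time, so force=True is the defensive form:

    import torch.multiprocessing as mp   # drop-in wrapper around stdlib multiprocessing

    def probe_worker(path: str) -> None:
        print(f"probing {path} in a fresh interpreter")   # placeholder work

    if __name__ == "__main__":
        mp.set_start_method("spawn", force=True)
        p = mp.Process(target=probe_worker, args=("/some/model.safetensors",))
        p.start()
        p.join()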
@@ -1,6 +1,5 @@
 import { log } from 'app/logging/useLogger';
 import { appSocketConnected, socketConnected } from 'services/events/actions';
-import { receivedPageOfImages } from 'services/api/thunks/image';
 import { receivedOpenAPISchema } from 'services/api/thunks/schema';
 import { startAppListening } from '../..';
@@ -14,19 +13,10 @@ export const addSocketConnectedEventListener = () => {

       moduleLog.debug({ timestamp }, 'Connected');

-      const { nodes, config, gallery } = getState();
+      const { nodes, config } = getState();

       const { disabledTabs } = config;

-      if (!gallery.ids.length) {
-        dispatch(
-          receivedPageOfImages({
-            categories: ['general'],
-            is_intermediate: false,
-          })
-        );
-      }
-
       if (!nodes.schema && !disabledTabs.includes('nodes')) {
         dispatch(receivedOpenAPISchema());
       }
@@ -6,10 +6,15 @@ import { validateSeedWeights } from 'common/util/seedWeightPairs';
 import { generationSelector } from 'features/parameters/store/generationSelectors';
 import { systemSelector } from 'features/system/store/systemSelectors';
 import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
+import {
+  modelsApi,
+  useGetMainModelsQuery,
+} from '../../services/api/endpoints/models';

 const readinessSelector = createSelector(
   [stateSelector, activeTabNameSelector],
-  ({ generation, system, batch }, activeTabName) => {
+  (state, activeTabName) => {
+    const { generation, system, batch } = state;
     const { shouldGenerateVariations, seedWeights, initialImage, seed } =
       generation;

@@ -32,6 +37,13 @@ const readinessSelector = createSelector(
       reasonsWhyNotReady.push('No initial image selected');
     }

+    const { isSuccess: mainModelsSuccessfullyLoaded } =
+      modelsApi.endpoints.getMainModels.select()(state);
+    if (!mainModelsSuccessfullyLoaded) {
+      isReady = false;
+      reasonsWhyNotReady.push('Models are not loaded');
+    }
+
     // TODO: job queue
     // Cannot generate if already processing an image
     if (isProcessing) {
@@ -182,6 +182,15 @@ const ImageGalleryContent = () => {
     return () => osInstance()?.destroy();
   }, [scroller, initialize, osInstance]);

+  useEffect(() => {
+    dispatch(
+      receivedPageOfImages({
+        categories: ['general'],
+        is_intermediate: false,
+      })
+    );
+  }, [dispatch]);
+
   const handleClickImagesCategory = useCallback(() => {
     dispatch(imageCategoriesChanged(IMAGE_CATEGORIES));
     dispatch(setGalleryView('images'));