Mirror of https://github.com/invoke-ai/InvokeAI
# Enable the ram cache slider in invokeai-configure (#4866)
## What type of PR is this? (check all applicable)

- [ ] Refactor
- [ ] Feature
- [X] Bug Fix
- [ ] Optimization
- [ ] Documentation Update
- [ ] Community Node Submission

## Have you discussed this change with the InvokeAI team?

- [X] Yes
- [ ] No, because:

## Have you updated all relevant documentation?

- [ ] Yes
- [ ] No

## Description

The `invokeai-configure` TUI's slider for the RAM cache was not picking up the current settings in `invokeai.yaml`, leading users to think their change hadn't taken effect. This is fixed in this PR.

## Related Tickets & Documents

First described here: https://discord.com/channels/1020123559063990373/1161919551441735711/1162058518417907743
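For context, the setting in question lives in `invokeai.yaml`. A minimal sketch of the relevant entry, assuming the category-based layout of that era (the `ram`/`vram` keys and defaults come from the diff below; the surrounding structure is illustrative):

```yaml
# Illustrative excerpt of invokeai.yaml (layout assumed, not taken verbatim
# from the repo); the keys map to the "Model Cache" category in the diff.
InvokeAI:
  Model Cache:
    ram: 12.0    # model cache size in GB -- the TUI slider should reflect this
    vram: 0.25   # VRAM reserved for model storage, in GB
```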
This commit is contained in: commit `3575cf3b3b`
```diff
@@ -241,8 +241,8 @@ class InvokeAIAppConfig(InvokeAISettings):
     version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
 
     # CACHE
-    ram : Union[float, Literal["auto"]] = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number or 'auto')", category="Model Cache", )
-    vram : Union[float, Literal["auto"]] = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number or 'auto')", category="Model Cache", )
+    ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", category="Model Cache", )
+    vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", category="Model Cache", )
     lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", category="Model Cache", )
 
     # DEVICE
```
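The hunk above narrows `ram` and `vram` from `Union[float, Literal["auto"]]` to plain `float`, so any stored value is something a numeric slider can represent. A minimal pydantic sketch of the idea (a plain `BaseModel` standing in for `InvokeAISettings`; InvokeAI's custom `category` kwarg is omitted):

```python
from pydantic import BaseModel, Field

class CacheSettings(BaseModel):
    # Plain floats: any stored value can seed a numeric slider widget,
    # which the string "auto" could not.
    ram: float = Field(default=7.5, gt=0, description="Model cache size (GB)")
    vram: float = Field(default=0.25, ge=0, description="Reserved VRAM (GB)")

settings = CacheSettings(ram=12.0)
slider_value = settings.ram  # 12.0 -- directly usable as the slider's value
```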
```diff
@@ -662,7 +662,7 @@ def default_ramcache() -> float:
 
 def default_startup_options(init_file: Path) -> Namespace:
     opts = InvokeAIAppConfig.get_config()
-    opts.ram = default_ramcache()
+    opts.ram = opts.ram or default_ramcache()
     return opts
 
 
```
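The one-line change in `default_startup_options()` is the actual fix: the old code unconditionally overwrote `opts.ram` with a computed default, discarding whatever the user had saved. A simplified, self-contained illustration (the class and values here are stand-ins, not InvokeAI's real objects):

```python
def default_ramcache() -> float:
    """Stand-in for the heuristic that sizes the cache from system RAM."""
    return 7.5

class Opts:
    ram = 12.0  # pretend the user saved ram: 12.0 in invokeai.yaml

opts = Opts()

# Before the fix: the saved value is clobbered, so the slider always
# showed the computed default rather than the user's setting.
ram_before = default_ramcache()              # 7.5

# After the fix: keep the saved value; fall back only when it is unset.
ram_after = opts.ram or default_ramcache()   # 12.0
```

Note that the `or` fallback relies on `ram` never being a legitimate `0.0`, which the field's `gt=0` constraint in the first hunk guarantees.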