Mirror of https://github.com/invoke-ai/InvokeAI
resolve more flake8 problems

commit 766cb887e4
parent ef317be1f9
@@ -2,7 +2,7 @@
 Init file for InvokeAI configure package
 """

-from .invokeai_config import (
+from .invokeai_config import ( # noqa F401
     InvokeAIAppConfig,
     get_invokeai_config,
 )
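For context, flake8's F401 check flags names that are imported but never used in the importing module; in a package __init__.py that exists only to re-export symbols, the warning is either suppressed inline, as this commit does, or avoided by declaring __all__. A minimal sketch of the re-export pattern (module and symbol names are taken from the hunk above; everything else is illustrative, not the project's actual file):

    """
    Init file for InvokeAI configure package
    """

    # Option taken by this commit: suppress "imported but unused" on the re-export.
    from .invokeai_config import (  # noqa F401
        InvokeAIAppConfig,
        get_invokeai_config,
    )

    # Alternative: declare the names as intentionally public for linters and star-imports.
    __all__ = ["InvokeAIAppConfig", "get_invokeai_config"]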

@@ -230,10 +230,10 @@ def int_or_float_or_str(value: str) -> Union[int, float, str]:
     """
     try:
         return int(value)
-    except:
+    except Exception as e: # noqa F841
         pass
     try:
         return float(value)
-    except:
+    except Exception as e: # noqa F841
         pass
     return str(value)
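The two replaced lines live inside a small type-coercion helper: flake8 rejects the original bare except: (E722), so the commit names the caught exception and adds # noqa F841 to silence the follow-on "local variable is assigned to but never used" warning. Read together with the context lines, the helper looks roughly like the sketch below after this change (the docstring body is not visible in the hunk and is paraphrased here):

    from typing import Union


    def int_or_float_or_str(value: str) -> Union[int, float, str]:
        """Coerce a string to an int, then a float, falling back to the raw string."""
        try:
            return int(value)
        except Exception as e:  # noqa F841
            pass
        try:
            return float(value)
        except Exception as e:  # noqa F841
            pass
        return str(value)

So int_or_float_or_str("8") returns the int 8, int_or_float_or_str("0.5") returns the float 0.5, and anything non-numeric such as "auto" comes back unchanged as a string.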

@@ -249,7 +249,7 @@ class InvokeAIAppConfig(InvokeAISettings):
     attention_type : Literal[tuple(["auto", "normal", "xformers", "sliced", "torch-sdp"])] = Field(default="auto", description="Attention type", category="Generation", )
     attention_slice_size: Literal[tuple(["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8])] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', category="Generation", )
     force_tiled_decode: bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category="Generation",)

     # DEPRECATED FIELDS - STILL HERE IN ORDER TO OBTAN VALUES FROM PRE-3.1 CONFIG FILES
     always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", category='Memory/Performance')
     free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", category='Memory/Performance')
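These context lines show how InvokeAIAppConfig declares its settings: pydantic-style Field()s with Literal[...] choice lists, defaults, and a custom category keyword. A minimal usage sketch, assuming the class behaves like an ordinary pydantic settings model; the field names and defaults come from the hunk above, while the import path is only a guess since the diff shows nothing but relative module names:

    # Hypothetical import path -- not shown anywhere in this diff.
    from invokeai.app.services.config import InvokeAIAppConfig

    config = InvokeAIAppConfig()        # fields fall back to their declared defaults
    print(config.attention_type)        # "auto"
    print(config.attention_slice_size)  # "auto"
    print(config.force_tiled_decode)    # False

    # Overrides are validated against the Literal[...] choices declared above.
    sliced = InvokeAIAppConfig(attention_type="sliced", attention_slice_size=4)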