Mirror of https://github.com/invoke-ai/InvokeAI
port more globals changes over

commit 27241cdde1 (parent 259d6ec90d)
@@ -204,7 +204,6 @@ def invoke_cli():
         sys.stdin = open(infile,"r")

     model_manager = ModelManagerService(config,logger)
-    set_autocompleter(model_manager)

     events = EventServiceBase()
     output_folder = config.output_path
@@ -231,9 +230,11 @@ def invoke_cli():
         logger=logger,
         configuration=config,
     )

     system_graphs = create_system_graphs(services.graph_library)
     system_graph_names = set([g.name for g in system_graphs])
+
+    set_autocompleter(services)

     invoker = Invoker(services)
     session: GraphExecutionState = invoker.create_execution_state()
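These two hunks move the readline completer setup off the bare model manager and onto the fully assembled services object, so completion is wired up only after every service exists. A minimal sketch of the resulting startup order; the InvocationServices container name and its full argument list are assumptions, since the diff shows only the keyword arguments and the new set_autocompleter call:

    # Sketch only: "InvocationServices" and its exact arguments are assumed,
    # not shown in the diff; the new set_autocompleter(services) call is.
    model_manager = ModelManagerService(config, logger)
    services = InvocationServices(
        model_manager=model_manager,
        events=EventServiceBase(),
        logger=logger,
        configuration=config,
    )
    set_autocompleter(services)   # moved here: the completer can see every service
    invoker = Invoker(services)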
@@ -14,7 +14,7 @@ from invokeai.backend.model_management.model_manager import (
     SDModelInfo,
 )
 from invokeai.app.models.exceptions import CanceledException
-from ...backend import Args, Globals # this must go when pr 3340 merged
+from .config import InvokeAIAppConfig
 from ...backend.util import choose_precision, choose_torch_device

 if TYPE_CHECKING:
@@ -33,7 +33,7 @@ class ModelManagerServiceBase(ABC):
     @abstractmethod
     def __init__(
         self,
-        config: Args,
+        config: InvokeAIAppConfig,
         logger: types.ModuleType,
     ):
         """
@@ -248,7 +248,7 @@ class ModelManagerService(ModelManagerServiceBase):
     """Responsible for managing models on disk and in memory"""
     def __init__(
         self,
-        config: Args,
+        config: InvokeAIAppConfig,
         logger: types.ModuleType,
    ):
         """
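Both the abstract base class and the concrete service now accept the new InvokeAIAppConfig object in place of the legacy Args namespace. A hedged sketch of a call site after this change; the absolute import path is inferred from the relative "from .config import InvokeAIAppConfig" above, and passing the stdlib logging module merely satisfies the types.ModuleType annotation:

    import logging  # any module object satisfies the types.ModuleType annotation

    from invokeai.app.services.config import InvokeAIAppConfig  # path assumed

    config = InvokeAIAppConfig()  # replaces the old Args() namespace
    model_manager = ModelManagerService(config, logging)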
@@ -257,10 +257,10 @@ class ModelManagerService(ModelManagerServiceBase):
         and sequential_offload boolean. Note that the default device
         type and precision are set up for a CUDA system running at half precision.
         """
-        if config.conf and Path(config.conf).exists():
-            config_file = config.conf
+        if config.model_conf_path and config.model_conf_path.exists():
+            config_file = config.model_conf_path
         else:
-            config_file = Path(Globals.root, "configs", "models.yaml")
+            config_file = config.root_dir / "configs/models.yaml"
         if not config_file.exists():
             raise IOError(f"The file {config_file} could not be found.")
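The lookup above replaces the Globals singleton with attributes read straight off the config object, and swaps Path(...) composition for pathlib's / operator, which requires config.root_dir to already be a Path. The same resolution logic, written as a standalone sketch:

    from pathlib import Path
    from typing import Optional

    def resolve_models_config(model_conf_path: Optional[Path], root_dir: Path) -> Path:
        # Prefer an explicit override; otherwise fall back to the
        # conventional <root>/configs/models.yaml location.
        if model_conf_path and model_conf_path.exists():
            return model_conf_path
        config_file = root_dir / "configs/models.yaml"
        if not config_file.exists():
            raise IOError(f"The file {config_file} could not be found.")
        return config_file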
@@ -224,8 +224,8 @@ class ModelManager(object):
             raise ValueError('config argument must be an OmegaConf object, a Path or a string')

         # check config version number and update on disk/RAM if necessary
-        self._update_config_file_version()
+        self.globals = get_invokeai_config()
+        self._update_config_file_version()
         self.logger = logger
         self.cache = ModelCache(
             max_cache_size=max_cache_size,
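The final hunk caches the application configuration on the ModelManager instance via get_invokeai_config() before the version check runs. The diff shows only the call site; a minimal sketch of the module-level singleton such an accessor typically implies, with all details assumed:

    # Assumed shape of the accessor -- the commit shows only the call site.
    _config = None

    def get_invokeai_config():
        global _config
        if _config is None:
            _config = InvokeAIAppConfig()  # constructed once, shared thereafter
        return _config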