mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)

Commit 05c9207e7b: Merge branch 'feat/execution-stats' of github.com:invoke-ai/InvokeAI into feat/execution-stats

Files changed: README.md (13)
@@ -184,8 +184,9 @@ the command `npm install -g yarn` if needed)
 6. Configure InvokeAI and install a starting set of image generation models (you only need to do this once):

    ```terminal
-   invokeai-configure
+   invokeai-configure --root .
    ```
+   Don't miss the dot at the end!

 7. Launch the web server (do it every time you run InvokeAI):
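The `--root .` argument pins the InvokeAI runtime root to the directory the command is run from. A minimal sketch of what that trailing dot resolves to (plain `pathlib`, not InvokeAI code; the printed path is only an example):

```python
from pathlib import Path

# "." is expanded against the current working directory; once made absolute,
# later directory changes no longer affect the value.
root = Path(".").expanduser().absolute()
print(root)  # e.g. /home/user/invokeai
```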
@@ -193,15 +194,9 @@ the command `npm install -g yarn` if needed)
    invokeai-web
    ```

-8. Build Node.js assets
+8. Point your browser to http://localhost:9090 to bring up the web interface.
-
-   ```terminal
-   cd invokeai/frontend/web/
-   yarn vite build
-   ```
-
-9. Point your browser to http://localhost:9090 to bring up the web interface.
-
-10. Type `banana sushi` in the box on the top left and click `Invoke`.
+
+9. Type `banana sushi` in the box on the top left and click `Invoke`.

 Be sure to activate the virtual environment each time before re-launching InvokeAI,
 using `source .venv/bin/activate` or `.venv\Scripts\activate`.
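Step 8 assumes the server started in step 7 is already listening. A quick reachability check before opening the browser (standard library only, not part of InvokeAI; 9090 is the default port named above):

```python
import urllib.request

# Expect HTTP 200 once `invokeai-web` is up; a URLError means nothing is listening yet.
with urllib.request.urlopen("http://localhost:9090", timeout=5) as response:
    print(response.status)
```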
@@ -192,8 +192,10 @@ manager, please follow these steps:
    your outputs.

    ```terminal
-   invokeai-configure
+   invokeai-configure --root .
    ```

+   Don't miss the dot at the end of the command!
+
    The script `invokeai-configure` will interactively guide you through the
    process of downloading and installing the weights files needed for InvokeAI.
@@ -225,12 +227,6 @@ manager, please follow these steps:

    !!! warning "Make sure that the virtual environment is activated, which should create `(.venv)` in front of your prompt!"

-   === "CLI"
-
-       ```bash
-       invokeai
-       ```
-
    === "local Webserver"

        ```bash
@@ -243,6 +239,12 @@ manager, please follow these steps:
        invokeai --web --host 0.0.0.0
        ```

+   === "CLI"
+
+       ```bash
+       invokeai
+       ```
+
    If you choose to run the web interface, point your browser at
    http://localhost:9090 in order to load the GUI.
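The `--host 0.0.0.0` variant shown above binds the web server to every network interface rather than just the loopback address. A minimal standard-library illustration of that difference (not InvokeAI code; the port is reused only for familiarity):

```python
from http.server import HTTPServer, SimpleHTTPRequestHandler

# "127.0.0.1" accepts connections from this machine only; "0.0.0.0" also accepts
# connections from other hosts on the network.
server = HTTPServer(("0.0.0.0", 9090), SimpleHTTPRequestHandler)
server.serve_forever()
```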
@@ -274,7 +274,7 @@ class InvokeAISettings(BaseSettings):
     @classmethod
     def _excluded(self) -> List[str]:
         # internal fields that shouldn't be exposed as command line options
-        return ["type", "initconf", "cached_root"]
+        return ["type", "initconf"]

     @classmethod
     def _excluded_from_yaml(self) -> List[str]:
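With `cached_root` removed, only `type` and `initconf` stay internal. The exclusion list exists so internal fields never surface as command-line flags; a rough sketch of that filtering (the field names below other than the excluded two are hypothetical, and the real mapping is generated from the pydantic model):

```python
import argparse

fields = {"max_cache_size": float, "log_level": str}  # hypothetical subset of settings
internal = ["type", "initconf"]  # mirrors _excluded() after this change

parser = argparse.ArgumentParser()
for name, ftype in fields.items():
    if name not in internal:  # internal fields never become CLI options
        parser.add_argument(f"--{name}", type=ftype)

print(parser.parse_args(["--max_cache_size", "20"]))
```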
@@ -290,7 +290,6 @@ class InvokeAISettings(BaseSettings):
             "restore",
             "root",
             "nsfw_checker",
-            "cached_root",
         ]

     class Config:
@@ -356,7 +355,7 @@ class InvokeAISettings(BaseSettings):
 def _find_root() -> Path:
     venv = Path(os.environ.get("VIRTUAL_ENV") or ".")
     if os.environ.get("INVOKEAI_ROOT"):
-        root = Path(os.environ.get("INVOKEAI_ROOT")).resolve()
+        root = Path(os.environ["INVOKEAI_ROOT"])
     elif any([(venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]]):
         root = (venv.parent).resolve()
     else:
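The only change here is that `INVOKEAI_ROOT` is now taken as-is instead of being passed through `resolve()`. The lookup order itself is unchanged; a simplified sketch of that order (the init-file names and the final fallback directory are assumptions, not taken from this diff):

```python
import os
from pathlib import Path

INIT_FILE = "invokeai.yaml"         # assumed; the real constants live in config.py
LEGACY_INIT_FILE = "invokeai.init"  # assumed

def find_root_sketch() -> Path:
    venv = Path(os.environ.get("VIRTUAL_ENV") or ".")
    if os.environ.get("INVOKEAI_ROOT"):
        return Path(os.environ["INVOKEAI_ROOT"])  # taken verbatim, no resolve()
    if any((venv.parent / f).exists() for f in (INIT_FILE, LEGACY_INIT_FILE)):
        return venv.parent.resolve()  # an existing install next to the virtualenv
    return Path("~/invokeai").expanduser()  # assumed default location
```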
@@ -403,7 +402,7 @@ class InvokeAIAppConfig(InvokeAISettings):
     xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance')
     tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category='Memory/Performance')

-    root : Path = Field(default=_find_root(), description='InvokeAI runtime root directory', category='Paths')
+    root : Path = Field(default=None, description='InvokeAI runtime root directory', category='Paths')
     autoimport_dir : Path = Field(default='autoimport', description='Path to a directory of models files to be imported on startup.', category='Paths')
     lora_dir : Path = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', category='Paths')
     embedding_dir : Path = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', category='Paths')
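`default=_find_root()` was evaluated once, while the class body executed at import time, so the field froze whatever the working directory happened to be at import. `default=None` defers the decision to the `root_path` property further down. A toy illustration of the difference (class and attribute names are made up):

```python
from pathlib import Path

class EagerDefault:
    # Evaluated at class-definition time: pinned to the import-time cwd.
    root = Path(".").absolute()

class LazyDefault:
    root = None  # left unset until someone actually asks for the path

    def root_path(self) -> Path:
        return Path(self.root).absolute() if self.root else Path(".").absolute()
```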
@@ -424,7 +423,6 @@ class InvokeAIAppConfig(InvokeAISettings):
     log_level : Literal[tuple(["debug","info","warning","error","critical"])] = Field(default="info", description="Emit logging messages at this level or higher", category="Logging")

     version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
-    cached_root : Path = Field(default=None, description="internal use only", category="DEPRECATED")
     # fmt: on

     def parse_args(self, argv: List[str] = None, conf: DictConfig = None, clobber=False):
@@ -472,15 +470,12 @@ class InvokeAIAppConfig(InvokeAISettings):
         """
         Path to the runtime root directory
         """
-        # we cache value of root to protect against it being '.' and the cwd changing
-        if self.cached_root:
-            root = self.cached_root
-        elif self.root:
+        if self.root:
             root = Path(self.root).expanduser().absolute()
         else:
-            root = self.find_root()
-        self.cached_root = root
-        return self.cached_root
+            root = self.find_root().expanduser().absolute()
+            self.root = root  # insulate ourselves from relative paths that may change
+        return root

     @property
     def root_dir(self) -> Path:
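The rewritten property keeps the cwd-insulation that `cached_root` used to provide, but stores the resolved value back into `root` itself: the first access pins an absolute path, and later `os.chdir()` calls cannot move it. A stripped-down sketch of that behaviour (plain class, no pydantic):

```python
from pathlib import Path

class RootPathSketch:
    def __init__(self, root=None):
        self.root = root

    def find_root(self) -> Path:
        return Path(".")  # stand-in for the real lookup in _find_root()

    @property
    def root_path(self) -> Path:
        if self.root:
            return Path(self.root).expanduser().absolute()
        root = self.find_root().expanduser().absolute()
        self.root = root  # remembered, so a later chdir cannot change the result
        return root
```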
@@ -13,7 +13,8 @@ import { socketSubscribed, socketUnsubscribed } from './actions';
 export const socketMiddleware = () => {
   let areListenersSet = false;

-  let socketUrl = `ws://${window.location.host}`;
+  const wsProtocol = window.location.protocol === 'https:' ? 'wss' : 'ws';
+  let socketUrl = `${wsProtocol}://${window.location.host}`;

   const socketOptions: Parameters<typeof io>[0] = {
     timeout: 60000,
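This frontend change picks the websocket scheme that matches the page's own scheme, so a UI served over HTTPS connects via `wss://` instead of a mixed-content `ws://` URL. The same decision expressed as a small Python helper for illustration (the real code is the TypeScript above):

```python
def socket_url(page_protocol: str, host: str) -> str:
    # Mirror of the ternary above: https pages get wss, everything else gets ws.
    ws_protocol = "wss" if page_protocol == "https:" else "ws"
    return f"{ws_protocol}://{host}"

assert socket_url("https:", "example.com") == "wss://example.com"
assert socket_url("http:", "localhost:9090") == "ws://localhost:9090"
```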
@@ -84,6 +84,21 @@ def test_env_override():
     assert conf.max_cache_size == 20


+def test_root_resists_cwd():
+    previous = os.environ["INVOKEAI_ROOT"]
+    cwd = Path(os.getcwd()).resolve()
+
+    os.environ["INVOKEAI_ROOT"] = "."
+    conf = InvokeAIAppConfig.get_config()
+    conf.parse_args([])
+    assert conf.root_path == cwd
+
+    os.chdir("..")
+    assert conf.root_path == cwd
+    os.environ["INVOKEAI_ROOT"] = previous
+    os.chdir(cwd)
+
+
 def test_type_coercion():
     conf = InvokeAIAppConfig().get_config()
     conf.parse_args(argv=["--root=/tmp/foobar"])
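The new test pins `INVOKEAI_ROOT` to `.`, resolves the config, changes directory, and checks that `root_path` still points at the original directory, restoring the env var and cwd by hand afterwards. For comparison, a sketch of the same check written with pytest's `monkeypatch` fixture, which undoes both automatically (it assumes the imports already present in this test module, including `InvokeAIAppConfig`):

```python
import os
from pathlib import Path

def test_root_resists_cwd_monkeypatched(monkeypatch, tmp_path):
    # InvokeAIAppConfig is assumed to be imported as elsewhere in this file.
    monkeypatch.setenv("INVOKEAI_ROOT", ".")
    monkeypatch.chdir(tmp_path)
    start = Path(os.getcwd()).resolve()

    conf = InvokeAIAppConfig.get_config()
    conf.parse_args([])
    assert conf.root_path == start

    monkeypatch.chdir(tmp_path.parent)
    assert conf.root_path == start  # still the directory captured above
```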