create image output directory if it doesn't exist
This commit is contained in:
parent
ef605de7af
commit
fab1ae8685
README.md | 23
@@ -86,6 +86,29 @@ If you don't like this change, just copy over the file
 ldm/modules/encoders/modules.py from the CompVis/stable-diffusion
 repository.
 
+In addition, I have found that the Kornia library needs to do a
+one-time download of its own. On a non-internet connected system, you
+may see an error message like this one when running dream.py for the
+first time
+
+~~~~
+Downloading: "https://github.com/DagnyT/hardnet/raw/master/pretrained/train_liberty_with_aug/checkpoint_liberty_with_aug.pth" to /u/lstein/.cache/torch/hub/checkpoints/checkpoint_liberty_with_aug.pth
+Traceback (most recent call last):
+  File "/u/lstein/.conda/envs/ldm/lib/python3.8/urllib/request.py", line 1350, in do_open
+    h.request(req.get_method(), req.selector, req.data, headers,
+  File "/u/lstein/.conda/envs/ldm/lib/python3.8/http/client.py", line 1255, in request
+...
+~~~~
+
+The fix is to log into an internet-connected machine and manually
+download the file into the required location. On my system, the incantation was:
+
+~~~~
+(ldm) ~/stable-diffusion$ mkdir -p /u/lstein/.cache/torch/hub/checkpoints/
+(ldm) ~/stable-diffusion$ wget https://github.com/DagnyT/hardnet/raw/master/pretrained/train_liberty_with_aug/checkpoint_liberty_with_aug.pth \
+      -O /u/lstein/.cache/torch/hub/checkpoints/checkpoint_liberty_with_aug.pth
+~~~~
+
 ## Minor fixes
 
 I added the requirement for torchmetrics to environment.yaml.
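Rather than spelling out the cache path by hand, the checkpoint can also be pre-fetched on the connected machine through torch's own hub cache and the cache directory copied over afterwards. A minimal sketch, assuming the default cache location shown in the log above (~/.cache/torch, overridable with the TORCH_HOME environment variable):

~~~~
import torch

# URL of the HardNet checkpoint that Kornia fetches on first use (taken from the log above)
url = ("https://github.com/DagnyT/hardnet/raw/master/pretrained/"
       "train_liberty_with_aug/checkpoint_liberty_with_aug.pth")

# downloads into $TORCH_HOME/hub/checkpoints/ (default ~/.cache/torch/hub/checkpoints/);
# does nothing if the file is already cached
torch.hub.load_state_dict_from_url(url, map_location="cpu")
~~~~

Copying the resulting ~/.cache/torch directory to the offline machine has the same effect as the wget incantation above.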
@@ -4,7 +4,7 @@ import readline
 import argparse
 import shlex
 import atexit
-from os import path
+import os
 
 def main():
     arg_parser = create_argv_parser()
@@ -41,6 +41,10 @@ def main():
                  weights=weights,
                  config=config)
 
+    # make sure the output directory exists
+    if not os.path.exists(opt.outdir):
+        os.makedirs(opt.outdir)
+
     # gets rid of annoying messages about random seed
     logging.getLogger("pytorch_lightning").setLevel(logging.ERROR)
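The added check-then-create guard works as written; for reference, os.makedirs has accepted an exist_ok flag since Python 3.2, which folds both steps into one call. A sketch, with a hypothetical path standing in for opt.outdir:

~~~~
import os

outdir = "outputs/samples"  # hypothetical stand-in for opt.outdir

# creates the directory (and any missing parents); does nothing if it already exists
os.makedirs(outdir, exist_ok=True)
~~~~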
@@ -48,7 +52,7 @@ def main():
     t2i.load_model()
     print("\n* Initialization done! Awaiting your command...")
 
-    log_path = path.join(opt.outdir,"dream_log.txt")
+    log_path = os.path.join(opt.outdir,"dream_log.txt")
     with open(log_path,'a') as log:
         cmd_parser = create_cmd_parser()
         main_loop(t2i,cmd_parser,log)
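This change, like the one in load_history below, follows from swapping `from os import path` for `import os` in the import block. For comparison only, pathlib is another way to spell the same join; a sketch with a hypothetical directory in place of opt.outdir:

~~~~
from pathlib import Path

outdir = Path("outputs/samples")     # hypothetical stand-in for opt.outdir
log_path = outdir / "dream_log.txt"  # equivalent of os.path.join(outdir, "dream_log.txt")

# Path objects are accepted anywhere a filename string is expected
with open(log_path, "a") as log:
    log.write("...\n")  # placeholder entry
~~~~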
@@ -132,7 +136,7 @@ def create_cmd_parser():
     return parser
 
 def load_history():
-    histfile = path.join(path.expanduser('~'),".dream_history")
+    histfile = os.path.join(os.path.expanduser('~'),".dream_history")
     try:
         readline.read_history_file(histfile)
         readline.set_history_length(1000)
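load_history pre-loads readline's prompt history from ~/.dream_history; the atexit import at the top of the file suggests a companion hook that writes the history back out when the script exits. A self-contained sketch of that pattern (the exception handling shown here is an assumption, not the script's own code):

~~~~
import atexit
import os
import readline

histfile = os.path.join(os.path.expanduser('~'), ".dream_history")

try:
    readline.read_history_file(histfile)   # restore prompts from previous sessions
    readline.set_history_length(1000)
except FileNotFoundError:
    pass  # first run: no history file yet

# persist the (possibly updated) history when the interpreter exits
atexit.register(readline.write_history_file, histfile)
~~~~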