adjusted handling of from_file

This commit is contained in:
Lincoln Stein 2022-08-28 14:20:34 -04:00
commit 8b9a520c5c
3 changed files with 58 additions and 40 deletions

View File

@ -185,7 +185,13 @@ innovative packaging for a squid's dinner -S137038382
Then pass this file's name to dream.py when you invoke it:
~~~~
(ldm) ~/stable-diffusion$ python3 scripts/dream.py --from_file="path/to/prompts.txt"
(ldm) ~/stable-diffusion$ python3 scripts/dream.py --from_file "path/to/prompts.txt"
~~~~
You may read a series of prompts from standard input by providing a filename of "-":
~~~~
(ldm) ~/stable-diffusion$ echo "a beautiful day" | python3 scripts/dream.py --from_file -
~~~~
## Shortcut for reusing seeds from the previous command
@ -298,6 +304,10 @@ repository and associated paper for details and limitations.
## Changes
* v1.12 (28 August 2022)
* Improved file handling, including ability to read prompts from standard input.
(kudos to [Yunsaki](https://github.com/yunsaki))
* v1.11 (26 August 2022)
* NEW FEATURE: Support upscaling and face enhancement using the GFPGAN module. (kudos to [Oceanswave](https://github.com/Oceanswave))
* You now can specify a seed of -1 to use the previous image's seed, -2 to use the seed for the image generated before that, etc.
@ -601,7 +611,7 @@ to send me an email if you use and like the script.
[Peter Kowalczyk](https://github.com/slix), [Henry Harrison](https://github.com/hwharrison),
[xraxra](https://github.com/xraxra), [bmaltais](https://github.com/bmaltais), [Sean McLellan](https://github.com/Oceanswave),
[nicolai256](https://github.com/nicolai256), [Benjamin Warner](https://github.com/warner-benjamin),
[tildebyte](https://github.com/tildebyte),
[tildebyte](https://github.com/tildebyte), [yunsaki](https://github.com/yunsaki)
and [Tesseract Cat](https://github.com/TesseractCat)

View File

@ -548,13 +548,15 @@ class T2I:
)
else:
print(
'Using half precision math. Call with --full_precision to use slower but more accurate full precision.'
'Using half precision math. Call with --full_precision to use more accurate but VRAM-intensive full precision.'
)
model.half()
return model
def _load_img(self, path):
image = Image.open(path).convert('RGB')
with Image.open(path) as img:
image = img.convert("RGB")
w, h = image.size
print(f'loaded input image of size ({w}, {h}) from {path}')
w, h = map(

View File

@ -64,13 +64,19 @@ def main():
# gets rid of annoying messages about random seed
logging.getLogger('pytorch_lightning').setLevel(logging.ERROR)
# load the infile as a list of lines
infile = None
try:
if opt.infile is not None:
infile = open(opt.infile, 'r')
except FileNotFoundError as e:
print(e)
exit(-1)
if opt.infile:
try:
if os.path.isfile(opt.infile):
infile = open(opt.infile,'r')
elif opt.infile=='-': # stdin
infile = sys.stdin
else:
raise FileNotFoundError(f'{opt.infile} not found.')
except (FileNotFoundError,IOError) as e:
print(f'{e}. Aborting.')
sys.exit(-1)
# preload the model
t2i.load_model()
@ -110,34 +116,31 @@ def main():
print('Error loading GFPGAN:', file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
print(
"\n* Initialization done! Awaiting your command (-h for help, 'q' to quit, 'cd' to change output dir, 'pwd' to print output dir)..."
)
if not infile:
print(
"\n* Initialization done! Awaiting your command (-h for help, 'q' to quit, 'cd' to change output dir, 'pwd' to print output dir)..."
)
log_path = os.path.join(opt.outdir, 'dream_log.txt')
with open(log_path, 'a') as log:
cmd_parser = create_cmd_parser()
main_loop(t2i, opt.outdir, cmd_parser, log, infile)
log.close()
if infile:
infile.close()
cmd_parser = create_cmd_parser()
main_loop(t2i, opt.outdir, cmd_parser, log_path, infile)
def main_loop(t2i, outdir, parser, log, infile):
def main_loop(t2i, outdir, parser, log_path, infile):
"""prompt/read/execute loop"""
done = False
last_seeds = []
while not done:
try:
command = infile.readline() if infile else input('dream> ')
command = get_next_command(infile)
except EOFError:
done = True
break
if infile and len(command) == 0:
done = True
break
# skip empty lines
if not command.strip():
continue
if command.startswith(('#', '//')):
continue
@ -152,9 +155,6 @@ def main_loop(t2i, outdir, parser, log, infile):
print(str(e))
continue
if len(elements) == 0:
continue
if elements[0] == 'q':
done = True
break
@ -239,11 +239,22 @@ def main_loop(t2i, outdir, parser, log, infile):
continue
print('Outputs:')
write_log_message(t2i, normalized_prompt, results, log)
write_log_message(t2i, normalized_prompt, results, log_path)
print('goodbye!')
def get_next_command(infile=None) -> str:
    """Return the next command line to execute.

    If ``infile`` is None, prompt the user interactively with ``dream> ``.
    Otherwise read one line from ``infile`` (a file object or stdin).

    Args:
        infile: optional readable file object to take commands from.

    Returns:
        The command string (stripped of surrounding whitespace when read
        from a file).

    Raises:
        EOFError: when ``infile`` is exhausted (readline returns '').
    """
    if infile is None:
        return input('dream> ')
    command = infile.readline()
    if not command:
        # empty string from readline() means end of file
        raise EOFError
    command = command.strip()
    # echo the command so batch runs leave a visible trace in the transcript
    print(f'#{command}')
    return command
def load_gfpgan_bg_upsampler(bg_upsampler, bg_tile=400):
import torch
@ -309,19 +320,14 @@ def load_gfpgan_bg_upsampler(bg_upsampler, bg_tile=400):
# return variants
def write_log_message(t2i, prompt, results, logfile):
### the t2i variable doesn't seem to be necessary here. maybe remove it?
def write_log_message(t2i, prompt, results, log_path):
"""logs the name of the output image, its prompt and seed to the terminal, log file, and a Dream text chunk in the PNG metadata"""
last_seed = None
img_num = 1
seenit = {}
log_lines = [f"{r[0]}: {prompt} -S{r[1]}\n" for r in results]
print(*log_lines, sep="")
for r in results:
seed = r[1]
log_message = f'{r[0]}: {prompt} -S{seed}'
print(log_message)
logfile.write(log_message + '\n')
logfile.flush()
with open(log_path, "a") as file:
file.writelines(log_lines)
def create_argv_parser():