Merge branch 'development' into patch-1

commit a406b588b4
Lincoln Stein, 2022-09-03 10:43:59 -04:00 (committed by GitHub)
5 changed files with 75 additions and 44 deletions

View File

@@ -46,8 +46,11 @@ improvements and bug fixes.
# Table of Contents
1. [Major Features](#features)
2. [Changelog](#latest)
2. [Changelog](#latest-changes)
3. [Installation](#installation)
1. [Linux](#linux)
1. [Windows](#windows)
1. [MacOS](README-Mac-MPS.md)
4. [Troubleshooting](#troubleshooting)
5. [Support](#support)

View File

@@ -48,12 +48,12 @@ variation.
dream> "prompt" -n6 -S3357757885 -v0.2
...
Outputs:
./outputs/Xena/000002.784039624.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 784039624,0.2 -S3357757885
./outputs/Xena/000002.3647897225.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.2 -S3357757885
./outputs/Xena/000002.917731034.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 917731034,0.2 -S3357757885
./outputs/Xena/000002.4116285959.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 4116285959,0.2 -S3357757885
./outputs/Xena/000002.1614299449.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 1614299449,0.2 -S3357757885
./outputs/Xena/000002.1335553075.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 1335553075,0.2 -S3357757885
./outputs/Xena/000002.784039624.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 784039624:0.2 -S3357757885
./outputs/Xena/000002.3647897225.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.2 -S3357757885
./outputs/Xena/000002.917731034.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 917731034:0.2 -S3357757885
./outputs/Xena/000002.4116285959.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 4116285959:0.2 -S3357757885
./outputs/Xena/000002.1614299449.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 1614299449:0.2 -S3357757885
./outputs/Xena/000002.1335553075.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 1335553075:0.2 -S3357757885
~~~
Note that the output for each image has a -V option giving the
@@ -78,7 +78,7 @@ this to work.
~~~
dream> "prompt" -S3357757885 -V3647897225,0.1;1614299449,0.1
Outputs:
./outputs/Xena/000003.1614299449.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.1;1614299449,0.1 -S3357757885
./outputs/Xena/000003.1614299449.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1 -S3357757885
~~~
Here we are providing equal weights (0.1 and 0.1) for both the
@@ -95,12 +95,12 @@ strength) options. Note that we use -n6 to generate 6 variations:
~~~~
dream> "prompt" -S3357757885 -V3647897225,0.1;1614299449,0.1 -v0.05 -n6
Outputs:
./outputs/Xena/000004.3279757577.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.1;1614299449,0.1;3279757577,0.05 -S3357757885
./outputs/Xena/000004.2853129515.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.1;1614299449,0.1;2853129515,0.05 -S3357757885
./outputs/Xena/000004.3747154981.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.1;1614299449,0.1;3747154981,0.05 -S3357757885
./outputs/Xena/000004.2664260391.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.1;1614299449,0.1;2664260391,0.05 -S3357757885
./outputs/Xena/000004.1642517170.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.1;1614299449,0.1;1642517170,0.05 -S3357757885
./outputs/Xena/000004.2183375608.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.1;1614299449,0.1;2183375608,0.05 -S3357757885
./outputs/Xena/000004.3279757577.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,3279757577:0.05 -S3357757885
./outputs/Xena/000004.2853129515.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,2853129515:0.05 -S3357757885
./outputs/Xena/000004.3747154981.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,3747154981:0.05 -S3357757885
./outputs/Xena/000004.2664260391.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,2664260391:0.05 -S3357757885
./outputs/Xena/000004.1642517170.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,1642517170:0.05 -S3357757885
./outputs/Xena/000004.2183375608.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,2183375608:0.05 -S3357757885
~~~~
This produces six images, all slight variations on the combination of
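The examples above use the new `-V` syntax introduced by this change: variation pairs are written as `seed:weight` and separated by commas (`seed:weight,seed:weight,...`) rather than the old `seed,weight;seed,weight` form. As a rough, illustrative sketch of how such a string splits into `(seed, weight)` pairs (not the project's actual parser, which appears further down in this diff in the command-line script), one might write:

~~~
# Hypothetical helper, for illustration only.
def parse_variations(spec: str):
    pairs = []
    for part in spec.split(','):
        seed, weight = part.split(':')
        pairs.append((int(seed), float(weight)))
    return pairs

print(parse_variations('3647897225:0.1,1614299449:0.1'))
# -> [(3647897225, 0.1), (1614299449, 0.1)]
~~~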

View File

@@ -70,10 +70,10 @@ class PromptFormatter:
if opt.upscale:
switches.append(f'-U {" ".join([str(u) for u in opt.upscale])}')
if opt.variation_amount > 0:
switches.append(f'-v {opt.variation_amount}')
switches.append(f'-v{opt.variation_amount}')
if opt.with_variations:
formatted_variations = ';'.join(f'{seed},{weight}' for seed, weight in opt.with_variations)
switches.append(f'-V {formatted_variations}')
formatted_variations = ','.join(f'{seed}:{weight}' for seed, weight in opt.with_variations)
switches.append(f'-V{formatted_variations}')
if t2i.full_precision:
switches.append('-F')
return ' '.join(switches)
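To see what the reworked `with_variations` branch above actually emits, the join expression can be run on its own with the seeds and weights from the documentation earlier in this diff; everything outside the two lines taken from the code above is illustrative scaffolding:

~~~
with_variations = [(3647897225, 0.1), (1614299449, 0.1)]
formatted_variations = ','.join(f'{seed}:{weight}' for seed, weight in with_variations)
switch = f'-V{formatted_variations}'
print(switch)  # -V3647897225:0.1,1614299449:0.1
~~~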

View File

@@ -151,7 +151,7 @@ class T2I:
self.grid = grid
self.ddim_eta = ddim_eta
self.precision = precision
self.full_precision = full_precision
self.full_precision = True if choose_torch_device() == 'mps' else full_precision
self.strength = strength
self.embedding_path = embedding_path
self.device_type = device_type
@@ -286,7 +286,7 @@ class T2I:
0.0 <= variation_amount <= 1.0
), '-v --variation_amount must be in [0.0, 1.0]'
if len(with_variations) > 0:
if len(with_variations) > 0 or variation_amount > 1.0:
assert seed is not None,\
'seed must be specified when using with_variations'
if variation_amount == 0.0:
@@ -324,6 +324,7 @@ class T2I:
self.model.encode_first_stage(init_image)
) # move to latent space
print(f' DEBUG: seed at make_image time ={seed}')
make_image = self._img2img(
prompt,
steps=steps,
@@ -346,25 +347,15 @@ class T2I:
callback=step_callback,
)
def get_noise():
if init_img:
return torch.randn_like(init_latent, device=self.device)
else:
return torch.randn([1,
self.latent_channels,
height // self.downsampling_factor,
width // self.downsampling_factor],
device=self.device)
initial_noise = None
if variation_amount > 0 or len(with_variations) > 0:
# use fixed initial noise plus random noise per iteration
seed_everything(seed)
initial_noise = get_noise()
initial_noise = self._get_noise(init_img,width,height)
for v_seed, v_weight in with_variations:
seed = v_seed
seed_everything(seed)
next_noise = get_noise()
next_noise = self._get_noise(init_img,width,height)
initial_noise = self.slerp(v_weight, initial_noise, next_noise)
if variation_amount > 0:
random.seed() # reset RNG to an actually random state, so we can get a random seed for variations
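The blending above is done by `self.slerp`, which is not part of this diff. For readers unfamiliar with the technique, a generic spherical linear interpolation between two noise tensors typically looks something like the sketch below; this illustrates the general idea and is not the repository's actual implementation:

~~~
import torch

def slerp(t, v0, v1, dot_threshold=0.9995):
    # Spherically interpolate a fraction t of the way from v0 to v1.
    v0_flat, v1_flat = v0.flatten(), v1.flatten()
    dot = torch.dot(v0_flat, v1_flat) / (v0_flat.norm() * v1_flat.norm())
    if dot.abs() > dot_threshold:
        # Nearly parallel tensors: plain linear interpolation is numerically safer.
        return (1 - t) * v0 + t * v1
    theta_0 = torch.acos(dot)   # angle between the two tensors
    theta_t = theta_0 * t       # angle swept at fraction t
    s0 = torch.sin(theta_0 - theta_t) / torch.sin(theta_0)
    s1 = torch.sin(theta_t) / torch.sin(theta_0)
    return s0 * v0 + s1 * v1
~~~

Each `(v_seed, v_weight)` pair in the loop above folds that seed's noise into the running `initial_noise`, with the weight as the interpolation fraction.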
@@ -376,14 +367,17 @@ class T2I:
x_T = None
if variation_amount > 0:
seed_everything(seed)
target_noise = get_noise()
target_noise = self._get_noise(init_img,width,height)
x_T = self.slerp(variation_amount, initial_noise, target_noise)
elif initial_noise is not None:
# i.e. we specified particular variations
x_T = initial_noise
else:
seed_everything(seed)
if self.device.type == 'mps':
x_T = self._get_noise(init_img,width,height)
# make_image will do the equivalent of get_noise itself
print(f' DEBUG: seed at make_image() invocation time ={seed}')
image = make_image(x_T)
results.append([image, seed])
if image_callback is not None:
@@ -611,6 +605,27 @@ class T2I:
return self.model
# returns a tensor filled with random numbers from a normal distribution
def _get_noise(self,init_img,width,height):
if init_img:
if self.device.type == 'mps':
return torch.randn_like(init_latent, device='cpu').to(self.device)
else:
return torch.randn_like(init_latent, device=self.device)
else:
if self.device.type == 'mps':
return torch.randn([1,
self.latent_channels,
height // self.downsampling_factor,
width // self.downsampling_factor],
device='cpu').to(self.device)
else:
return torch.randn([1,
self.latent_channels,
height // self.downsampling_factor,
width // self.downsampling_factor],
device=self.device)
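The `_get_noise` helper above differs between devices only in where the random tensor is drawn: on MPS the noise is generated on the CPU and then moved to the device, presumably because seeded `torch.randn` directly on the MPS backend was not reliable at the time (that rationale is inferred, not stated in this diff). The pattern in isolation, with a placeholder latent shape:

~~~
import torch

# Illustrative sketch of the CPU-then-MPS noise pattern; the [1, 4, 64, 64]
# shape is a placeholder, the real code derives it from width, height,
# latent_channels and the downsampling factor.
device = 'mps' if torch.backends.mps.is_available() else 'cpu'
torch.manual_seed(3357757885)  # stand-in for the seed_everything() calls above
noise = torch.randn([1, 4, 64, 64], device='cpu').to(device)
~~~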
def _set_sampler(self):
msg = f'>> Setting Sampler to {self.sampler_name}'
if self.sampler_name == 'plms':

View File

@@ -9,7 +9,6 @@ import sys
import copy
import warnings
import time
from ldm.dream.devices import choose_torch_device
import ldm.dream.readline
from ldm.dream.pngwriter import PngWriter, PromptFormatter
from ldm.dream.server import DreamServer, ThreadingDreamServer
@@ -99,7 +98,7 @@ def main():
cmd_parser = create_cmd_parser()
if opt.web:
dream_server_loop(t2i)
dream_server_loop(t2i, opt.host, opt.port)
else:
main_loop(t2i, opt.outdir, opt.prompt_as_dir, cmd_parser, infile)
@@ -187,8 +186,8 @@ def main_loop(t2i, outdir, prompt_as_dir, parser, infile):
# shotgun parsing, woo
parts = []
broken = False # python doesn't have labeled loops...
for part in opt.with_variations.split(';'):
seed_and_weight = part.split(',')
for part in opt.with_variations.split(','):
seed_and_weight = part.split(':')
if len(seed_and_weight) != 2:
print(f'could not parse with_variation part "{part}"')
broken = True
@@ -310,7 +309,7 @@ def get_next_command(infile=None) -> str: #command string
print(f'#{command}')
return command
def dream_server_loop(t2i):
def dream_server_loop(t2i, host, port):
print('\n* --web was specified, starting web server...')
# Change working directory to the stable-diffusion directory
os.chdir(
@@ -319,9 +318,13 @@ def dream_server_loop(t2i):
# Start server
DreamServer.model = t2i
dream_server = ThreadingDreamServer(("0.0.0.0", 9090))
print("\nStarted Stable Diffusion dream server!")
print("Point your browser at http://localhost:9090 or use the host's DNS name or IP address.")
dream_server = ThreadingDreamServer((host, port))
print(">> Started Stable Diffusion dream server!")
if host == '0.0.0.0':
print(f"Point your browser at http://localhost:{port} or use the host's DNS name or IP address.")
else:
print(">> Default host address now 127.0.0.1 (localhost). Use --host 0.0.0.0 to bind any address.")
print(f">> Point your browser at http://{host}:{port}.")
try:
dream_server.serve_forever()
@@ -387,9 +390,7 @@ def create_argv_parser():
'--full_precision',
dest='full_precision',
action='store_true',
help='Use slower full precision math for calculations',
# MPS only functions with full precision, see https://github.com/lstein/stable-diffusion/issues/237
default=choose_torch_device() == 'mps',
help='Use more memory-intensive full precision math for calculations',
)
parser.add_argument(
'-g',
@@ -457,6 +458,18 @@ def create_argv_parser():
action='store_true',
help='Start in web server mode.',
)
parser.add_argument(
'--host',
type=str,
default='127.0.0.1',
help='Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network.'
)
parser.add_argument(
'--port',
type=int,
default='9090',
help='Web server: Port to listen on'
)
parser.add_argument(
'--weights',
default='model',
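One subtlety in the new `--port` option above: its default is the string `'9090'` even though `type=int` is given. argparse applies the `type` callable to string defaults as well, so `opt.port` still ends up as the integer 9090. A quick standalone check (option names copied from the diff, the rest is illustrative):

~~~
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--host', type=str, default='127.0.0.1')
parser.add_argument('--port', type=int, default='9090')  # string default, converted by type=int
opt = parser.parse_args([])
print(opt.host, opt.port, type(opt.port))  # 127.0.0.1 9090 <class 'int'>
~~~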
@@ -598,7 +611,7 @@ def create_cmd_parser():
'--with_variations',
default=None,
type=str,
help='list of variations to apply, in the format `seed,weight;seed,weight;...'
help='list of variations to apply, in the format `seed:weight,seed:weight,...'
)
return parser