Merge branch 'development' into development

commit 357e1ad35f
Peter Baylies authored 2022-09-15 09:59:11 -04:00, committed by GitHub
6 changed files with 52 additions and 29 deletions


@@ -74,7 +74,7 @@ We combine the two variations using `-V` (--with_variations). Again, we must pro
this to work.
```
dream> "prompt" -S3357757885 -V3647897225,0.1;1614299449,0.1
dream> "prompt" -S3357757885 -V3647897225:0.1,1614299449:0.1
Outputs:
./outputs/Xena/000003.1614299449.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1 -S3357757885
```
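As an aside, the `seed:weight,seed:weight` argument shown above is easy to split apart; a minimal sketch of such parsing (a hypothetical helper, not code from this commit):

```
# Hypothetical helper: turn a with-variations string such as
# "3647897225:0.1,1614299449:0.1" into a list of (seed, weight) pairs.
def parse_with_variations(spec):
    pairs = []
    for part in spec.split(','):
        seed, weight = part.split(':')
        pairs.append((int(seed), float(weight)))
    return pairs

print(parse_with_variations('3647897225:0.1,1614299449:0.1'))
# -> [(3647897225, 0.1), (1614299449, 0.1)]
```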
@@ -86,7 +86,7 @@ Here we are providing equal weights (0.1 and 0.1) for both the subseeds. The res
We could either try combining the images with different weights, or we can generate more variations around the almost-but-not-quite image. We do the latter, using both the `-V` (combining) and `-v` (variation strength) options. Note that we use `-n6` to generate 6 variations:
```
dream> "prompt" -S3357757885 -V3647897225,0.1;1614299449,0.1 -v0.05 -n6
dream> "prompt" -S3357757885 -V3647897225:0.1,1614299449:0.1 -v0.05 -n6
Outputs:
./outputs/Xena/000004.3279757577.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,3279757577:0.05 -S3357757885
./outputs/Xena/000004.2853129515.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,2853129515:0.05 -S3357757885


@@ -22,6 +22,12 @@ def build_opt(post_data, seed, gfpgan_model_exists):
setattr(opt, 'invert_mask', 'invert_mask' in post_data)
setattr(opt, 'cfg_scale', float(post_data['cfg_scale']))
setattr(opt, 'sampler_name', post_data['sampler_name'])
# embiggen not practical at this point because we have no way of feeding images back into img2img
# however, this code is here against that eventuality
setattr(opt, 'embiggen', None)
setattr(opt, 'embiggen_tiles', None)
setattr(opt, 'gfpgan_strength', float(post_data['gfpgan_strength']) if gfpgan_model_exists else 0)
setattr(opt, 'upscale', [int(post_data['upscale_level']), float(post_data['upscale_strength'])] if post_data['upscale_level'] != '' else None)
setattr(opt, 'progress_images', 'progress_images' in post_data)
@@ -109,7 +115,7 @@ class DreamServer(BaseHTTPRequestHandler):
out_dir = os.path.realpath(self.outdir.rstrip('/'))
if self.path.startswith('/static/dream_web/'):
path = '.' + self.path
-elif out_dir.endswith(path_dir):
+elif out_dir.replace('\\', '/').endswith(path_dir):
file = os.path.basename(self.path)
path = os.path.join(self.outdir,file)
else:
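For context on the change above: `os.path.realpath()` returns backslash-separated paths on Windows, so a forward-slash suffix check fails unless the separators are normalized first. A standalone illustration (made-up paths, not code from this commit):

```
# Illustration only: why the suffix check needs the backslash replacement.
out_dir = 'C:\\work\\outputs\\img-samples'    # shape of a realpath() result on Windows
path_dir = 'outputs/img-samples'              # suffix derived from the request URL

print(out_dir.endswith(path_dir))                     # False
print(out_dir.replace('\\', '/').endswith(path_dir))  # True
```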
@@ -157,6 +163,7 @@ class DreamServer(BaseHTTPRequestHandler):
def image_done(image, seed, upscaled=False):
name = f'{prefix}.{seed}.png'
iter_opt = argparse.Namespace(**vars(opt)) # copy
print(f'iter_opt = {iter_opt}')
if opt.variation_amount > 0:
this_variation = [[seed, opt.variation_amount]]
if opt.with_variations is None:
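The `argparse.Namespace(**vars(opt))` idiom in this hunk makes a shallow copy of the options namespace, so per-image attributes can be rebound without touching the shared `opt`. A minimal illustration (attribute values invented for the example):

```
import argparse

# Shallow-copy a Namespace via vars(); rebinding attributes on the copy
# leaves the original untouched.
opt = argparse.Namespace(seed=42, with_variations=None)
iter_opt = argparse.Namespace(**vars(opt))
iter_opt.with_variations = [[1234, 0.1]]

print(opt.with_variations)       # None (unchanged)
print(iter_opt.with_variations)  # [[1234, 0.1]]
```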

main.py

@@ -40,7 +40,8 @@ def load_model_from_config(config, ckpt, verbose=False):
print('unexpected keys:')
print(u)
-model.cuda()
+if torch.cuda.is_available():
+    model.cuda()
return model
@@ -549,23 +550,26 @@ class CUDACallback(Callback):
     # see https://github.com/SeanNaren/minGPT/blob/master/mingpt/callback.py
     def on_train_epoch_start(self, trainer, pl_module):
         # Reset the memory use counter
-        torch.cuda.reset_peak_memory_stats(trainer.root_gpu)
-        torch.cuda.synchronize(trainer.root_gpu)
+        if torch.cuda.is_available():
+            torch.cuda.reset_peak_memory_stats(trainer.root_gpu)
+            torch.cuda.synchronize(trainer.root_gpu)
         self.start_time = time.time()

     def on_train_epoch_end(self, trainer, pl_module, outputs):
-        torch.cuda.synchronize(trainer.root_gpu)
-        max_memory = (
-            torch.cuda.max_memory_allocated(trainer.root_gpu) / 2**20
-        )
+        if torch.cuda.is_available():
+            torch.cuda.synchronize(trainer.root_gpu)
         epoch_time = time.time() - self.start_time

         try:
-            max_memory = trainer.training_type_plugin.reduce(max_memory)
             epoch_time = trainer.training_type_plugin.reduce(epoch_time)

             rank_zero_info(f'Average Epoch time: {epoch_time:.2f} seconds')
-            rank_zero_info(f'Average Peak memory {max_memory:.2f}MiB')
+            if torch.cuda.is_available():
+                max_memory = (
+                    torch.cuda.max_memory_allocated(trainer.root_gpu) / 2**20
+                )
+                max_memory = trainer.training_type_plugin.reduce(max_memory)
+                rank_zero_info(f'Average Peak memory {max_memory:.2f}MiB')
         except AttributeError:
             pass
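The reworked callback only touches `torch.cuda` when a GPU is actually present. A condensed standalone sketch of the same measurement pattern outside Lightning (the matrix multiply is just a stand-in for an epoch of work):

```
import time
import torch

# Reset/read peak-memory stats and synchronize only when CUDA exists,
# so the same script also runs on CPU-only machines.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

if torch.cuda.is_available():
    torch.cuda.reset_peak_memory_stats()
    torch.cuda.synchronize()
start = time.time()

# Stand-in for an epoch of work.
_ = torch.randn(1024, 1024, device=device) @ torch.randn(1024, 1024, device=device)

if torch.cuda.is_available():
    torch.cuda.synchronize()
    peak_mib = torch.cuda.max_memory_allocated() / 2**20
    print(f'Peak CUDA memory: {peak_mib:.2f} MiB')
print(f'Epoch time: {time.time() - start:.2f} seconds')
```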
@@ -872,7 +876,6 @@ if __name__ == '__main__':
config.data.params.validation.params.data_root = opt.data_root
data = instantiate_from_config(config.data)
-data = instantiate_from_config(config.data)
# NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html
# calling these ourselves should not be necessary but it is.
# lightning still takes care of proper multiprocessing though
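The NOTE above refers to the `LightningDataModule` hooks that the Trainer normally invokes on its own; a generic sketch of calling them manually (toy datamodule, not lines from this diff):

```
from pytorch_lightning import LightningDataModule

# Toy datamodule: prepare_data() runs once per node (downloads, preprocessing),
# setup() runs on every process (builds the actual datasets).
class ToyDataModule(LightningDataModule):
    def prepare_data(self):
        pass

    def setup(self, stage=None):
        self.train_set = list(range(8))

data = ToyDataModule()
data.prepare_data()
data.setup()
print(data.train_set)
```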


@@ -620,6 +620,7 @@ def create_cmd_parser():
)
parser.add_argument(
'-embiggen',
'--embiggen',
nargs='+',
default=None,
type=float,
@@ -627,6 +628,7 @@ def create_cmd_parser():
)
parser.add_argument(
'-embiggen_tiles',
'--embiggen_tiles',
nargs='+',
default=None,
type=int,
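Both hunks above register a single-dash and a double-dash spelling for the same option. A minimal standalone argparse sketch of that pattern, echoing the flag names and settings shown here (illustrative usage only, not this repository's full parser):

```
import argparse

# One option reachable as either -embiggen or --embiggen; nargs='+' collects
# one or more float values into a list.
parser = argparse.ArgumentParser()
parser.add_argument(
    '-embiggen',
    '--embiggen',
    nargs='+',
    default=None,
    type=float,
)
args = parser.parse_args(['--embiggen', '2', '0.75'])
print(args.embiggen)  # [2.0, 0.75]
```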


@@ -91,6 +91,7 @@ header h1 {
}
#fieldset-config {
line-height:2em;
background-color: #F0F0F0;
}
input[type="number"] {
width: 60px;
@@ -122,6 +123,9 @@ label {
cursor: pointer;
color: red;
}
#basic-parameters {
background-color: #EEEEEE;
}
#txt2img {
background-color: #DCDCDC;
}
@@ -129,15 +133,19 @@ label {
background-color: #EEEEEE;
}
#img2img {
-background-color: #F5F5F5;
+background-color: #DCDCDC;
}
#gfpgan {
-background-color: #DCDCDC;
+background-color: #EEEEEE;
}
#progress-section {
background-color: #F5F5F5;
}
.section-header {
text-align: left;
font-weight: bold;
padding: 0 0 0 0;
}
#no-results-message:not(:only-child) {
display: none;
}


@@ -25,6 +25,7 @@
</div>
</fieldset>
<fieldset id="fieldset-config">
<div class="section-header">Basic options</div>
<label for="iterations">Images to generate:</label>
<input value="1" type="number" id="iterations" name="iterations" size="4">
<label for="steps">Steps:</label>
@@ -39,11 +40,11 @@
<option value="k_dpm_2">KDPM_2</option>
<option value="k_dpm_2_a">KDPM_2A</option>
<option value="k_euler">KEULER</option>
<option value="k_euler_a">KEULER_A</option>
<option value="k_euler_a">KEULER_A</option>
<option value="k_heun">KHEUN</option>
</select>
<input type="checkbox" name="seamless" id="seamless">
<label for="seamless">Seamless circular tiling</label>
<label for="seamless">Seamless circular tiling</label>
<br>
<label title="Set to multiple of 64" for="width">Width:</label>
<select id="width" name="width" value="512">
@@ -79,26 +80,28 @@
<input value="0" type="number" id="perlin" name="perlin" step="0.01" min="0" max="1">
<button type="button" id="reset-all">Reset to Defaults</button>
</div>
<div id="variations">
<span id="variations">
<label title="If > 0, generates variations on the initial seed instead of random seeds per iteration. Must be between 0 and 1. Higher values will be more different." for="variation_amount">Variation amount (0 to disable):</label>
<input value="0" type="number" id="variation_amount" name="variation_amount" step="0.01" min="0" max="1">
<label title="list of variations to apply, in the format `seed:weight,seed:weight,..." for="with_variations">With variations (seed:weight,seed:weight,...):</label>
<input value="" type="text" id="with_variations" name="with_variations">
-</div>
-<div id="img2img">
-<label title="Upload an image to use img2img" for="initimg">Initial image:</label>
-<input type="file" id="initimg" name="initimg" accept=".jpg, .jpeg, .png">
-<button type="button" id="remove-image">Remove Image</button>
+</span>
+</fieldset>
+<fieldset id="img2img">
+<div class="section-header">Image-to-image options</div>
+<label title="Upload an image to use img2img" for="initimg">Initial image:</label>
+<input type="file" id="initimg" name="initimg" accept=".jpg, .jpeg, .png">
+<button type="button" id="remove-image">Remove Image</button>
<br>
<label for="strength">Img2Img Strength:</label>
<input value="0.75" type="number" id="strength" name="strength" step="0.01" min="0" max="1">
<input type="checkbox" id="fit" name="fit" checked>
<label title="Rescale image to fit within requested width and height" for="fit">Fit to width/height:</label>
</div>
<label title="Rescale image to fit within requested width and height" for="fit">Fit to width/height</label>
</fieldset>
<fieldset id="gfpgan">
<div class="section-header">Post-processing options</div>
<label title="Strength of the gfpgan (face fixing) algorithm." for="gfpgan_strength">GPFGAN Strength (0 to disable):</label>
<input value="0.8" min="0" max="1" type="number" id="gfpgan_strength" name="gfpgan_strength" step="0.05">
<input value="0.0" min="0" max="1" type="number" id="gfpgan_strength" name="gfpgan_strength" step="0.1">
<label title="Upscaling to perform using ESRGAN." for="upscale_level">Upscaling Level</label>
<select id="upscale_level" name="upscale_level" value="">
<option value="" selected>None</option>
@@ -119,7 +122,7 @@
<div id="scaling-inprocess-message">
<i><span>Postprocessing...</span><span id="processing_cnt">1/3</span></i>
</div>
-</div>
+</span>
</section>
<div id="results">