Merge remote-tracking branch 'upstream/development' into mkdocs-updates

mauwii 2022-09-19 08:55:13 +02:00
commit 5ab7c68cc7
12 changed files with 79 additions and 22 deletions

View File

@@ -4,6 +4,11 @@
![project logo](docs/assets/logo.png)
<p align='center'>
<a href="https://discord.gg/ZmtBAhwWhy"><img src="docs/assets/join-us-on-discord-image.png"/></a>
</p>
# **Stable Diffusion Dream Script**
[![discord badge]][discord link]
[![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]

Binary file not shown. (image added, 25 KiB)

BIN  docs/assets/step1.png  (Normal file, binary not shown, 503 KiB)

BIN  docs/assets/step2.png  (Normal file, binary not shown, 1.4 KiB)

BIN  docs/assets/step4.png  (Normal file, binary not shown, 1.3 KiB)

BIN  docs/assets/step5.png  (Normal file, binary not shown, 5.6 KiB)

BIN  docs/assets/step6.png  (Normal file, binary not shown, 395 KiB)

BIN  docs/assets/step7.png  (Normal file, binary not shown, 1014 KiB)

View File

@@ -39,5 +39,44 @@ We are hoping to get rid of the need for this workaround in an upcoming release.
5. Open the Layers toolbar (++ctrl+l++) and select "Floating Selection"
6. Set opacity to 0%
7. Export as PNG
8. In the export dialogue, make sure the "Save colour values from transparent pixels" checkbox is selected. (A scripted way to produce the same kind of mask is sketched below.)
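
If you prefer to script this step instead of clicking through GIMP, the same kind of mask can be produced with Pillow: keep the RGB values but zero the alpha channel over the region you want inpainted. This is only a minimal sketch, not part of this repository; the file names and the rectangular region are placeholders.

```python
from PIL import Image

# Load the photo and make sure it has an alpha channel.
img = Image.open("photo.png").convert("RGBA")    # placeholder filename

# Make a rectangular region fully transparent while keeping its colour
# values, which is what the "Save colour values from transparent pixels"
# option preserves in the GIMP recipe above.
left, top, right, bottom = 100, 100, 300, 300    # placeholder region
pixels = img.load()
for x in range(left, right):
    for y in range(top, bottom):
        r, g, b, _ = pixels[x, y]
        pixels[x, y] = (r, g, b, 0)

img.save("photo_masked.png")                     # placeholder output name
```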
## Recipe for Adobe Photoshop
1. Open image in Photoshop
<p align='left'>
<img src="../assets/step1.png"/>
</p>
2. Use any of the selection tools (Marquee, Lasso, or Wand) to select the area you desire to inpaint.
<p align='left'>
<img src="../assets/step2.png"/>
</p>
3. Because we'll be applying a mask over the area we want to preserve, you should now select the inverse by using the Shift + Ctrl + I shortcut, or by right-clicking and choosing the "Select Inverse" option.
4. You'll now create a mask by selecting the image layer and masking the selection. Make sure that you don't delete any of the underlying image, or your inpainting results will be dramatically impacted.
<p align='left'>
<img src="../assets/step4.png"/>
</p>
5. Make sure to hide any background layers that are present. You should see the mask applied to your image layer, and the image on your canvas should display the checkered background.
<p align='left'>
<img src="../assets/step5.png"/>
</p>
<p align='left'>
<img src="../assets/step6.png"/>
</p>
6. Save the image as a transparent PNG by using the "Save a Copy" option in the File menu, or using the Alt + Ctrl + S keyboard shortcut.
7. After following the inpainting instructions above (either through the CLI or the Web UI), marvel at your newfound ability to selectively dream. Lookin' good!
<p align='left'>
<img src="../assets/step7.png"/>
</p>
8. In the export dialogue, make sure the "Save colour values from transparent pixels" checkbox is selected. (A quick programmatic check of the exported file is sketched below.)
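
Before running inpainting, you can double-check that the exported PNG really kept the colour values of its transparent pixels. This is only a minimal Pillow sketch with a placeholder filename, not part of this repository.

```python
from PIL import Image

img = Image.open("photo_masked.png").convert("RGBA")   # placeholder filename

# Collect the colour values of every fully transparent pixel.
transparent = [(r, g, b) for r, g, b, a in img.getdata() if a == 0]

print(f"{len(transparent)} transparent pixels found")
# If all of them are pure black, the editor most likely discarded the colour
# information and the inpainting results will suffer.
print("colour preserved:", any(rgb != (0, 0, 0) for rgb in transparent))
```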

View File

@@ -602,6 +602,16 @@ def metadata_dumps(opt,
    This is intended to be turned into JSON and stored in the
    "sd
    '''
    # top-level metadata minus `image` or `images`
    metadata = {
        'model' : 'stable diffusion',
        'model_id' : opt.model,
        'model_hash' : model_hash,
        'app_id' : APP_ID,
        'app_version' : APP_VERSION,
    }
    # add some RFC266 fields that are generated internally, and not as
    # user args
    image_dict = opt.to_dict(
@@ -647,22 +657,22 @@ def metadata_dumps(opt,
    else:
        rfc_dict['type'] = 'txt2img'
    images = []
    if len(seeds)==0 and opt.seed:
        seeds=[seed]
    for seed in seeds:
        rfc_dict['seed'] = seed
        images.append(copy.copy(rfc_dict))
    return {
        'model' : 'stable diffusion',
        'model_id' : opt.model,
        'model_hash' : model_hash,
        'app_id' : APP_ID,
        'app_version' : APP_VERSION,
        'images' : images,
    }

    if opt.grid:
        images = []
        for seed in seeds:
            rfc_dict['seed'] = seed
            images.append(copy.copy(rfc_dict))
        metadata['images'] = images
    else:
        # there should only ever be a single seed if we did not generate a grid
        assert len(seeds) == 1, 'Expected a single seed'
        rfc_dict['seed'] = seeds[0]
        metadata['image'] = rfc_dict
    return metadata

def metadata_loads(metadata):
    '''
@@ -671,7 +681,10 @@ def metadata_loads(metadata):
    '''
    results = []
    try:
        images = metadata['sd-metadata']['images']
        if 'grid' in metadata['sd-metadata']:
            images = metadata['sd-metadata']['images']
        else:
            images = [metadata['sd-metadata']['image']]
        for image in images:
            # repack the prompt and variations
            image['prompt'] = ','.join([':'.join([x['prompt'], str(x['weight'])]) for x in image['prompt']])
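
To make the structural change concrete, here are two hand-written payloads in the shapes the updated code produces and consumes: a single `image` entry for normal runs and an `images` list for grids. All values are illustrative, and the top-level `grid` key in the second example is only an assumption inferred from the `'grid' in metadata['sd-metadata']` check above.

```python
# Hypothetical single-image payload: metadata_loads() takes the 'image' branch.
single = {
    'sd-metadata': {
        'model' : 'stable diffusion',
        'model_id' : 'some-model-id',    # stand-in for opt.model
        'model_hash' : 'deadbeef...',    # stand-in for model_hash
        'app_id' : 'APP_ID',             # stand-in for APP_ID
        'app_version' : 'APP_VERSION',   # stand-in for APP_VERSION
        'image' : {'type': 'txt2img', 'seed': 42},
    }
}

# Hypothetical grid payload: metadata_loads() takes the 'images' branch,
# assuming the presence of a 'grid' key marks grid output.
grid = {
    'sd-metadata': {
        'model' : 'stable diffusion',
        'grid' : True,
        'images' : [
            {'type': 'txt2img', 'seed': 42},
            {'type': 'txt2img', 'seed': 43},
        ],
    }
}
```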

View File

@@ -38,14 +38,14 @@ def get_uc_and_c(prompt, model, log_tokens=False, skip_normalize=False):
        c = torch.zeros_like(uc)
        # normalize each "sub prompt" and add it
        for subprompt, weight in weighted_subprompts:
            log_tokenization(subprompt, model, log_tokens)
            log_tokenization(subprompt, model, log_tokens, weight)
            c = torch.add(
                c,
                model.get_learned_conditioning([subprompt]),
                alpha=weight,
            )
    else: # just standard 1 prompt
        log_tokenization(prompt, model, log_tokens)
        log_tokenization(prompt, model, log_tokens, 1)
        c = model.get_learned_conditioning([prompt])
    uc = model.get_learned_conditioning([unconditioned_words])
    return (uc, c)
@@ -86,7 +86,7 @@ def split_weighted_subprompts(text, skip_normalize=False)->list:
# shows how the prompt is tokenized
# usually tokens have '</w>' to indicate end-of-word,
# but for readability it has been replaced with ' '
def log_tokenization(text, model, log=False):
def log_tokenization(text, model, log=False, weight=1):
    if not log:
        return
    tokens = model.cond_stage_model.tokenizer._tokenize(text)
@@ -103,8 +103,8 @@ def log_tokenization(text, model, log=False):
            usedTokens += 1
        else: # over max token length
            discarded = discarded + f"\x1b[0;3{s};40m{token}"
    print(f"\n>> Tokens ({usedTokens}):\n{tokenized}\x1b[0m")
    if discarded != "":
        print(
            f">> Tokens Discarded ({totalTokens-usedTokens}):\n{discarded}\x1b[0m"
        )
    print(f"\n>> Tokens ({usedTokens}), Weight ({weight:.2f}):\n{tokenized}\x1b[0m")
    if discarded != "":
        print(
            f">> Tokens Discarded ({totalTokens-usedTokens}):\n{discarded}\x1b[0m"
        )
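
Since the only behavioural change here is that each subprompt's weight is now threaded through to the log line, a tiny self-contained illustration of the resulting output format may help. The whitespace tokenizer and the example subprompts below are stand-ins, not the project's actual code.

```python
# Minimal illustration of the new log line format; str.split() is a stand-in
# for the CLIP tokenizer used by the real log_tokenization().
def log_tokenization_demo(text, weight=1):
    tokens = text.split()
    print(f"\n>> Tokens ({len(tokens)}), Weight ({weight:.2f}):\n{' '.join(tokens)}")

# Hypothetical (subprompt, weight) pairs, mirroring what
# split_weighted_subprompts() hands to log_tokenization() in the diff above.
for subprompt, weight in [('a medieval castle', 1.2), ('thick fog', 0.8)]:
    log_tokenization_demo(subprompt, weight)
```

Each subprompt prints its own ">> Tokens (n), Weight (w)" block, matching the format of the updated print statement minus the ANSI colour codes.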