tweaks to documentation and call signature for advanced prompting
This commit is contained in: parent 135c62f1a4, commit 16e7cbdb38
@@ -110,10 +110,15 @@ See the section below on "Prompt Blending" for more information about how this w

### Cross-Attention Control ('prompt2prompt')

Generate an image with a given prompt and then paint over the image using the `prompt2prompt` syntax to substitute words in the original prompt for words in a new prompt. Based off [bloc97's colab](https://github.com/bloc97/CrossAttentionControl).

Sometimes an image you generate is almost right, and you just want to change one detail without affecting the rest. You could use a photo editor and inpainting to overpaint the area, but that's a pain. Here's where `prompt2prompt` comes in handy.

Generate an image with a given prompt, record the seed of the image, and then use the `prompt2prompt` syntax to substitute words in the original prompt for words in a new prompt. This works for `img2img` as well.

* `a ("fluffy cat").swap("smiling dog") eating a hotdog`.
* quotes optional: `a (fluffy cat).swap(smiling dog) eating a hotdog`.

@@ -125,6 +130,9 @@ colab](https://github.com/bloc97/CrossAttentionControl).

* Convenience option `shape_freedom` (0-1) to specify how much "freedom" Stable Diffusion should have to change the shape of the subject being swapped.
* `a (cat).swap(dog, shape_freedom=0.5) eating a hotdog`.

The `prompt2prompt` code is based off [bloc97's colab](https://github.com/bloc97/CrossAttentionControl).

Note that `prompt2prompt` is not currently working with the runwayML inpainting model, and may never work due to the way this model is set up. If you attempt to use `prompt2prompt` you will get the original

@@ -114,7 +114,7 @@ def get_uc_and_c_and_ec(prompt_string_uncleaned, model, log_tokens=False, skip_n

     conditioning = original_embeddings
     edited_conditioning = edited_embeddings
-    print('got edit_opcodes', edit_opcodes, 'options', edit_options)
+    print('>> got edit_opcodes', edit_opcodes, 'options', edit_options)
     cac_args = CrossAttentionControl.Arguments(
         edited_conditioning = edited_conditioning,
         edit_opcodes = edit_opcodes,
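
The `edit_opcodes` handed to `CrossAttentionControl.Arguments` describe how the original and edited token sequences line up. Purely as an illustration (the helper name and the fake token ids below are assumptions, not code from this repository), opcodes of that general shape can be computed with Python's `difflib.SequenceMatcher`:

```python
from difflib import SequenceMatcher

def build_edit_opcodes(original_tokens, edited_tokens):
    """Align two token-id sequences and return (tag, i1, i2, j1, j2) opcodes."""
    return SequenceMatcher(None, original_tokens, edited_tokens).get_opcodes()

# Toy example: fake token ids standing in for a tokenized prompt pair
# ("cat" vs "dog" differing at one position).
original = [101, 320, 412, 999, 553, 102]
edited   = [101, 320, 777, 999, 553, 102]
for tag, i1, i2, j1, j2 in build_edit_opcodes(original, edited):
    print(tag, original[i1:i2], edited[j1:j2])
# equal   [101, 320] [101, 320]
# replace [412] [777]
# equal   [999, 553, 102] [999, 553, 102]
```

Spans tagged `equal` can keep the original attention behaviour, while `replace` spans mark where the edited conditioning takes over.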

@@ -1,5 +1,5 @@

 from math import ceil
-from typing import Callable, Optional
+from typing import Callable, Optional, Union

 import torch

@@ -54,7 +54,7 @@ class InvokeAIDiffuserComponent:

     CrossAttentionControl.remove_cross_attention_control(self.model)

     def do_diffusion_step(self, x: torch.Tensor, sigma: torch.Tensor,
-                          unconditioning: torch.Tensor, conditioning: torch.Tensor,
+                          unconditioning: Union[torch.Tensor,dict], conditioning: Union[torch.Tensor,dict],
                           unconditional_guidance_scale: float,
                           step_index: int=None
                           ):
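
The widened `Union[torch.Tensor, dict]` annotations mean the conditioning arguments may now arrive either as a bare embedding tensor or as a dict that bundles the tensor with cross-attention-control data. Below is a minimal sketch of how code receiving such a value might normalise the two forms; the `unpack_conditioning` helper and the `"embeddings"` / `"edit_opcodes"` keys are assumptions for illustration, not the structure actually used here.

```python
from typing import Union

import torch

def unpack_conditioning(conditioning: Union[torch.Tensor, dict]) -> torch.Tensor:
    # Hypothetical helper, not part of InvokeAI: with the widened signature a
    # caller may pass either a bare embedding tensor or a dict that bundles the
    # tensor with cross-attention-control metadata under assumed key names.
    if isinstance(conditioning, dict):
        return conditioning["embeddings"]  # assumed key, for illustration
    return conditioning

plain = torch.zeros(1, 77, 768)                      # plain conditioning tensor
wrapped = {"embeddings": plain, "edit_opcodes": []}  # dict form with extra data
assert unpack_conditioning(plain) is plain
assert unpack_conditioning(wrapped) is plain
```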