Mirror of https://github.com/invoke-ai/InvokeAI

commit 65a76a086b
parent 07381e5a26

    cleanup: Some basic cleanup
@@ -1 +1 @@
-from .ip_adapter import IPAdapter, IPAdapterXL, IPAdapterPlus
+from .ip_adapter import IPAdapter, IPAdapterPlus, IPAdapterXL
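Review note: the only change in this hunk is alphabetizing the re-exported names. A minimal sketch, not code from this commit, showing the ordering rule at work (plain string comparison, so IPAdapterPlus sorts before IPAdapterXL):

# Illustration only: the sort that produces the new import order.
names = ["IPAdapter", "IPAdapterXL", "IPAdapterPlus"]
print(", ".join(sorted(names)))  # IPAdapter, IPAdapterPlus, IPAdapterXL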
@@ -1,13 +1,11 @@
 # copied from https://github.com/tencent-ailab/IP-Adapter (Apache License 2.0)
 # and modified as needed
 
-import os
-from typing import List
 
 import torch
 from diffusers import StableDiffusionPipeline
-from transformers import CLIPVisionModelWithProjection, CLIPImageProcessor
 from PIL import Image
+from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
 
 # FIXME: Getting errors when trying to use PyTorch 2.0 versions of IPAttnProcessor and AttnProcessor
 # so for now falling back to the default versions
@@ -16,7 +14,7 @@ from PIL import Image
 # from .attention_processor import IPAttnProcessor2_0 as IPAttnProcessor, AttnProcessor2_0 as AttnProcessor
 # else:
 # from .attention_processor import IPAttnProcessor, AttnProcessor
-from .attention_processor import IPAttnProcessor, AttnProcessor
+from .attention_processor import AttnProcessor, IPAttnProcessor
 from .resampler import Resampler
 
 
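Review note: both hunks in this file apply the same import convention (an assumption on my part: this looks like isort's default profile, though the commit message names no tool). Within a group, plain "import x" statements precede "from x import y" statements, and each run is sorted case-insensitively, which is why transformers now follows PIL. A minimal sketch of the grouping:

# Illustration only (assumed isort-style layout, not code from this commit).
import math                                      # group 1: standard library

import torch                                     # group 2: third-party; plain "import x"
from diffusers import StableDiffusionPipeline    # lines first, then "from x import y"
from PIL import Image                            # sorted case-insensitively:
from transformers import CLIPImageProcessor      # diffusers < PIL < transformers

# from .resampler import Resampler               # group 3: local (relative) imports last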
@@ -1,6 +1,7 @@
 # copied from https://github.com/tencent-ailab/IP-Adapter (Apache License 2.0)
 
-# tencent ailab comment: modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py
+# tencent ailab comment: modified from
+# https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py
 import math
 
 import torch
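Review note: this hunk only wraps an over-long comment onto two lines. A minimal sketch of the same reflow, assuming an 88-column limit (the project's actual limit is not visible in this diff):

# Illustration only; width=88 is an assumption.
import textwrap

comment = (
    "# tencent ailab comment: modified from "
    "https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py"
)
for line in textwrap.wrap(
    comment, width=88, subsequent_indent="# ",
    break_long_words=False, break_on_hyphens=False,
):
    print(line)
# Prints the same two comment lines this hunk adds: the URL is longer than
# the limit, so it moves whole onto its own "#"-prefixed line.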
@@ -1,18 +1,16 @@
 # copied from https://github.com/tencent-ailab/IP-Adapter (Apache License 2.0)
 # and modified as needed
 
 import inspect
 import warnings
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Any, Callable, Dict, List, Optional, Union
 
 import numpy as np
 import PIL.Image
 import torch
 import torch.nn.functional as F
-from diffusers.utils import is_compiled_module
-from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
 from diffusers.models import ControlNetModel
+from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
+from diffusers.utils import is_compiled_module
 
 
 def is_torch2_available():
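Review note: the body of is_torch2_available() lies outside this hunk, so it is not shown. A minimal sketch of what such a check typically looks like, assuming it mirrors the common diffusers idiom of probing for PyTorch 2.0's fused attention entry point (an assumption, not the function body from this repository):

# Sketch only; assumed implementation, not taken from this diff.
import torch.nn.functional as F

def is_torch2_available() -> bool:
    # PyTorch 2.0 introduced F.scaled_dot_product_attention.
    return hasattr(F, "scaled_dot_product_attention")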
@@ -59,7 +57,8 @@ def generate(
     prompt (`str` or `List[str]`, *optional*):
         The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
         instead.
-    image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
+    image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`,
+        `List[np.ndarray]`,:
     `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
         The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
         the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
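Review note: the docstring being rewrapped documents the `image` argument of the copied ControlNet pipeline. For orientation, a minimal usage sketch against the upstream diffusers API (assumption: standard diffusers classes, not the modified pipeline in this repository):

# Sketch of upstream diffusers usage, not this repository's pipeline.
import PIL.Image
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet
)
cond = PIL.Image.open("canny_edges.png")  # hypothetical conditioning image
result = pipe("a photo of a cat", image=cond).images[0]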