Apply black
@@ -8,9 +8,7 @@ from .txt2mask import Txt2Mask
 from .util import InitImageResizer, make_grid


-def debug_image(
-    debug_image, debug_text, debug_show=True, debug_result=False, debug_status=False
-):
+def debug_image(debug_image, debug_text, debug_show=True, debug_result=False, debug_status=False):
     if not debug_status:
         return
 
@@ -9,26 +9,26 @@ from PIL import Image
 from imwatermark import WatermarkEncoder
 from invokeai.app.services.config import InvokeAIAppConfig
 import invokeai.backend.util.logging as logger

 config = InvokeAIAppConfig.get_config()
+
+
 class InvisibleWatermark:
     """
     Wrapper around InvisibleWatermark module.
     """

     @classmethod
     def invisible_watermark_available(self) -> bool:
         return config.invisible_watermark

     @classmethod
-    def add_watermark(self, image: Image, watermark_text:str) -> Image:
+    def add_watermark(self, image: Image, watermark_text: str) -> Image:
         if not self.invisible_watermark_available():
             return image
         logger.debug(f'Applying invisible watermark "{watermark_text}"')
         bgr = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2BGR)
         encoder = WatermarkEncoder()
-        encoder.set_watermark('bytes', watermark_text.encode('utf-8'))
-        bgr_encoded = encoder.encode(bgr, 'dwtDct')
-        return Image.fromarray(
-            cv2.cvtColor(bgr_encoded, cv2.COLOR_BGR2RGB)
-        ).convert("RGBA")
+        encoder.set_watermark("bytes", watermark_text.encode("utf-8"))
+        bgr_encoded = encoder.encode(bgr, "dwtDct")
+        return Image.fromarray(cv2.cvtColor(bgr_encoded, cv2.COLOR_BGR2RGB)).convert("RGBA")
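The invisible-watermark package used above also provides a decoder; for orientation, a minimal round-trip sketch outside this diff (the random image, "test" payload, and 32-bit length are illustrative assumptions):

    import numpy as np
    from imwatermark import WatermarkEncoder, WatermarkDecoder

    # Embed a 4-byte payload with the same "dwtDct" method used by add_watermark.
    bgr = np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8)
    encoder = WatermarkEncoder()
    encoder.set_watermark("bytes", b"test")
    bgr_encoded = encoder.encode(bgr, "dwtDct")

    # The decoder must be told the payload length in bits (4 bytes = 32 bits).
    decoder = WatermarkDecoder("bytes", 32)
    print(decoder.decode(bgr_encoded, "dwtDct"))  # b'test' on most natural images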
@@ -7,8 +7,10 @@ be suppressed or deferred
 import numpy as np
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import InvokeAIAppConfig

 config = InvokeAIAppConfig.get_config()
+
+
 class PatchMatch:
     """
     Thin class wrapper around the patchmatch function.
@@ -34,9 +34,7 @@ class PngWriter:

     # saves image named _image_ to outdir/name, writing metadata from prompt
     # returns full path of output
-    def save_image_and_prompt_to_png(
-        self, image, dream_prompt, name, metadata=None, compress_level=6
-    ):
+    def save_image_and_prompt_to_png(self, image, dream_prompt, name, metadata=None, compress_level=6):
         path = os.path.join(self.outdir, name)
         info = PngImagePlugin.PngInfo()
         info.add_text("Dream", dream_prompt)
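save_image_and_prompt_to_png embeds the prompt as a PNG text chunk; a small self-contained sketch of that mechanism with PIL (file name and prompt are illustrative, not from this diff):

    from PIL import Image, PngImagePlugin

    # Write: attach a "Dream" text chunk, as the method above does.
    info = PngImagePlugin.PngInfo()
    info.add_text("Dream", "a banana sushi -s50 -W512 -H512")
    Image.new("RGB", (64, 64)).save("example.png", "PNG", pnginfo=info, compress_level=6)

    # Read back: PNG text chunks surface in the .text mapping of the opened image.
    print(Image.open("example.png").text["Dream"])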
@@ -114,8 +112,6 @@ class PromptFormatter:
         if opt.variation_amount > 0:
             switches.append(f"-v{opt.variation_amount}")
         if opt.with_variations:
-            formatted_variations = ",".join(
-                f"{seed}:{weight}" for seed, weight in opt.with_variations
-            )
+            formatted_variations = ",".join(f"{seed}:{weight}" for seed, weight in opt.with_variations)
             switches.append(f"-V{formatted_variations}")
         return " ".join(switches)
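The collapsed generator expression produces the same switch string as the multi-line form; for example, with illustrative seed/weight pairs:

    with_variations = [(42, 0.3), (43, 0.7)]
    formatted_variations = ",".join(f"{seed}:{weight}" for seed, weight in with_variations)
    print(f"-V{formatted_variations}")  # -V42:0.3,43:0.7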
@@ -9,14 +9,17 @@ from invokeai.backend import SilenceWarnings
 from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.backend.util.devices import choose_torch_device
 import invokeai.backend.util.logging as logger

 config = InvokeAIAppConfig.get_config()
-CHECKER_PATH = 'core/convert/stable-diffusion-safety-checker'
+
+CHECKER_PATH = "core/convert/stable-diffusion-safety-checker"
+
+
 class SafetyChecker:
     """
     Wrapper around SafetyChecker model.
     """

     safety_checker = None
     feature_extractor = None
     tried_load: bool = False
@@ -25,21 +28,19 @@ class SafetyChecker:
     def _load_safety_checker(self):
         if self.tried_load:
             return

         if config.nsfw_checker:
             try:
                 from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
                 from transformers import AutoFeatureExtractor
-                self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
-                    config.models_path / CHECKER_PATH
-                )
-                self.feature_extractor = AutoFeatureExtractor.from_pretrained(
-                    config.models_path / CHECKER_PATH)
-                logger.info('NSFW checker initialized')
+
+                self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(config.models_path / CHECKER_PATH)
+                self.feature_extractor = AutoFeatureExtractor.from_pretrained(config.models_path / CHECKER_PATH)
+                logger.info("NSFW checker initialized")
             except Exception as e:
-                logger.warning(f'Could not load NSFW checker: {str(e)}')
+                logger.warning(f"Could not load NSFW checker: {str(e)}")
         else:
-            logger.info('NSFW checker loading disabled')
+            logger.info("NSFW checker loading disabled")
         self.tried_load = True

     @classmethod
@@ -51,7 +52,7 @@ class SafetyChecker:
     def has_nsfw_concept(self, image: Image) -> bool:
         if not self.safety_checker_available():
             return False

         device = choose_torch_device()
         features = self.feature_extractor([image], return_tensors="pt")
         features.to(device)
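Downstream, the extractor/checker pair is typically wired together like this; a hedged sketch assuming the standard diffusers safety-checker call convention (the helper name and device choice are illustrative, not code from this diff):

    import numpy as np
    import torch
    from PIL import Image

    def check_nsfw(checker, extractor, image: Image.Image) -> bool:
        # checker/extractor stand in for the lazily loaded class attributes above.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        features = extractor([image], return_tensors="pt").to(device)
        # diffusers' checker returns (possibly blacked-out images, per-image flags).
        _, flags = checker(images=np.array(image)[None, ...], clip_input=features.pixel_values)
        return bool(flags[0])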
@@ -5,12 +5,8 @@ def _conv_forward_asymmetric(self, input, weight, bias):
     """
     Patch for Conv2d._conv_forward that supports asymmetric padding
     """
-    working = nn.functional.pad(
-        input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"]
-    )
-    working = nn.functional.pad(
-        working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"]
-    )
+    working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"])
+    working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"])
     return nn.functional.conv2d(
         working,
         weight,
@@ -32,18 +28,14 @@ def configure_model_padding(model, seamless, seamless_axes):
         if seamless:
             m.asymmetric_padding_mode = {}
             m.asymmetric_padding = {}
-            m.asymmetric_padding_mode["x"] = (
-                "circular" if ("x" in seamless_axes) else "constant"
-            )
+            m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
             m.asymmetric_padding["x"] = (
                 m._reversed_padding_repeated_twice[0],
                 m._reversed_padding_repeated_twice[1],
                 0,
                 0,
             )
-            m.asymmetric_padding_mode["y"] = (
-                "circular" if ("y" in seamless_axes) else "constant"
-            )
+            m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
             m.asymmetric_padding["y"] = (
                 0,
                 0,
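The patched forward above boils down to circular padding along the seamless axes and zero padding on the others; a standalone sketch of the same idea (shapes and kernel size are illustrative):

    import torch
    import torch.nn as nn

    x = torch.randn(1, 3, 8, 8)
    conv = nn.Conv2d(3, 3, kernel_size=3, padding=0)  # padding handled manually

    # Wrap left/right (seamless x axis), zero-pad top/bottom (non-seamless y axis).
    working = nn.functional.pad(x, (1, 1, 0, 0), mode="circular")
    working = nn.functional.pad(working, (0, 0, 1, 1), mode="constant")
    print(conv(working).shape)  # torch.Size([1, 3, 8, 8]) -- spatial size preserved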
@@ -39,23 +39,18 @@ CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
 CLIPSEG_SIZE = 352
 config = InvokeAIAppConfig.get_config()

+
 class SegmentedGrayscale(object):
     def __init__(self, image: Image, heatmap: torch.Tensor):
         self.heatmap = heatmap
         self.image = image

     def to_grayscale(self, invert: bool = False) -> Image:
-        return self._rescale(
-            Image.fromarray(
-                np.uint8(255 - self.heatmap * 255 if invert else self.heatmap * 255)
-            )
-        )
+        return self._rescale(Image.fromarray(np.uint8(255 - self.heatmap * 255 if invert else self.heatmap * 255)))

     def to_mask(self, threshold: float = 0.5) -> Image:
         discrete_heatmap = self.heatmap.lt(threshold).int()
-        return self._rescale(
-            Image.fromarray(np.uint8(discrete_heatmap * 255), mode="L")
-        )
+        return self._rescale(Image.fromarray(np.uint8(discrete_heatmap * 255), mode="L"))

     def to_transparent(self, invert: bool = False) -> Image:
         transparent_image = self.image.copy()
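Note that to_mask selects pixels below the threshold via Tensor.lt; a tiny worked example with an illustrative 2x2 heatmap:

    import numpy as np
    import torch
    from PIL import Image

    heatmap = torch.tensor([[0.2, 0.8], [0.6, 0.4]])
    discrete = heatmap.lt(0.5).int()  # 1 where heatmap < 0.5, else 0
    print(discrete.tolist())          # [[1, 0], [0, 1]]
    mask = Image.fromarray(np.uint8(discrete * 255), mode="L")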
@@ -67,11 +62,7 @@ class SegmentedGrayscale(object):

     # unscales and uncrops the 352x352 heatmap so that it matches the image again
     def _rescale(self, heatmap: Image) -> Image:
-        size = (
-            self.image.width
-            if (self.image.width > self.image.height)
-            else self.image.height
-        )
+        size = self.image.width if (self.image.width > self.image.height) else self.image.height
         resized_image = heatmap.resize((size, size), resample=Image.Resampling.LANCZOS)
         return resized_image.crop((0, 0, self.image.width, self.image.height))
 
@@ -87,12 +78,8 @@ class Txt2Mask(object):

         # BUG: we are not doing anything with the device option at this time
         self.device = device
-        self.processor = AutoProcessor.from_pretrained(
-            CLIPSEG_MODEL, cache_dir=config.cache_dir
-        )
-        self.model = CLIPSegForImageSegmentation.from_pretrained(
-            CLIPSEG_MODEL, cache_dir=config.cache_dir
-        )
+        self.processor = AutoProcessor.from_pretrained(CLIPSEG_MODEL, cache_dir=config.cache_dir)
+        self.model = CLIPSegForImageSegmentation.from_pretrained(CLIPSEG_MODEL, cache_dir=config.cache_dir)

     @torch.no_grad()
     def segment(self, image, prompt: str) -> SegmentedGrayscale:
@@ -107,9 +94,7 @@ class Txt2Mask(object):
         image = ImageOps.exif_transpose(image)
         img = self._scale_and_crop(image)

-        inputs = self.processor(
-            text=[prompt], images=[img], padding=True, return_tensors="pt"
-        )
+        inputs = self.processor(text=[prompt], images=[img], padding=True, return_tensors="pt")
         outputs = self.model(**inputs)
         heatmap = torch.sigmoid(outputs.logits)
         return SegmentedGrayscale(image, heatmap)
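End to end, the class reads roughly as follows in use; a sketch assuming the constructor and methods shown in this diff (file names and prompt are illustrative):

    from PIL import Image

    txt2mask = Txt2Mask(device="cpu")  # device is currently unused, per the BUG note above
    segmented = txt2mask.segment(Image.open("photo.png"), "a red hat")
    segmented.to_mask(threshold=0.5).save("hat_mask.png")  # L-mode mask image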