'''
SafetyChecker class - checks images against the StabilityAI NSFW filter
and blurs images that contain potential NSFW content.
'''
import diffusers
import numpy as np
import torch
import traceback
from diffusers.pipelines.stable_diffusion.safety_checker import (
    StableDiffusionSafetyChecker,
)
from pathlib import Path
from PIL import Image, ImageFilter
from transformers import AutoFeatureExtractor

import invokeai.assets.web as web_assets
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig

from .util import CPU_DEVICE

config = InvokeAIAppConfig.get_config()

class SafetyChecker(object):
    CAUTION_IMG = "caution.png"

    def __init__(self, device: torch.device):
        path = Path(web_assets.__path__[0]) / self.CAUTION_IMG
        caution = Image.open(path)
        self.caution_img = caution.resize((caution.width // 2, caution.height // 2))
        self.device = device

        try:
            safety_model_id = "CompVis/stable-diffusion-safety-checker"
            safety_model_path = config.cache_dir
            self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
                safety_model_id,
                local_files_only=True,
                cache_dir=safety_model_path,
            )
            self.safety_feature_extractor = AutoFeatureExtractor.from_pretrained(
                safety_model_id,
                local_files_only=True,
                cache_dir=safety_model_path,
            )
        except Exception:
            logger.error(
                "An error was encountered while installing the safety checker:"
            )
            print(traceback.format_exc())

    def check(self, image: Image.Image):
        """
        Check the provided image against the StabilityAI safety checker and return
        the original image if it passes, or a blurred copy if potential NSFW
        content is detected.
        """
        self.safety_checker.to(self.device)
        features = self.safety_feature_extractor([image], return_tensors="pt")
        features.to(self.device)

        # unfortunately checker requires the numpy version, so we have to convert back
        x_image = np.array(image).astype(np.float32) / 255.0
        # add a batch dimension and reorder HWC -> NCHW
        x_image = x_image[None].transpose(0, 3, 1, 2)

        diffusers.logging.set_verbosity_error()
        checked_image, has_nsfw_concept = self.safety_checker(
            images=x_image, clip_input=features.pixel_values
        )
        self.safety_checker.to(CPU_DEVICE)  # offload
        if has_nsfw_concept[0]:
            logger.warning(
                "An image with potential non-safe content has been detected. A blurred image will be returned."
            )
            return self.blur(image)
        else:
            return image

    def blur(self, input):
        blurry = input.filter(filter=ImageFilter.GaussianBlur(radius=32))
        try:
            if caution := self.caution_img:
                blurry.paste(caution, (0, 0), caution)
        except FileNotFoundError:
            pass
        return blurry
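

# A minimal usage sketch, not part of the original module: it assumes the
# CompVis safety-checker weights are already available in the configured
# local cache and that "sample.png" is a hypothetical input image path.
if __name__ == "__main__":
    # Run on CPU here; pass torch.device("cuda") instead if a GPU is available.
    checker = SafetyChecker(device=torch.device("cpu"))
    result = checker.check(Image.open("sample.png").convert("RGB"))
    # The result is the original image if it passed, or a blurred copy otherwise.
    result.save("checked.png")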