Merge branch 'main' into feat/use-custom-vaes

Lincoln Stein
2023-03-23 10:32:56 -04:00
55 changed files with 854 additions and 724 deletions

View File

@@ -490,7 +490,7 @@ class Args(object):
"-z",
type=int,
default=6,
choices=range(0, 9),
choices=range(0, 10),
dest="png_compression",
help="level of PNG compression, from 0 (none) to 9 (maximum). Default is 6.",
)
@@ -943,7 +943,6 @@ class Args(object):
"--png_compression",
"-z",
type=int,
default=6,
choices=range(0, 10),
dest="png_compression",
help="level of PNG compression, from 0 (none) to 9 (maximum). [6]",

View File

@@ -497,7 +497,8 @@ class Generator:
matched_result.paste(init_image, (0, 0), mask=multiplied_blurred_init_mask)
return matched_result
def sample_to_lowres_estimated_image(self, samples):
@staticmethod
def sample_to_lowres_estimated_image(samples):
# originally adapted from code by @erucipe and @keturn here:
# https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7
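
For context, the method being turned into a @staticmethod builds a quick RGB preview straight from the latents, following the approach in the thread linked above. A rough sketch of that idea, assuming an SD-style 4-channel latent of shape [4, H/8, W/8]; the projection weights below are illustrative placeholders, not the file's actual values:

import torch
from PIL import Image

def latents_to_lowres_rgb(samples: torch.Tensor) -> Image.Image:
    # Project the 4 latent channels to RGB with a small fixed matrix
    # instead of running the full VAE decoder.
    latent_rgb_factors = torch.tensor(
        [
            #  R      G      B
            [0.30, 0.19, 0.21],      # illustrative weights only
            [0.19, 0.29, 0.37],
            [-0.16, 0.19, 0.21],
            [-0.14, -0.20, -0.26],
        ],
        dtype=samples.dtype,
        device=samples.device,
    )
    rgb = torch.einsum("chw,cr->rhw", samples, latent_rgb_factors)
    rgb = rgb - rgb.min()                       # min-max normalize to 0..1
    rgb = rgb / rgb.max().clamp(min=1e-8)
    arr = (rgb.permute(1, 2, 0).detach().cpu().numpy() * 255).astype("uint8")
    return Image.fromarray(arr)                 # 1/8-resolution preview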

View File

@@ -159,6 +159,7 @@ class Inpaint(Img2Img):
seam_size: int,
seam_blur: int,
prompt,
seed,
sampler,
steps,
cfg_scale,
@@ -192,7 +193,7 @@ class Inpaint(Img2Img):
seam_noise = self.get_noise(im.width, im.height)
result = make_image(seam_noise)
result = make_image(seam_noise, seed)
return result
@@ -342,6 +343,7 @@ class Inpaint(Img2Img):
seam_size,
seam_blur,
prompt,
seed,
sampler,
seam_steps,
cfg_scale,
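
The new seed parameter threads the caller's seed into the seam-repair pass, so make_image(seam_noise, seed) can reproduce the same seam fill on every run instead of drawing unseeded noise. A minimal sketch of the idea (everything except the seed argument itself is illustrative):

import torch

def seeded_noise(width: int, height: int, seed: int) -> torch.Tensor:
    # A dedicated generator keeps the seam pass reproducible without
    # disturbing the global RNG state.
    generator = torch.Generator().manual_seed(seed)
    return torch.randn((1, 4, height // 8, width // 8), generator=generator)

def repair_seams(make_image, width: int, height: int, seed: int):
    seam_noise = seeded_noise(width, height, seed)
    # Passing the seed along lets the inner generation step re-derive
    # the same noise for the seam region as the main pass.
    return make_image(seam_noise, seed)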

View File

@@ -1086,9 +1086,10 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
dlogging.set_verbosity_error()
checkpoint = (
load_file(checkpoint_path)
if Path(checkpoint_path).suffix == ".safetensors"
else torch.load(checkpoint_path)
torch.load(checkpoint_path)
if Path(checkpoint_path).suffix == ".ckpt"
else load_file(checkpoint_path)
)
cache_dir = global_cache_dir("hub")
pipeline_class = (
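
Both loader changes in this commit flip the suffix test: rather than special-casing .safetensors and falling back to torch.load for everything else, the code now special-cases legacy .ckpt files and treats anything else as a safetensors file. A standalone sketch of that dispatch (the helper name and map_location are assumptions, not the project's code):

from pathlib import Path

import torch
from safetensors.torch import load_file

def load_state_dict(checkpoint_path: str) -> dict:
    path = Path(checkpoint_path)
    # Only legacy .ckpt checkpoints go through pickle-based torch.load;
    # every other suffix is assumed to be a safetensors file.
    if path.suffix == ".ckpt":
        return torch.load(path, map_location="cpu")
    return load_file(str(path))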

View File

@@ -730,9 +730,9 @@ Apply picklescanner to the indicated checkpoint and issue a warning
# another round of heuristics to guess the correct config file.
checkpoint = (
safetensors.torch.load_file(model_path)
if model_path.suffix == ".safetensors"
else torch.load(model_path)
torch.load(model_path)
if model_path.suffix == ".ckpt"
else safetensors.torch.load_file(model_path)
)
# additional probing needed if no config file provided
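
The surrounding code runs a pickle scanner over legacy checkpoints before loading them. As a rough standalone illustration of that kind of check, assuming the picklescan package (the exact helper and warning text the project uses may differ):

from pathlib import Path

from picklescan.scanner import scan_file_path  # assumed dependency

def warn_if_suspicious(model_path: Path) -> None:
    if model_path.suffix != ".ckpt":
        return  # safetensors files carry no pickled code, so nothing to scan
    result = scan_file_path(str(model_path))
    if result.infected_files or result.scan_err:
        print(f"** WARNING: {model_path} may contain malicious code; do not load it **")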

View File

@@ -3,6 +3,9 @@ import math
import multiprocessing as mp
import os
import re
import io
import base64
from collections import abc
from inspect import isfunction
from pathlib import Path
@@ -364,3 +367,16 @@ def url_attachment_name(url: str) -> dict:
def download_with_progress_bar(url: str, dest: Path) -> bool:
result = download_with_resume(url, dest, access_token=None)
return result is not None
def image_to_dataURL(image: Image.Image, image_format: str = "PNG") -> str:
"""
Converts an image into a base64 image dataURL.
"""
buffered = io.BytesIO()
image.save(buffered, format=image_format)
mime_type = Image.MIME.get(image_format.upper(), "image/" + image_format.lower())
image_base64 = f"data:{mime_type};base64," + base64.b64encode(
buffered.getvalue()
).decode("UTF-8")
return image_base64
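
As a quick usage check for the new helper, any PIL image works, and the returned string can be dropped straight into an <img src=...> attribute or a websocket payload:

from PIL import Image

# tiny solid-color test image; any PIL image works
img = Image.new("RGB", (4, 4), color=(255, 0, 0))

data_url = image_to_dataURL(img)                      # PNG by default
assert data_url.startswith("data:image/png;base64,")

jpeg_url = image_to_dataURL(img, image_format="JPEG")
assert jpeg_url.startswith("data:image/jpeg;base64,")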