From fb30b7d17a06602a326e80c1b6bf4b8db263c440 Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Wed, 15 Mar 2023 23:46:35 +1100
Subject: [PATCH 1/4] feat(backend): add image_to_dataURL util

---
 invokeai/backend/util/util.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/invokeai/backend/util/util.py b/invokeai/backend/util/util.py
index 3fab6e18b0..d5239af834 100644
--- a/invokeai/backend/util/util.py
+++ b/invokeai/backend/util/util.py
@@ -3,6 +3,9 @@ import math
 import multiprocessing as mp
 import os
 import re
+import io
+import base64
+
 from collections import abc
 from inspect import isfunction
 from pathlib import Path
@@ -364,3 +367,16 @@ def url_attachment_name(url: str) -> dict:
 def download_with_progress_bar(url: str, dest: Path) -> bool:
     result = download_with_resume(url, dest, access_token=None)
     return result is not None
+
+
+def image_to_dataURL(image: Image.Image, image_format: str = "PNG") -> str:
+    """
+    Converts an image into a base64 image dataURL.
+    """
+    buffered = io.BytesIO()
+    image.save(buffered, format=image_format)
+    mime_type = Image.MIME.get(image_format.upper(), "image/" + image_format.lower())
+    image_base64 = f"data:{mime_type};base64," + base64.b64encode(
+        buffered.getvalue()
+    ).decode("UTF-8")
+    return image_base64

From b194180f768ec49f9aac08d50dcb845cdf40a16b Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Wed, 15 Mar 2023 23:47:18 +1100
Subject: [PATCH 2/4] feat(backend): make fast latents method static

---
 invokeai/backend/generator/base.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/invokeai/backend/generator/base.py b/invokeai/backend/generator/base.py
index db1a387002..125f208b0a 100644
--- a/invokeai/backend/generator/base.py
+++ b/invokeai/backend/generator/base.py
@@ -497,7 +497,8 @@ class Generator:
         matched_result.paste(init_image, (0, 0), mask=multiplied_blurred_init_mask)
         return matched_result
 
-    def sample_to_lowres_estimated_image(self, samples):
+    @staticmethod
+    def sample_to_lowres_estimated_image(samples):
         # origingally adapted from code by @erucipe and @keturn here:
         # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7
 

From 5347c12fed1ab55233f1e2452988ed13054ea11c Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Wed, 15 Mar 2023 23:48:30 +1100
Subject: [PATCH 3/4] fix(nodes): fix schema gen for GraphExecutionState

---
 invokeai/app/services/graph.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/invokeai/app/services/graph.py b/invokeai/app/services/graph.py
index 8134b47167..0d4102c416 100644
--- a/invokeai/app/services/graph.py
+++ b/invokeai/app/services/graph.py
@@ -773,6 +773,24 @@ class GraphExecutionState(BaseModel):
         default_factory=dict,
     )
 
+    # Declare all fields as required; necessary for OpenAPI schema generation build.
+    # Technically only fields without a `default_factory` need to be listed here.
+    # See: https://github.com/pydantic/pydantic/discussions/4577
+    class Config:
+        schema_extra = {
+            'required': [
+                'id',
+                'graph',
+                'execution_graph',
+                'executed',
+                'executed_history',
+                'results',
+                'errors',
+                'prepared_source_mapping',
+                'source_prepared_mapping',
+            ]
+        }
+
     def next(self) -> BaseInvocation | None:
         """Gets the next node ready to execute."""
 

From 67f8f222d913cf6770495854e43133d991e0c720 Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Wed, 15 Mar 2023 23:50:26 +1100
Subject: [PATCH 4/4] fix(nodes): fix step_callback + fast latents generation

this depends on the small change in #2957
---
 invokeai/app/invocations/generate.py | 30 ++++++++++++++++++++++++++------
 invokeai/app/services/events.py      | 11 ++++++++---
 2 files changed, 32 insertions(+), 9 deletions(-)

diff --git a/invokeai/app/invocations/generate.py b/invokeai/app/invocations/generate.py
index 70892ecde9..b8140b11e9 100644
--- a/invokeai/app/invocations/generate.py
+++ b/invokeai/app/invocations/generate.py
@@ -4,6 +4,8 @@ from datetime import datetime, timezone
 from typing import Any, Literal, Optional, Union
 
 import numpy as np
+
+from torch import Tensor
 from PIL import Image
 from pydantic import Field
 from skimage.exposure.histogram_matching import match_histograms
@@ -12,7 +14,9 @@ from ..services.image_storage import ImageType
 from ..services.invocation_services import InvocationServices
 from .baseinvocation import BaseInvocation, InvocationContext
 from .image import ImageField, ImageOutput
-from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator
+from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator, Generator
+from ...backend.stable_diffusion import PipelineIntermediateState
+from ...backend.util.util import image_to_dataURL
 
 SAMPLER_NAME_VALUES = Literal[
     tuple(InvokeAIGenerator.schedulers())
@@ -41,18 +45,32 @@ class TextToImageInvocation(BaseInvocation):
 
     # TODO: pass this an emitter method or something? or a session for dispatching?
     def dispatch_progress(
-        self, context: InvocationContext, sample: Any = None, step: int = 0
-    ) -> None:
+        self, context: InvocationContext, sample: Tensor, step: int
+    ) -> None:
+        # TODO: only output a preview image when requested
+        image = Generator.sample_to_lowres_estimated_image(sample)
+
+        (width, height) = image.size
+        width *= 8
+        height *= 8
+
+        dataURL = image_to_dataURL(image, image_format="JPEG")
+
         context.services.events.emit_generator_progress(
             context.graph_execution_state_id,
             self.id,
+            {
+                "width": width,
+                "height": height,
+                "dataURL": dataURL
+            },
             step,
-            float(step) / float(self.steps),
+            self.steps,
         )
 
     def invoke(self, context: InvocationContext) -> ImageOutput:
-        def step_callback(sample, step=0):
-            self.dispatch_progress(context, sample, step)
+        def step_callback(state: PipelineIntermediateState):
+            self.dispatch_progress(context, state.latents, state.step)
 
         # Handle invalid model parameter
         # TODO: figure out if this can be done via a validator that uses the model_cache
diff --git a/invokeai/app/services/events.py b/invokeai/app/services/events.py
index e2ab4e61e3..c8eb7671d0 100644
--- a/invokeai/app/services/events.py
+++ b/invokeai/app/services/events.py
@@ -1,7 +1,10 @@
 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
 
-from typing import Any, Dict
+from typing import Any, Dict, TypedDict
 
+ProgressImage = TypedDict(
+    "ProgressImage", {"dataURL": str, "width": int, "height": int}
+)
 
 class EventServiceBase:
     session_event: str = "session_event"
@@ -23,8 +26,9 @@
         self,
         graph_execution_state_id: str,
         invocation_id: str,
+        progress_image: ProgressImage | None,
         step: int,
-        percent: float,
+        total_steps: int,
     ) -> None:
         """Emitted when there is generation progress"""
         self.__emit_session_event(
@@ -32,8 +36,9 @@
             payload=dict(
                 graph_execution_state_id=graph_execution_state_id,
                 invocation_id=invocation_id,
+                progress_image=progress_image,
                 step=step,
-                percent=percent,
+                total_steps=total_steps,
            ),
         )
 
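For reference, a minimal sketch of how a client might consume the "generator_progress" payload once these patches are applied. The handler name and the way the event is delivered are hypothetical; only the payload fields (progress_image with dataURL, width and height, plus step and total_steps) come from the diffs above.

import base64
import io

from PIL import Image


def on_generator_progress(payload: dict) -> None:
    """Hypothetical handler for the "generator_progress" session event."""
    # step/total_steps replace the old precomputed `percent`, so the client
    # derives the fraction itself.
    percent = payload["step"] / payload["total_steps"]

    progress_image = payload.get("progress_image")
    if progress_image is not None:
        # The dataURL has the form "data:<mime>;base64,<data>"; strip the
        # header and decode the remainder back into a PIL image. The reported
        # width/height describe the final output; the preview itself is an
        # eighth of that size, since it is estimated directly from the latents.
        b64_data = progress_image["dataURL"].partition(",")[2]
        preview = Image.open(io.BytesIO(base64.b64decode(b64_data)))
        print(f"preview {preview.size} for a {progress_image['width']}x{progress_image['height']} render")

    print(f"generation {percent:.0%} complete")

Sending step and total_steps instead of a precomputed percent leaves formatting up to the consumer, and because sample_to_lowres_estimated_image is now a @staticmethod, the invocation can build the preview without holding a Generator instance.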