Merge branch 'main' into patch-1

Lincoln Stein 2023-04-18 19:14:28 -04:00 committed by GitHub
commit fd80e84ea6
5 changed files with 22 additions and 82 deletions

View File

@@ -148,6 +148,11 @@ not supported.
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
```

+_For non-GPU systems:_
+```terminal
+pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
+```
+
_For Macintoshes, either Intel or M1/M2:_
```sh
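
Each index URL above selects a different PyTorch build (ROCm, CPU-only, or the default CUDA wheels). A quick way to confirm which backend actually got installed, sketched with standard `torch` calls rather than anything InvokeAI-specific:

```python
# Check which PyTorch build the chosen wheel installed.
import torch

print(torch.__version__)                  # e.g. "2.0.0+rocm5.4.2" or "2.0.0+cpu"
print(torch.cuda.is_available())          # True for CUDA and ROCm builds alike
print(torch.backends.mps.is_available())  # True on Apple-silicon (M1/M2) Macs
```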

View File

@@ -171,7 +171,7 @@ class TextToLatentsInvocation(BaseInvocation):
    # TODO: pass this an emitter method or something? or a session for dispatching?
    def dispatch_progress(
        self, context: InvocationContext, intermediate_state: PipelineIntermediateState
    ) -> None:
        if (context.services.queue.is_canceled(context.graph_execution_state_id)):
            raise CanceledException
@@ -185,7 +185,7 @@ class TextToLatentsInvocation(BaseInvocation):
            diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)

    def get_model(self, model_manager: ModelManager) -> StableDiffusionGeneratorPipeline:
        model_info = choose_model(model_manager, self.model)
        model_name = model_info['model_name']
@@ -195,7 +195,7 @@ class TextToLatentsInvocation(BaseInvocation):
            model=model,
            scheduler_name=self.scheduler
        )

        if isinstance(model, DiffusionPipeline):
            for component in [model.unet, model.vae]:
                configure_model_padding(component,
@@ -292,57 +292,7 @@ class LatentsToLatentsInvocation(TextToLatentsInvocation):
        initial_latents = latent if self.strength < 1.0 else torch.zeros_like(
            latent, device=model.device, dtype=latent.dtype
        )
-        timesteps, _ = model.get_img2img_timesteps(
-            self.steps,
-            self.strength,
-            device=model.device,
-        )
-
-        result_latents, result_attention_map_saver = model.latents_from_embeddings(
-            latents=initial_latents,
-            timesteps=timesteps,
-            noise=noise,
-            num_inference_steps=self.steps,
-            conditioning_data=conditioning_data,
-            callback=step_callback
-        )
-
-        # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
-        torch.cuda.empty_cache()
-
-        name = f'{context.graph_execution_state_id}__{self.id}'
-        context.services.latents.set(name, result_latents)
-        return LatentsOutput(
-            latents=LatentsField(latents_name=name)
-        )
-
-class LatentsToLatentsInvocation(TextToLatentsInvocation):
-    """Generates latents using latents as base image."""
-
-    type: Literal["l2l"] = "l2l"
-
-    # Inputs
-    latents: Optional[LatentsField] = Field(description="The latents to use as a base image")
-    strength: float = Field(default=0.5, description="The strength of the latents to use")
-
-    def invoke(self, context: InvocationContext) -> LatentsOutput:
-        noise = context.services.latents.get(self.noise.latents_name)
-        latent = context.services.latents.get(self.latents.latents_name)
-
-        def step_callback(state: PipelineIntermediateState):
-            self.dispatch_progress(context, state)
-
-        model = self.get_model(context.services.model_manager)
-        conditioning_data = self.get_conditioning_data(model)
-
-        # TODO: Verify the noise is the right size
-        initial_latents = latent if self.strength < 1.0 else torch.zeros_like(
-            latent, device=model.device, dtype=latent.dtype
-        )
        timesteps, _ = model.get_img2img_timesteps(
            self.steps,
            self.strength,
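
The deleted block above is a duplicate of `LatentsToLatentsInvocation` that the merge removed; both the surviving copy and the deleted one call `model.get_img2img_timesteps(self.steps, self.strength, ...)`. For orientation, a hedged sketch of the usual diffusers-style computation behind such a helper; the function below is illustrative and mirrors `StableDiffusionImg2ImgPipeline.get_timesteps`, not InvokeAI's actual implementation:

```python
# Illustrative img2img timestep selection (names and signature assumed).
def get_img2img_timesteps(num_inference_steps: int, strength: float, scheduler_timesteps):
    # strength=1.0 keeps the whole schedule (behaves like text-to-image);
    # lower strength skips the earliest, noisiest steps so more of the
    # initial latents survives denoising.
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return scheduler_timesteps[t_start:], num_inference_steps - t_start

# e.g. a 30-step schedule at strength 0.6 keeps the final 18 steps:
sched = list(range(29, -1, -1))  # stand-in for scheduler.timesteps
ts, n_steps = get_img2img_timesteps(30, 0.6, sched)
assert len(ts) == 18 and n_steps == 18
```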

View File

@@ -2,7 +2,6 @@
import copy
import itertools
-import traceback
import uuid
from types import NoneType
from typing import (
@@ -26,7 +25,6 @@ from ..invocations.baseinvocation import (
    BaseInvocationOutput,
    InvocationContext,
)
-from .invocation_services import InvocationServices


class EdgeConnection(BaseModel):
@@ -215,7 +213,7 @@ InvocationOutputsUnion = Union[BaseInvocationOutput.get_all_subclasses_tuple()]

class Graph(BaseModel):
-    id: str = Field(description="The id of this graph", default_factory=uuid.uuid4)
+    id: str = Field(description="The id of this graph", default_factory=lambda: uuid.uuid4().__str__())
    # TODO: use a list (and never use dict in a BaseModel) because pydantic/fastapi hates me
    nodes: dict[str, Annotated[InvocationsUnion, Field(discriminator="type")]] = Field(
        description="The nodes in this graph", default_factory=dict
@@ -750,9 +748,7 @@ class Graph(BaseModel):
class GraphExecutionState(BaseModel):
    """Tracks the state of a graph execution"""

-    id: str = Field(
-        description="The id of the execution state", default_factory=uuid.uuid4
-    )
+    id: str = Field(description="The id of the execution state", default_factory=lambda: uuid.uuid4().__str__())

    # TODO: Store a reference to the graph instead of the actual graph?
    graph: Graph = Field(description="The graph being executed")
@@ -1171,7 +1167,7 @@ class LibraryGraph(BaseModel):
        if len(v) != len(set(i.alias for i in v)):
            raise ValueError("Duplicate exposed alias")
        return v

    @root_validator
    def validate_exposed_nodes(cls, values):
        graph = values['graph']
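
The two `default_factory` changes in this file are the same fix: pydantic v1 does not validate or coerce default values unless `Config.validate_all` is enabled, so `default_factory=uuid.uuid4` would leave a `UUID` object sitting in a field annotated `str`. A minimal sketch of the difference, assuming pydantic v1 as used by the codebase at this time:

```python
import uuid
from pydantic import BaseModel, Field

class Demo(BaseModel):
    # Defaults bypass validation in pydantic v1, so this stays a UUID object:
    bad: str = Field(default_factory=uuid.uuid4)
    # The lambda hands pydantic a real str up front:
    good: str = Field(default_factory=lambda: uuid.uuid4().__str__())

d = Demo()
print(type(d.bad))   # <class 'uuid.UUID'>, despite the str annotation
print(type(d.good))  # <class 'str'>
```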

View File

@@ -1,30 +1,17 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

+import time
+
from abc import ABC, abstractmethod
from queue import Queue
-import time
+
+from pydantic import BaseModel, Field
-# TODO: make this serializable
-class InvocationQueueItem:
-    # session_id: str
-    graph_execution_state_id: str
-    invocation_id: str
-    invoke_all: bool
-    timestamp: float
-
-    def __init__(
-        self,
-        # session_id: str,
-        graph_execution_state_id: str,
-        invocation_id: str,
-        invoke_all: bool = False,
-    ):
-        # self.session_id = session_id
-        self.graph_execution_state_id = graph_execution_state_id
-        self.invocation_id = invocation_id
-        self.invoke_all = invoke_all
-        self.timestamp = time.time()
+class InvocationQueueItem(BaseModel):
+    graph_execution_state_id: str = Field(description="The ID of the graph execution state")
+    invocation_id: str = Field(description="The ID of the node being invoked")
+    invoke_all: bool = Field(default=False)
+    timestamp: float = Field(default_factory=time.time)

class InvocationQueueABC(ABC):
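
The rewrite above also resolves the old `# TODO: make this serializable`: as a pydantic model, the queue item now round-trips through JSON for free, where the plain `__init__` class would have needed hand-written (de)serialization. A small usage sketch against the new class as it appears in the diff (pydantic v1 API):

```python
import time
from pydantic import BaseModel, Field

class InvocationQueueItem(BaseModel):
    graph_execution_state_id: str = Field(description="The ID of the graph execution state")
    invocation_id: str = Field(description="The ID of the node being invoked")
    invoke_all: bool = Field(default=False)
    timestamp: float = Field(default_factory=time.time)

item = InvocationQueueItem(graph_execution_state_id="exec-1", invocation_id="node-1")
payload = item.json()                              # serialize to a JSON string
restored = InvocationQueueItem.parse_raw(payload)  # rebuild from the wire format
assert restored == item
```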

View File

@@ -57,7 +57,7 @@ class HuggingFaceConceptsLibrary(object):
                self.concept_list.extend(list(local_concepts_to_add))
                return self.concept_list
            return self.concept_list
-        else:
+        elif Globals.internet_available is True:
            try:
                models = self.hf_api.list_models(
                    filter=ModelFilter(model_name="sd-concepts-library/")
@@ -73,6 +73,8 @@ class HuggingFaceConceptsLibrary(object):
                    " ** You may load .bin and .pt file(s) manually using the --embedding_directory argument."
                )
            return self.concept_list
+        else:
+            return self.concept_list

    def get_concept_model_path(self, concept_name: str) -> str:
        """