chore: ruff format
@@ -149,7 +149,7 @@ def dest_path(dest=None) -> Path:
             completer=path_completer,
             default=str(browse_start) + os.sep,
             vi_mode=True,
-            complete_while_typing=True
+            complete_while_typing=True,
             # Test that this is not needed on Windows
             # complete_style=CompleteStyle.READLINE_LIKE,
         )

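For context, the call being reformatted above is a prompt_toolkit prompt with path completion. A minimal standalone sketch of the same keyword arguments (the prompt text and the PathCompleter options here are illustrative, not taken from the installer):

import os
from prompt_toolkit import prompt
from prompt_toolkit.completion import PathCompleter

# Ask for a destination directory with tab-completion of filesystem paths.
path_completer = PathCompleter(expanduser=True)
selected = prompt(
    "Select the install destination: ",
    completer=path_completer,
    default=os.path.expanduser("~") + os.sep,
    vi_mode=True,
    complete_while_typing=True,
)
print(f"Chosen path: {selected}")
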
@@ -661,9 +661,7 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None
 
         field_kind = (
             # _field_kind is defined via InputField(), OutputField() or by one of the internal fields defined in this file
-            field.json_schema_extra.get("_field_kind", None)
-            if field.json_schema_extra
-            else None
+            field.json_schema_extra.get("_field_kind", None) if field.json_schema_extra else None
         )
 
         # must have a field_kind

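The hunk above only collapses the conditional expression onto one line; the underlying pattern is reading custom metadata from a pydantic v2 FieldInfo, where json_schema_extra may be None. A minimal sketch, with an illustrative model and key (not the InvokeAI node definitions):

from pydantic import BaseModel, Field

class DemoNode(BaseModel):
    # Custom metadata is stashed in json_schema_extra when the field is declared.
    value: int = Field(default=0, json_schema_extra={"_field_kind": "input"})
    plain: str = "no extra metadata here"

for name, field in DemoNode.model_fields.items():
    # json_schema_extra is None when no metadata was supplied, so guard before calling .get().
    field_kind = field.json_schema_extra.get("_field_kind", None) if field.json_schema_extra else None
    print(name, field_kind)  # value -> "input", plain -> None
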
@@ -90,7 +90,9 @@ class ImageRecordDeleteException(Exception):
 
 
 IMAGE_DTO_COLS = ", ".join(
-    ["images." + c for c in [
+    [
+        "images." + c
+        for c in [
             "image_name",
             "image_origin",
             "image_category",

@@ -103,7 +105,8 @@ IMAGE_DTO_COLS = ", ".join(
             "updated_at",
             "deleted_at",
             "starred",
-    ]]
+        ]
+    ]
 )
 
 

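The two hunks above only re-wrap the comprehension; the resulting value is unchanged. A standalone illustration of what the join produces, namely a comma-separated, table-qualified column list for a SELECT (the column subset is illustrative):

cols = ["image_name", "image_origin", "starred"]
image_dto_cols = ", ".join(["images." + c for c in cols])
print(image_dto_cols)  # images.image_name, images.image_origin, images.starred
print(f"SELECT {image_dto_cols} FROM images;")  # how such a column list is typically used
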
@@ -217,13 +217,16 @@ class ImageService(ImageServiceABC):
                 board_id,
             )
 
-            image_dtos = [image_record_to_dto(
+            image_dtos = [
+                image_record_to_dto(
                     image_record=r,
                     image_url=self.__invoker.services.urls.get_image_url(r.image_name),
                     thumbnail_url=self.__invoker.services.urls.get_image_url(r.image_name, True),
                     board_id=self.__invoker.services.board_image_records.get_board_for_image(r.image_name),
                     workflow_id=self.__invoker.services.workflow_image_records.get_workflow_for_image(r.image_name),
-                ) for r in results.items]
+                )
+                for r in results.items
+            ]
 
             return OffsetPaginatedResults[ImageDTO](
                 items=image_dtos,

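The reformatted comprehension maps each paginated record to a DTO. A generic sketch of that record-to-DTO pattern, using simplified stand-in classes rather than the real InvokeAI ImageRecord/ImageDTO types:

from dataclasses import dataclass

@dataclass
class ImageRecordStub:
    image_name: str

@dataclass
class ImageDTOStub:
    image_name: str
    image_url: str
    thumbnail_url: str

def record_to_dto(record: ImageRecordStub) -> ImageDTOStub:
    # Hypothetical URL construction stands in for the urls service used in the real code.
    return ImageDTOStub(
        image_name=record.image_name,
        image_url=f"/images/{record.image_name}",
        thumbnail_url=f"/images/{record.image_name}?thumbnail=true",
    )

records = [ImageRecordStub("a.png"), ImageRecordStub("b.png")]
image_dtos = [record_to_dto(r) for r in records]
print(image_dtos)
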
@@ -34,7 +34,9 @@ class DefaultSessionProcessor(SessionProcessorBase):
             name="session_processor",
             target=self.__process,
             kwargs={
-                "stop_event": self.__stop_event, "poll_now_event": self.__poll_now_event, "resume_event": self.__resume_event
+                "stop_event": self.__stop_event,
+                "poll_now_event": self.__poll_now_event,
+                "resume_event": self.__resume_event,
             },
         )
         self.__thread.start()

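The dict being re-wrapped above is passed to a worker thread as keyword arguments. A self-contained sketch of that pattern with threading.Event objects (the worker logic and names here are illustrative, not the session processor itself):

import threading
import time

def process(stop_event: threading.Event, poll_now_event: threading.Event) -> None:
    # Wake up when poked, exit when asked to stop.
    while not stop_event.is_set():
        poll_now_event.wait(timeout=0.1)
        poll_now_event.clear()

stop_event = threading.Event()
poll_now_event = threading.Event()
thread = threading.Thread(
    name="session_processor",
    target=process,
    kwargs={
        "stop_event": stop_event,
        "poll_now_event": poll_now_event,
    },
)
thread.start()
time.sleep(0.2)
stop_event.set()
thread.join()
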
@@ -1053,7 +1053,10 @@ class GraphExecutionState(BaseModel):
         # For every iterator, the parent must either not be a child of that iterator, or must match the prepared iteration for that iterator
         # TODO: Handle a node mapping to none
         eg = self.execution_graph.nx_graph_flat()
-        prepared_parent_mappings = [[(n, self._get_iteration_node(n, g, eg, it)) for n in next_node_parents] for it in iterator_node_prepared_combinations]  # type: ignore
+        prepared_parent_mappings = [
+            [(n, self._get_iteration_node(n, g, eg, it)) for n in next_node_parents]
+            for it in iterator_node_prepared_combinations
+        ]  # type: ignore
 
         # Create execution node for each iteration
         for iteration_mappings in prepared_parent_mappings:

@@ -130,7 +130,9 @@ class IPAttnProcessor2_0(torch.nn.Module):
             assert ip_adapter_image_prompt_embeds is not None
             assert len(ip_adapter_image_prompt_embeds) == len(self._weights)
 
-            for ipa_embed, ipa_weights, scale in zip(ip_adapter_image_prompt_embeds, self._weights, self._scales, strict=True):
+            for ipa_embed, ipa_weights, scale in zip(
+                ip_adapter_image_prompt_embeds, self._weights, self._scales, strict=True
+            ):
                 # The batch dimensions should match.
                 assert ipa_embed.shape[0] == encoder_hidden_states.shape[0]
                 # The token_len dimensions should match.

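The wrapped loop above relies on zip(..., strict=True) (Python 3.10+), which raises instead of silently truncating when the iterables differ in length. A standalone sketch with placeholder data:

embeds = ["embed_a", "embed_b"]
weights = [1.0, 0.5]
scales = [1.0, 1.0]

# Lengths match, so this iterates normally.
for embed, weight, scale in zip(embeds, weights, scales, strict=True):
    print(embed, weight, scale)

# With strict=True a length mismatch raises ValueError instead of dropping items.
try:
    list(zip(embeds, [1.0], strict=True))
except ValueError as err:
    print("mismatch detected:", err)
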
@@ -66,11 +66,13 @@ class CacheStats(object):
 
 class ModelLocker(object):
     "Forward declaration"
+
     pass
 
 
 class ModelCache(object):
     "Forward declaration"
+
     pass
 
 

@@ -193,6 +193,7 @@ class InvokeAIStableDiffusionPipelineOutput(StableDiffusionPipelineOutput):
         attention_map_saver (`AttentionMapSaver`): Object containing attention maps that can be displayed to the user
             after generation completes. Optional.
     """
+
     attention_map_saver: Optional[AttentionMapSaver]
 
 

@@ -642,7 +642,9 @@ class InvokeAIDiffuserComponent:
 
         deltas = None
         uncond_latents = None
-        weighted_cond_list = c_or_weighted_c_list if isinstance(c_or_weighted_c_list, list) else [(c_or_weighted_c_list, 1)]
+        weighted_cond_list = (
+            c_or_weighted_c_list if isinstance(c_or_weighted_c_list, list) else [(c_or_weighted_c_list, 1)]
+        )
 
         # below is fugly omg
         conditionings = [uc] + [c for c, weight in weighted_cond_list]

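The parenthesised expression above normalizes its input to a list of (conditioning, weight) pairs. A tiny standalone illustration of the same normalization, with placeholder values:

def normalize_weighted(c_or_weighted_c_list):
    # Accept either an already-weighted list or a single conditioning with implicit weight 1.
    return c_or_weighted_c_list if isinstance(c_or_weighted_c_list, list) else [(c_or_weighted_c_list, 1)]

print(normalize_weighted("cond"))                              # [('cond', 1)]
print(normalize_weighted([("cond_a", 0.7), ("cond_b", 0.3)]))  # passed through unchanged
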
@@ -732,7 +732,9 @@ class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
 
         controlnet_down_block_res_samples = ()
 
-        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks, strict=True):
+        for down_block_res_sample, controlnet_block in zip(
+            down_block_res_samples, self.controlnet_down_blocks, strict=True
+        ):
             down_block_res_sample = controlnet_block(down_block_res_sample)
             controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)
 

@@ -745,7 +747,9 @@ class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
             scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device)  # 0.1 to 1.0
 
             scales = scales * conditioning_scale
-            down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales, strict=True)]
+            down_block_res_samples = [
+                sample * scale for sample, scale in zip(down_block_res_samples, scales, strict=True)
+            ]
             mid_block_res_sample = mid_block_res_sample * scales[-1]  # last one
         else:
             down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]

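The scales line kept as context above uses torch.logspace to build per-resolution multipliers running from 0.1 up to 1.0. A standalone sketch of that call, with an illustrative block count:

import torch

num_down_blocks = 12  # illustrative count of residual samples
scales = torch.logspace(-1, 0, num_down_blocks + 1)  # 10**-1 ... 10**0, evenly spaced in log space
print(scales[0].item(), scales[-1].item())  # 0.1 and 1.0
print((scales * 0.5)[:3])  # scaling by a conditioning strength, as in the hunk above
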
@@ -229,7 +229,11 @@ def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t**5 - 15 * t**4 + 10
     gradients = torch.stack((torch.cos(angles), torch.sin(angles)), dim=-1).to(device)
 
     def tile_grads(slice1, slice2):
-        return gradients[slice1[0]:slice1[1], slice2[0]:slice2[1]].repeat_interleave(d[0], 0).repeat_interleave(d[1], 1)
+        return (
+            gradients[slice1[0] : slice1[1], slice2[0] : slice2[1]]
+            .repeat_interleave(d[0], 0)
+            .repeat_interleave(d[1], 1)
+        )
 
     def dot(grad, shift):
         return (

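tile_grads, reformatted above, tiles a small grid of gradient vectors up to pixel resolution with repeat_interleave. A minimal standalone example of that tensor operation, using a toy grid and repeat factors:

import torch

grid = torch.tensor([[1, 2], [3, 4]])
d = (2, 3)  # repeat factors per axis, standing in for d[0]/d[1] in the hunk above
tiled = grid.repeat_interleave(d[0], 0).repeat_interleave(d[1], 1)
print(tiled.shape)  # torch.Size([4, 6]): each row repeated twice, each column three times
print(tiled)
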
@@ -6,5 +6,7 @@ import warnings
 from invokeai.frontend.install.invokeai_configure import invokeai_configure as configure
 
 if __name__ == "__main__":
-    warnings.warn("configure_invokeai.py is deprecated, running 'invokeai-configure'...", DeprecationWarning, stacklevel=2)
+    warnings.warn(
+        "configure_invokeai.py is deprecated, running 'invokeai-configure'...", DeprecationWarning, stacklevel=2
+    )
     configure()

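The wrapped warnings.warn call uses stacklevel=2 so the deprecation message points at the caller of the shim rather than at the shim itself. A standalone sketch with a hypothetical function name:

import warnings

def deprecated_entry_point():
    # stacklevel=2 attributes the warning to whoever called this shim.
    warnings.warn(
        "deprecated_entry_point() is deprecated, use the replacement entry point instead", DeprecationWarning, stacklevel=2
    )

warnings.simplefilter("always")  # ensure the warning is shown for this demo
deprecated_entry_point()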