diff --git a/installer/lib/messages.py b/installer/lib/messages.py
index b1ffc256c1..6d95eaff59 100644
--- a/installer/lib/messages.py
+++ b/installer/lib/messages.py
@@ -137,7 +137,7 @@ def dest_path(dest=None) -> Path:
         path_completer = PathCompleter(
             only_directories=True,
             expanduser=True,
-            get_paths=lambda: [browse_start], # noqa: B023
+            get_paths=lambda: [browse_start],  # noqa: B023
             # get_paths=lambda: [".."].extend(list(browse_start.iterdir()))
         )
 
@@ -149,7 +149,7 @@ def dest_path(dest=None) -> Path:
             completer=path_completer,
             default=str(browse_start) + os.sep,
             vi_mode=True,
-            complete_while_typing=True
+            complete_while_typing=True,
             # Test that this is not needed on Windows
             # complete_style=CompleteStyle.READLINE_LIKE,
         )
diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py
index a984d67dfa..1b3e535d34 100644
--- a/invokeai/app/invocations/baseinvocation.py
+++ b/invokeai/app/invocations/baseinvocation.py
@@ -661,9 +661,7 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None
         field_kind = (
             # _field_kind is defined via InputField(), OutputField() or by one of the internal fields defined in this file
-            field.json_schema_extra.get("_field_kind", None)
-            if field.json_schema_extra
-            else None
+            field.json_schema_extra.get("_field_kind", None) if field.json_schema_extra else None
         )
 
         # must have a field_kind
 
diff --git a/invokeai/app/services/image_records/image_records_common.py b/invokeai/app/services/image_records/image_records_common.py
index a2738fbeb9..61b97c6032 100644
--- a/invokeai/app/services/image_records/image_records_common.py
+++ b/invokeai/app/services/image_records/image_records_common.py
@@ -90,20 +90,23 @@ class ImageRecordDeleteException(Exception):
 
 
 IMAGE_DTO_COLS = ", ".join(
-    ["images." + c for c in [
-        "image_name",
-        "image_origin",
-        "image_category",
-        "width",
-        "height",
-        "session_id",
-        "node_id",
-        "is_intermediate",
-        "created_at",
-        "updated_at",
-        "deleted_at",
-        "starred",
-    ]]
+    [
+        "images." + c
+        for c in [
+            "image_name",
+            "image_origin",
+            "image_category",
+            "width",
+            "height",
+            "session_id",
+            "node_id",
+            "is_intermediate",
+            "created_at",
+            "updated_at",
+            "deleted_at",
+            "starred",
+        ]
+    ]
 )
 
diff --git a/invokeai/app/services/images/images_default.py b/invokeai/app/services/images/images_default.py
index 2cb157f852..63fa78d6c8 100644
--- a/invokeai/app/services/images/images_default.py
+++ b/invokeai/app/services/images/images_default.py
@@ -217,13 +217,16 @@ class ImageService(ImageServiceABC):
                 board_id,
             )
 
-            image_dtos = [image_record_to_dto(
-                image_record=r,
-                image_url=self.__invoker.services.urls.get_image_url(r.image_name),
-                thumbnail_url=self.__invoker.services.urls.get_image_url(r.image_name, True),
-                board_id=self.__invoker.services.board_image_records.get_board_for_image(r.image_name),
-                workflow_id=self.__invoker.services.workflow_image_records.get_workflow_for_image(r.image_name),
-            ) for r in results.items]
+            image_dtos = [
+                image_record_to_dto(
+                    image_record=r,
+                    image_url=self.__invoker.services.urls.get_image_url(r.image_name),
+                    thumbnail_url=self.__invoker.services.urls.get_image_url(r.image_name, True),
+                    board_id=self.__invoker.services.board_image_records.get_board_for_image(r.image_name),
+                    workflow_id=self.__invoker.services.workflow_image_records.get_workflow_for_image(r.image_name),
+                )
+                for r in results.items
+            ]
 
             return OffsetPaginatedResults[ImageDTO](
                 items=image_dtos,
diff --git a/invokeai/app/services/invocation_processor/invocation_processor_base.py b/invokeai/app/services/invocation_processor/invocation_processor_base.py
index 700a15e643..7947a201dd 100644
--- a/invokeai/app/services/invocation_processor/invocation_processor_base.py
+++ b/invokeai/app/services/invocation_processor/invocation_processor_base.py
@@ -1,5 +1,5 @@
 from abc import ABC
 
 
-class InvocationProcessorABC(ABC): # noqa: B024
+class InvocationProcessorABC(ABC):  # noqa: B024
     pass
diff --git a/invokeai/app/services/session_processor/session_processor_default.py b/invokeai/app/services/session_processor/session_processor_default.py
index a8a6b7bf3c..28591fd7df 100644
--- a/invokeai/app/services/session_processor/session_processor_default.py
+++ b/invokeai/app/services/session_processor/session_processor_default.py
@@ -34,7 +34,9 @@ class DefaultSessionProcessor(SessionProcessorBase):
             name="session_processor",
             target=self.__process,
             kwargs={
-                "stop_event": self.__stop_event, "poll_now_event": self.__poll_now_event, "resume_event": self.__resume_event
+                "stop_event": self.__stop_event,
+                "poll_now_event": self.__poll_now_event,
+                "resume_event": self.__resume_event,
             },
         )
         self.__thread.start()
diff --git a/invokeai/app/services/shared/graph.py b/invokeai/app/services/shared/graph.py
index 63fca9d89e..29af1e2333 100644
--- a/invokeai/app/services/shared/graph.py
+++ b/invokeai/app/services/shared/graph.py
@@ -728,9 +728,9 @@ class Graph(BaseModel):
         # Validate that all inputs are derived from or match a single type
         input_field_types = {
             t
-                for input_field in input_fields
-                for t in ([input_field] if get_origin(input_field) is None else get_args(input_field))
-                if t != NoneType
+            for input_field in input_fields
+            for t in ([input_field] if get_origin(input_field) is None else get_args(input_field))
+            if t != NoneType
         }  # Get unique types
         type_tree = nx.DiGraph()
         type_tree.add_nodes_from(input_field_types)
@@ -1053,7 +1053,10 @@ class GraphExecutionState(BaseModel):
         # For every iterator, the parent must either not be a child of that iterator, or must match the prepared iteration for that iterator
         # TODO: Handle a node mapping to none
         eg = self.execution_graph.nx_graph_flat()
-        prepared_parent_mappings = [[(n, self._get_iteration_node(n, g, eg, it)) for n in next_node_parents] for it in iterator_node_prepared_combinations]  # type: ignore
+        prepared_parent_mappings = [
+            [(n, self._get_iteration_node(n, g, eg, it)) for n in next_node_parents]
+            for it in iterator_node_prepared_combinations
+        ]  # type: ignore
 
         # Create execution node for each iteration
         for iteration_mappings in prepared_parent_mappings:
diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py
index 0962fe9740..afbcc848d8 100644
--- a/invokeai/backend/install/model_install_backend.py
+++ b/invokeai/backend/install/model_install_backend.py
@@ -253,13 +253,13 @@ class ModelInstall(object):
         # folders style or similar
         elif path.is_dir() and any(
             (path / x).exists()
-                for x in {
-                    "config.json",
-                    "model_index.json",
-                    "learned_embeds.bin",
-                    "pytorch_lora_weights.bin",
-                    "pytorch_lora_weights.safetensors",
-                }
+            for x in {
+                "config.json",
+                "model_index.json",
+                "learned_embeds.bin",
+                "pytorch_lora_weights.bin",
+                "pytorch_lora_weights.safetensors",
+            }
         ):
             models_installed.update({str(model_path_id_or_url): self._install_path(path)})
 
diff --git a/invokeai/backend/ip_adapter/attention_processor.py b/invokeai/backend/ip_adapter/attention_processor.py
index 1ae731290e..195cb12d1b 100644
--- a/invokeai/backend/ip_adapter/attention_processor.py
+++ b/invokeai/backend/ip_adapter/attention_processor.py
@@ -130,7 +130,9 @@ class IPAttnProcessor2_0(torch.nn.Module):
             assert ip_adapter_image_prompt_embeds is not None
             assert len(ip_adapter_image_prompt_embeds) == len(self._weights)
 
-            for ipa_embed, ipa_weights, scale in zip(ip_adapter_image_prompt_embeds, self._weights, self._scales, strict=True):
+            for ipa_embed, ipa_weights, scale in zip(
+                ip_adapter_image_prompt_embeds, self._weights, self._scales, strict=True
+            ):
                 # The batch dimensions should match.
                 assert ipa_embed.shape[0] == encoder_hidden_states.shape[0]
                 # The token_len dimensions should match.
diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index 2055691a06..2a7f4b5a95 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -66,11 +66,13 @@ class CacheStats(object):
 
 
 class ModelLocker(object):
     "Forward declaration"
+    pass
 
 
 class ModelCache(object):
     "Forward declaration"
+    pass
 
diff --git a/invokeai/backend/model_management/model_search.py b/invokeai/backend/model_management/model_search.py
index 3d56d9d8b9..e125c3ced7 100644
--- a/invokeai/backend/model_management/model_search.py
+++ b/invokeai/backend/model_management/model_search.py
@@ -70,13 +70,13 @@ class ModelSearch(ABC):
                     continue
                 if any(
                     (path / x).exists()
-                        for x in {
-                            "config.json",
-                            "model_index.json",
-                            "learned_embeds.bin",
-                            "pytorch_lora_weights.bin",
-                            "image_encoder.txt",
-                        }
+                    for x in {
+                        "config.json",
+                        "model_index.json",
+                        "learned_embeds.bin",
+                        "pytorch_lora_weights.bin",
+                        "image_encoder.txt",
+                    }
                 ):
                     try:
                         self.on_model_found(path)
diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
index 1b65326f6e..1353e804a7 100644
--- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py
+++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py
@@ -193,6 +193,7 @@ class InvokeAIStableDiffusionPipelineOutput(StableDiffusionPipelineOutput):
         attention_map_saver (`AttentionMapSaver`): Object containing attention maps that can be displayed to the user
             after generation completes. Optional.
     """
+
     attention_map_saver: Optional[AttentionMapSaver]
diff --git a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
index b45bb8f338..92a538ff70 100644
--- a/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
+++ b/invokeai/backend/stable_diffusion/diffusion/cross_attention_control.py
@@ -433,7 +433,7 @@ def inject_attention_function(unet, context: Context):
         module.identifier = identifier
         try:
             module.set_attention_slice_wrangler(attention_slice_wrangler)
-            module.set_slicing_strategy_getter(lambda module: context.get_slicing_strategy(identifier)) # noqa: B023
+            module.set_slicing_strategy_getter(lambda module: context.get_slicing_strategy(identifier))  # noqa: B023
         except AttributeError as e:
             if is_attribute_error_about(e, "set_attention_slice_wrangler"):
                 print(f"TODO: implement set_attention_slice_wrangler for {type(module)}")  # TODO
diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
index 4123399cf4..455e5e1096 100644
--- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
+++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py
@@ -642,7 +642,9 @@ class InvokeAIDiffuserComponent:
         deltas = None
         uncond_latents = None
-        weighted_cond_list = c_or_weighted_c_list if isinstance(c_or_weighted_c_list, list) else [(c_or_weighted_c_list, 1)]
+        weighted_cond_list = (
+            c_or_weighted_c_list if isinstance(c_or_weighted_c_list, list) else [(c_or_weighted_c_list, 1)]
+        )
 
         # below is fugly omg
         conditionings = [uc] + [c for c, weight in weighted_cond_list]
diff --git a/invokeai/backend/util/hotfixes.py b/invokeai/backend/util/hotfixes.py
index 32bc52c738..835575c7a1 100644
--- a/invokeai/backend/util/hotfixes.py
+++ b/invokeai/backend/util/hotfixes.py
@@ -732,7 +732,9 @@ class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
 
         controlnet_down_block_res_samples = ()
 
-        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks, strict=True):
+        for down_block_res_sample, controlnet_block in zip(
+            down_block_res_samples, self.controlnet_down_blocks, strict=True
+        ):
             down_block_res_sample = controlnet_block(down_block_res_sample)
             controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)
 
@@ -745,7 +747,9 @@ class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
             scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device)  # 0.1 to 1.0
             scales = scales * conditioning_scale
-            down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales, strict=True)]
+            down_block_res_samples = [
+                sample * scale for sample, scale in zip(down_block_res_samples, scales, strict=True)
+            ]
             mid_block_res_sample = mid_block_res_sample * scales[-1]  # last one
         else:
             down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
diff --git a/invokeai/backend/util/util.py b/invokeai/backend/util/util.py
index 7043355219..12ba3701cf 100644
--- a/invokeai/backend/util/util.py
+++ b/invokeai/backend/util/util.py
@@ -229,7 +229,11 @@ def rand_perlin_2d(shape, res, device, fade=lambda t: 6 * t**5 - 15 * t**4 + 10
     gradients = torch.stack((torch.cos(angles), torch.sin(angles)), dim=-1).to(device)
 
     def tile_grads(slice1, slice2):
-        return gradients[slice1[0]:slice1[1], slice2[0]:slice2[1]].repeat_interleave(d[0], 0).repeat_interleave(d[1], 1)
+        return (
+            gradients[slice1[0] : slice1[1], slice2[0] : slice2[1]]
+            .repeat_interleave(d[0], 0)
+            .repeat_interleave(d[1], 1)
+        )
 
     def dot(grad, shift):
         return (
diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py
index a47bd4c35c..e23538ffd6 100644
--- a/invokeai/frontend/install/model_install.py
+++ b/invokeai/frontend/install/model_install.py
@@ -72,7 +72,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
     def __init__(self, parentApp, name, multipage=False, *args, **keywords):
         self.multipage = multipage
         self.subprocess = None
-        super().__init__(parentApp=parentApp, name=name, *args, **keywords) # noqa: B026 # TODO: maybe this is bad?
+        super().__init__(parentApp=parentApp, name=name, *args, **keywords)  # noqa: B026  # TODO: maybe this is bad?
 
     def create(self):
         self.keypress_timeout = 10
diff --git a/scripts/configure_invokeai.py b/scripts/configure_invokeai.py
index dff658c2b0..c994668ea6 100755
--- a/scripts/configure_invokeai.py
+++ b/scripts/configure_invokeai.py
@@ -6,5 +6,7 @@ import warnings
 from invokeai.frontend.install.invokeai_configure import invokeai_configure as configure
 
 if __name__ == "__main__":
-    warnings.warn("configure_invokeai.py is deprecated, running 'invokeai-configure'...", DeprecationWarning, stacklevel=2)
+    warnings.warn(
+        "configure_invokeai.py is deprecated, running 'invokeai-configure'...", DeprecationWarning, stacklevel=2
+    )
     configure()