cleanup: Remove manual offload from Depth Anything Processor (#5812)

## What type of PR is this? (check all applicable)

- [ ] Refactor
- [ ] Feature
- [ ] Bug Fix
- [ ] Optimization
- [ ] Documentation Update
- [ ] Community Node Submission


## Have you discussed this change with the InvokeAI team?
- [ ] Yes
- [ ] No, because:

      
## Have you updated all relevant documentation?
- [ ] Yes
- [ ] No


## Description


## Related Tickets & Documents

<!--
For pull requests that relate to or close an issue, please include them
below.

For example, having the text "closes #1234" would connect the current
pull request to issue 1234. When we merge the pull request, GitHub will
automatically close the issue.
-->

- Related Issue #
- Closes #

## QA Instructions, Screenshots, Recordings

<!-- 
Please provide steps on how to test changes, any hardware or 
software specifications as well as any other pertinent information. 
-->

## Merge Plan

<!--
A merge plan describes how this PR should be handled after it is
approved.

Example merge plans:
- "This PR can be merged when approved"
- "This must be squash-merged when approved"
- "DO NOT MERGE - I will rebase and tidy commits before merging"
- "#dev-chat on discord needs to be advised of this change when it is
merged"

A merge plan is particularly important for large PRs or PRs that touch
the
database in any way.
-->

## Added/updated tests?

- [ ] Yes
- [ ] No : _please replace this line with details on why tests
      have not been included_

## [optional] Are there any post deployment tasks we need to perform?
This commit is contained in:
blessedcoolant 2024-03-01 23:13:06 +05:30 committed by GitHub
commit 80fd3d3f3c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 16 additions and 15 deletions

View File

@@ -576,7 +576,7 @@ DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"]
title="Depth Anything Processor", title="Depth Anything Processor",
tags=["controlnet", "depth", "depth anything"], tags=["controlnet", "depth", "depth anything"],
category="controlnet", category="controlnet",
version="1.0.0", version="1.0.1",
) )
class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation): class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
"""Generates a depth map based on the Depth Anything algorithm""" """Generates a depth map based on the Depth Anything algorithm"""
@@ -585,13 +585,12 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
default="small", description="The size of the depth model to use" default="small", description="The size of the depth model to use"
) )
resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res) resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res)
offload: bool = InputField(default=False)
def run_processor(self, image: Image.Image): def run_processor(self, image: Image.Image):
depth_anything_detector = DepthAnythingDetector() depth_anything_detector = DepthAnythingDetector()
depth_anything_detector.load_model(model_size=self.model_size) depth_anything_detector.load_model(model_size=self.model_size)
processed_image = depth_anything_detector(image=image, resolution=self.resolution, offload=self.offload) processed_image = depth_anything_detector(image=image, resolution=self.resolution)
return processed_image return processed_image

View File

@@ -17,6 +17,8 @@ from invokeai.backend.util.util import download_with_progress_bar
config = InvokeAIAppConfig.get_config() config = InvokeAIAppConfig.get_config()
DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"]
DEPTH_ANYTHING_MODELS = { DEPTH_ANYTHING_MODELS = {
"large": { "large": {
"url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitl14.pth?download=true", "url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitl14.pth?download=true",
@@ -53,9 +55,9 @@ transform = Compose(
class DepthAnythingDetector: class DepthAnythingDetector:
def __init__(self) -> None: def __init__(self) -> None:
self.model = None self.model = None
self.model_size: Union[Literal["large", "base", "small"], None] = None self.model_size: Union[DEPTH_ANYTHING_MODEL_SIZES, None] = None
def load_model(self, model_size=Literal["large", "base", "small"]): def load_model(self, model_size: DEPTH_ANYTHING_MODEL_SIZES = "small"):
DEPTH_ANYTHING_MODEL_PATH = pathlib.Path(config.models_path / DEPTH_ANYTHING_MODELS[model_size]["local"]) DEPTH_ANYTHING_MODEL_PATH = pathlib.Path(config.models_path / DEPTH_ANYTHING_MODELS[model_size]["local"])
if not DEPTH_ANYTHING_MODEL_PATH.exists(): if not DEPTH_ANYTHING_MODEL_PATH.exists():
download_with_progress_bar(DEPTH_ANYTHING_MODELS[model_size]["url"], DEPTH_ANYTHING_MODEL_PATH) download_with_progress_bar(DEPTH_ANYTHING_MODELS[model_size]["url"], DEPTH_ANYTHING_MODEL_PATH)
@@ -84,16 +86,19 @@ class DepthAnythingDetector:
self.model.to(device) self.model.to(device)
return self return self
def __call__(self, image, resolution=512, offload=False): def __call__(self, image: Image.Image, resolution: int = 512):
image = np.array(image, dtype=np.uint8) if self.model is None:
image = image[:, :, ::-1] / 255.0 raise Exception("Depth Anything Model not loaded")
image_height, image_width = image.shape[:2] np_image = np.array(image, dtype=np.uint8)
image = transform({"image": image})["image"] np_image = np_image[:, :, ::-1] / 255.0
image = torch.from_numpy(image).unsqueeze(0).to(choose_torch_device())
image_height, image_width = np_image.shape[:2]
np_image = transform({"image": image})["image"]
tensor_image = torch.from_numpy(np_image).unsqueeze(0).to(choose_torch_device())
with torch.no_grad(): with torch.no_grad():
depth = self.model(image) depth = self.model(tensor_image)
depth = F.interpolate(depth[None], (image_height, image_width), mode="bilinear", align_corners=False)[0, 0] depth = F.interpolate(depth[None], (image_height, image_width), mode="bilinear", align_corners=False)[0, 0]
depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0 depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
@@ -103,7 +108,4 @@ class DepthAnythingDetector:
new_height = int(image_height * (resolution / image_width)) new_height = int(image_height * (resolution / image_width))
depth_map = depth_map.resize((resolution, new_height)) depth_map = depth_map.resize((resolution, new_height))
if offload:
del self.model
return depth_map return depth_map