fix ruff and remove unused API route

Kent Keirsey
2025-07-21 14:34:07 -04:00
committed by psychedelicious
parent 070eef3eff
commit c3f6389291
3 changed files with 40 additions and 100 deletions

View File

@@ -68,14 +68,8 @@ class CacheType(str, Enum):
 
 
 def add_cover_image_to_model_config(config: AnyModelConfig, dependencies: Type[ApiDependencies]) -> AnyModelConfig:
     """Add a cover image URL to a model configuration."""
-    # If cover_image is already set and looks like a file path, serve it directly
-    if config.cover_image and not config.cover_image.startswith('http'):
-        # This is a file path, so we'll serve it via a special endpoint
-        config.cover_image = f"/api/v2/models/i/{config.key}/cover_image"
-    else:
-        # Try to get from model images service
-        cover_image = dependencies.invoker.services.model_images.get_url(config.key)
-        config.cover_image = cover_image
+    cover_image = dependencies.invoker.services.model_images.get_url(config.key)
+    config.cover_image = cover_image
     return config
 
@@ -349,58 +343,6 @@ async def get_model_image(
         raise HTTPException(status_code=404)
 
 
-@model_manager_router.get(
-    "/i/{key}/cover_image",
-    operation_id="get_model_cover_image",
-    responses={
-        200: {
-            "description": "The model cover image was fetched successfully",
-        },
-        400: {"description": "Bad request"},
-        404: {"description": "The model cover image could not be found"},
-    },
-    status_code=200,
-)
-async def get_model_cover_image(
-    key: str = Path(description="The key of the model whose cover image to get"),
-) -> FileResponse:
-    """Gets a cover image file for a model from its stored path"""
-    try:
-        # Get the model config to find the cover image path
-        config = ApiDependencies.invoker.services.model_manager.store.get_model(key)
-
-        if not config.cover_image:
-            raise HTTPException(status_code=404, detail="No cover image found for this model")
-
-        # Construct the full path to the image file
-        models_path = ApiDependencies.invoker.services.configuration.models_path
-        image_path = models_path / config.cover_image
-
-        if not image_path.exists():
-            raise HTTPException(status_code=404, detail="Cover image file not found")
-
-        # Determine the media type based on file extension
-        media_type = "image/png" # default
-        if image_path.suffix.lower() in ['.jpg', '.jpeg']:
-            media_type = "image/jpeg"
-        elif image_path.suffix.lower() == '.webp':
-            media_type = "image/webp"
-
-        response = FileResponse(
-            image_path,
-            media_type=media_type,
-            filename=image_path.name,
-            content_disposition_type="inline",
-        )
-        response.headers["Cache-Control"] = f"max-age={IMAGE_MAX_AGE}"
-        return response
-
-    except HTTPException:
-        raise
-    except Exception as e:
-        raise HTTPException(status_code=404, detail=f"Error serving cover image: {str(e)}")
-
-
 @model_manager_router.patch(
     "/i/{key}/image",
     operation_id="update_model_image",
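With the /i/{key}/cover_image route removed, the cover image URL is now resolved in one place: the helper always asks the model images service. A minimal sketch of that flow, using stand-in classes (not the real AnyModelConfig or ApiDependencies types) and a purely illustrative URL format:

from dataclasses import dataclass
from typing import Optional


@dataclass
class StubModelConfig:
    """Stand-in for AnyModelConfig; only the fields the helper touches."""

    key: str
    cover_image: Optional[str] = None


class StubModelImagesService:
    """Stand-in for the model images service; the real URL scheme will differ."""

    def get_url(self, key: str) -> Optional[str]:
        return f"/model_images/{key}.webp"  # illustrative URL only


def add_cover_image(config: StubModelConfig, images: StubModelImagesService) -> StubModelConfig:
    # Mirrors the simplified helper above: always resolve through the image store,
    # never through a per-model filesystem path served by its own endpoint.
    config.cover_image = images.get_url(config.key)
    return config


config = add_cover_image(StubModelConfig(key="abc123"), StubModelImagesService())
print(config.cover_image)  # -> /model_images/abc123.webp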

View File

@@ -1,6 +1,5 @@
"""Model installation class."""
import json
import locale
import os
import re
@@ -662,7 +661,6 @@ class ModelInstallService(ModelInstallServiceBase):
         except InvalidModelConfigException:
             return ModelConfigBase.classify(model_path, hash_algo, **fields)
 
     def _register(
         self, model_path: Path, config: Optional[ModelRecordChanges] = None, info: Optional[AnyModelConfig] = None
     ) -> str:

View File

@@ -1,46 +1,48 @@
 """Utility functions for extracting metadata from LoRA model files."""
 
 import json
-from pathlib import Path
-from typing import Dict, Any, Optional, Set, Tuple
 import logging
+from pathlib import Path
+from typing import Any, Dict, Optional, Set, Tuple
 
 from PIL import Image
 
-from invokeai.backend.model_manager.config import AnyModelConfig, ModelType
 from invokeai.app.util.thumbnails import make_thumbnail
+from invokeai.backend.model_manager.config import AnyModelConfig, ModelType
 
 logger = logging.getLogger(__name__)
 
 
-def extract_lora_metadata(model_path: Path, model_key: str, model_images_path: Path) -> Tuple[Optional[str], Optional[Set[str]]]:
+def extract_lora_metadata(
+    model_path: Path, model_key: str, model_images_path: Path
+) -> Tuple[Optional[str], Optional[Set[str]]]:
     """
     Extract metadata for a LoRA model from associated JSON and image files.
 
     Args:
         model_path: Path to the LoRA model file
         model_key: Unique key for the model
         model_images_path: Path to the model images directory
 
     Returns:
         Tuple of (description, trigger_phrases)
     """
     model_stem = model_path.stem
     model_dir = model_path.parent
 
     # Find and process preview image
     _process_preview_image(model_stem, model_dir, model_key, model_images_path)
 
     # Extract metadata from JSON
     description, trigger_phrases = _extract_json_metadata(model_stem, model_dir)
 
     return description, trigger_phrases
 
 
 def _process_preview_image(model_stem: str, model_dir: Path, model_key: str, model_images_path: Path) -> bool:
     """Find and process a preview image for the model, saving it to the model images store."""
-    image_extensions = ['.png', '.jpg', '.jpeg', '.webp']
+    image_extensions = [".png", ".jpg", ".jpeg", ".webp"]
 
     for ext in image_extensions:
         image_path = model_dir / f"{model_stem}{ext}"
         if image_path.exists():
@@ -51,39 +53,39 @@ def _process_preview_image(model_stem: str, model_dir: Path, model_key: str, mod
                     thumbnail = make_thumbnail(img, 256)
                     thumbnail_path = model_images_path / f"{model_key}.webp"
                     thumbnail.save(thumbnail_path, format="webp")
 
                 logger.info(f"Processed preview image {image_path.name} for model {model_key}")
                 return True
 
             except Exception as e:
                 logger.warning(f"Failed to process preview image {image_path.name}: {e}")
                 return False
 
     return False
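In other words, when a preview image sits next to the model file, it ends up as a 256px WebP in the model images folder, keyed by the model's key. A small self-contained sketch of that flow (the key, folder, and image are made up, and img.thumbnail is only a rough stand-in for make_thumbnail):

from pathlib import Path

from PIL import Image

model_key = "abc123"               # hypothetical model key
images_dir = Path("model_images")  # hypothetical model images folder
images_dir.mkdir(exist_ok=True)

# Stand-in for a preview file such as "detail-tweaker.png" found next to the model
img = Image.new("RGB", (1024, 1024), "gray")
img.thumbnail((256, 256))          # roughly what make_thumbnail(img, 256) produces
img.save(images_dir / f"{model_key}.webp", format="webp")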
 
 
 def _extract_json_metadata(model_stem: str, model_dir: Path) -> Tuple[Optional[str], Optional[Set[str]]]:
     """Extract metadata from a JSON file with the same name as the model."""
     json_path = model_dir / f"{model_stem}.json"
 
     if not json_path.exists():
         return None, None
 
     try:
-        with open(json_path, 'r', encoding='utf-8') as f:
+        with open(json_path, "r", encoding="utf-8") as f:
             metadata = json.load(f)
 
         # Extract description
         description = _build_description(metadata)
 
         # Extract trigger phrases
         trigger_phrases = _extract_trigger_phrases(metadata)
 
         if description or trigger_phrases:
             logger.info(f"Applied metadata from {json_path.name}")
 
         return description, trigger_phrases
 
     except (json.JSONDecodeError, IOError, Exception) as e:
         logger.warning(f"Failed to read metadata from {json_path}: {e}")
         return None, None
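The JSON side of the extraction reads an optional sidecar whose name matches the model file's stem. A sketch with hypothetical file names, showing the only keys these helpers look at and what they would produce:

import json
import tempfile
from pathlib import Path

# Hypothetical sidecar next to "detail-tweaker.safetensors"
sidecar = {
    "description": "Adds fine surface detail",   # read by _build_description
    "notes": "Works best at weight 0.6-0.8",      # appended by _build_description
    "activation text": "add detail, detailed",    # read by _extract_trigger_phrases
}

with tempfile.TemporaryDirectory() as tmp:
    json_path = Path(tmp) / "detail-tweaker.json"
    json_path.write_text(json.dumps(sidecar), encoding="utf-8")
    # _extract_json_metadata("detail-tweaker", Path(tmp)) would return:
    #   description     -> "Adds fine surface detail | Works best at weight 0.6-0.8"
    #   trigger_phrases -> {"add detail", "detailed"}
    print(json.loads(json_path.read_text(encoding="utf-8")))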
@@ -92,13 +94,13 @@ def _extract_json_metadata(model_stem: str, model_dir: Path) -> Tuple[Optional[s
 def _build_description(metadata: Dict[str, Any]) -> Optional[str]:
     """Build a description from metadata fields."""
     description_parts = []
 
     if description := metadata.get("description"):
         description_parts.append(str(description).strip())
 
     if notes := metadata.get("notes"):
         description_parts.append(str(notes).strip())
 
     return " | ".join(description_parts) if description_parts else None
@@ -106,21 +108,21 @@ def _extract_trigger_phrases(metadata: Dict[str, Any]) -> Optional[Set[str]]:
     """Extract trigger phrases from metadata."""
     if not (activation_text := metadata.get("activation text")):
         return None
 
     activation_text = str(activation_text).strip()
     if not activation_text:
         return None
 
     # Split on commas and clean up each phrase
-    phrases = [phrase.strip() for phrase in activation_text.split(',') if phrase.strip()]
+    phrases = [phrase.strip() for phrase in activation_text.split(",") if phrase.strip()]
 
     return set(phrases) if phrases else None
 
 
 def apply_lora_metadata(info: AnyModelConfig, model_path: Path, model_images_path: Path) -> None:
     """
     Apply extracted metadata to a LoRA model configuration.
 
     Args:
         info: The model configuration to update
         model_path: Path to the LoRA model file
@@ -129,17 +131,15 @@ def apply_lora_metadata(info: AnyModelConfig, model_path: Path, model_images_pat
     # Only process LoRA models
     if info.type != ModelType.LoRA:
         return
 
     # Extract and apply metadata
-    description, trigger_phrases = extract_lora_metadata(
-        model_path, info.key, model_images_path
-    )
+    description, trigger_phrases = extract_lora_metadata(model_path, info.key, model_images_path)
 
     # We don't set cover_image path in the config anymore since images are stored
     # separately in the model images store by model key
 
     if description:
         info.description = description
 
     if trigger_phrases:
-        info.trigger_phrases = trigger_phrases
+        info.trigger_phrases = trigger_phrases
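For completeness, a sketch of where everything lands after an install that hits this path, with purely illustrative file names:

# loras/detail-tweaker.safetensors  -> registered; apply_lora_metadata() runs because the type is LoRA
# loras/detail-tweaker.json         -> fills info.description and info.trigger_phrases (when the keys are present)
# loras/detail-tweaker.png          -> thumbnailed and saved as <model_images_path>/<info.key>.webp
# info.cover_image                  -> not written here; the API resolves it from the model images service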