Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
# Allow CivitAI API Key on Imports (#5608)
## What type of PR is this? (check all applicable)

- [ ] Refactor
- [ ] Feature
- [x] Bug Fix
- [ ] Optimization
- [ ] Documentation Update
- [ ] Community Node Submission

## Have you discussed this change with the InvokeAI team?

- [x] Yes
- [ ] No, because:

## Have you updated all relevant documentation?

- [ ] Yes
- [ ] No

## Description

Small PR to allow users to pass in a CivitAI API key via config options.

## Related Tickets & Documents

- Related Issue #
- Closes #

## QA Instructions, Screenshots, Recordings

## Merge Plan

## Added/updated tests?

- [ ] Yes
- [ ] No

## [optional] Are there any post deployment tasks we need to perform?
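As a quick usage note for the change described above, here is a minimal sketch of the two ways the key can be supplied. The key value is hypothetical, and `InvokeAIAppConfig` keyword construction is assumed to follow normal pydantic settings behavior (it should also be settable from `invokeai.yaml` like other config fields):

```python
# Option 1: export the variable before launching InvokeAI; the new field's
# default reads it when the config class is defined:
#
#   export CIVITAI_API_KEY=my-secret-key
#
# Option 2: set it programmatically (or via invokeai.yaml settings):
from invokeai.app.services.config import InvokeAIAppConfig

config = InvokeAIAppConfig(civitai_api_key="my-secret-key")  # hypothetical key
print(config.civitai_api_key)
```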
This change is contained in commit 332f3930a5.
```diff
@@ -284,6 +284,9 @@ class InvokeAIAppConfig(InvokeAISettings):
     deny_nodes          : Optional[List[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.", json_schema_extra=Categories.Nodes)
     node_cache_size     : int = Field(default=512, description="How many cached nodes to keep in memory", json_schema_extra=Categories.Nodes)
 
+    # MODEL IMPORT
+    civitai_api_key     : Optional[str] = Field(default=os.environ.get("CIVITAI_API_KEY"), description="API key for CivitAI", json_schema_extra=Categories.Other)
+
     # DEPRECATED FIELDS - STILL HERE IN ORDER TO OBTAN VALUES FROM PRE-3.1 CONFIG FILES
     always_use_cpu      : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", json_schema_extra=Categories.MemoryPerformance)
     max_cache_size      : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", json_schema_extra=Categories.MemoryPerformance)
```
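One subtlety in the new field: `default=os.environ.get("CIVITAI_API_KEY")` is evaluated once, when the class body executes at import time, so the environment variable must already be set when InvokeAI starts. A standalone sketch of the pattern (the `Settings` class here is hypothetical, not InvokeAI's):

```python
import os
from typing import Optional

from pydantic import BaseModel, Field

# Hypothetical stand-in for the config class above: the environment is
# read when this class body executes, not when an instance is created.
class Settings(BaseModel):
    civitai_api_key: Optional[str] = Field(
        default=os.environ.get("CIVITAI_API_KEY"),
        description="API key for CivitAI",
    )

# Prints None unless CIVITAI_API_KEY was exported before this module ran.
print(Settings().civitai_api_key)
```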
```diff
@@ -293,6 +296,7 @@ class InvokeAIAppConfig(InvokeAISettings):
     lora_dir            : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Paths)
     embedding_dir       : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
     controlnet_dir      : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
 
     # this is not referred to in the source code and can be removed entirely
     #free_gpu_mem        : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance)
+
```
```diff
@@ -104,12 +104,14 @@ class ModelInstall(object):
         prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None,
         model_manager: Optional[ModelManager] = None,
         access_token: Optional[str] = None,
+        civitai_api_key: Optional[str] = None,
     ):
         self.config = config
         self.mgr = model_manager or ModelManager(config.model_conf_path)
         self.datasets = OmegaConf.load(Dataset_path)
         self.prediction_helper = prediction_type_helper
         self.access_token = access_token or HfFolder.get_token()
+        self.civitai_api_key = civitai_api_key or config.civitai_api_key
         self.reverse_paths = self._reverse_paths(self.datasets)
 
     def all_models(self) -> Dict[str, ModelLoadInfo]:
```
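The `civitai_api_key or config.civitai_api_key` assignment gives a key passed to the constructor precedence over the config value. A minimal sketch of that precedence (the `FakeConfig` and `resolve_key` names are illustrative, not part of the codebase):

```python
class FakeConfig:
    """Stand-in for an InvokeAIAppConfig with the key set in config."""
    civitai_api_key = "key-from-config"

def resolve_key(config, civitai_api_key=None):
    # Same pattern as ModelInstall.__init__ in the hunk above.
    return civitai_api_key or config.civitai_api_key

assert resolve_key(FakeConfig()) == "key-from-config"
assert resolve_key(FakeConfig(), civitai_api_key="key-from-caller") == "key-from-caller"
```

Because `or` tests truthiness, an explicitly passed empty string also falls back to the config value.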
```diff
@@ -326,7 +328,11 @@ class ModelInstall(object):
 
     def _install_url(self, url: str) -> AddModelResult:
         with TemporaryDirectory(dir=self.config.models_path) as staging:
-            location = download_with_resume(url, Path(staging))
+            CIVITAI_RE = r".*civitai.com.*"
+            civit_url = re.match(CIVITAI_RE, url, re.IGNORECASE)
+            location = download_with_resume(
+                url, Path(staging), access_token=self.civitai_api_key if civit_url else None
+            )
             if not location:
                 logger.error(f"Unable to download {url}. Skipping.")
             info = ModelProbe().heuristic_probe(location, self.prediction_helper)
```
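The URL check is a permissive regular expression rather than a hostname parse. A quick sketch of how it behaves (the sample URLs are made up):

```python
import re

CIVITAI_RE = r".*civitai.com.*"

# A truthy match object means the API key gets attached to the download.
for url in (
    "https://civitai.com/api/download/models/12345",  # matches
    "https://CIVITAI.com/models/999",                 # matches (re.IGNORECASE)
    "https://huggingface.co/some/model",              # no match, no key sent
):
    print(url, "->", bool(re.match(CIVITAI_RE, url, re.IGNORECASE)))
```

Since the dots are unescaped, the pattern also matches strings like `civitaiXcom` anywhere in the URL; a stricter check would parse the hostname, but the loose match only risks attaching the key to URLs that merely contain a civitai.com-like substring.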
```diff
@@ -286,7 +286,7 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path:
         open_mode = "wb"
         exist_size = 0
 
-    resp = requests.get(url, header, stream=True)
+    resp = requests.get(url, headers=header, stream=True, allow_redirects=True)
     content_length = int(resp.headers.get("content-length", 0))
 
     if dest.is_dir():
```
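This last hunk fixes the underlying bug: in `requests.get(url, header, stream=True)`, the header dict is bound to `requests.get`'s second positional parameter, which is `params`, so the authorization header was serialized into the query string and never sent as an HTTP header at all. A sketch demonstrating the difference (the token value is hypothetical):

```python
import requests

header = {"Authorization": "Bearer my-secret-key"}  # hypothetical token
url = "https://example.com/download"

# Before the fix: the dict lands in `params` and becomes query-string data;
# no Authorization header is sent.
before = requests.Request("GET", url, params=header).prepare()
print(before.url)                           # ...?Authorization=Bearer+my-secret-key
print(before.headers.get("Authorization"))  # None

# After the fix: the dict is sent as real HTTP headers.
after = requests.Request("GET", url, headers=header).prepare()
print(after.headers["Authorization"])       # Bearer my-secret-key
```

Note that `allow_redirects=True` is already the default for `requests.get`; spelling it out just documents that redirect chains (e.g. a CivitAI download redirecting to a CDN) are expected to be followed.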