mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00

Merge branch 'main' into install/force-torch-reinstall

This commit is contained in commit ce3da40434.

.github/workflows/pypi-release.yml (vendored, new file, 41 lines)
@@ -0,0 +1,41 @@
+name: PyPI Release
+
+on:
+  push:
+    paths:
+      - 'ldm/invoke/_version.py'
+  workflow_dispatch:
+
+jobs:
+  release:
+    if: github.repository == 'invoke-ai/InvokeAI'
+    runs-on: ubuntu-22.04
+    env:
+      TWINE_USERNAME: __token__
+      TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
+      TWINE_NON_INTERACTIVE: 1
+    steps:
+      - name: checkout sources
+        uses: actions/checkout@v3
+
+      - name: install deps
+        run: pip install --upgrade build twine
+
+      - name: build package
+        run: python3 -m build
+
+      - name: check distribution
+        run: twine check dist/*
+
+      - name: check PyPI versions
+        if: github.ref == 'refs/heads/main'
+        run: |
+          pip install --upgrade requests
+          python -c "\
+          import scripts.pypi_helper; \
+          EXISTS=scripts.pypi_helper.local_on_pypi(); \
+          print(f'PACKAGE_EXISTS={EXISTS}')" >> $GITHUB_ENV
+
+      - name: upload package
+        if: env.PACKAGE_EXISTS == 'False' && env.TWINE_PASSWORD != ''
+        run: twine upload dist/*
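For reference, the "check PyPI versions" step amounts to the following standalone Python. It writes a PACKAGE_EXISTS flag into $GITHUB_ENV so the upload step is skipped when the current version is already published (a minimal sketch; like the workflow, it assumes it is run from the repository root so that scripts.pypi_helper is importable):

    import scripts.pypi_helper

    # True when the version in ldm/invoke/_version.py already exists on PyPI;
    # the workflow only uploads when this prints PACKAGE_EXISTS=False.
    EXISTS = scripts.pypi_helper.local_on_pypi()
    print(f"PACKAGE_EXISTS={EXISTS}")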
installer/create_installer.sh (Executable file → Normal file, 8 lines changed)
@@ -14,12 +14,13 @@ fi
 VERSION=$(cd ..; python -c "from ldm.invoke import __version__ as version; print(version)")
 PATCH=""
 VERSION="v${VERSION}${PATCH}"
+LATEST_TAG="v2.3-latest"
 
 echo Building installer for version $VERSION
 echo "Be certain that you're in the 'installer' directory before continuing."
 read -p "Press any key to continue, or CTRL-C to exit..."
 
-read -e -p "Commit and tag this repo with ${VERSION} and 'v2.3-latest'? [n]: " input
+read -e -p "Commit and tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
 RESPONSE=${input:='n'}
 if [ "$RESPONSE" == 'y' ]; then
     git commit -a
@@ -28,8 +29,9 @@ if [ "$RESPONSE" == 'y' ]; then
         echo "Existing/invalid tag"
         exit -1
     fi
-    git push origin :refs/tags/v2.3-latest
-    git tag -fa latest
+
+    git push origin :refs/tags/$LATEST_TAG
+    git tag -fa $LATEST_TAG
 fi
 
 # ----------------------
@@ -346,7 +346,6 @@ class InvokeAiInstance:
         # NOTE: currently the config script does its own arg parsing! this means the command-line switches
         # from the installer will also automatically propagate down to the config script.
         # this may change in the future with config refactoring!
 
         invokeai_configure.main()
-
     def install_user_scripts(self):
@@ -626,9 +626,10 @@ class InvokeAIWebServer:
                     printable_parameters["init_mask"][:64] + "..."
                 )
 
-            print(
-                f">> Image generation requested: {printable_parameters}\nESRGAN parameters: {esrgan_parameters}\nFacetool parameters: {facetool_parameters}"
-            )
+            print(f'\n>> Image Generation Parameters:\n\n{printable_parameters}\n')
+            print(f'>> ESRGAN Parameters: {esrgan_parameters}')
+            print(f'>> Facetool Parameters: {facetool_parameters}')
+
             self.generate_images(
                 generation_parameters,
                 esrgan_parameters,
@@ -1154,7 +1155,7 @@ class InvokeAIWebServer:
                     image, os.path.basename(path), self.thumbnail_image_path
                 )
 
-                print(f'>> Image generated: "{path}"')
+                print(f'\n\n>> Image generated: "{path}"\n')
                 self.write_log_message(f'[Generated] "{path}": {command}')
 
                 if progress.total_iterations > progress.current_iteration:
@@ -1193,8 +1194,6 @@ class InvokeAIWebServer:
 
                 progress.set_current_iteration(progress.current_iteration + 1)
 
-                print(generation_parameters)
-
             def diffusers_step_callback_adapter(*cb_args, **kwargs):
                 if isinstance(cb_args[0], PipelineIntermediateState):
                     progress_state: PipelineIntermediateState = cb_args[0]
@@ -1305,8 +1304,6 @@ class InvokeAIWebServer:
 
             rfc_dict["variations"] = variations
 
-            print(parameters)
-
             if rfc_dict["type"] == "img2img":
                 rfc_dict["strength"] = parameters["strength"]
                 rfc_dict["fit"] = parameters["fit"]  # TODO: Noncompliant
@@ -574,7 +574,7 @@ class Generate:
             print('>> Could not generate image.')
 
         toc = time.time()
-        print('>> Usage stats:')
+        print('\n>> Usage stats:')
         print(
             f'>> {len(results)} image(s) generated in', '%4.2fs' % (
                 toc - tic)
@@ -4,6 +4,10 @@ import sys
 import shlex
 import traceback
 
+from argparse import Namespace
+from pathlib import Path
+from typing import Optional, Union
+
 if sys.platform == "darwin":
     os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 
@@ -16,10 +20,10 @@ from ldm.invoke.pngwriter import PngWriter, retrieve_metadata, write_metadata
 from ldm.invoke.image_util import make_grid
 from ldm.invoke.log import write_log
 from ldm.invoke.model_manager import ModelManager
-from pathlib import Path
-from argparse import Namespace
-import pyparsing
+
+import click  # type: ignore
 import ldm.invoke
+import pyparsing  # type: ignore
 
 # global used in multiple functions (fix)
 infile = None
@@ -69,8 +73,10 @@ def main():
 
     # these two lines prevent a horrible warning message from appearing
     # when the frozen CLIP tokenizer is imported
-    import transformers
+    import transformers  # type: ignore
     transformers.logging.set_verbosity_error()
+    import diffusers
+    diffusers.logging.set_verbosity_error()
 
     # Loading Face Restoration and ESRGAN Modules
     gfpgan,codeformer,esrgan = load_face_restoration(opt)
@@ -579,12 +585,28 @@ def import_model(model_path:str, gen, opt, completer):
 
     if model_path.startswith(('http:','https:','ftp:')):
         model_name = import_ckpt_model(model_path, gen, opt, completer)
 
     elif os.path.exists(model_path) and model_path.endswith(('.ckpt','.safetensors')) and os.path.isfile(model_path):
         model_name = import_ckpt_model(model_path, gen, opt, completer)
-    elif re.match('^[\w.+-]+/[\w.+-]+$',model_path):
-        model_name = import_diffuser_model(model_path, gen, opt, completer)
+
     elif os.path.isdir(model_path):
+
+        # Allow for a directory containing multiple models.
+        models = list(Path(model_path).rglob('*.ckpt')) + list(Path(model_path).rglob('*.safetensors'))
+
+        if models:
+            # Only the last model name will be used below.
+            for model in sorted(models):
+
+                if click.confirm(f'Import {model.stem} ?', default=True):
+                    model_name = import_ckpt_model(model, gen, opt, completer)
+                    print()
+        else:
             model_name = import_diffuser_model(Path(model_path), gen, opt, completer)
+
+    elif re.match(r'^[\w.+-]+/[\w.+-]+$', model_path):
+        model_name = import_diffuser_model(model_path, gen, opt, completer)
+
     else:
         print(f'** {model_path} is neither the path to a .ckpt file nor a diffusers repository id. Can\'t import.')
 
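For context, the directory import added above hinges on pathlib's recursive glob. A standalone sketch of the scan (the directory path is an assumed example):

    from pathlib import Path

    model_dir = Path("~/models").expanduser()  # assumed example location
    # Recursively collect checkpoint files, as the new import_model branch does:
    models = list(model_dir.rglob("*.ckpt")) + list(model_dir.rglob("*.safetensors"))
    for model in sorted(models):
        print(model.stem)  # the name offered at the "Import ... ?" prompt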
@@ -602,7 +624,7 @@ import_model(model_path:str, gen, opt, completer):
     completer.update_models(gen.model_manager.list_models())
     print(f'>> {model_name} successfully installed')
 
-def import_diffuser_model(path_or_repo:str, gen, opt, completer)->str:
+def import_diffuser_model(path_or_repo: Union[Path, str], gen, _, completer) -> Optional[str]:
     manager = gen.model_manager
     default_name = Path(path_or_repo).stem
     default_description = f'Imported model {default_name}'
@@ -625,7 +647,7 @@ def import_diffuser_model(path_or_repo:str, gen, opt, completer)->str:
         return None
     return model_name
 
-def import_ckpt_model(path_or_url:str, gen, opt, completer)->str:
+def import_ckpt_model(path_or_url: Union[Path, str], gen, opt, completer) -> Optional[str]:
     manager = gen.model_manager
     default_name = Path(path_or_url).stem
     default_description = f'Imported model {default_name}'
@@ -1133,8 +1155,8 @@ def report_model_error(opt:Namespace, e:Exception):
     for arg in yes_to_all.split():
         sys.argv.append(arg)
 
-    from ldm.invoke.config import configure_invokeai
-    configure_invokeai.main()
+    from ldm.invoke.config import invokeai_configure
+    invokeai_configure.main()
     print('** InvokeAI will now restart')
     sys.argv = previous_args
     main()  # would rather do a os.exec(), but doesn't exist?
@@ -1 +1 @@
-__version__='2.3.0-rc3'
+__version__='2.3.0-rc4'
@@ -196,6 +196,7 @@ class Args(object):
         elif os.path.exists(legacyinit):
             print(f'>> WARNING: Old initialization file found at {legacyinit}. This location is deprecated. Please move it to {Globals.root}/invokeai.init.')
             sysargs.insert(0,f'@{legacyinit}')
+        Globals.log_tokenization = self._arg_parser.parse_args(sysargs).log_tokenization
 
         self._arg_switches = self._arg_parser.parse_args(sysargs)
         return self._arg_switches
@@ -599,6 +600,12 @@ class Args(object):
             help=f'Set the default sampler. Supported samplers: {", ".join(SAMPLER_CHOICES)}',
             default='k_lms',
         )
+        render_group.add_argument(
+            '--log_tokenization',
+            '-t',
+            action='store_true',
+            help='shows how the prompt is split into tokens'
+        )
         render_group.add_argument(
             '-f',
             '--strength',
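A reduced sketch of the new switch in isolation (a bare ArgumentParser stands in for the real parser, where render_group is an argument group):

    import argparse

    parser = argparse.ArgumentParser()
    # Same definition as the render_group.add_argument(...) call above:
    parser.add_argument('--log_tokenization', '-t', action='store_true',
                        help='shows how the prompt is split into tokens')
    print(parser.parse_args(['-t']).log_tokenization)  # prints: True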
@@ -756,6 +763,7 @@ class Args(object):
         !models -- list models in configs/models.yaml
         !switch <model_name> -- switch to model named <model_name>
         !import_model /path/to/weights/file.ckpt -- adds a .ckpt model to your config
+        !import_model /path/to/weights/ -- interactively import models from a directory
         !import_model http://path_to_model.ckpt -- downloads and adds a .ckpt model to your config
         !import_model hakurei/waifu-diffusion -- downloads and adds a diffusers model to your config
         !optimize_model <model_name> -- converts a .ckpt model to a diffusers model
@@ -17,6 +17,7 @@ from ..models.diffusion import cross_attention_control
 from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from ..modules.encoders.modules import WeightedFrozenCLIPEmbedder
 from ..modules.prompt_to_embeddings_converter import WeightedPromptFragmentsToEmbeddingsConverter
+from ldm.invoke.globals import Globals
 
 
 def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False):
@@ -92,9 +93,9 @@ def _get_conditioning_for_prompt(parsed_prompt: Union[Blend, FlattenedPrompt], p
     Process prompt structure and tokens, and return (conditioning, unconditioning, extra_conditioning_info)
     """
 
-    if log_tokens:
-        print(f">> Parsed prompt to {parsed_prompt}")
-        print(f">> Parsed negative prompt to {parsed_negative_prompt}")
+    if log_tokens or Globals.log_tokenization:
+        print(f"\n>> [TOKENLOG] Parsed Prompt: {parsed_prompt}")
+        print(f"\n>> [TOKENLOG] Parsed Negative Prompt: {parsed_negative_prompt}")
 
     conditioning = None
     cac_args: cross_attention_control.Arguments = None
@@ -235,7 +236,7 @@ def _get_embeddings_and_tokens_for_prompt(model, flattened_prompt: FlattenedProm
     fragments = [x.text for x in flattened_prompt.children]
     weights = [x.weight for x in flattened_prompt.children]
     embeddings, tokens = model.get_learned_conditioning([fragments], return_tokens=True, fragment_weights=[weights])
-    if log_tokens:
+    if log_tokens or Globals.log_tokenization:
         text = " ".join(fragments)
         log_tokenization(text, model, display_label=log_display_label)
 
@@ -273,12 +274,12 @@ def log_tokenization(text, model, display_label=None):
     # usually tokens have '</w>' to indicate end-of-word,
     # but for readability it has been replaced with ' '
     """
 
     tokens = model.cond_stage_model.tokenizer.tokenize(text)
     tokenized = ""
     discarded = ""
     usedTokens = 0
     totalTokens = len(tokens)
 
     for i in range(0, totalTokens):
         token = tokens[i].replace('</w>', ' ')
         # alternate color
@@ -288,8 +289,11 @@ def log_tokenization(text, model, display_label=None):
             usedTokens += 1
         else:  # over max token length
             discarded = discarded + f"\x1b[0;3{s};40m{token}"
-    print(f"\n>> Tokens {display_label or ''} ({usedTokens}):\n{tokenized}\x1b[0m")
+
+    if usedTokens > 0:
+        print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')
+        print(f'{tokenized}\x1b[0m')
+
     if discarded != "":
-        print(
-            f">> Tokens Discarded ({totalTokens - usedTokens}):\n{discarded}\x1b[0m"
-        )
+        print(f'\n>> [TOKENLOG] Tokens Discarded ({totalTokens - usedTokens}):')
+        print(f'{discarded}\x1b[0m')
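The \x1b[0;3{s};40m strings in this hunk are ANSI SGR color escape codes, and \x1b[0m resets the terminal attributes. A minimal self-contained illustration:

    # Print a word in each of the six alternating foreground colors used above,
    # resetting the terminal attributes with \x1b[0m each time:
    for s in range(1, 7):
        print(f"\x1b[0;3{s};40mtoken\x1b[0m", end=" ")
    print()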
@@ -127,8 +127,8 @@ script do it for you. Manual installation is described at:
 
 https://invoke-ai.github.io/InvokeAI/installation/020_INSTALL_MANUAL/
 
-You may download the recommended models (about 10GB total), select a customized set, or
-completely skip this step.
+You may download the recommended models (about 15GB total), install all models (40 GB!!)
+select a customized set, or completely skip this step.
 """
 )
 completer.set_options(["recommended", "customized", "skip"])
@@ -435,9 +435,7 @@ def _download_diffusion_weights(
         )
     except OSError as e:
         if str(e).startswith("fp16 is not a valid"):
-            print(
-                f"Could not fetch half-precision version of model {repo_id}; fetching full-precision instead"
-            )
+            pass
         else:
             print(f"An unexpected error occurred while downloading the model: {e})")
     if path:
@@ -868,7 +866,7 @@ def initialize_rootdir(root: str, yes_to_all: bool = False):
     ):
         os.makedirs(os.path.join(root, name), exist_ok=True)
 
-    configs_src = Path(configs.__path__[-1])
+    configs_src = Path(configs.__path__[0])
     configs_dest = Path(root) / "configs"
     if not os.path.samefile(configs_src, configs_dest):
         shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
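For context on the [-1] to [0] change: a package's __path__ is a list-like sequence of directories, usually of length one, so [0] selects the first (and typically only) search location. A stdlib stand-in that runs anywhere:

    import email  # any regular package works; a stdlib one keeps the snippet portable

    # __path__ lists the directories searched for the package's submodules:
    print(email.__path__[0])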
@@ -4,7 +4,6 @@ import dataclasses
 import inspect
 import secrets
 import sys
-import warnings
 from dataclasses import dataclass, field
 from typing import List, Optional, Union, Callable, Type, TypeVar, Generic, Any
 
@@ -641,7 +640,6 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
 
     @property
     def cond_stage_model(self):
-        warnings.warn("legacy compatibility layer", DeprecationWarning)
         return self.prompt_fragments_to_embeddings_converter
 
     @torch.inference_mode()
@@ -194,7 +194,8 @@ class Inpaint(Img2Img):
         """
 
         self.enable_image_debugging = enable_image_debugging
-        self.infill_method = infill_method or infill_methods()[0], # The infill method to use
+        infill_method = infill_method or infill_methods()[0]
+        self.infill_method = infill_method
 
         self.inpaint_width = inpaint_width
         self.inpaint_height = inpaint_height
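The bug fixed here is easy to miss: the trailing comma on the removed line made the attribute a one-element tuple rather than a string. A minimal sketch with an assumed example value:

    # Trailing comma: the right-hand side is a tuple expression.
    infill_method = "patchmatch",            # note the comma
    assert infill_method == ("patchmatch",)  # a 1-tuple, not a string

    # The corrected form stores the string itself:
    infill_method = "patchmatch"
    assert infill_method == "patchmatch"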
@@ -18,7 +18,7 @@ import warnings
 import safetensors.torch
 from pathlib import Path
 from shutil import move, rmtree
-from typing import Union, Any
+from typing import Any, Optional, Union
 from huggingface_hub import scan_cache_dir
 from ldm.util import download_with_progress_bar
 
@@ -484,12 +484,11 @@ class ModelManager(object):
                     **pipeline_args,
                     **fp_args,
                 )
 
             except OSError as e:
                 if str(e).startswith('fp16 is not a valid'):
-                    print(f'Could not fetch half-precision version of model {name_or_path}; fetching full-precision instead')
+                    pass
                 else:
-                    print(f'An unexpected error occurred while downloading the model: {e})')
+                    print(f'** An unexpected error occurred while downloading the model: {e})')
             if pipeline:
                 break
 
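The silenced branch above sits inside a try-fp16-then-fp32 retry loop around from_pretrained. A self-contained sketch of that shape, with a stub loader standing in for the real ModelManager internals:

    def load_pipeline(**fp_args):
        # Stub standing in for StableDiffusionPipeline.from_pretrained(...):
        if fp_args.get("revision") == "fp16":
            raise OSError("fp16 is not a valid git identifier")
        return "pipeline"

    pipeline = None
    for fp_args in [{"revision": "fp16"}, {}]:  # half precision first, then full
        try:
            pipeline = load_pipeline(**fp_args)
        except OSError as e:
            if str(e).startswith('fp16 is not a valid'):
                pass  # fall through silently to the full-precision attempt
            else:
                print(f'** An unexpected error occurred while downloading the model: {e})')
        if pipeline:
            break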
@@ -881,14 +880,14 @@ class ModelManager(object):
             print('** Migration is done. Continuing...')
 
 
-    def _resolve_path(self, source:Union[str,Path], dest_directory:str)->Path:
+    def _resolve_path(self, source: Union[str, Path], dest_directory: str) -> Optional[Path]:
         resolved_path = None
-        if source.startswith(('http:','https:','ftp:')):
+        if str(source).startswith(('http:','https:','ftp:')):
             basename = os.path.basename(source)
             if not os.path.isabs(dest_directory):
                 dest_directory = os.path.join(Globals.root,dest_directory)
             dest = os.path.join(dest_directory,basename)
-            if download_with_progress_bar(source,dest):
+            if download_with_progress_bar(str(source), Path(dest)):
                 resolved_path = Path(dest)
         else:
             if not os.path.isabs(source):
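Since _resolve_path now accepts either a str or a Path, the scheme check has to coerce to str first (Path objects have no startswith method). A quick standalone illustration:

    from pathlib import Path

    for source in ("https://host/model.ckpt", Path("models/model.ckpt")):
        # str() makes the scheme check safe for both input types:
        is_remote = str(source).startswith(('http:', 'https:', 'ftp:'))
        print(type(source).__name__, is_remote)  # e.g. "str True", then "PosixPath False"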
@@ -1040,7 +1039,7 @@ class ModelManager(object):
             vae = AutoencoderKL.from_pretrained(name_or_path, **vae_args, **fp_args)
         except OSError as e:
             if str(e).startswith('fp16 is not a valid'):
-                print(' | Half-precision version of model not available; fetching full-precision instead')
+                pass
             else:
                 deferred_error = e
         if vae:
@@ -295,7 +295,8 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             for idx in range(len(model_names))
             if "default" in conf[model_names[idx]]
         ]
-        return (model_names, defaults[0])
+        default = defaults[0] if len(defaults)>0 else 0
+        return (model_names, default)
 
     def marshall_arguments(self) -> dict:
         args = dict()
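This change guards the case where no model in the config carries a "default" flag, in which case defaults is empty and defaults[0] raises IndexError. A minimal sketch with made-up names:

    model_names = ["sd-1.5", "sd-2.1"]  # hypothetical model list
    defaults = []                       # nothing marked "default" in the config

    # Old behavior: defaults[0] -> IndexError. New behavior falls back to index 0:
    default = defaults[0] if len(defaults) > 0 else 0
    print(model_names[default])  # sd-1.5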
@@ -284,9 +284,9 @@ class ProgressBar():
 
 def download_with_progress_bar(url:str, dest:Path)->bool:
     try:
-        if not os.path.exists(dest):
-            os.makedirs((os.path.dirname(dest) or '.'), exist_ok=True)
-            request.urlretrieve(url,dest,ProgressBar(os.path.basename(dest)))
+        if not dest.exists():
+            dest.parent.mkdir(parents=True, exist_ok=True)
+            request.urlretrieve(url,dest,ProgressBar(dest.stem))
             return True
         else:
             return True
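The os.path calls here were swapped for their pathlib equivalents; note that dest.stem drops the extension where os.path.basename kept it, so the progress-bar label changes slightly. A standalone comparison (the path is an arbitrary example):

    import os
    from pathlib import Path

    dest = Path("/tmp/invokeai-demo/models/model.ckpt")  # arbitrary example path
    dest.parent.mkdir(parents=True, exist_ok=True)       # was: os.makedirs(os.path.dirname(dest) or '.', exist_ok=True)
    print(dest.exists())                                 # was: os.path.exists(dest)
    print(os.path.basename(dest), "->", dest.stem)       # model.ckpt -> model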
@@ -36,6 +36,7 @@ classifiers = [
 dependencies = [
     "accelerate",
     "albumentations",
+    "click",
     "clip_anytorch",  # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
     "datasets",
     "diffusers[torch]~=0.11",
|
|||||||
|
|
||||||
# legacy entrypoints; provided for backwards compatibility
|
# legacy entrypoints; provided for backwards compatibility
|
||||||
"invoke.py" = "ldm.invoke.CLI:main"
|
"invoke.py" = "ldm.invoke.CLI:main"
|
||||||
"configure_invokeai.py" = "ldm.invoke.config.configure_invokeai:main"
|
"configure_invokeai.py" = "ldm.invoke.config.invokeai_configure:main"
|
||||||
"textual_inversion.py" = "ldm.invoke.training.textual_inversion:main"
|
"textual_inversion.py" = "ldm.invoke.training.textual_inversion:main"
|
||||||
"merge_embeddings.py" = "ldm.invoke.merge_diffusers:main"
|
"merge_embeddings.py" = "ldm.invoke.merge_diffusers:main"
|
||||||
|
|
||||||
# modern entrypoints
|
# modern entrypoints
|
||||||
"invokeai" = "ldm.invoke.CLI:main"
|
"invokeai" = "ldm.invoke.CLI:main"
|
||||||
"invokeai-configure" = "ldm.invoke.config.configure_invokeai:main"
|
"invokeai-configure" = "ldm.invoke.config.invokeai_configure:main"
|
||||||
"invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging
|
"invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging
|
||||||
"invokeai-ti" = "ldm.invoke.training.textual_inversion:main"
|
"invokeai-ti" = "ldm.invoke.training.textual_inversion:main"
|
||||||
|
|
||||||
|
@@ -2,8 +2,8 @@
 # Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
 
 import warnings
-from ldm.invoke.config import configure_invokeai
+from ldm.invoke.config import invokeai_configure
 
 if __name__ == '__main__':
-    warnings.warn("configire_invokeai.py is deprecated, please run 'invoke'", DeprecationWarning)
+    warnings.warn("configure_invokeai.py is deprecated, please run 'invokai-configure'", DeprecationWarning)
     configure_invokeai.main()
scripts/pypi_helper.py (new file, 27 lines)
@@ -0,0 +1,27 @@
+import requests as request
+
+import ldm.invoke._version as version
+
+local_version = str(version.__version__)
+
+
+def get_pypi_versions(package_name="InvokeAI") -> list[str]:
+    """Get the versions of the package from PyPI"""
+    url = f"https://pypi.org/pypi/{package_name}/json"
+    response = request.get(url).json()
+    versions: list[str] = list(response["releases"].keys())
+    return versions
+
+
+def local_on_pypi(package_name="InvokeAI", local_version=local_version) -> bool:
+    """Compare the versions of the package from PyPI and the local package"""
+    pypi_versions = get_pypi_versions(package_name)
+    return local_version in pypi_versions
+
+
+if __name__ == "__main__":
+    package_name = "InvokeAI"
+    if local_on_pypi():
+        print(f"Package {package_name} is up to date")
+    else:
+        print(f"Package {package_name} is not up to date")
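A hypothetical local run of the new helper (it needs the requests package and an importable ldm.invoke._version, i.e. execution from the repository root):

    from scripts.pypi_helper import get_pypi_versions, local_on_pypi

    print(get_pypi_versions("InvokeAI")[:3])       # a few of the published release strings
    print(local_on_pypi("InvokeAI", "2.3.0-rc4"))  # True once this release is on PyPI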