Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
* add whole <style token> to vocab for concept library embeddings
* add ability to load multiple concept .bin files
* make --log_tokenization respect custom tokens
* start working on concept downloading system
* preliminary support for dynamic loading and merging of multiple embedded models

  - The embedding_manager is now enhanced with ldm.invoke.concepts_lib, which handles dynamic downloading and caching of embedded models from the Hugging Face concepts library (https://huggingface.co/sd-concepts-library).
  - Downloading of an embedded model is triggered by the presence of one or more <concept> tags in the prompt.
  - Once the embedded model is downloaded, its trigger phrase is loaded into the embedding manager and the prompt's <concept> tag is replaced with the <trigger_phrase>.
  - The downloaded model stays on disk for fast loading later.
  - The CLI autocomplete will complete partial <concept> tags for you. Type a '<' and hit tab to get all ~700 concepts (see the sketch just after these notes).

  BUGS AND LIMITATIONS:

  - MODEL NAME VS TRIGGER PHRASE

    You must use the name of the concept embed model from the SD library, not the trigger phrase itself. Usually these are the same, but not always. For example, the model named "hoi4-leaders" corresponds to the trigger "<HOI4-Leader>".

    One reason for this design choice is that there is no apparent constraint on the uniqueness of trigger phrases, so one trigger phrase may map onto multiple models; we therefore use the model name instead. The second reason is that I know of no way to search Hugging Face for models with a given trigger phrase, so we would have to download all ~700 models just to index the phrases.

    The problem this presents is that it may confuse users who want to reuse prompts from distributions that use the trigger phrase directly. Usually this will work, but not always.

  - WON'T WORK ON A FIREWALLED SYSTEM

    If the host running IAI has no internet connection, it can't download the concept libraries. I will add a script that allows users to preload a list of concept models.

  - BUG IN PROMPT REPLACEMENT WHEN MODEL NOT FOUND

    There's a small bug that occurs when the user provides an invalid model name: the <concept> gets replaced with <None> in the prompt.

* fix loading .pt embeddings; allow multi-vector embeddings; warn on dupes
* simplify replacement logic and remove cuda assumption
* download list of concepts from hugging face
* remove misleading customization of '*' placeholder

  The existing code as-is did not do anything, and it is unclear what it was supposed to do. The obvious alternative -- using 'placeholder_strings' instead of 'placeholder_tokens' to match model.params.personalization_config.params.placeholder_strings -- caused a crash. I think this is because the passed string also needed to be handed over on init of PersonalizedBase as the 'placeholder_token' argument. This is weird config-dict magic and I don't want to touch it. Put a breakpoint in personalized.py line 116 (top of PersonalizedBase.__init__) if you want to have a crack at it yourself.

* address all the issues raised by damian0815 in review of PR #1526
* actually resize the token_embeddings
* multiple improvements to the concept loader based on code reviews

  1. Activated the --embedding_directory option (alias --embedding_path) to load a single embedding or an entire directory of embeddings at startup time.
  2. Can turn off automatic loading of embeddings using --no-embeddings.
  3. Embedding checkpoints are scanned with the pickle scanner.
  4. More informative error messages when a concept can't be loaded due either to a 404 not-found error or a network error.

* autocomplete terms end with ">" now
* fix startup error and network unreachable

  1. If the .invokeai file does not contain the --root and --outdir options, invoke.py will now fix it.
  2. Catch and handle network problems when downloading Hugging Face textual inversion concepts.

* fix misformatted error string

Co-authored-by: Damian Stewart <d@damianstewart.com>
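For illustration, here is a minimal sketch of the <concept> completion described above, using only the Completer class, COMMANDS tuple, and methods defined in the readline.py file shown below. It assumes network access to the Hugging Face concepts library so that Concepts().list_concepts() can fetch the concept index; 'hoi4-leaders' is the example model name from the notes above, and 'my-local-style' is a hypothetical locally loaded trigger term.

# Sketch only -- not part of the file below.
from ldm.invoke.readline import Completer, COMMANDS

completer = Completer(COMMANDS)                      # fetches/caches the ~700 concept names
print(completer._concept_completions('<hoi', 0))     # e.g. ['<hoi4-leaders>', ...]

# Trigger terms from locally loaded embeddings can be merged into the same list:
completer.add_embedding_terms(['my-local-style'])
print(completer._concept_completions('<my-local', 0))   # includes '<my-local-style>'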
424 lines · 13 KiB · Python
"""
|
|
Readline helper functions for invoke.py.
|
|
You may import the global singleton `completer` to get access to the
|
|
completer object itself. This is useful when you want to autocomplete
|
|
seeds:
|
|
|
|
from ldm.invoke.readline import completer
|
|
completer.add_seed(18247566)
|
|
completer.add_seed(9281839)
|
|
"""
|
|
import os
|
|
import re
|
|
import atexit
|
|
from ldm.invoke.args import Args
|
|
from ldm.invoke.concepts_lib import Concepts
|
|
|
|
# ---------------readline utilities---------------------
|
|
try:
|
|
import readline
|
|
readline_available = True
|
|
except (ImportError,ModuleNotFoundError) as e:
|
|
print(f'** An error occurred when loading the readline module: {str(e)}')
|
|
readline_available = False
|
|
|
|
IMG_EXTENSIONS    = ('.png','.jpg','.jpeg','.PNG','.JPG','.JPEG','.gif','.GIF')
WEIGHT_EXTENSIONS = ('.ckpt','.bae')
TEXT_EXTENSIONS   = ('.txt','.TXT')
CONFIG_EXTENSIONS = ('.yaml','.yml')
COMMANDS = (
    '--steps','-s',
    '--seed','-S',
    '--iterations','-n',
    '--width','-W','--height','-H',
    '--cfg_scale','-C',
    '--threshold',
    '--perlin',
    '--grid','-g',
    '--individual','-i',
    '--save_intermediates',
    '--init_img','-I',
    '--init_mask','-M',
    '--init_color',
    '--strength','-f',
    '--variants','-v',
    '--outdir','-o',
    '--sampler','-A','-m',
    '--embedding_path',
    '--device',
    '--grid','-g',
    '--facetool','-ft',
    '--facetool_strength','-G',
    '--codeformer_fidelity','-cf',
    '--upscale','-U',
    '-save_orig','--save_original',
    '--skip_normalize','-x',
    '--log_tokenization','-t',
    '--hires_fix',
    '--inpaint_replace','-r',
    '--png_compression','-z',
    '--text_mask','-tm',
    '!fix','!fetch','!replay','!history','!search','!clear',
    '!models','!switch','!import_model','!edit_model','!del_model',
    '!mask',
)
MODEL_COMMANDS = (
    '!switch',
    '!edit_model',
    '!del_model',
)
WEIGHT_COMMANDS = (
    '!import_model',
)
IMG_PATH_COMMANDS = (
    '--outdir[=\s]',
)
TEXT_PATH_COMMANDS=(
    '!replay',
)
IMG_FILE_COMMANDS=(
    '!fix',
    '!fetch',
    '!mask',
    '--init_img[=\s]','-I',
    '--init_mask[=\s]','-M',
    '--init_color[=\s]',
    '--embedding_path[=\s]',
)

path_regexp   = '(' + '|'.join(IMG_PATH_COMMANDS+IMG_FILE_COMMANDS) + ')\s*\S*$'
weight_regexp = '(' + '|'.join(WEIGHT_COMMANDS) + ')\s*\S*$'
text_regexp   = '(' + '|'.join(TEXT_PATH_COMMANDS) + ')\s*\S*$'

class Completer(object):
    def __init__(self, options, models=[]):
        self.options     = sorted(options)
        self.models      = sorted(models)
        self.seeds       = set()
        self.matches     = list()
        self.default_dir = None
        self.linebuffer  = None
        self.auto_history_active = True
        self.extensions = None
        self.concepts = Concepts().list_concepts()
        return

    def complete(self, text, state):
        '''
        Completes invoke command line.
        BUG: it doesn't correctly complete files that have spaces in the name.
        '''
        buffer = readline.get_line_buffer()

        if state == 0:

            # extensions defined, so go directly into path completion mode
            if self.extensions is not None:
                self.matches = self._path_completions(text, state, self.extensions)

            # looking for an image file
            elif re.search(path_regexp,buffer):
                do_shortcut = re.search('^'+'|'.join(IMG_FILE_COMMANDS),buffer)
                self.matches = self._path_completions(text, state, IMG_EXTENSIONS,shortcut_ok=do_shortcut)

            # looking for a seed
            elif re.search('(-S\s*|--seed[=\s])\d*$',buffer):
                self.matches= self._seed_completions(text,state)

            elif re.search('<[\w-]*$',buffer):
                self.matches= self._concept_completions(text,state)

            # looking for a model
            elif re.match('^'+'|'.join(MODEL_COMMANDS),buffer):
                self.matches= self._model_completions(text, state)

            elif re.search(weight_regexp,buffer):
                self.matches = self._path_completions(text, state, WEIGHT_EXTENSIONS)

            elif re.search(text_regexp,buffer):
                self.matches = self._path_completions(text, state, TEXT_EXTENSIONS)

            # This is the first time for this text, so build a match list.
            elif text:
                self.matches = [
                    s for s in self.options if s and s.startswith(text)
                ]
            else:
                self.matches = self.options[:]

        # Return the state'th item from the match list,
        # if we have that many.
        try:
            response = self.matches[state]
        except IndexError:
            response = None
        return response

    def complete_extensions(self, extensions:list):
        '''
        If called with a list of extensions, will force completer
        to do file path completions.
        '''
        self.extensions=extensions

    def add_history(self,line):
        '''
        Pass thru to readline
        '''
        if not self.auto_history_active:
            readline.add_history(line)

    def clear_history(self):
        '''
        Pass clear_history() thru to readline
        '''
        readline.clear_history()

    def search_history(self,match:str):
        '''
        Like show_history() but only shows items that
        contain the match string.
        '''
        self.show_history(match)

    def remove_history_item(self,pos):
        readline.remove_history_item(pos)

    def add_seed(self, seed):
        '''
        Add a seed to the autocomplete list for display when -S is autocompleted.
        '''
        if seed is not None:
            self.seeds.add(str(seed))

    def set_default_dir(self, path):
        self.default_dir=path

    def set_options(self,options):
        self.options = options

    def get_line(self,index):
        try:
            line = self.get_history_item(index)
        except IndexError:
            return None
        return line

    def get_current_history_length(self):
        return readline.get_current_history_length()

    def get_history_item(self,index):
        return readline.get_history_item(index)

    def show_history(self,match=None):
        '''
        Print the session history using the pydoc pager
        '''
        import pydoc
        lines = list()
        h_len = self.get_current_history_length()
        if h_len < 1:
            print('<empty history>')
            return

        for i in range(0,h_len):
            line = self.get_history_item(i+1)
            if match and match not in line:
                continue
            lines.append(f'[{i+1}] {line}')
        pydoc.pager('\n'.join(lines))

    def set_line(self,line)->None:
        '''
        Set the default string displayed in the next line of input.
        '''
        self.linebuffer = line
        readline.redisplay()

    def add_model(self,model_name:str)->None:
        '''
        add a model name to the completion list
        '''
        self.models.append(model_name)

    def del_model(self,model_name:str)->None:
        '''
        removes a model name from the completion list
        '''
        self.models.remove(model_name)

    def _seed_completions(self, text, state):
        m = re.search('(-S\s?|--seed[=\s]?)(\d*)',text)
        if m:
            switch  = m.groups()[0]
            partial = m.groups()[1]
        else:
            switch  = ''
            partial = text

        matches = list()
        for s in self.seeds:
            if s.startswith(partial):
                matches.append(switch+s)
        matches.sort()
        return matches

    def add_embedding_terms(self, terms:list[str]):
        self.concepts = Concepts().list_concepts()
        self.concepts.extend(terms)

    def _concept_completions(self, text, state):
        partial = text[1:]  # this removes the leading '<'
        if len(partial) == 0:
            return self.concepts  # whole dump - think if user wants this!

        matches = list()
        for concept in self.concepts:
            if concept.startswith(partial):
                matches.append(f'<{concept}>')
        matches.sort()
        return matches

    def _model_completions(self, text, state):
        m = re.search('(!switch\s+)(\w*)',text)
        if m:
            switch  = m.groups()[0]
            partial = m.groups()[1]
        else:
            switch  = ''
            partial = text
        matches = list()
        for s in self.models:
            if s.startswith(partial):
                matches.append(switch+s)
        matches.sort()
        return matches

    def _pre_input_hook(self):
        if self.linebuffer:
            readline.insert_text(self.linebuffer)
            readline.redisplay()
            self.linebuffer = None

    def _path_completions(self, text, state, extensions, shortcut_ok=True):
        # separate the switch from the partial path
        match = re.search('^(-\w|--\w+=?)(.*)',text)
        if match is None:
            switch = None
            partial_path = text
        else:
            switch,partial_path = match.groups()

        partial_path = partial_path.lstrip()

        matches = list()
        path = os.path.expanduser(partial_path)

        if os.path.isdir(path):
            dir = path
        elif os.path.dirname(path) != '':
            dir = os.path.dirname(path)
        else:
            dir = ''
            path= os.path.join(dir,path)

        dir_list = os.listdir(dir or '.')
        if shortcut_ok and os.path.exists(self.default_dir) and dir=='':
            dir_list += os.listdir(self.default_dir)

        for node in dir_list:
            if node.startswith('.') and len(node) > 1:
                continue
            full_path = os.path.join(dir, node)

            if not (node.endswith(extensions) or os.path.isdir(full_path)):
                continue

            if path and not full_path.startswith(path):
                continue

            if switch is None:
                match_path = os.path.join(dir,node)
                matches.append(match_path+'/' if os.path.isdir(full_path) else match_path)
            elif os.path.isdir(full_path):
                matches.append(
                    switch+os.path.join(os.path.dirname(full_path), node) + '/'
                )
            elif node.endswith(extensions):
                matches.append(
                    switch+os.path.join(os.path.dirname(full_path), node)
                )

        return matches

class DummyCompleter(Completer):
    def __init__(self,options):
        super().__init__(options)
        self.history = list()

    def add_history(self,line):
        self.history.append(line)

    def clear_history(self):
        self.history = list()

    def get_current_history_length(self):
        return len(self.history)

    def get_history_item(self,index):
        return self.history[index-1]

    def remove_history_item(self,index):
        return self.history.pop(index-1)

    def set_line(self,line):
        print(f'# {line}')

def generic_completer(commands:list)->Completer:
    if readline_available:
        completer = Completer(commands,[])
        readline.set_completer(completer.complete)
        readline.set_pre_input_hook(completer._pre_input_hook)
        readline.set_completer_delims(' ')
        readline.parse_and_bind('tab: complete')
        readline.parse_and_bind('set print-completions-horizontally off')
        readline.parse_and_bind('set page-completions on')
        readline.parse_and_bind('set skip-completed-text on')
        readline.parse_and_bind('set show-all-if-ambiguous on')
    else:
        completer = DummyCompleter(commands)
    return completer

def get_completer(opt:Args, models=[])->Completer:
    if readline_available:
        completer = Completer(COMMANDS,models)

        readline.set_completer(
            completer.complete
        )
        # pyreadline3 does not have a set_auto_history() method
        try:
            readline.set_auto_history(False)
            completer.auto_history_active = False
        except:
            completer.auto_history_active = True
        readline.set_pre_input_hook(completer._pre_input_hook)
        readline.set_completer_delims(' ')
        readline.parse_and_bind('tab: complete')
        readline.parse_and_bind('set print-completions-horizontally off')
        readline.parse_and_bind('set page-completions on')
        readline.parse_and_bind('set skip-completed-text on')
        readline.parse_and_bind('set show-all-if-ambiguous on')

        histfile = os.path.join(os.path.expanduser(opt.outdir), '.invoke_history')
        try:
            readline.read_history_file(histfile)
            readline.set_history_length(1000)
        except FileNotFoundError:
            pass
        atexit.register(readline.write_history_file, histfile)

    else:
        completer = DummyCompleter(COMMANDS)
    return completer
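Usage note: a minimal, hypothetical driver loop sketching how get_completer() is meant to be wired into an interactive CLI. The real invoke.py builds opt with ldm.invoke.args.Args; here a SimpleNamespace with an outdir attribute stands in for it, the model name is only an example, and history is written to <outdir>/.invoke_history at exit, so that directory should exist.

# Sketch only -- hypothetical driver, not part of InvokeAI.
from types import SimpleNamespace
from ldm.invoke.readline import get_completer

opt = SimpleNamespace(outdir='outputs')                 # stand-in for an Args object; only .outdir is read here
completer = get_completer(opt, models=['stable-diffusion-1.5'])

while True:
    command = input('invoke> ')                         # tab completion is active when readline is available
    if command in ('q', 'quit'):
        break
    completer.add_seed(42)                              # offered when completing -S/--seed
    completer.add_history(command)                      # recorded only when readline auto-history is off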