Mirror of https://github.com/invoke-ai/InvokeAI
Merge remote-tracking branch 'origin/main' into dev/diffusers

# Conflicts:
#    ldm/invoke/generator/inpaint.py
#    ldm/invoke/generator/txt2img2img.py
@@ -14,8 +14,8 @@ if "%1" == "use-cache" (

 @rem Config
 @rem this should be changed to the tagged release!
-set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
-@rem set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
+@rem set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
+set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
 set INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
 set TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
 set PYTHON_URL=https://www.python.org/downloads/windows/
@@ -9,8 +9,8 @@ cd "$scriptdir"
 deactivate >/dev/null 2>&1

 # this should be changed to the tagged release!
-INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
-# INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
+# INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
+INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
 INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
 TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
 MINIMUM_PYTHON_VERSION=3.9.0
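Both installer hunks make the same change: INVOKE_AI_SRC now points at the v2.2.4 tag archive instead of the moving main branch, so a fresh install always fetches the tagged release. The rest of the installer logic is not part of this diff; the sketch below is a hypothetical illustration only (fetch_source and the dest path are invented names, not installer code) of what pinning the source URL means in practice: the archive can be downloaded and unpacked deterministically.

# Hypothetical illustration, not code from the installer scripts: download the
# pinned source archive and unpack it, which is the reproducible behavior the
# INVOKE_AI_SRC change above is meant to guarantee.
import io
import urllib.request
import zipfile

INVOKE_AI_SRC = "https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip"

def fetch_source(url: str = INVOKE_AI_SRC, dest: str = "InvokeAI-src") -> str:
    """Download the release archive at `url` and extract it under `dest`."""
    with urllib.request.urlopen(url) as resp:
        data = resp.read()
    with zipfile.ZipFile(io.BytesIO(data)) as archive:
        archive.extractall(dest)  # GitHub archives contain a single top-level source directory
    return dest

if __name__ == "__main__":
    print(f"Sources extracted under: {fetch_source()}")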
@@ -810,6 +810,7 @@ class Generate:
         if not self.generators.get('inpaint'):
             from ldm.invoke.generator.inpaint import Inpaint
             self.generators['inpaint'] = Inpaint(self.model, self.precision)
+        self.generators['inpaint'].free_gpu_mem = self.free_gpu_mem
         return self.generators['inpaint']

     # "omnibus" supports the runwayML custom inpainting model, which does
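The one added line copies the instance-wide free_gpu_mem flag onto the lazily constructed inpaint generator, so inpainting obeys the same GPU-memory policy as the rest of the pipeline. Below is a minimal standalone sketch of that lazy-construction-plus-flag-forwarding pattern; GeneratorHost, _load_generator and DummyInpaint are illustrative names for this sketch, not InvokeAI API.

# Illustrative sketch only -- not the actual Generate class. It shows the same
# pattern as the hunk above: build a generator on first request, then forward
# shared settings such as free_gpu_mem to the instance that was built.
class GeneratorHost:
    def __init__(self, model, precision, free_gpu_mem=False):
        self.model = model
        self.precision = precision
        self.free_gpu_mem = free_gpu_mem
        self.generators = {}

    def _load_generator(self, name, factory):
        if not self.generators.get(name):
            self.generators[name] = factory(self.model, self.precision)
        # Forward the host-wide flag on every call, so a later change to
        # free_gpu_mem is picked up even by generators built earlier.
        self.generators[name].free_gpu_mem = self.free_gpu_mem
        return self.generators[name]

class DummyInpaint:
    def __init__(self, model, precision):
        self.model, self.precision = model, precision

host = GeneratorHost(model="some-checkpoint", precision="float16", free_gpu_mem=True)
inpaint = host._load_generator("inpaint", DummyInpaint)
assert inpaint.free_gpu_mem is True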
@@ -591,6 +591,12 @@ class Args(object):
             action='store_true',
             help='Generates debugging image to display'
         )
+        render_group.add_argument(
+            '--karras_max',
+            type=int,
+            default=None,
+            help="control the point at which the K* samplers will shift from using the Karras noise schedule (good for low step counts) to the LatentDiffusion noise schedule (good for high step counts). Set to 0 to use LatentDiffusion for all step values, and to a high value (e.g. 1000) to use Karras for all step values. [29]."
+        )
         # Restoration related args
         postprocessing_group.add_argument(
             '--no_restore',
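The help string describes a step-count cutoff: below it the K* samplers use the Karras et al. (2022) noise schedule, at or above it they fall back to the model's LatentDiffusion schedule, with 0 and a large value (e.g. 1000) as the two extremes and 29 shown as the bracketed default. The following self-contained sketch shows only that selection logic; it is not InvokeAI's sampler code, the sigma range defaults are typical Stable Diffusion values assumed here, and model_sigmas is an assumed stand-in (log-spaced sigmas) for the LatentDiffusion schedule.

# Sketch of the --karras_max semantics described in the help text above.
import math
import torch

def karras_sigmas(n, sigma_min=0.03, sigma_max=14.6, rho=7.0):
    """Karras et al. (2022) schedule: noise levels spaced densely near sigma_min."""
    ramp = torch.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

def model_sigmas(n, sigma_min=0.03, sigma_max=14.6):
    """Assumed stand-in for the LatentDiffusion schedule: log-spaced sigmas."""
    return torch.exp(torch.linspace(math.log(sigma_max), math.log(sigma_min), n))

def choose_sigmas(steps, karras_max=None):
    """Pick a schedule based on the step count and the --karras_max cutoff."""
    if karras_max is None:
        karras_max = 29  # the bracketed default shown in the help string
    # karras_max == 0  -> never use Karras; a large value (e.g. 1000) -> always use it.
    if steps < karras_max:
        return karras_sigmas(steps)
    return model_sigmas(steps)

if __name__ == "__main__":
    print(choose_sigmas(15)[:3])   # low step count  -> Karras schedule
    print(choose_sigmas(50)[:3])   # high step count -> LatentDiffusion-style schedule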
@@ -72,16 +72,23 @@ this program and resume later.\n'''
 #--------------------------------------------
 def postscript(errors: None):
     if not any(errors):
-        message='''\n** Model Installation Successful **\nYou're all set! You may now launch InvokeAI using one of these two commands:
-Web version:
-python scripts/invoke.py --web (connect to http://localhost:9090)
-Command-line version:
-python scripts/invoke.py
-
-If you installed manually, remember to activate the 'invokeai'
-environment before running invoke.py. If you installed using the
-automated installation script, execute "invoke.sh" (Linux/Mac) or
-"invoke.bat" (Windows) to start InvokeAI.
+        message='''
+** Model Installation Successful **
+
+You're all set!
+
+If you installed using one of the automated installation scripts,
+execute 'invoke.sh' (Linux/macOS) or 'invoke.bat' (Windows) to
+start InvokeAI.
+
+If you installed manually, activate the 'invokeai' environment
+(e.g. 'conda activate invokeai'), then run one of the following
+commands to start InvokeAI.
+
+Web UI:
+python scripts/invoke.py --web # (connect to http://localhost:9090)
+Command-line interface:
+python scripts/invoke.py

 Have fun!
 '''