Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
Update local development from origin.
@@ -30,6 +30,7 @@ dependencies:
   - nomkl
   - numpy==1.23.2
   - omegaconf==2.1.1
+  - openh264==2.3.0
   - onnx==1.12.0
   - onnxruntime==1.12.1
   - protobuf==3.20.1
@@ -339,6 +339,12 @@ class Args(object):
             action='store_true',
             help='Deprecated way to set --precision=float32',
         )
+        model_group.add_argument(
+            '--free_gpu_mem',
+            dest='free_gpu_mem',
+            action='store_true',
+            help='Force free gpu memory before final decoding',
+        )
         model_group.add_argument(
             '--precision',
             dest='precision',
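
For reference, a minimal, self-contained sketch of how the --free_gpu_mem definition above behaves once parsed. The parser and argument group below are illustrative stand-ins, not the project's real Args plumbing; only the --free_gpu_mem definition mirrors the hunk.

import argparse

# Hypothetical stand-in parser; only the --free_gpu_mem definition mirrors the hunk above.
parser = argparse.ArgumentParser()
model_group = parser.add_argument_group('model')
model_group.add_argument(
    '--free_gpu_mem',
    dest='free_gpu_mem',
    action='store_true',
    help='Force free gpu memory before final decoding',
)

opt = parser.parse_args(['--free_gpu_mem'])
print(opt.free_gpu_mem)   # True when the flag is given, False by default

Passing --free_gpu_mem on the command line therefore flips opt.free_gpu_mem to True, which the later hunks consume.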
@@ -27,6 +27,10 @@ class Txt2Img(Generator):
                 height // self.downsampling_factor,
                 width // self.downsampling_factor,
             ]
+
+            if self.free_gpu_mem and self.model.model.device != self.model.device:
+                self.model.model.to(self.model.device)
+
             samples, _ = sampler.sample(
                 batch_size = 1,
                 S = steps,
@@ -39,6 +43,10 @@ class Txt2Img(Generator):
                 eta = ddim_eta,
                 img_callback = step_callback
             )
+
+            if self.free_gpu_mem:
+                self.model.model.to("cpu")
+
             return self.sample_to_image(samples)

         return make_image
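
The two hunks above add a simple offload step: the diffusion model is moved onto the GPU only when sampling is about to run, then pushed back to the CPU so the final decode has the GPU memory to itself. A hedged, generic sketch of that pattern follows; the module and function names are illustrative, not the project's API.

import torch

def sample_then_decode(denoiser, run_sampler, decode_latents,
                       free_gpu_mem=True, device=torch.device('cuda')):
    # Move the denoising model onto the GPU only when sampling is about to run.
    if free_gpu_mem and next(denoiser.parameters()).device.type != device.type:
        denoiser.to(device)
    latents = run_sampler()
    # Send it back to the CPU afterwards so the final decode has the GPU to itself.
    if free_gpu_mem:
        denoiser.to('cpu')
    return decode_latents(latents)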
@@ -655,6 +655,7 @@ class Generate:
         if not self.generators.get('txt2img'):
             from ldm.dream.generator.txt2img import Txt2Img
             self.generators['txt2img'] = Txt2Img(self.model, self.precision)
+            self.generators['txt2img'].free_gpu_mem = self.free_gpu_mem
         return self.generators['txt2img']

     def _make_inpaint(self):
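
The single added line above relies on the method's cached-factory shape: the txt2img generator is built once, stored in self.generators, and reused, so copying free_gpu_mem onto it at construction time is enough. A runnable sketch of that shape with placeholder classes, assuming nothing about the real Txt2Img or Generate classes beyond what the hunk shows:

class _Txt2ImgStub:
    """Placeholder for the real Txt2Img generator."""
    def __init__(self, model, precision):
        self.model = model
        self.precision = precision
        self.free_gpu_mem = False

class _GenerateStub:
    """Placeholder mirroring the _make_txt2img() pattern shown above."""
    def __init__(self, model, precision, free_gpu_mem=False):
        self.model = model
        self.precision = precision
        self.free_gpu_mem = free_gpu_mem
        self.generators = {}

    def _make_txt2img(self):
        if not self.generators.get('txt2img'):
            # Built once, cached, then reused; the new option is forwarded here.
            self.generators['txt2img'] = _Txt2ImgStub(self.model, self.precision)
            self.generators['txt2img'].free_gpu_mem = self.free_gpu_mem
        return self.generators['txt2img']

gen = _GenerateStub(model='model', precision='float16', free_gpu_mem=True)
assert gen._make_txt2img() is gen._make_txt2img()     # same cached instance
assert gen._make_txt2img().free_gpu_mem is True       # option propagated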
@@ -13,7 +13,7 @@
 "source": [
 "Note that you will need NVIDIA drivers, Python 3.10, and Git installed\n",
 "beforehand - simplified\n",
-"[step-by-step instructions](https://github.com/lstein/stable-diffusion/wiki/Easy-peasy-Windows-install)\n",
+"[step-by-step instructions](https://github.com/invoke-ai/InvokeAI/wiki/Easy-peasy-Windows-install)\n",
 "are available in the wiki (you'll only need steps 1, 2, & 3 )"
 ]
 },
@@ -40,8 +40,8 @@
 "outputs": [],
 "source": [
 "%%cmd\n",
-"git clone https://github.com/lstein/stable-diffusion.git\n",
-"cd /content/stable-diffusion/\n",
+"git clone https://github.com/invoke-ai/InvokeAI.git\n",
+"cd /content/InvokeAI/\n",
 "git checkout --quiet development"
 ]
 },
@@ -52,14 +52,14 @@
 "outputs": [],
 "source": [
 "%%cmd\n",
-"pew new --python 3.10 -r requirements-lin-win-colab-CUDA.txt --dont-activate stable-diffusion"
+"pew new --python 3.10 -r requirements-lin-win-colab-CUDA.txt --dont-activate invoke-ai"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# Switch the notebook kernel to the new 'stable-diffusion' environment!\n",
+"# Switch the notebook kernel to the new 'invoke-ai' environment!\n",
 "\n",
 "## VSCode: restart VSCode and come back to this cell\n",
 "\n",
@@ -67,7 +67,7 @@
 "1. Type \"Select Interpreter\" and select \"Jupyter: Select Interpreter to Start Jupyter Server\"\n",
 "1. VSCode will say that it needs to install packages. Click the \"Install\" button.\n",
 "1. Once the install is finished, do 1 & 2 again\n",
-"1. Pick 'stable-diffusion'\n",
+"1. Pick 'invoke-ai'\n",
 "1. Run the following cell"
 ]
 },
@@ -77,7 +77,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"%cd stable-diffusion"
+"%cd InvokeAI"
 ]
 },
 {
@@ -88,7 +88,7 @@
 "## Jupyter/JupyterLab\n",
 "\n",
 "1. Run the cell below\n",
-"1. Click on the toolbar where it says \"(ipyknel)\" ↗️. You should get a pop-up asking you to \"Select Kernel\". Pick 'stable-diffusion' from the drop-down.\n"
+"1. Click on the toolbar where it says \"(ipyknel)\" ↗️. You should get a pop-up asking you to \"Select Kernel\". Pick 'invoke-ai' from the drop-down.\n"
 ]
 },
 {
@@ -106,9 +106,9 @@
 "source": [
 "# DO NOT RUN THIS CELL IF YOU ARE USING VSCODE!!\n",
 "%%cmd\n",
-"pew workon stable-diffusion\n",
+"pew workon invoke-ai\n",
 "pip3 install ipykernel\n",
-"python -m ipykernel install --name=stable-diffusion"
+"python -m ipykernel install --name=invoke-ai"
 ]
 },
 {
@@ -182,15 +182,20 @@
 "\n",
 "Now:\n",
 "\n",
-"1. `cd` to wherever the 'stable-diffusion' directory is\n",
-"1. Run `pew workon stable-diffusion`\n",
+"1. `cd` to wherever the 'InvokeAI' directory is\n",
+"1. Run `pew workon invoke-ai`\n",
 "1. Run `winpty python scripts\\dream.py`"
 ]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": []
 }
 ],
 "metadata": {
 "kernelspec": {
-"display_name": "Python 3.10.6 ('ldm')",
+"display_name": "Python 3.10.6 64-bit",
 "language": "python",
 "name": "python3"
 },
@@ -208,7 +213,7 @@
 },
 "vscode": {
 "interpreter": {
-"hash": "a05e4574567b7bc2c98f7f9aa579f9ea5b8739b54844ab610ac85881c4be2659"
+"hash": "5e164cef426134bf171f386fbddecb52046b6c1479f922ab8dfdd30df05e0e80"
 }
 }
 },
@@ -108,6 +108,8 @@ def main():

     # preload the model
     gen.load_model()
+    #set additional option
+    gen.free_gpu_mem = opt.free_gpu_mem

     if not infile:
         print(