mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00

commit c24a16ccb0: resolved merge conflicts

.gitignore (vendored): 13 changes
@@ -1,3 +1,7 @@
+# ignore default image save location and model symbolic link
+outputs/
+models/ldm/stable-diffusion-v1/model.ckpt
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
@@ -6,6 +10,10 @@ __pycache__/
 # C extensions
 *.so
 
+# emacs autosave files
+*~
+#*
+
 # Distribution / packaging
 .Python
 build/
@@ -20,6 +28,7 @@ parts/
 sdist/
 var/
 wheels/
+pip-wheel-metadata/
 share/python-wheels/
 *.egg-info/
 .installed.cfg
@@ -86,6 +95,7 @@ ipython_config.py
 # For a library or package, you might want to ignore these files since the code is
 # intended to run in multiple environments; otherwise, check them in:
 # .python-version
+.python-version
 
 # pipenv
 # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
@@ -109,7 +119,7 @@ ipython_config.py
 # https://pdm.fming.dev/#use-with-ide
 .pdm.toml
 
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
 __pypackages__/
 
 # Celery stuff
@@ -159,7 +169,6 @@ cython_debug/
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 #.idea/
 
-**/*.ckpt
 src/
 logs/
 **/__pycache__/
ldm/models/diffusion/ddim.py

@@ -17,9 +17,6 @@ class DDIMSampler(object):
         self.schedule = schedule
 
     def register_buffer(self, name, attr):
-        if type(attr) == torch.Tensor:
-            if attr.device != torch.device("cuda"):
-                attr = attr.to(torch.device("cuda"))
         setattr(self, name, attr)
 
     def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
ldm/models/diffusion/plms.py

@@ -16,9 +16,6 @@ class PLMSSampler(object):
         self.schedule = schedule
 
     def register_buffer(self, name, attr):
-        if type(attr) == torch.Tensor:
-            if attr.device != torch.device("cuda"):
-                attr = attr.to(torch.device("cuda"))
         setattr(self, name, attr)
 
     def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
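Note: both samplers previously forced every registered buffer onto CUDA, which broke CPU-only runs. A minimal sketch of the device-agnostic pattern the change enables; the `Sampler` class and device selection here are illustrative, not part of the diff:

import torch

class Sampler:
    """Sketch: buffers are stored as-is; the caller controls placement."""
    def register_buffer(self, name, attr):
        # no forced .to("cuda") -- tensors keep whatever device the
        # caller put them on, so CPU-only machines work too
        setattr(self, name, attr)

sampler = Sampler()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
sampler.register_buffer("alphas", torch.linspace(0.9, 0.999, 50).to(device))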
ldm/simplet2i.py

@@ -123,7 +123,8 @@ The vast majority of these arguments default to reasonable values.
                  full_precision=False,
                  strength=0.75, # default in scripts/img2img.py
                  embedding_path=None,
-                 latent_diffusion_weights=False  # just to keep track of this parameter when regenerating prompt
+                 latent_diffusion_weights=False,  # just to keep track of this parameter when regenerating prompt
+                 device='cuda'
                  ):
         self.outdir = outdir
         self.batch_size = batch_size
@@ -147,11 +148,13 @@ The vast majority of these arguments default to reasonable values.
         self.model = None # empty for now
         self.sampler = None
         self.latent_diffusion_weights=latent_diffusion_weights
+        self.device = device
         if seed is None:
             self.seed = self._new_seed()
         else:
             self.seed = seed
 
+    @torch.no_grad()
     def txt2img(self,prompt,outdir=None,batch_size=None,iterations=None,
                 steps=None,seed=None,grid=None,individual=None,width=None,height=None,
                 cfg_scale=None,ddim_eta=None,strength=None,embedding_path=None,init_img=None,skip_normalize=False):
@@ -206,9 +209,7 @@ The vast majority of these arguments default to reasonable values.
 
         # Gawd. Too many levels of indent here. Need to refactor into smaller routines!
         try:
-            with torch.no_grad():
-                with precision_scope("cuda"):
-                    with model.ema_scope():
+            with precision_scope(self.device.type), model.ema_scope():
                         all_samples = list()
                         for n in trange(iterations, desc="Sampling"):
                             seed_everything(seed)
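Note: the hard-coded `precision_scope("cuda")` becomes `precision_scope(self.device.type)`, and the `@torch.no_grad()` decorator added above replaces the innermost `with torch.no_grad():` block. A sketch of how such a precision scope is typically wired up, assuming the `full_precision` switch mirrors the constructor argument; `nullcontext` is the standard-library no-op context:

from contextlib import nullcontext
import torch

full_precision = False  # mirrors the T2I constructor argument above
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# autocast only makes sense at reduced precision; otherwise use a no-op scope
precision_scope = torch.autocast if not full_precision else nullcontext
with precision_scope(device.type):
    x = torch.ones(1, device=device)  # runs under autocast when enabled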
@@ -281,6 +282,7 @@ The vast majority of these arguments default to reasonable values.
         return images
 
     # There is lots of shared code between this and txt2img and should be refactored.
+    @torch.no_grad()
     def img2img(self,prompt,outdir=None,init_img=None,batch_size=None,iterations=None,
                 steps=None,seed=None,grid=None,individual=None,width=None,height=None,
                 cfg_scale=None,ddim_eta=None,strength=None,embedding_path=None,skip_normalize=False):
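Note: moving `torch.no_grad()` from a nested `with` block to a method decorator disables autograd for the whole call and removes one indentation level. A self-contained illustration; the toy `sample` function is hypothetical:

import torch

@torch.no_grad()  # the entire call runs without building an autograd graph
def sample(steps: int) -> torch.Tensor:
    x = torch.randn(4)
    for _ in range(steps):
        x = x * 0.9 + torch.randn(4) * 0.1
    return x

out = sample(10)
assert not out.requires_grad  # gradients were never tracked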
@@ -331,7 +333,7 @@ The vast majority of these arguments default to reasonable values.
         assert os.path.isfile(init_img)
         init_image = self._load_img(init_img).to(self.device)
         init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
-        with precision_scope("cuda"):
+        with precision_scope(self.device.type):
             init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image))  # move to latent space
 
         sampler.make_schedule(ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False)
@@ -353,9 +355,7 @@ The vast majority of these arguments default to reasonable values.
 
         # Gawd. Too many levels of indent here. Need to refactor into smaller routines!
         try:
-            with torch.no_grad():
-                with precision_scope("cuda"):
-                    with model.ema_scope():
+            with precision_scope(self.device.type), model.ema_scope():
                         all_samples = list()
                         for n in trange(iterations, desc="Sampling"):
                             seed_everything(seed)
@@ -448,11 +448,13 @@ The vast majority of these arguments default to reasonable values.
         seed_everything(self.seed)
         try:
             config = OmegaConf.load(self.config)
-            self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+            self.device = torch.device(self.device) if torch.cuda.is_available() else torch.device("cpu")
             model = self._load_model_from_config(config,self.weights)
             if self.embedding_path is not None:
                 model.embedding_manager.load(self.embedding_path)
             self.model = model.to(self.device)
+            # model.to doesn't change the cond_stage_model.device used to move the tokenizer output, so set it here
+            self.model.cond_stage_model.device = self.device
         except AttributeError:
             raise SystemExit
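Note: the requested device string is now honored instead of always picking CUDA, with a CPU fallback when CUDA is unavailable. The same fallback logic in isolation; `resolve_device` is a hypothetical helper name, the diff inlines this in `load_model`:

import torch

def resolve_device(requested: str = "cuda") -> torch.device:
    # honor the requested device, but drop to CPU when CUDA is absent;
    # only CUDA availability is checked, matching the diff above
    return torch.device(requested) if torch.cuda.is_available() else torch.device("cpu")

print(resolve_device("cuda"))  # cuda on a GPU machine, cpu otherwise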
@@ -489,7 +491,6 @@ The vast majority of these arguments default to reasonable values.
         sd = pl_sd["state_dict"]
         model = instantiate_from_config(config.model)
         m, u = model.load_state_dict(sd, strict=False)
-        model.cuda()
         model.eval()
         if self.full_precision:
             print('Using slower but more accurate full-precision math (--full_precision)')
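Note: with `model.cuda()` gone from `_load_model_from_config`, placement happens exactly once, in `load_model`, via `model.to(self.device)`. A minimal sketch of the pattern, with a toy module standing in for the diffusion model:

import torch
import torch.nn as nn

def load_model(device: torch.device) -> nn.Module:
    model = nn.Linear(4, 4)      # stand-in for instantiate_from_config(...)
    model.eval()                 # no forced model.cuda() at load time
    return model.to(device)      # single, caller-controlled placement

model = load_model(torch.device("cuda" if torch.cuda.is_available() else "cpu"))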
scripts/dream.py

@@ -60,7 +60,8 @@ def main():
         full_precision=opt.full_precision,
         config=config,
         latent_diffusion_weights=opt.laion400m, # this is solely for recreating the prompt
-        embedding_path=opt.embedding_path
+        embedding_path=opt.embedding_path,
+        device=opt.device
     )
 
     # make sure the output directory exists
@@ -282,10 +283,14 @@ def create_argv_parser():
                         type=str,
                         default="outputs/img-samples",
                         help="directory in which to place generated images and a log of prompts and seeds")
 
     parser.add_argument('--embedding_path',
                         type=str,
                         help="Path to a pre-trained embedding manager checkpoint - can only be set on command line")
+    parser.add_argument('--device',
+                        '-d',
+                        type=str,
+                        default="cuda",
+                        help="device to run stable diffusion on. defaults to cuda `torch.cuda.current_device()` if avalible")
     return parser
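Note: taken together, the new flag flows from the CLI into the `T2I` constructor and from there into every sampler call. A hypothetical end-to-end use under that assumption; the prompt and keyword values are illustrative, while `T2I`, `load_model`, and `txt2img` are the repo's names:

# hypothetical usage; assumes the repo's weights and configs are in place
from ldm.simplet2i import T2I

t2i = T2I(device='cuda')   # or 'cpu'; falls back to CPU if CUDA is absent
t2i.load_model()           # resolves self.device and moves the model once
results = t2i.txt2img('a watercolor lighthouse at dusk', steps=50)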
src/clip (new submodule)

@@ -0,0 +1 @@
+Subproject commit d50d76daa670286dd6cacf3bcd80b5e4823fc8e1

src/k-diffusion (new submodule)

@@ -0,0 +1 @@
+Subproject commit db5799068749bf3a6d5845120ed32df16b7d883b

src/taming-transformers (new submodule)

@@ -0,0 +1 @@
+Subproject commit 24268930bf1dce879235a7fddd0b2355b84d7ea6