diff --git a/scripts/orig_scripts/txt2img.py b/scripts/orig_scripts/txt2img.py
index 9f01bca021..6c43e73b93 100644
--- a/scripts/orig_scripts/txt2img.py
+++ b/scripts/orig_scripts/txt2img.py
@@ -232,7 +232,12 @@ def main():
         print(f"reading prompts from {opt.from_file}")
         with open(opt.from_file, "r") as f:
             data = f.read().splitlines()
-            data = list(chunk(data, batch_size))
+            if (len(data) >= batch_size):
+                data = list(chunk(data, batch_size))
+            else:
+                while (len(data) < batch_size):
+                    data.append(data[-1])
+                data = [data]
 
     sample_path = os.path.join(outpath, "samples")
     os.makedirs(sample_path, exist_ok=True)
@@ -264,7 +269,7 @@ def main():
                         prompts = list(prompts)
                     c = model.get_learned_conditioning(prompts)
                     shape = [opt.C, opt.H // opt.f, opt.W // opt.f]
-
+
                     if not opt.klms:
                         samples_ddim, _ = sampler.sample(S=opt.ddim_steps,
                                                          conditioning=c,
@@ -284,7 +289,7 @@ def main():
                         model_wrap_cfg = CFGDenoiser(model_wrap)
                         extra_args = {'cond': c, 'uncond': uc, 'cond_scale': opt.scale}
                         samples_ddim = K.sampling.sample_lms(model_wrap_cfg, x, sigmas, extra_args=extra_args)
-
+
                     x_samples_ddim = model.decode_first_stage(samples_ddim)
                     x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
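
The first hunk changes how prompts read via `--from_file` are grouped: when the file holds fewer prompts than `batch_size`, the last prompt is repeated until a full batch is formed, rather than handing the sampler a short chunk. The other two hunks appear to only strip trailing whitespace. Below is a minimal standalone sketch of the padding logic from the first hunk; the `chunk` helper mirrors the `itertools`-based one in the original script, while `batch_prompts` and the empty-file guard are illustrative additions not present in the patch.

```python
from itertools import islice

def chunk(it, size):
    # Same behavior as the script's helper: yield successive
    # size-length tuples drawn from the iterable.
    it = iter(it)
    return iter(lambda: tuple(islice(it, size)), ())

def batch_prompts(lines, batch_size):
    """Group prompt lines into batches of batch_size.

    Mirrors the patched logic: if there are at least batch_size
    prompts, chunk them; otherwise pad by repeating the last
    prompt until exactly one full batch exists.
    """
    if not lines:
        # Guard for an empty prompt file -- an assumption added here;
        # the patch itself would raise IndexError on data[-1].
        raise ValueError("prompt file is empty")
    if len(lines) >= batch_size:
        return list(chunk(lines, batch_size))
    while len(lines) < batch_size:
        lines.append(lines[-1])
    return [lines]

# e.g. batch_prompts(["a cat"], 3) -> [["a cat", "a cat", "a cat"]]
```

Repeating the last prompt keeps the conditioning batch at the size the sampler was configured for, at the cost of generating duplicate images for the padded slots.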