Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)

Commit 537ae2f901: Resolving merge conflicts for flake8
Committed by: psychedelicious
Parent: f6db9da06c
@@ -4,7 +4,6 @@ Read a checkpoint/safetensors file and write out a template .json file containing
 its metadata for use in fast model probing.
 """

-import sys
 import argparse
 import json

@@ -3,11 +3,12 @@

 import warnings

+from invokeai.app.cli_app import invoke_cli
+
 warnings.warn(
     "dream.py is being deprecated, please run invoke.py for the " "new UI/API or legacy_api.py for the old API",
     DeprecationWarning,
 )

-from invokeai.app.cli_app import invoke_cli

 invoke_cli()
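
The hunk above moves the `invoke_cli` import ahead of the deprecation warning so that all imports sit at the top of the module (flake8 E402). As a general illustration of the shim pattern this file uses, here is a minimal standalone sketch; the module and function names are placeholders, not from the commit:

import warnings

from new_package.cli import main as new_main  # placeholder for the real implementation


def main() -> None:
    # DeprecationWarning is hidden by default for end users; stacklevel=2
    # points the warning at the caller instead of at this shim.
    warnings.warn(
        "old_entry is deprecated; use new_package.cli instead",
        DeprecationWarning,
        stacklevel=2,
    )
    new_main()


if __name__ == "__main__":
    main()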
@@ -2,7 +2,7 @@
 """This script reads the "Invoke" Stable Diffusion prompt embedded in files generated by invoke.py"""

 import sys
-from PIL import Image, PngImagePlugin
+from PIL import Image

 if len(sys.argv) < 2:
     print("Usage: file2prompt.py <file1.png> <file2.png> <file3.png>...")
@@ -2,13 +2,11 @@

 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

+import os
 import logging

 logging.getLogger("xformers").addFilter(lambda record: "A matching Triton is not available" not in record.getMessage())

-import os
-import sys
-

 def main():
     # Change working directory to the repo root
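
The `addFilter` call in this hunk registers a plain callable as a logging filter: since Python 3.2, a filter may be any callable that takes the `LogRecord` and returns a falsy value to drop it. A self-contained sketch of the same technique, with an illustrative logger name and message:

import logging

logging.basicConfig(level=logging.INFO)
noisy = logging.getLogger("noisy_library")

# Return False to drop a record, True to keep it.
noisy.addFilter(lambda record: "scary but harmless warning" not in record.getMessage())

noisy.info("scary but harmless warning: details")  # suppressed
noisy.info("a message worth seeing")               # printed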
@@ -2,13 +2,11 @@

 # Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

+import os
 import logging

 logging.getLogger("xformers").addFilter(lambda record: "A matching Triton is not available" not in record.getMessage())

-import os
-import sys
-

 def main():
     # Change working directory to the repo root
@@ -1,6 +1,7 @@
 """make variations of input image"""

-import argparse, os, sys, glob
+import argparse
+import os
 import PIL
 import torch
 import numpy as np
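
Most hunks in this commit follow the same recipe: split comma-separated imports onto their own lines (flake8 E401) and drop names that are no longer referenced (F401), here `sys` and `glob`. A hypothetical flake8 configuration that enforces exactly the checks this commit fixes could look like the following; it is a sketch, not the project's actual config:

# setup.cfg (illustrative)
[flake8]
max-line-length = 120
select = E401, E402, E713, E722, E731, F401, F841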
@@ -12,7 +13,6 @@ from einops import rearrange, repeat
 from torchvision.utils import make_grid
-from torch import autocast
 from contextlib import nullcontext
 import time
 from pytorch_lightning import seed_everything

 from ldm.util import instantiate_from_config
@@ -234,7 +234,6 @@ def main():
     with torch.no_grad():
         with precision_scope(device.type):
             with model.ema_scope():
-                tic = time.time()
                 all_samples = list()
                 for n in trange(opt.n_iter, desc="Sampling"):
                     for prompts in tqdm(data, desc="data"):
@@ -279,8 +278,6 @@ def main():
                 Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f"grid-{grid_count:04}.png"))
                 grid_count += 1

-                toc = time.time()
-
         print(f"Your samples are ready and waiting for you here: \n{outpath} \n" f" \nEnjoy.")

@@ -1,4 +1,6 @@
-import argparse, os, sys, glob
+import argparse
+import glob
+import os
 from omegaconf import OmegaConf
 from PIL import Image
 from tqdm import tqdm
@@ -1,13 +1,13 @@
-import argparse, os, sys, glob
-import clip
+import argparse
+import glob
+import os
 import torch
 import torch.nn as nn
 import numpy as np
 from omegaconf import OmegaConf
 from PIL import Image
 from tqdm import tqdm, trange
 from itertools import islice
-from einops import rearrange, repeat
+from einops import rearrange
 from torchvision.utils import make_grid
 import scann
-import time
@@ -390,8 +390,8 @@ if __name__ == "__main__":
             grid = make_grid(grid, nrow=n_rows)

             # to image
-            grid = 255.0 * rearrange(grid, "c h w -> h w c").cpu().numpy()
-            Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f"grid-{grid_count:04}.png"))
+            grid_np = 255.0 * rearrange(grid, "c h w -> h w c").cpu().numpy()
+            Image.fromarray(grid_np.astype(np.uint8)).save(os.path.join(outpath, f"grid-{grid_count:04}.png"))
             grid_count += 1

     print(f"Your samples are ready and waiting for you here: \n{outpath} \nEnjoy.")
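
The rename from `grid` to `grid_np` keeps the torch tensor and its NumPy conversion under separate names instead of rebinding one variable to a different type. The conversion idiom itself is standard; a self-contained sketch with random data standing in for real samples:

import numpy as np
import torch
from einops import rearrange
from PIL import Image

grid = torch.rand(3, 64, 64)  # CHW float tensor in [0, 1]

# PIL expects HWC uint8; keeping the NumPy result under a new name means
# the tensor variable is never silently rebound to an ndarray.
grid_np = 255.0 * rearrange(grid, "c h w -> h w c").cpu().numpy()
Image.fromarray(grid_np.astype(np.uint8)).save("grid-0000.png")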
@@ -1,24 +1,24 @@
-import argparse, os, sys, datetime, glob, importlib, csv
+import argparse
+import datetime
+import glob
+import os
+import sys

 import numpy as np
 import time
 import torch

 import torchvision
 import pytorch_lightning as pl

 from packaging import version
 from omegaconf import OmegaConf
-from torch.utils.data import random_split, DataLoader, Dataset, Subset
+from torch.utils.data import DataLoader, Dataset
 from functools import partial
 from PIL import Image

 from pytorch_lightning import seed_everything
 from pytorch_lightning.trainer import Trainer
-from pytorch_lightning.callbacks import (
-    ModelCheckpoint,
-    Callback,
-    LearningRateMonitor,
-)
+from pytorch_lightning.callbacks import Callback
 from pytorch_lightning.utilities.distributed import rank_zero_only
 from pytorch_lightning.utilities import rank_zero_info
@@ -651,7 +651,7 @@ if __name__ == "__main__":
         trainer_config["accelerator"] = "auto"
         for k in nondefault_trainer_args(opt):
             trainer_config[k] = getattr(opt, k)
-        if not "gpus" in trainer_config:
+        if "gpus" not in trainer_config:
             del trainer_config["accelerator"]
             cpu = True
         else:
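
`not "gpus" in trainer_config` parses as `not ("gpus" in trainer_config)`, so it is equivalent to the replacement, but flake8's E713 prefers `not in` because the membership test then reads as a single operator. For illustration:

trainer_config = {"accelerator": "auto"}

if not "gpus" in trainer_config:  # E713: flagged, though functionally correct
    print("no gpus key")

if "gpus" not in trainer_config:  # preferred spelling of the same test
    print("no gpus key")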
@@ -803,7 +803,7 @@ if __name__ == "__main__":
         trainer_opt.detect_anomaly = False

         trainer = Trainer.from_argparse_args(trainer_opt, **trainer_kwargs)
-        trainer.logdir = logdir  ###
+        trainer.logdir = logdir

         # data
         config.data.params.train.params.data_root = opt.data_root
@@ -2,7 +2,7 @@ from ldm.modules.encoders.modules import FrozenCLIPEmbedder, BERTEmbedder
 from ldm.modules.embedding_manager import EmbeddingManager
 from ldm.invoke.globals import Globals

-import argparse, os
+import argparse
 from functools import partial

 import torch
@@ -108,7 +108,7 @@ if __name__ == "__main__":
         manager.load(manager_ckpt)

         for placeholder_string in manager.string_to_token_dict:
-            if not placeholder_string in string_to_token_dict:
+            if placeholder_string not in string_to_token_dict:
                 string_to_token_dict[placeholder_string] = manager.string_to_token_dict[placeholder_string]
                 string_to_param_dict[placeholder_string] = manager.string_to_param_dict[placeholder_string]

@@ -1,6 +1,12 @@
-import argparse, os, sys, glob, datetime, yaml
-import torch
-
+import argparse
+import datetime
+import glob
+import os
+import sys
+import time
+import yaml
+
+import torch
 import numpy as np
 from tqdm import trange

@@ -10,7 +16,9 @@ from PIL import Image
 from ldm.models.diffusion.ddim import DDIMSampler
 from ldm.util import instantiate_from_config

-rescale = lambda x: (x + 1.0) / 2.0
+
+def rescale(x: float) -> float:
+    return (x + 1.0) / 2.0


 def custom_to_pil(x):
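
flake8's E731 discourages assigning a lambda to a name: a def costs nothing at runtime but gives the function a real __name__ for tracebacks and profilers, plus room for annotations and a docstring. A quick comparison (rescale_fn is an illustrative name):

rescale = lambda x: (x + 1.0) / 2.0  # E731: shows up as "<lambda>" in tracebacks


def rescale_fn(x: float) -> float:
    """Map a value from [-1, 1] to [0, 1]."""
    return (x + 1.0) / 2.0


print(rescale.__name__)     # <lambda>
print(rescale_fn.__name__)  # rescale_fn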
@@ -45,7 +53,7 @@ def logs2pil(logs, keys=["sample"]):
             else:
                 print(f"Unknown format for key {k}. ")
                 img = None
-        except:
+        except Exception:
            img = None
         imgs[k] = img
     return imgs
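
The bare `except:` fixed here (flake8 E722) catches everything, including `KeyboardInterrupt` and `SystemExit`, so a user's Ctrl-C can be silently swallowed; `except Exception:` leaves those control-flow exceptions alone. A minimal demonstration:

try:
    1 / 0
except Exception:
    # ZeroDivisionError lands here, but KeyboardInterrupt and SystemExit
    # would still propagate -- a bare "except:" would swallow them too.
    print("recovered from an ordinary error")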
@@ -1,4 +1,5 @@
-import os, sys
+import os
+import sys
 import numpy as np
 import scann
 import argparse
@@ -1,4 +1,5 @@
-import argparse, os, sys, glob
+import argparse
+import os
 import torch
 import numpy as np
 from omegaconf import OmegaConf
@@ -7,10 +8,9 @@ from tqdm import tqdm, trange
 from itertools import islice
 from einops import rearrange
 from torchvision.utils import make_grid
 import time
 from pytorch_lightning import seed_everything
-from torch import autocast
-from contextlib import contextmanager, nullcontext
+from contextlib import nullcontext

 import k_diffusion as K
 import torch.nn as nn
@@ -251,7 +251,6 @@ def main():
     with torch.no_grad():
         with precision_scope(device.type):
             with model.ema_scope():
-                tic = time.time()
                 all_samples = list()
                 for n in trange(opt.n_iter, desc="Sampling"):
                     for prompts in tqdm(data, desc="data"):
@@ -310,8 +309,6 @@ def main():
                 Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f"grid-{grid_count:04}.png"))
                 grid_count += 1

-                toc = time.time()
-
         print(f"Your samples are ready and waiting for you here: \n{outpath} \n" f" \nEnjoy.")

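
The deleted `tic`/`toc` assignments were timing leftovers whose values were never read, which flake8 reports as F841 (local variable assigned but never used). Had the timing still been wanted, the fix would be to use the values instead of deleting them; an illustrative version:

import time

def timed_sampling() -> None:  # hypothetical wrapper, not from the commit
    tic = time.time()
    # ... sampling loop would run here ...
    toc = time.time()
    print(f"Sampling took {toc - tic:.1f}s")  # values are now used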
@@ -1,7 +1,6 @@
 #!/bin/env python

 import argparse
-import sys
 from pathlib import Path
 from invokeai.backend.model_management.model_probe import ModelProbe
