rename all modules from ldm.dream to ldm.invoke

- scripts and documentation updated to match
- ran preflight checks on both web and CLI; both seem to be working
Lincoln Stein 2022-10-08 11:37:23 -04:00
parent 4a7f5c7469
commit 2b1aaf4ee7
47 changed files with 108 additions and 107 deletions

View File

@@ -12,9 +12,9 @@ from PIL import Image
from uuid import uuid4
from threading import Event
-from ldm.dream.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
-from ldm.dream.pngwriter import PngWriter, retrieve_metadata
-from ldm.dream.conditioning import split_weighted_subprompts
+from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
+from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
+from ldm.invoke.conditioning import split_weighted_subprompts
from backend.modules.parameters import parameters_to_command
@@ -157,7 +157,7 @@ class InvokeAIWebServer:
self.init_image_path = os.path.join(self.result_path, 'init-images/')
self.mask_image_path = os.path.join(self.result_path, 'mask-images/')
# txt log
-self.log_path = os.path.join(self.result_path, 'dream_log.txt')
+self.log_path = os.path.join(self.result_path, 'invoke_log.txt')
# make all output paths
[
os.makedirs(path, exist_ok=True)

View File

@@ -1,6 +1,6 @@
import argparse
import os
-from ldm.dream.args import PRECISION_CHOICES
+from ldm.invoke.args import PRECISION_CHOICES
def create_cmd_parser():

View File

@@ -15,7 +15,7 @@ SAMPLER_CHOICES = [
def parameters_to_command(params):
"""
-Converts dict of parameters into a `dream.py` REPL command.
+Converts dict of parameters into a `invoke.py` REPL command.
"""
switches = list()
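A hedged usage sketch for the renamed helper above; the parameter keys and the exact switch string it returns are assumptions rather than text from this commit (only --steps/-s and --seed/-S are confirmed switches elsewhere in the diff):

    from backend.modules.parameters import parameters_to_command

    # Hypothetical parameter dict; key names are illustrative.
    params = {'prompt': 'a painting of a cat', 'steps': 50, 'seed': 42}
    command = parameters_to_command(params)
    print(command)  # expected shape: a prompt followed by switches, e.g. "... -s 50 -S 42"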

View File

@@ -30,10 +30,10 @@ from send2trash import send2trash
from ldm.generate import Generate
-from ldm.dream.restoration import Restoration
-from ldm.dream.pngwriter import PngWriter, retrieve_metadata
-from ldm.dream.args import APP_ID, APP_VERSION, calculate_init_img_hash
-from ldm.dream.conditioning import split_weighted_subprompts
+from ldm.invoke.restoration import Restoration
+from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
+from ldm.invoke.args import APP_ID, APP_VERSION, calculate_init_img_hash
+from ldm.invoke.conditioning import split_weighted_subprompts
from modules.parameters import parameters_to_command
@@ -125,7 +125,7 @@ class CanceledException(Exception):
try:
gfpgan, codeformer, esrgan = None, None, None
-from ldm.dream.restoration.base import Restoration
+from ldm.invoke.restoration.base import Restoration
restoration = Restoration()
gfpgan, codeformer = restoration.load_face_restore_models()
@@ -164,7 +164,7 @@ init_image_path = os.path.join(result_path, "init-images/")
mask_image_path = os.path.join(result_path, "mask-images/")
# txt log
-log_path = os.path.join(result_path, "dream_log.txt")
+log_path = os.path.join(result_path, "invoke_log.txt")
# make all output paths
[

View File

@@ -1,4 +0,0 @@
-'''
-Initialization file for the ldm.dream.generator package
-'''
-from .base import Generator

View File

@@ -1,4 +0,0 @@
-'''
-Initialization file for the ldm.dream.restoration package
-'''
-from .base import Restoration

View File

@@ -19,7 +19,7 @@ import cv2
import skimage
from omegaconf import OmegaConf
-from ldm.dream.generator.base import downsampling
+from ldm.invoke.generator.base import downsampling
from PIL import Image, ImageOps
from torch import nn
from pytorch_lightning import seed_everything, logging
@@ -28,13 +28,11 @@ from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.ksampler import KSampler
-from ldm.dream.pngwriter import PngWriter
-from ldm.dream.args import metadata_from_png
-from ldm.dream.image_util import InitImageResizer
-from ldm.dream.devices import choose_torch_device, choose_precision
-from ldm.dream.conditioning import get_uc_and_c
+from ldm.invoke.pngwriter import PngWriter
+from ldm.invoke.args import metadata_from_png
+from ldm.invoke.image_util import InitImageResizer
+from ldm.invoke.devices import choose_torch_device, choose_precision
+from ldm.invoke.conditioning import get_uc_and_c
def fix_func(orig):
if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
@@ -297,9 +295,9 @@ class Generate:
def process_image(image,seed):
image.save(f'images/{seed}.png')
-The callback used by the prompt2png() can be found in ldm/dream_util.py. It contains code
-to create the requested output directory, select a unique informative name for each image, and
-write the prompt into the PNG metadata.
+The code used to save images to a directory can be found in ldm/invoke/pngwriter.py.
+It contains code to create the requested output directory, select a unique informative
+name for each image, and write the prompt into the PNG metadata.
"""
# TODO: convert this into a getattr() loop
steps = steps or self.steps
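A hedged sketch of the callback pattern the docstring above describes; constructing Generate() with defaults and the prompt2image()/image_callback keyword names are assumptions, not text from this commit:

    from ldm.generate import Generate

    def process_image(image, seed):
        # save each result as it is produced, named by its seed
        image.save(f'images/{seed}.png')

    gr = Generate()  # model, config and weights defaults assumed
    gr.prompt2image(prompt='an astronaut riding a horse',
                    image_callback=process_image)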
@@ -524,7 +522,7 @@ class Generate:
)
elif tool == 'outcrop':
-from ldm.dream.restoration.outcrop import Outcrop
+from ldm.invoke.restoration.outcrop import Outcrop
extend_instructions = {}
for direction,pixels in _pairwise(opt.outcrop):
extend_instructions[direction]=int(pixels)
@@ -561,7 +559,7 @@ class Generate:
image_callback = callback,
)
elif tool == 'outpaint':
-from ldm.dream.restoration.outpaint import Outpaint
+from ldm.invoke.restoration.outpaint import Outpaint
restorer = Outpaint(image,self)
return restorer.process(
opt,
@@ -620,39 +618,39 @@ class Generate:
def _make_base(self):
if not self.generators.get('base'):
-from ldm.dream.generator import Generator
+from ldm.invoke.generator import Generator
self.generators['base'] = Generator(self.model, self.precision)
return self.generators['base']
def _make_img2img(self):
if not self.generators.get('img2img'):
-from ldm.dream.generator.img2img import Img2Img
+from ldm.invoke.generator.img2img import Img2Img
self.generators['img2img'] = Img2Img(self.model, self.precision)
return self.generators['img2img']
def _make_embiggen(self):
if not self.generators.get('embiggen'):
-from ldm.dream.generator.embiggen import Embiggen
+from ldm.invoke.generator.embiggen import Embiggen
self.generators['embiggen'] = Embiggen(self.model, self.precision)
return self.generators['embiggen']
def _make_txt2img(self):
if not self.generators.get('txt2img'):
-from ldm.dream.generator.txt2img import Txt2Img
+from ldm.invoke.generator.txt2img import Txt2Img
self.generators['txt2img'] = Txt2Img(self.model, self.precision)
self.generators['txt2img'].free_gpu_mem = self.free_gpu_mem
return self.generators['txt2img']
def _make_txt2img2img(self):
if not self.generators.get('txt2img2'):
-from ldm.dream.generator.txt2img2img import Txt2Img2Img
+from ldm.invoke.generator.txt2img2img import Txt2Img2Img
self.generators['txt2img2'] = Txt2Img2Img(self.model, self.precision)
self.generators['txt2img2'].free_gpu_mem = self.free_gpu_mem
return self.generators['txt2img2']
def _make_inpaint(self):
if not self.generators.get('inpaint'):
-from ldm.dream.generator.inpaint import Inpaint
+from ldm.invoke.generator.inpaint import Inpaint
self.generators['inpaint'] = Inpaint(self.model, self.precision)
return self.generators['inpaint']
@@ -783,7 +781,7 @@ class Generate:
print(msg)
-# Be warned: config is the path to the model config file, not the dream conf file!
+# Be warned: config is the path to the model config file, not the invoke conf file!
# Also note that we can get config and weights from self, so why do we need to
# pass them as args?
def _load_model_from_config(self, config, weights):

View File

@@ -90,8 +90,8 @@ import re
import copy
import base64
import functools
-import ldm.dream.pngwriter
-from ldm.dream.conditioning import split_weighted_subprompts
+import ldm.invoke.pngwriter
+from ldm.invoke.conditioning import split_weighted_subprompts
SAMPLER_CHOICES = [
'ddim',
@@ -811,7 +811,7 @@ def metadata_from_png(png_file_path) -> Args:
an Args object containing the image metadata. Note that this
returns a single Args object, not multiple.
'''
-meta = ldm.dream.pngwriter.retrieve_metadata(png_file_path)
+meta = ldm.invoke.pngwriter.retrieve_metadata(png_file_path)
if 'sd-metadata' in meta and len(meta['sd-metadata'])>0 :
return metadata_loads(meta)[0]
else:
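A minimal sketch of calling the renamed helper, assuming an invoke.py-generated PNG at the illustrative path below; the attributes read from the returned Args object are assumptions:

    from ldm.invoke.args import metadata_from_png

    opt = metadata_from_png('outputs/img-samples/000001.1234567890.png')  # path illustrative
    print(opt.prompt, opt.seed)  # attribute names assumed, not shown in this hunk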

View File

@@ -0,0 +1,4 @@
+'''
+Initialization file for the ldm.invoke.generator package
+'''
+from .base import Generator

View File

@@ -1,5 +1,5 @@
'''
-Base class for ldm.dream.generator.*
+Base class for ldm.invoke.generator.*
including img2img, txt2img, and inpaint
'''
import torch
@@ -9,7 +9,7 @@ from tqdm import tqdm, trange
from PIL import Image
from einops import rearrange, repeat
from pytorch_lightning import seed_everything
-from ldm.dream.devices import choose_autocast
+from ldm.invoke.devices import choose_autocast
from ldm.util import rand_perlin_2d
downsampling = 8

View File

@@ -1,15 +1,15 @@
'''
-ldm.dream.generator.embiggen descends from ldm.dream.generator
-and generates with ldm.dream.generator.img2img
+ldm.invoke.generator.embiggen descends from ldm.invoke.generator
+and generates with ldm.invoke.generator.img2img
'''
import torch
import numpy as np
from tqdm import trange
from PIL import Image
-from ldm.dream.generator.base import Generator
-from ldm.dream.generator.img2img import Img2Img
-from ldm.dream.devices import choose_autocast
+from ldm.invoke.generator.base import Generator
+from ldm.invoke.generator.img2img import Img2Img
+from ldm.invoke.devices import choose_autocast
from ldm.models.diffusion.ddim import DDIMSampler
class Embiggen(Generator):
@@ -107,7 +107,7 @@ class Embiggen(Generator):
initsuperwidth = round(initsuperwidth*embiggen[0])
initsuperheight = round(initsuperheight*embiggen[0])
if embiggen[1] > 0: # No point in ESRGAN upscaling if strength is set zero
-from ldm.dream.restoration.realesrgan import ESRGAN
+from ldm.invoke.restoration.realesrgan import ESRGAN
esrgan = ESRGAN()
print(
f'>> ESRGAN upscaling init image prior to cutting with Embiggen with strength {embiggen[1]}')

View File

@@ -1,11 +1,11 @@
'''
-ldm.dream.generator.img2img descends from ldm.dream.generator
+ldm.invoke.generator.img2img descends from ldm.invoke.generator
'''
import torch
import numpy as np
-from ldm.dream.devices import choose_autocast
-from ldm.dream.generator.base import Generator
+from ldm.invoke.devices import choose_autocast
+from ldm.invoke.generator.base import Generator
from ldm.models.diffusion.ddim import DDIMSampler
class Img2Img(Generator):

View File

@@ -1,12 +1,12 @@
'''
-ldm.dream.generator.inpaint descends from ldm.dream.generator
+ldm.invoke.generator.inpaint descends from ldm.invoke.generator
'''
import torch
import numpy as np
from einops import rearrange, repeat
-from ldm.dream.devices import choose_autocast
-from ldm.dream.generator.img2img import Img2Img
+from ldm.invoke.devices import choose_autocast
+from ldm.invoke.generator.img2img import Img2Img
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.ksampler import KSampler

View File

@@ -1,10 +1,10 @@
'''
-ldm.dream.generator.txt2img inherits from ldm.dream.generator
+ldm.invoke.generator.txt2img inherits from ldm.invoke.generator
'''
import torch
import numpy as np
-from ldm.dream.generator.base import Generator
+from ldm.invoke.generator.base import Generator
class Txt2Img(Generator):
def __init__(self, model, precision):

View File

@@ -1,11 +1,11 @@
'''
-ldm.dream.generator.txt2img inherits from ldm.dream.generator
+ldm.invoke.generator.txt2img inherits from ldm.invoke.generator
'''
import torch
import numpy as np
import math
-from ldm.dream.generator.base import Generator
+from ldm.invoke.generator.base import Generator
from ldm.models.diffusion.ddim import DDIMSampler

View File

@@ -1,17 +1,17 @@
"""
-Readline helper functions for dream.py (linux and mac only).
+Readline helper functions for invoke.py.
You may import the global singleton `completer` to get access to the
completer object itself. This is useful when you want to autocomplete
seeds:
-from ldm.dream.readline import completer
+from ldm.invoke.readline import completer
completer.add_seed(18247566)
completer.add_seed(9281839)
"""
import os
import re
import atexit
-from ldm.dream.args import Args
+from ldm.invoke.args import Args
# ---------------readline utilities---------------------
try:
@@ -20,7 +20,7 @@ try:
except (ImportError,ModuleNotFoundError):
readline_available = False
-IMG_EXTENSIONS = ('.png','.jpg','.jpeg')
+IMG_EXTENSIONS = ('.png','.jpg','.jpeg','.PNG','.JPG','.JPEG','.gif','.GIF')
COMMANDS = (
'--steps','-s',
'--seed','-S',
@@ -74,7 +74,7 @@ class Completer(object):
def complete(self, text, state):
'''
-Completes dream command line.
+Completes invoke command line.
BUG: it doesn't correctly complete files that have spaces in the name.
'''
buffer = readline.get_line_buffer()
@@ -287,7 +287,7 @@ def get_completer(opt:Args)->Completer:
readline.parse_and_bind('set skip-completed-text on')
readline.parse_and_bind('set show-all-if-ambiguous on')
-histfile = os.path.join(os.path.expanduser(opt.outdir), '.dream_history')
+histfile = os.path.join(os.path.expanduser(opt.outdir), '.invoke_history')
try:
readline.read_history_file(histfile)
readline.set_history_length(1000)
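A hedged sketch tying the renamed pieces of this module together; building the Args object this way is an assumption, while get_completer() and add_seed() appear in the hunks above:

    from ldm.invoke.args import Args
    from ldm.invoke.readline import get_completer

    opt = Args()
    opt.parse_args()                 # parsing from sys.argv this way is an assumption
    completer = get_completer(opt)   # history now lives at <outdir>/.invoke_history
    completer.add_seed(18247566)     # seeds then appear in seed autocompletion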

View File

@@ -0,0 +1,4 @@
+'''
+Initialization file for the ldm.invoke.restoration package
+'''
+from .base import Restoration

View File

@@ -23,16 +23,16 @@ class Restoration():
# Face Restore Models
def load_gfpgan(self, gfpgan_dir, gfpgan_model_path):
-from ldm.dream.restoration.gfpgan import GFPGAN
+from ldm.invoke.restoration.gfpgan import GFPGAN
return GFPGAN(gfpgan_dir, gfpgan_model_path)
def load_codeformer(self):
-from ldm.dream.restoration.codeformer import CodeFormerRestoration
+from ldm.invoke.restoration.codeformer import CodeFormerRestoration
return CodeFormerRestoration()
# Upscale Models
def load_esrgan(self, esrgan_bg_tile=400):
-from ldm.dream.restoration.realesrgan import ESRGAN
+from ldm.invoke.restoration.realesrgan import ESRGAN
esrgan = ESRGAN(esrgan_bg_tile)
print('>> ESRGAN Initialized')
return esrgan;
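The loaders above are exercised elsewhere in this commit (backend/server.py and scripts/invoke.py); a condensed sketch of that usage, with the tile size shown only to make the default explicit:

    from ldm.invoke.restoration import Restoration

    restoration = Restoration()
    gfpgan, codeformer = restoration.load_face_restore_models()
    esrgan = restoration.load_esrgan(esrgan_bg_tile=400)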

View File

@@ -8,7 +8,7 @@ pretrained_model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v
class CodeFormerRestoration():
def __init__(self,
-codeformer_dir='ldm/dream/restoration/codeformer',
+codeformer_dir='ldm/invoke/restoration/codeformer',
codeformer_model_path='weights/codeformer.pth') -> None:
self.model_path = os.path.join(codeformer_dir, codeformer_model_path)
self.codeformer_model_exists = os.path.isfile(self.model_path)
@@ -27,7 +27,7 @@ class CodeFormerRestoration():
from basicsr.utils.download_util import load_file_from_url
from basicsr.utils import img2tensor, tensor2img
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
-from ldm.dream.restoration.codeformer_arch import CodeFormer
+from ldm.invoke.restoration.codeformer_arch import CodeFormer
from torchvision.transforms.functional import normalize
from PIL import Image
@@ -35,7 +35,7 @@ class CodeFormerRestoration():
cf = cf_class(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, connect_list=['32', '64', '128', '256']).to(device)
-checkpoint_path = load_file_from_url(url=pretrained_model_url, model_dir=os.path.abspath('ldm/dream/restoration/codeformer/weights'), progress=True)
+checkpoint_path = load_file_from_url(url=pretrained_model_url, model_dir=os.path.abspath('ldm/invoke/restoration/codeformer/weights'), progress=True)
checkpoint = torch.load(checkpoint_path)['params_ema']
cf.load_state_dict(checkpoint)
cf.eval()

View File

@@ -5,7 +5,7 @@ from torch import nn, Tensor
import torch.nn.functional as F
from typing import Optional, List
-from ldm.dream.restoration.vqgan_arch import *
+from ldm.invoke.restoration.vqgan_arch import *
from basicsr.utils import get_root_logger
from basicsr.utils.registry import ARCH_REGISTRY

View File

@@ -4,9 +4,9 @@ import copy
import base64
import mimetypes
import os
-from ldm.dream.args import Args, metadata_dumps
+from ldm.invoke.args import Args, metadata_dumps
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
-from ldm.dream.pngwriter import PngWriter
+from ldm.invoke.pngwriter import PngWriter
from threading import Event
def build_opt(post_data, seed, gfpgan_model_exists):

View File

@@ -4,7 +4,7 @@ import torch
import numpy as np
from tqdm import tqdm
from functools import partial
-from ldm.dream.devices import choose_torch_device
+from ldm.invoke.devices import choose_torch_device
from ldm.models.diffusion.sampler import Sampler
from ldm.modules.diffusionmodules.util import noise_like

View File

@@ -2,7 +2,7 @@
import k_diffusion as K
import torch
import torch.nn as nn
-from ldm.dream.devices import choose_torch_device
+from ldm.invoke.devices import choose_torch_device
from ldm.models.diffusion.sampler import Sampler
from ldm.util import rand_perlin_2d
from ldm.modules.diffusionmodules.util import (

View File

@@ -4,7 +4,7 @@ import torch
import numpy as np
from tqdm import tqdm
from functools import partial
-from ldm.dream.devices import choose_torch_device
+from ldm.invoke.devices import choose_torch_device
from ldm.models.diffusion.sampler import Sampler
from ldm.modules.diffusionmodules.util import noise_like

View File

@@ -8,7 +8,7 @@ import torch
import numpy as np
from tqdm import tqdm
from functools import partial
-from ldm.dream.devices import choose_torch_device
+from ldm.invoke.devices import choose_torch_device
from ldm.modules.diffusionmodules.util import (
make_ddim_sampling_parameters,

View File

@@ -5,7 +5,7 @@ import clip
from einops import rearrange, repeat
from transformers import CLIPTokenizer, CLIPTextModel
import kornia
-from ldm.dream.devices import choose_torch_device
+from ldm.invoke.devices import choose_torch_device
from ldm.modules.x_transformer import (
Encoder,

View File

@@ -14,7 +14,7 @@ from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import ismap
import time
from omegaconf import OmegaConf
-from ldm.dream.devices import choose_torch_device
+from ldm.invoke.devices import choose_torch_device
def download_models(mode):

View File

@@ -1,12 +1,12 @@
#!/usr/bin/env python3
-'''This script reads the "Dream" Stable Diffusion prompt embedded in files generated by dream.py'''
+'''This script reads the "Invoke" Stable Diffusion prompt embedded in files generated by invoke.py'''
import sys
from PIL import Image,PngImagePlugin
if len(sys.argv) < 2:
print("Usage: file2prompt.py <file1.png> <file2.png> <file3.png>...")
-print("This script opens up the indicated dream.py-generated PNG file(s) and prints out the prompt used to generate them.")
+print("This script opens up the indicated invoke.py-generated PNG file(s) and prints out the prompt used to generate them.")
exit(-1)
filenames = sys.argv[1:]

View File

@@ -10,11 +10,11 @@ import warnings
import time
import traceback
sys.path.append('.') # corrects a weird problem on Macs
-from ldm.dream.readline import get_completer
-from ldm.dream.args import Args, metadata_dumps, metadata_from_png, dream_cmd_from_png
-from ldm.dream.pngwriter import PngWriter, retrieve_metadata, write_metadata
-from ldm.dream.image_util import make_grid
-from ldm.dream.log import write_log
+from ldm.invoke.readline import get_completer
+from ldm.invoke.args import Args, metadata_dumps, metadata_from_png, dream_cmd_from_png
+from ldm.invoke.pngwriter import PngWriter, retrieve_metadata, write_metadata
+from ldm.invoke.image_util import make_grid
+from ldm.invoke.log import write_log
from omegaconf import OmegaConf
from backend.invoke_ai_web_server import InvokeAIWebServer
@@ -45,7 +45,7 @@ def main():
try:
gfpgan, codeformer, esrgan = None, None, None
if opt.restore or opt.esrgan:
-from ldm.dream.restoration import Restoration
+from ldm.invoke.restoration import Restoration
restoration = Restoration()
if opt.restore:
gfpgan, codeformer = restoration.load_face_restore_models(opt.gfpgan_dir, opt.gfpgan_model_path)
@@ -256,7 +256,7 @@ def main_loop(gen, opt, infile):
if opt.with_variations is not None:
opt.with_variations = split_variations(opt.with_variations)
-if opt.prompt_as_dir:
+if opt.prompt_as_dir and operation == 'generate':
# sanitize the prompt to a valid folder name
subdir = path_filter.sub('_', opt.prompt)[:name_max].rstrip(' .')
@@ -275,6 +275,12 @@ def main_loop(gen, opt, infile):
os.makedirs(opt.outdir)
current_outdir = opt.outdir
+# write out the history at this point
+if operation == 'postprocess':
+completer.add_history(f'!fix {command}')
+else:
+completer.add_history(command)
# Here is where the images are actually generated!
last_results = []
try:
@@ -380,13 +386,9 @@
continue
print('Outputs:')
-log_path = os.path.join(current_outdir, 'dream_log')
+log_path = os.path.join(current_outdir, 'invoke_log')
output_cntr = write_log(results, log_path ,('txt', 'md'), output_cntr)
print()
-if operation == 'postprocess':
-completer.add_history(f'!fix {command}')
-else:
-completer.add_history(command)
print('goodbye!')
@@ -422,6 +424,7 @@ def do_postprocess (gen, opt, callback):
opt = opt,
)
except OSError:
+print(traceback.format_exc(), file=sys.stderr)
print(f'** {file_path}: file could not be read')
return
except (KeyError, AttributeError):
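The line added above follows a simple error-reporting pattern: dump the full traceback to stderr for debugging, then print a short user-facing message. A standalone sketch of the same idea (the file path is a stand-in):

    import sys
    import traceback

    file_path = 'outputs/missing.png'  # stand-in path
    try:
        with open(file_path, 'rb') as f:
            f.read()
    except OSError:
        print(traceback.format_exc(), file=sys.stderr)
        print(f'** {file_path}: file could not be read')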

View File

@@ -18,7 +18,7 @@ from pytorch_lightning import seed_everything
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
-from ldm.dream.devices import choose_torch_device
+from ldm.invoke.devices import choose_torch_device
def chunk(it, size):

View File

@@ -6,7 +6,7 @@ import numpy as np
import torch
from main import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.dream.devices import choose_torch_device
+from ldm.invoke.devices import choose_torch_device
def make_batch(image, mask, device):
image = np.array(Image.open(image).convert("RGB"))

View File

@@ -18,7 +18,7 @@ import torch.nn as nn
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
-from ldm.dream.devices import choose_torch_device
+from ldm.invoke.devices import choose_torch_device
def chunk(it, size):
it = iter(it)

View File

@@ -97,7 +97,7 @@ print('preloading CodeFormer model file...',end='')
try:
import urllib.request
model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'
-model_dest = 'ldm/dream/restoration/codeformer/weights/codeformer.pth'
+model_dest = 'ldm/invoke/restoration/codeformer/weights/codeformer.pth'
if not os.path.exists(model_dest):
print('Downloading codeformer model file...')
os.makedirs(os.path.dirname(model_dest), exist_ok=True)
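A hedged sketch of the preload step this hunk touches; the URL and destination path come from the diff, while the use of urlretrieve for the actual download is an assumption about the rest of the script:

    import os
    import urllib.request

    model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'
    model_dest = 'ldm/invoke/restoration/codeformer/weights/codeformer.pth'
    if not os.path.exists(model_dest):
        os.makedirs(os.path.dirname(model_dest), exist_ok=True)
        urllib.request.urlretrieve(model_url, model_dest)  # download call is assumed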

View File

@@ -2,11 +2,11 @@
import sys
import json
-from ldm.dream.pngwriter import retrieve_metadata
+from ldm.invoke.pngwriter import retrieve_metadata
if len(sys.argv) < 2:
print("Usage: file2prompt.py <file1.png> <file2.png> <file3.png>...")
-print("This script opens up the indicated dream.py-generated PNG file(s) and prints out their metadata.")
+print("This script opens up the indicated invoke.py-generated PNG file(s) and prints out their metadata.")
exit(-1)
filenames = sys.argv[1:]

View File

@@ -10,7 +10,7 @@ from flask_cors import CORS
from flask_socketio import SocketIO
from omegaconf import OmegaConf
from dependency_injector.wiring import inject, Provide
-from ldm.dream.args import Args
+from ldm.invoke.args import Args
from server import views
from server.containers import Container
from server.services import GeneratorService, SignalService

View File

@@ -12,12 +12,12 @@ import shlex
from threading import Thread
import time
from flask_socketio import SocketIO, join_room, leave_room
-from ldm.dream.args import Args
-from ldm.dream.generator import embiggen
+from ldm.invoke.args import Args
+from ldm.invoke.generator import embiggen
from PIL import Image
-from ldm.dream.pngwriter import PngWriter
-from ldm.dream.server import CanceledException
+from ldm.invoke.pngwriter import PngWriter
+from ldm.invoke.server import CanceledException
from ldm.generate import Generate
from server.models import DreamResult, JobRequest, PaginatedItems, ProgressType, Signal