Mirror of https://github.com/invoke-ai/InvokeAI
Merge branch 'development' into fix-high-step-count
@@ -49,9 +49,15 @@ class Upsample(nn.Module):
                                         padding=1)
 
     def forward(self, x):
+        cpu_m1_cond = True if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available() and \
+            x.size()[0] * x.size()[1] * x.size()[2] * x.size()[3] % 2**27 == 0 else False
+        if cpu_m1_cond:
+            x = x.to('cpu')  # send to cpu
         x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
         if self.with_conv:
             x = self.conv(x)
+        if cpu_m1_cond:
+            x = x.to('mps')  # return to mps
         return x
 
 
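The added branch works around a PyTorch MPS issue on Apple Silicon: operations on tensors whose total element count is an exact multiple of 2**27 misbehave, so those tensors take a round-trip through the CPU. A minimal standalone sketch of the same check, using illustrative helper names that are not part of the diff:

    import torch

    def needs_mps_workaround(x: torch.Tensor, threshold_pow: int = 27) -> bool:
        # x.numel() is the product of all dimensions, i.e. the same quantity the
        # diff computes as x.size()[0] * x.size()[1] * x.size()[2] * x.size()[3]
        mps_present = hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
        return mps_present and x.numel() % 2**threshold_pow == 0

    def upsample_nearest_2x(x: torch.Tensor) -> torch.Tensor:
        # Round-trip through the CPU only for the problematic sizes
        on_cpu = needs_mps_workaround(x)
        if on_cpu:
            x = x.to('cpu')
        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode='nearest')
        return x.to('mps') if on_cpu else x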
@@ -117,6 +123,14 @@ class ResnetBlock(nn.Module):
                                         padding=0)
 
     def forward(self, x, temb):
+        if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
+            x_size = x.size()
+            if (x_size[0] * x_size[1] * x_size[2] * x_size[3]) % 2**29 == 0:
+                self.to('cpu')
+                x = x.to('cpu')
+            else:
+                self.to('mps')
+                x = x.to('mps')
         h = self.norm1(x)
         h = silu(h)
         h = self.conv1(h)
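The ResnetBlock variant of the workaround (triggered at multiples of 2**29 here) moves the module's weights along with the tensor, since norm1/conv1 must live on the same device as their input. A hedged sketch of that pattern factored into a reusable helper; the function name is illustrative, not from the diff:

    import torch
    import torch.nn as nn

    def align_to_safe_device(module: nn.Module, x: torch.Tensor,
                             threshold_pow: int = 29) -> torch.Tensor:
        # Park both the weights and the activation on the CPU for sizes that
        # trigger the MPS issue; otherwise keep everything on MPS.
        if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
            if x.numel() % 2**threshold_pow == 0:
                module.to('cpu')
                return x.to('cpu')
            module.to('mps')
            return x.to('mps')
        return x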
@@ -245,7 +259,7 @@ class AttnBlock(nn.Module):
 
 def make_attn(in_channels, attn_type="vanilla"):
     assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown'
-    print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
+    print(f"   | Making attention of type '{attn_type}' with {in_channels} in_channels")
     if attn_type == "vanilla":
         return AttnBlock(in_channels)
     elif attn_type == "none":
@@ -521,7 +535,7 @@ class Decoder(nn.Module):
         block_in = ch*ch_mult[self.num_resolutions-1]
         curr_res = resolution // 2**(self.num_resolutions-1)
         self.z_shape = (1,z_channels,curr_res,curr_res)
-        print("Working with z of shape {} = {} dimensions.".format(
+        print("   | Working with z of shape {} = {} dimensions.".format(
             self.z_shape, np.prod(self.z_shape)))
 
         # z to block_in
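For scale, a worked example of the quantities being logged, with assumed values typical of a stable-diffusion first-stage decoder (resolution=256, num_resolutions=4, z_channels=4; these numbers are illustrative, not from the diff):

    import numpy as np

    resolution, num_resolutions, z_channels = 256, 4, 4
    curr_res = resolution // 2**(num_resolutions - 1)  # 256 // 8 = 32
    z_shape = (1, z_channels, curr_res, curr_res)      # (1, 4, 32, 32)
    np.prod(z_shape)                                   # 4096 dimensions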
@@ -66,7 +66,7 @@ def make_ddim_timesteps(
         c = num_ddpm_timesteps // num_ddim_timesteps
         if c < 1:
             c = 1
-        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
+        ddim_timesteps = (np.arange(0, num_ddim_timesteps) * c).astype(int)
     elif ddim_discr_method == 'quad':
         ddim_timesteps = (
             (
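This one-line change is the heart of the high-step-count fix: stepping through the DDPM range with stride c yields ceil(num_ddpm_timesteps / c) entries, which overshoots the requested count whenever the integer division truncates, while indexing 0..num_ddim_timesteps-1 and scaling by c yields exactly the requested count. A sketch with assumed values:

    import numpy as np

    num_ddpm_timesteps, num_ddim_timesteps = 1000, 150
    c = max(num_ddpm_timesteps // num_ddim_timesteps, 1)      # 6

    old = np.asarray(list(range(0, num_ddpm_timesteps, c)))   # 167 entries: too many
    new = (np.arange(0, num_ddim_timesteps) * c).astype(int)  # exactly 150 entries

    assert len(new) == num_ddim_timesteps  # the commented-out assert below now holds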
@@ -83,8 +83,8 @@ def make_ddim_timesteps(
 
     # assert ddim_timesteps.shape[0] == num_ddim_timesteps
     # add one to get the final alpha values right (the ones from first scale to data during sampling)
-    # steps_out = ddim_timesteps + 1
-    steps_out = ddim_timesteps
+    steps_out = ddim_timesteps + 1
+    # steps_out = ddim_timesteps
 
     if verbose:
         print(f'Selected timesteps for ddim sampler: {steps_out}')
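Re-enabling the +1 offset shifts every selected timestep by one so that, per the comment above, the final alpha values line up during sampling. A sketch of the resulting output, with assumed values:

    import numpy as np

    ddim_timesteps = (np.arange(0, 5) * 200).astype(int)  # [  0 200 400 600 800]
    steps_out = ddim_timesteps + 1                        # [  1 201 401 601 801]
    print(f'Selected timesteps for ddim sampler: {steps_out}')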
@@ -5,7 +5,7 @@ import clip
 from einops import rearrange, repeat
 from transformers import CLIPTokenizer, CLIPTextModel
 import kornia
-from ldm.dream.devices import choose_torch_device
+from ldm.invoke.devices import choose_torch_device
 
 from ldm.modules.x_transformer import (
     Encoder,