Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
Merge branch 'development' into vite-relative-paths
This commit is contained in:
commit 9980c4baf9
frontend/dist/assets/index.b06af007.js (vendored, normal file, 690 lines changed)
File diff suppressed because one or more lines are too long
@@ -72,7 +72,13 @@ export const gallerySlice = createSlice({
     },
     addImage: (state, action: PayloadAction<InvokeAI.Image>) => {
       const newImage = action.payload;
-      const { uuid, mtime } = newImage;
+      const { uuid, url, mtime } = newImage;
+
+      // Do not add duplicate images
+      if (state.images.find((i) => i.url === url && i.mtime === mtime)) {
+        return;
+      }
+
       state.images.unshift(newImage);
       state.currentImageUuid = uuid;
       state.intermediateImage = undefined;
@@ -120,8 +126,15 @@ export const gallerySlice = createSlice({
     ) => {
       const { images, areMoreImagesAvailable } = action.payload;
       if (images.length > 0) {
+        // Filter images that already exist in the gallery
+        const newImages = images.filter(
+          (newImage) =>
+            !state.images.find(
+              (i) => i.url === newImage.url && i.mtime === newImage.mtime
+            )
+        );
         state.images = state.images
-          .concat(images)
+          .concat(newImages)
           .sort((a, b) => b.mtime - a.mtime);
 
         if (!state.currentImage) {
@@ -15,7 +15,7 @@ export default function MainCFGScale() {
       label="CFG Scale"
       step={0.5}
       min={1}
-      max={200}
+      max={30}
       onChange={handleChangeCfgScale}
       value={cfgScale}
       width={inputWidth}
@@ -728,7 +728,7 @@ class Generate:
 
         seed_everything(random.randrange(0, np.iinfo(np.uint32).max))
         if self.embedding_path is not None:
-            model.embedding_manager.load(
+            self.model.embedding_manager.load(
                 self.embedding_path, self.precision == 'float32' or self.precision == 'autocast'
             )
 
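The change above is a one-name scope fix: the loaded checkpoint is held on the instance as self.model, so a bare model reference is presumably unbound (or not the loaded model) inside this method. A minimal, hypothetical sketch of the attribute path the corrected call relies on; the Fake* classes below are illustrative stand-ins, not InvokeAI code:

# Minimal, hypothetical sketch; Fake* classes are stand-ins, not InvokeAI code.
class FakeEmbeddingManager:
    def load(self, path, full_precision):
        print(f'loading embeddings from {path} (full_precision={full_precision})')

class FakeModel:
    def __init__(self):
        self.embedding_manager = FakeEmbeddingManager()

class FakeGenerate:
    def __init__(self, embedding_path, precision):
        self.model = FakeModel()              # the loaded model lives on the instance
        self.embedding_path = embedding_path
        self.precision = precision

    def load_embeddings(self):
        if self.embedding_path is not None:
            # A bare `model` would be an unresolved name here; the instance
            # attribute `self.model` is what actually holds the model.
            self.model.embedding_manager.load(
                self.embedding_path,
                self.precision == 'float32' or self.precision == 'autocast',
            )

FakeGenerate('embeddings.pt', 'autocast').load_embeddings()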
@@ -60,14 +60,18 @@ class ESRGAN():
         print(
             f'>> Real-ESRGAN Upscaling seed:{seed} : scale:{upsampler_scale}x'
         )
 
+        # REALSRGAN expects a BGR np array; make array and flip channels
+        bgr_image_array = np.array(image, dtype=np.uint8)[...,::-1]
+
         output, _ = upsampler.enhance(
-            np.array(image, dtype=np.uint8),
+            bgr_image_array,
             outscale=upsampler_scale,
             alpha_upsampler='realesrgan',
         )
 
-        res = Image.fromarray(output)
+        # Flip the channels back to RGB
+        res = Image.fromarray(output[...,::-1])
 
         if strength < 1.0:
             # Resize the image to the new image if the sizes have changed
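The [..., ::-1] indexing above is terse, so here is a small standalone numpy sketch (not part of the commit) of what it does: reversing the last axis swaps the RGB and BGR channel orders, and flipping again on the upsampler's output restores RGB before the array is handed to PIL.

import numpy as np

# A tiny 1x2 "image" with distinct R, G, B values per pixel.
rgb = np.array([[[255, 0, 0], [0, 128, 64]]], dtype=np.uint8)

# Reversing the last axis swaps the channel order: RGB -> BGR.
bgr = rgb[..., ::-1]
print(bgr[0, 0])   # -> [  0   0 255], i.e. (B, G, R)

# Flipping again restores the original ordering, which is why the hunk
# flips once before upsampler.enhance() and once on its output.
assert np.array_equal(bgr[..., ::-1], rgb)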