mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00

Remove onnx changes from canvas img2img, inpaint, and linear image2image

This commit is contained in:
parent d2a46b4308
commit 989d3d7f3c
@@ -458,6 +458,7 @@ class Generator:
             dtype=samples.dtype,
             device=samples.device,
         )
+
         latent_image = samples[0].permute(1, 2, 0) @ v1_5_latent_rgb_factors
         latents_ubyte = (
             ((latent_image + 1) / 2)
@@ -267,18 +267,16 @@ class DiffusersModel(ModelBase):
             try:
                 # TODO: set cache_dir to /dev/null to be sure that cache not used?
                 model = self.child_types[child_type].from_pretrained(
-                    os.path.join(self.model_path, child_type.value),
-                    #subfolder=child_type.value,
+                    self.model_path,
+                    subfolder=child_type.value,
                     torch_dtype=torch_dtype,
                     variant=variant,
                     local_files_only=True,
                 )
                 break
             except Exception as e:
-                print("====ERR LOAD====")
-                print(f"{variant}: {e}")
-                import traceback
-                traceback.print_exc()
+                # print("====ERR LOAD====")
+                # print(f"{variant}: {e}")
                 pass
         else:
             raise Exception(f"Failed to load {self.base_model}:{self.model_type}:{child_type} model")

@@ -20,7 +20,6 @@ import {
   LATENTS_TO_IMAGE,
   LATENTS_TO_LATENTS,
   MAIN_MODEL_LOADER,
-  ONNX_MODEL_LOADER,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   NOISE,
@@ -63,9 +62,6 @@ export const buildCanvasImageToImageGraph = (
     ? shouldUseCpuNoise
     : initialGenerationState.shouldUseCpuNoise;
 
-  const onnx_model_type = model.model_type.includes('onnx');
-  const model_loader = onnx_model_type ? ONNX_MODEL_LOADER : MAIN_MODEL_LOADER;
-
   /**
    * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
    * full graph here as a template. Then use the parameters from app state and set friendlier node
@@ -76,18 +72,17 @@ export const buildCanvasImageToImageGraph = (
    */
 
   // copy-pasted graph from node editor, filled in with state values & friendly node ids
-  // TODO: Actually create the graph correctly for ONNX
   const graph: NonNullableGraph = {
     id: IMAGE_TO_IMAGE_GRAPH,
     nodes: {
       [POSITIVE_CONDITIONING]: {
-        type: onnx_model_type ? 'prompt_onnx' : 'compel',
+        type: 'compel',
         id: POSITIVE_CONDITIONING,
         is_intermediate: true,
         prompt: positivePrompt,
       },
       [NEGATIVE_CONDITIONING]: {
-        type: onnx_model_type ? 'prompt_onnx' : 'compel',
+        type: 'compel',
         id: NEGATIVE_CONDITIONING,
         is_intermediate: true,
         prompt: negativePrompt,
@@ -98,9 +93,9 @@ export const buildCanvasImageToImageGraph = (
         is_intermediate: true,
         use_cpu,
       },
-      [model_loader]: {
-        type: model_loader,
-        id: model_loader,
+      [MAIN_MODEL_LOADER]: {
+        type: 'main_model_loader',
+        id: MAIN_MODEL_LOADER,
         is_intermediate: true,
         model,
       },
@@ -111,7 +106,7 @@ export const buildCanvasImageToImageGraph = (
         skipped_layers: clipSkip,
       },
       [LATENTS_TO_LATENTS]: {
-        type: onnx_model_type ? 'l2l_onnx' : 'l2l',
+        type: 'l2l',
         id: LATENTS_TO_LATENTS,
         is_intermediate: true,
         cfg_scale,
@@ -120,7 +115,7 @@ export const buildCanvasImageToImageGraph = (
         strength,
       },
       [IMAGE_TO_LATENTS]: {
-        type: onnx_model_type ? 'i2l_onnx' : 'i2l',
+        type: 'i2l',
         id: IMAGE_TO_LATENTS,
         is_intermediate: true,
         // must be set manually later, bc `fit` parameter may require a resize node inserted
@@ -137,7 +132,7 @@ export const buildCanvasImageToImageGraph = (
     edges: [
       {
         source: {
-          node_id: model_loader,
+          node_id: MAIN_MODEL_LOADER,
           field: 'clip',
         },
         destination: {
@@ -197,7 +192,7 @@ export const buildCanvasImageToImageGraph = (
       },
       {
         source: {
-          node_id: model_loader,
+          node_id: MAIN_MODEL_LOADER,
           field: 'unet',
         },
         destination: {
@@ -329,10 +324,10 @@ export const buildCanvasImageToImageGraph = (
   });
 
   // add LoRA support
-  addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS, model_loader);
+  addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS);
 
   // optionally add custom VAE
-  addVAEToGraph(state, graph, model_loader);
+  addVAEToGraph(state, graph);
 
   // add dynamic prompts - also sets up core iteration and seed
   addDynamicPromptsToGraph(state, graph);

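For readers following the revert, the branching that these hunks remove from buildCanvasImageToImageGraph (and, below, from the inpaint and linear builders) followed the pattern sketched here. This is a hedged, self-contained reconstruction assembled from the deleted lines above: the constant string values, the `Model` type, and the `buildModelLoaderNodes` helper are illustrative stand-ins, not code from the repository.

```typescript
// Simplified reconstruction of the removed ONNX branching (illustration only).
// The real builders import these constants and types from the app's graph
// modules; minimal stand-ins are declared here so the sketch is self-contained.
const MAIN_MODEL_LOADER = 'main_model_loader';
const ONNX_MODEL_LOADER = 'onnx_model_loader'; // assumed value, for illustration
const POSITIVE_CONDITIONING = 'positive_conditioning'; // assumed value, for illustration

type Model = { model_type: string }; // minimal stand-in for the app's model shape

export const buildModelLoaderNodes = (model: Model, positivePrompt: string) => {
  // The deleted code picked the loader node id/type based on the model type...
  const onnx_model_type = model.model_type.includes('onnx');
  const model_loader = onnx_model_type ? ONNX_MODEL_LOADER : MAIN_MODEL_LOADER;

  return {
    [model_loader]: {
      type: model_loader,
      id: model_loader,
      is_intermediate: true,
      model,
    },
    [POSITIVE_CONDITIONING]: {
      // ...and switched the conditioning node type between ONNX and compel.
      type: onnx_model_type ? 'prompt_onnx' : 'compel',
      id: POSITIVE_CONDITIONING,
      is_intermediate: true,
      prompt: positivePrompt,
    },
  };
};
```

The commit replaces this conditional wiring with the fixed `MAIN_MODEL_LOADER` node seen in the hunks above and below.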
@@ -17,7 +17,6 @@ import {
   INPAINT_GRAPH,
   ITERATE,
   MAIN_MODEL_LOADER,
-  ONNX_MODEL_LOADER,
   NEGATIVE_CONDITIONING,
   POSITIVE_CONDITIONING,
   RANDOM_INT,
@@ -69,11 +68,6 @@ export const buildCanvasInpaintGraph = (
     shouldAutoSave,
   } = state.canvas;
 
-  const model_loader = model.model_type.includes('onnx')
-    ? ONNX_MODEL_LOADER
-    : MAIN_MODEL_LOADER;
-
-  // TODO: Actually create the graph correctly for ONNX
   const graph: NonNullableGraph = {
     id: INPAINT_GRAPH,
     nodes: {
@@ -121,9 +115,9 @@ export const buildCanvasInpaintGraph = (
         is_intermediate: true,
         prompt: negativePrompt,
       },
-      [model_loader]: {
-        type: model_loader,
-        id: model_loader,
+      [MAIN_MODEL_LOADER]: {
+        type: 'main_model_loader',
+        id: MAIN_MODEL_LOADER,
         is_intermediate: true,
         model,
       },
@@ -151,7 +145,7 @@ export const buildCanvasInpaintGraph = (
     edges: [
       {
         source: {
-          node_id: model_loader,
+          node_id: MAIN_MODEL_LOADER,
           field: 'unet',
         },
         destination: {
@@ -161,7 +155,7 @@ export const buildCanvasInpaintGraph = (
       },
       {
         source: {
-          node_id: model_loader,
+          node_id: MAIN_MODEL_LOADER,
           field: 'clip',
         },
         destination: {

@@ -19,7 +19,6 @@ import {
   LATENTS_TO_IMAGE,
   LATENTS_TO_LATENTS,
   MAIN_MODEL_LOADER,
-  ONNX_MODEL_LOADER,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   NOISE,
@@ -85,17 +84,13 @@ export const buildLinearImageToImageGraph = (
     ? shouldUseCpuNoise
     : initialGenerationState.shouldUseCpuNoise;
 
-  const onnx_model_type = model.model_type.includes('onnx');
-  const model_loader = onnx_model_type ? ONNX_MODEL_LOADER : MAIN_MODEL_LOADER;
-
   // copy-pasted graph from node editor, filled in with state values & friendly node ids
-  // TODO: Actually create the graph correctly for ONNX
   const graph: NonNullableGraph = {
     id: IMAGE_TO_IMAGE_GRAPH,
     nodes: {
-      [model_loader]: {
-        type: model_loader,
-        id: model_loader,
+      [MAIN_MODEL_LOADER]: {
+        type: 'main_model_loader',
+        id: MAIN_MODEL_LOADER,
         model,
       },
       [CLIP_SKIP]: {
@@ -104,12 +99,12 @@ export const buildLinearImageToImageGraph = (
         skipped_layers: clipSkip,
       },
       [POSITIVE_CONDITIONING]: {
-        type: onnx_model_type ? 'prompt_onnx' : 'compel',
+        type: 'compel',
         id: POSITIVE_CONDITIONING,
         prompt: positivePrompt,
       },
       [NEGATIVE_CONDITIONING]: {
-        type: onnx_model_type ? 'prompt_onnx' : 'compel',
+        type: 'compel',
         id: NEGATIVE_CONDITIONING,
         prompt: negativePrompt,
       },
@@ -119,12 +114,12 @@ export const buildLinearImageToImageGraph = (
         use_cpu,
       },
       [LATENTS_TO_IMAGE]: {
-        type: onnx_model_type ? 'l2i_onnx' : 'l2i',
+        type: 'l2i',
         id: LATENTS_TO_IMAGE,
         fp32: vaePrecision === 'fp32' ? true : false,
       },
       [LATENTS_TO_LATENTS]: {
-        type: onnx_model_type ? 'l2l_onnx' : 'l2l',
+        type: 'l2l',
         id: LATENTS_TO_LATENTS,
         cfg_scale,
         scheduler,
@@ -132,7 +127,7 @@ export const buildLinearImageToImageGraph = (
         strength,
       },
       [IMAGE_TO_LATENTS]: {
-        type: onnx_model_type ? 'i2l_onnx' : 'i2l',
+        type: 'i2l',
         id: IMAGE_TO_LATENTS,
         // must be set manually later, bc `fit` parameter may require a resize node inserted
         // image: {
@@ -144,7 +139,7 @@ export const buildLinearImageToImageGraph = (
     edges: [
       {
         source: {
-          node_id: model_loader,
+          node_id: MAIN_MODEL_LOADER,
           field: 'unet',
         },
         destination: {
@@ -154,7 +149,7 @@ export const buildLinearImageToImageGraph = (
       },
       {
         source: {
-          node_id: model_loader,
+          node_id: MAIN_MODEL_LOADER,
           field: 'clip',
         },
         destination: {
@@ -339,10 +334,10 @@ export const buildLinearImageToImageGraph = (
   });
 
   // add LoRA support
-  addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS, model_loader);
+  addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS);
 
   // optionally add custom VAE
-  addVAEToGraph(state, graph, model_loader);
+  addVAEToGraph(state, graph);
 
   // add dynamic prompts - also sets up core iteration and seed
   addDynamicPromptsToGraph(state, graph);
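After the revert, all three builders key the loader node on `MAIN_MODEL_LOADER` with the fixed `'main_model_loader'` type, wire its `unet` and `clip` outputs through ordinary edges, and call the LoRA and VAE helpers without a loader argument. The sketch below shows that shape; the constant string values and the edge destinations are assumptions for illustration (the destinations are truncated in the hunks above), not code from the repository.

```typescript
// Minimal sketch of the post-revert wiring. Constant values and edge
// destinations are assumptions for illustration; the real builders import the
// constants and connect to their own denoise / CLIP-skip nodes.
const MAIN_MODEL_LOADER = 'main_model_loader';
const LATENTS_TO_LATENTS = 'latents_to_latents'; // assumed value
const CLIP_SKIP = 'clip_skip'; // assumed value

export const loaderNode = {
  type: 'main_model_loader', // fixed type; no ONNX branch anymore
  id: MAIN_MODEL_LOADER,
  is_intermediate: true,
};

export const loaderEdges = [
  {
    source: { node_id: MAIN_MODEL_LOADER, field: 'unet' },
    destination: { node_id: LATENTS_TO_LATENTS, field: 'unet' }, // assumed destination
  },
  {
    source: { node_id: MAIN_MODEL_LOADER, field: 'clip' },
    destination: { node_id: CLIP_SKIP, field: 'clip' }, // assumed destination
  },
];

// Helper calls after the revert take no loader argument:
//   addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS);
//   addVAEToGraph(state, graph);
```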