Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
Remove onnx changes from canvas img2img, inpaint, and linear image2image
This commit is contained in:
parent d2a46b4308
commit 989d3d7f3c
@@ -458,6 +458,7 @@ class Generator:
             dtype=samples.dtype,
+            device=samples.device,
         )

         latent_image = samples[0].permute(1, 2, 0) @ v1_5_latent_rgb_factors
         latents_ubyte = (
             ((latent_image + 1) / 2)
@@ -267,18 +267,16 @@ class DiffusersModel(ModelBase):
             try:
                 # TODO: set cache_dir to /dev/null to be sure that cache not used?
                 model = self.child_types[child_type].from_pretrained(
-                    os.path.join(self.model_path, child_type.value),
-                    #subfolder=child_type.value,
+                    self.model_path,
+                    subfolder=child_type.value,
                     torch_dtype=torch_dtype,
                     variant=variant,
                     local_files_only=True,
                 )
                 break
             except Exception as e:
-                print("====ERR LOAD====")
-                print(f"{variant}: {e}")
-                import traceback
-                traceback.print_exc()
+                # print("====ERR LOAD====")
+                # print(f"{variant}: {e}")
                 pass
         else:
             raise Exception(f"Failed to load {self.base_model}:{self.model_type}:{child_type} model")
@@ -20,7 +20,6 @@ import {
   LATENTS_TO_IMAGE,
   LATENTS_TO_LATENTS,
   MAIN_MODEL_LOADER,
-  ONNX_MODEL_LOADER,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   NOISE,
@@ -63,9 +62,6 @@ export const buildCanvasImageToImageGraph = (
     ? shouldUseCpuNoise
     : initialGenerationState.shouldUseCpuNoise;

-  const onnx_model_type = model.model_type.includes('onnx');
-  const model_loader = onnx_model_type ? ONNX_MODEL_LOADER : MAIN_MODEL_LOADER;
-
   /**
    * The easiest way to build linear graphs is to do it in the node editor, then copy and paste the
    * full graph here as a template. Then use the parameters from app state and set friendlier node
@@ -76,18 +72,17 @@ export const buildCanvasImageToImageGraph = (
    */

   // copy-pasted graph from node editor, filled in with state values & friendly node ids
-  // TODO: Actually create the graph correctly for ONNX
   const graph: NonNullableGraph = {
     id: IMAGE_TO_IMAGE_GRAPH,
     nodes: {
       [POSITIVE_CONDITIONING]: {
-        type: onnx_model_type ? 'prompt_onnx' : 'compel',
+        type: 'compel',
         id: POSITIVE_CONDITIONING,
         is_intermediate: true,
         prompt: positivePrompt,
       },
       [NEGATIVE_CONDITIONING]: {
-        type: onnx_model_type ? 'prompt_onnx' : 'compel',
+        type: 'compel',
         id: NEGATIVE_CONDITIONING,
         is_intermediate: true,
         prompt: negativePrompt,
@@ -98,9 +93,9 @@ export const buildCanvasImageToImageGraph = (
         is_intermediate: true,
         use_cpu,
       },
-      [model_loader]: {
-        type: model_loader,
-        id: model_loader,
+      [MAIN_MODEL_LOADER]: {
+        type: 'main_model_loader',
+        id: MAIN_MODEL_LOADER,
         is_intermediate: true,
         model,
       },
@@ -111,7 +106,7 @@ export const buildCanvasImageToImageGraph = (
         skipped_layers: clipSkip,
       },
       [LATENTS_TO_LATENTS]: {
-        type: onnx_model_type ? 'l2l_onnx' : 'l2l',
+        type: 'l2l',
         id: LATENTS_TO_LATENTS,
         is_intermediate: true,
         cfg_scale,
@@ -120,7 +115,7 @@ export const buildCanvasImageToImageGraph = (
         strength,
       },
       [IMAGE_TO_LATENTS]: {
-        type: onnx_model_type ? 'i2l_onnx' : 'i2l',
+        type: 'i2l',
         id: IMAGE_TO_LATENTS,
         is_intermediate: true,
         // must be set manually later, bc `fit` parameter may require a resize node inserted
@@ -137,7 +132,7 @@ export const buildCanvasImageToImageGraph = (
     edges: [
       {
         source: {
-          node_id: model_loader,
+          node_id: MAIN_MODEL_LOADER,
           field: 'clip',
         },
         destination: {
@@ -197,7 +192,7 @@ export const buildCanvasImageToImageGraph = (
       },
       {
         source: {
-          node_id: model_loader,
+          node_id: MAIN_MODEL_LOADER,
           field: 'unet',
         },
         destination: {
@@ -329,10 +324,10 @@ export const buildCanvasImageToImageGraph = (
   });

   // add LoRA support
-  addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS, model_loader);
+  addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS);

   // optionally add custom VAE
-  addVAEToGraph(state, graph, model_loader);
+  addVAEToGraph(state, graph);

   // add dynamic prompts - also sets up core iteration and seed
   addDynamicPromptsToGraph(state, graph);
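The common thread in the hunks above is that the builder no longer inspects model.model_type to choose between ONNX and standard node types. A minimal TypeScript sketch of the before/after pattern (simplified node shape and an assumed ONNX_MODEL_LOADER value, not the project's actual generated types):

```ts
// Sketch only: simplified node shape; the real builders use generated graph types.
type NodeRecord = { type: string; id: string; is_intermediate?: boolean };

const MAIN_MODEL_LOADER = 'main_model_loader';
const ONNX_MODEL_LOADER = 'onnx_model_loader'; // assumed value, for illustration only

// Before this commit: the loader node's type/id depended on the selected model.
function loaderNodeBefore(modelType: string): NodeRecord {
  const onnx_model_type = modelType.includes('onnx');
  const model_loader = onnx_model_type ? ONNX_MODEL_LOADER : MAIN_MODEL_LOADER;
  return { type: model_loader, id: model_loader, is_intermediate: true };
}

// After this commit: the canvas image-to-image graph always emits the main model loader,
// and the conditioning/latents nodes revert to 'compel', 'l2l', and 'i2l'.
function loaderNodeAfter(): NodeRecord {
  return { type: 'main_model_loader', id: MAIN_MODEL_LOADER, is_intermediate: true };
}
```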
@@ -17,7 +17,6 @@ import {
   INPAINT_GRAPH,
   ITERATE,
   MAIN_MODEL_LOADER,
-  ONNX_MODEL_LOADER,
   NEGATIVE_CONDITIONING,
   POSITIVE_CONDITIONING,
   RANDOM_INT,
@@ -69,11 +68,6 @@ export const buildCanvasInpaintGraph = (
     shouldAutoSave,
   } = state.canvas;

-  const model_loader = model.model_type.includes('onnx')
-    ? ONNX_MODEL_LOADER
-    : MAIN_MODEL_LOADER;
-
-  // TODO: Actually create the graph correctly for ONNX
   const graph: NonNullableGraph = {
     id: INPAINT_GRAPH,
     nodes: {
@@ -121,9 +115,9 @@ export const buildCanvasInpaintGraph = (
         is_intermediate: true,
         prompt: negativePrompt,
       },
-      [model_loader]: {
-        type: model_loader,
-        id: model_loader,
+      [MAIN_MODEL_LOADER]: {
+        type: 'main_model_loader',
+        id: MAIN_MODEL_LOADER,
         is_intermediate: true,
         model,
       },
@@ -151,7 +145,7 @@ export const buildCanvasInpaintGraph = (
     edges: [
       {
         source: {
-          node_id: model_loader,
+          node_id: MAIN_MODEL_LOADER,
           field: 'unet',
         },
         destination: {
@@ -161,7 +155,7 @@ export const buildCanvasInpaintGraph = (
       },
       {
         source: {
-          node_id: model_loader,
+          node_id: MAIN_MODEL_LOADER,
           field: 'clip',
         },
         destination: {
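The node_id: model_loader → node_id: MAIN_MODEL_LOADER hunks rewire graph edges rather than nodes. A sketch of the edge shape they touch (field names follow the diff; the types and the destination node id are assumptions for illustration, since the destinations are truncated in the hunks):

```ts
// Sketch only: minimal edge types inferred from the diff, not InvokeAI's generated types.
interface GraphEdgeEndpoint {
  node_id: string;
  field: string;
}

interface GraphEdge {
  source: GraphEdgeEndpoint;
  destination: GraphEdgeEndpoint;
}

const MAIN_MODEL_LOADER = 'main_model_loader';
const INPAINT = 'inpaint'; // hypothetical destination node id

// After the revert, the inpaint graph always pulls 'unet' and 'clip' from the main model loader.
const loaderEdges: GraphEdge[] = [
  {
    source: { node_id: MAIN_MODEL_LOADER, field: 'unet' },
    destination: { node_id: INPAINT, field: 'unet' },
  },
  {
    source: { node_id: MAIN_MODEL_LOADER, field: 'clip' },
    destination: { node_id: INPAINT, field: 'clip' },
  },
];
```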
@@ -19,7 +19,6 @@ import {
   LATENTS_TO_IMAGE,
   LATENTS_TO_LATENTS,
   MAIN_MODEL_LOADER,
-  ONNX_MODEL_LOADER,
   METADATA_ACCUMULATOR,
   NEGATIVE_CONDITIONING,
   NOISE,
@@ -85,17 +84,13 @@ export const buildLinearImageToImageGraph = (
     ? shouldUseCpuNoise
     : initialGenerationState.shouldUseCpuNoise;

-  const onnx_model_type = model.model_type.includes('onnx');
-  const model_loader = onnx_model_type ? ONNX_MODEL_LOADER : MAIN_MODEL_LOADER;
-
   // copy-pasted graph from node editor, filled in with state values & friendly node ids
-  // TODO: Actually create the graph correctly for ONNX
   const graph: NonNullableGraph = {
     id: IMAGE_TO_IMAGE_GRAPH,
     nodes: {
-      [model_loader]: {
-        type: model_loader,
-        id: model_loader,
+      [MAIN_MODEL_LOADER]: {
+        type: 'main_model_loader',
+        id: MAIN_MODEL_LOADER,
         model,
       },
       [CLIP_SKIP]: {
@@ -104,12 +99,12 @@ export const buildLinearImageToImageGraph = (
         skipped_layers: clipSkip,
       },
       [POSITIVE_CONDITIONING]: {
-        type: onnx_model_type ? 'prompt_onnx' : 'compel',
+        type: 'compel',
         id: POSITIVE_CONDITIONING,
         prompt: positivePrompt,
       },
       [NEGATIVE_CONDITIONING]: {
-        type: onnx_model_type ? 'prompt_onnx' : 'compel',
+        type: 'compel',
         id: NEGATIVE_CONDITIONING,
         prompt: negativePrompt,
       },
@@ -119,12 +114,12 @@ export const buildLinearImageToImageGraph = (
         use_cpu,
       },
       [LATENTS_TO_IMAGE]: {
-        type: onnx_model_type ? 'l2i_onnx' : 'l2i',
+        type: 'l2i',
         id: LATENTS_TO_IMAGE,
         fp32: vaePrecision === 'fp32' ? true : false,
       },
       [LATENTS_TO_LATENTS]: {
-        type: onnx_model_type ? 'l2l_onnx' : 'l2l',
+        type: 'l2l',
         id: LATENTS_TO_LATENTS,
         cfg_scale,
         scheduler,
@@ -132,7 +127,7 @@ export const buildLinearImageToImageGraph = (
         strength,
       },
       [IMAGE_TO_LATENTS]: {
-        type: onnx_model_type ? 'i2l_onnx' : 'i2l',
+        type: 'i2l',
         id: IMAGE_TO_LATENTS,
         // must be set manually later, bc `fit` parameter may require a resize node inserted
         // image: {
@@ -144,7 +139,7 @@ export const buildLinearImageToImageGraph = (
     edges: [
       {
         source: {
-          node_id: model_loader,
+          node_id: MAIN_MODEL_LOADER,
           field: 'unet',
         },
         destination: {
@@ -154,7 +149,7 @@ export const buildLinearImageToImageGraph = (
       },
       {
         source: {
-          node_id: model_loader,
+          node_id: MAIN_MODEL_LOADER,
           field: 'clip',
         },
         destination: {
@@ -339,10 +334,10 @@ export const buildLinearImageToImageGraph = (
   });

   // add LoRA support
-  addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS, model_loader);
+  addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS);

   // optionally add custom VAE
-  addVAEToGraph(state, graph, model_loader);
+  addVAEToGraph(state, graph);

   // add dynamic prompts - also sets up core iteration and seed
   addDynamicPromptsToGraph(state, graph);
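The final hunks in each builder also drop the extra loader argument from addLoRAsToGraph and addVAEToGraph. Those helpers are not part of this diff, so the sketch below only illustrates the call-site change with hypothetical signatures:

```ts
// Hypothetical signatures for illustration; the real helpers live elsewhere in the frontend
// and may differ.
type NonNullableGraph = { id: string; nodes: Record<string, unknown>; edges: unknown[] };
type RootState = Record<string, unknown>; // stand-in for the app state type

const MAIN_MODEL_LOADER = 'main_model_loader';

// Before: callers passed the loader node id so the helper could target either loader node.
function addLoRAsToGraphBefore(
  state: RootState,
  graph: NonNullableGraph,
  baseNodeId: string,
  modelLoaderNodeId: string
): void {
  // ...splice LoRA nodes between `modelLoaderNodeId` and `baseNodeId`...
}

// After: the helper assumes the standard loader node, so call sites shrink to three arguments.
function addLoRAsToGraphAfter(
  state: RootState,
  graph: NonNullableGraph,
  baseNodeId: string,
  modelLoaderNodeId: string = MAIN_MODEL_LOADER
): void {
  // ...same wiring, always against MAIN_MODEL_LOADER...
}

// Call sites, mirroring the diff:
// addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS, model_loader);  // before
// addLoRAsToGraph(state, graph, LATENTS_TO_LATENTS);                // after
```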