>>0;return a===0?32:31-(Vv(a)/yn|0)|0}var Hv=31,Pe=0,Xr=0,$t=1,$c=2,jr=4,Iu=8,Vi=16,Fu=32,zu=4194240,Vc=64,qa=128,bs=256,Ed=512,Sh=1024,Hc=2048,xh=4096,Td=8192,Ch=16384,Rd=32768,Ad=65536,Ss=131072,Wv=262144,xs=524288,wh=1048576,_h=2097152,Wc=130023424,Gc=4194304,Gv=8388608,Yv=16777216,Nh=33554432,Eh=67108864,Db=Gc,Yc=134217728,qv=268435455,sa=268435456,Ua=536870912,Hi=1073741824;function qC(n){{if(n&$t)return"Sync";if(n&$c)return"InputContinuousHydration";if(n&jr)return"InputContinuous";if(n&Iu)return"DefaultHydration";if(n&Vi)return"Default";if(n&Fu)return"TransitionHydration";if(n&zu)return"Transition";if(n&Wc)return"Retry";if(n&Yc)return"SelectiveHydration";if(n&sa)return"IdleHydration";if(n&Ua)return"Idle";if(n&Hi)return"Offscreen"}}var Jn=-1,rl=Vc,kd=Gc;function qc(n){switch(al(n)){case $t:return $t;case $c:return $c;case jr:return jr;case Iu:return Iu;case Vi:return Vi;case Fu:return Fu;case Vc:case qa:case bs:case Ed:case Sh:case Hc:case xh:case Td:case Ch:case Rd:case Ad:case Ss:case Wv:case xs:case wh:case _h:return n&zu;case Gc:case Gv:case Yv:case Nh:case Eh:return n&Wc;case Yc:return Yc;case sa:return sa;case Ua:return Ua;case Hi:return Hi;default:return u("Should have found matching lanes. 
This is a bug in React."),n}}function Th(n,a){var s=n.pendingLanes;if(s===Pe)return Pe;var f=Pe,p=n.suspendedLanes,y=n.pingedLanes,C=s&qv;if(C!==Pe){var T=C&~p;if(T!==Pe)f=qc(T);else{var O=C&y;O!==Pe&&(f=qc(O))}}else{var z=s&~p;z!==Pe?f=qc(z):y!==Pe&&(f=qc(y))}if(f===Pe)return Pe;if(a!==Pe&&a!==f&&(a&p)===Pe){var H=al(f),ee=al(a);if(H>=ee||H===Vi&&(ee&zu)!==Pe)return a}(f&jr)!==Pe&&(f|=s&Vi);var J=n.entangledLanes;if(J!==Pe)for(var he=n.entanglements,ve=f&J;ve>0;){var Ne=Bl(ve),ct=1<0;){var p=Bl(a),y=1<f&&(f=C),a&=~y}return f}function Kc(n,a){switch(n){case $t:case $c:case jr:return a+250;case Iu:case Vi:case Fu:case Vc:case qa:case bs:case Ed:case Sh:case Hc:case xh:case Td:case Ch:case Rd:case Ad:case Ss:case Wv:case xs:case wh:case _h:return a+5e3;case Gc:case Gv:case Yv:case Nh:case Eh:return Jn;case Yc:case sa:case Ua:case Hi:return Jn;default:return u("Should have found matching lanes. This is a bug in React."),Jn}}function Od(n,a){for(var s=n.pendingLanes,f=n.suspendedLanes,p=n.pingedLanes,y=n.expirationTimes,C=s;C>0;){var T=Bl(C),O=1<0;){var p=Bl(f),y=1<0;){var T=Bl(C),O=1<0;){var p=Wo(s),y=1<0;){var p=Wo(a),y=1<
0&&(C.forEach(function(T){var O=T.alternate;(O===null||!f.has(O))&&f.add(T)}),C.clear()),a&=~y}}function Fb(n,a){return null}var Ka=$t,Cs=jr,Cr=Vi,Dh=Ua,Fd=Xr;function co(){return Fd}function la(n){Fd=n}function zb(n,a){var s=Fd;try{return Fd=n,a()}finally{Fd=s}}function XC(n,a){return n!==0&&na?n:a}function Jv(n,a){return n!==0&&n-1}function Ph(n,a,s,f,p){return{blockedOn:n,domEventName:a,eventSystemFlags:s,nativeEvent:p,targetContainers:[f]}}function ag(n,a){switch(n){case"focusin":case"focusout":ol=null;break;case"dragenter":case"dragleave":jl=null;break;case"mouseover":case"mouseout":_s=null;break;case"pointerover":case"pointerout":{var s=a.pointerId;Jc.delete(s);break}case"gotpointercapture":case"lostpointercapture":{var f=a.pointerId;sl.delete(f);break}}}function _i(n,a,s,f,p,y){if(n===null||n.nativeEvent!==y){var C=Ph(a,s,f,p,y);if(a!==null){var T=Ze(a);T!==null&&tg(T)}return C}n.eventSystemFlags|=f;var O=n.targetContainers;return p!==null&&O.indexOf(p)===-1&&O.push(p),n}function $u(n,a,s,f,p){switch(a){case"focusin":{var y=p;return ol=_i(ol,n,a,s,f,y),!0}case"dragenter":{var C=p;return jl=_i(jl,n,a,s,f,C),!0}case"mouseover":{var T=p;return _s=_i(_s,n,a,s,f,T),!0}case"pointerover":{var O=p,z=O.pointerId;return Jc.set(z,_i(Jc.get(z)||null,n,a,s,f,O)),!0}case"gotpointercapture":{var H=p,ee=H.pointerId;return sl.set(ee,_i(sl.get(ee)||null,n,a,s,f,H)),!0}}return!1}function Vb(n){var a=Me(n.target);if(a!==null){var s=Pu(a);if(s!==null){var f=s.tag;if(f===$){var p=vb(s);if(p!==null){n.blockedOn=p,Ub(n.priority,function(){Qc(s)});return}}else if(f===x){var y=s.stateNode;if(ua(y)){n.blockedOn=vh(s);return}}}}n.blockedOn=null}function Hb(n){for(var a=ng(),s={blockedOn:null,target:n,priority:a},f=0;f0;){var s=a[0],f=og(n.domEventName,n.eventSystemFlags,s,n.nativeEvent);if(f===null){var p=n.nativeEvent,y=new p.constructor(p.type,p);hd(y),p.target.dispatchEvent(y),Au()}else{var C=Ze(f);return C!==null&&tg(C),n.blockedOn=f,!1}a.shift()}return!0}function 
Wb(n,a,s){Lh(n)&&s.delete(a)}function rw(){rg=!1,ol!==null&&Lh(ol)&&(ol=null),jl!==null&&Lh(jl)&&(jl=null),_s!==null&&Lh(_s)&&(_s=null),Jc.forEach(Wb),sl.forEach(Wb)}function Bd(n,a){n.blockedOn===a&&(n.blockedOn=null,rg||(rg=!0,t.unstable_scheduleCallback(t.unstable_NormalPriority,rw)))}function Ud(n){if(zd.length>0){Bd(zd[0],n);for(var a=1;a0;){var C=Ns[0];if(C.blockedOn!==null)break;Vb(C),C.blockedOn===null&&Ns.shift()}}var ef=r.ReactCurrentBatchConfig,ig=!0;function Gb(n){ig=!!n}function aw(){return ig}function iw(n,a,s){var f=Kb(a),p;switch(f){case Ka:p=ow;break;case Cs:p=Yb;break;case Cr:default:p=Ih;break}return p.bind(null,a,s,n)}function ow(n,a,s,f){var p=co(),y=ef.transition;ef.transition=null;try{la(Ka),Ih(n,a,s,f)}finally{la(p),ef.transition=y}}function Yb(n,a,s,f){var p=co(),y=ef.transition;ef.transition=null;try{la(Cs),Ih(n,a,s,f)}finally{la(p),ef.transition=y}}function Ih(n,a,s,f){!ig||qb(n,a,s,f)}function qb(n,a,s,f){var p=og(n,a,s,f);if(p===null){Cg(n,a,f,Fh,s),ag(n,f);return}if($u(p,n,a,s,f)){f.stopPropagation();return}if(ag(n,f),a&kc&&$b(n)){for(;p!==null;){var y=Ze(p);y!==null&&eg(y);var C=og(n,a,s,f);if(C===null&&Cg(n,a,f,Fh,s),C===p)break;p=C}p!==null&&f.stopPropagation();return}Cg(n,a,f,null,s)}var Fh=null;function og(n,a,s,f){Fh=null;var p=xv(f),y=Me(p);if(y!==null){var C=Pu(y);if(C===null)y=null;else{var T=C.tag;if(T===$){var O=vb(C);if(O!==null)return O;y=null}else if(T===x){var z=C.stateNode;if(ua(z))return vh(C);y=null}else C!==y&&(y=null)}}return Fh=y,null}function 
Kb(n){switch(n){case"cancel":case"click":case"close":case"contextmenu":case"copy":case"cut":case"auxclick":case"dblclick":case"dragend":case"dragstart":case"drop":case"focusin":case"focusout":case"input":case"invalid":case"keydown":case"keypress":case"keyup":case"mousedown":case"mouseup":case"paste":case"pause":case"play":case"pointercancel":case"pointerdown":case"pointerup":case"ratechange":case"reset":case"resize":case"seeked":case"submit":case"touchcancel":case"touchend":case"touchstart":case"volumechange":case"change":case"selectionchange":case"textInput":case"compositionstart":case"compositionend":case"compositionupdate":case"beforeblur":case"afterblur":case"beforeinput":case"blur":case"fullscreenchange":case"focus":case"hashchange":case"popstate":case"select":case"selectstart":return Ka;case"drag":case"dragenter":case"dragexit":case"dragleave":case"dragover":case"mousemove":case"mouseout":case"mouseover":case"pointermove":case"pointerout":case"pointerover":case"scroll":case"toggle":case"touchmove":case"wheel":case"mouseenter":case"mouseleave":case"pointerenter":case"pointerleave":return Cs;case"message":{var a=zl();switch(a){case Cd:return Ka;case Mv:return Cs;case ys:case BC:return Cr;case yh:return Dh;default:return Cr}}default:return Cr}}function Zb(n,a,s){return n.addEventListener(a,s,!1),s}function sw(n,a,s){return n.addEventListener(a,s,!0),s}function lw(n,a,s,f){return n.addEventListener(a,s,{capture:!0,passive:f}),s}function uw(n,a,s,f){return n.addEventListener(a,s,{passive:f}),s}var jd=null,sg=null,$d=null;function fo(n){return jd=n,sg=Qb(),!0}function cw(){jd=null,sg=null,$d=null}function Xb(){if($d)return $d;var n,a=sg,s=a.length,f,p=Qb(),y=p.length;for(n=0;n1?1-f:void 0;return $d=p.slice(n,T),$d}function Qb(){return"value"in jd?jd.value:jd.textContent}function Vd(n){var a,s=n.keyCode;return"charCode"in n?(a=n.charCode,a===0&&s===13&&(a=13)):a=s,a===10&&(a=13),a>=32||a===13?a:0}function zh(){return!0}function Jb(){return!1}function Wi(n){function 
a(s,f,p,y,C){this._reactName=s,this._targetInst=p,this.type=f,this.nativeEvent=y,this.target=C,this.currentTarget=null;for(var T in n)if(!!n.hasOwnProperty(T)){var O=n[T];O?this[T]=O(y):this[T]=y[T]}var z=y.defaultPrevented!=null?y.defaultPrevented:y.returnValue===!1;return z?this.isDefaultPrevented=zh:this.isDefaultPrevented=Jb,this.isPropagationStopped=Jb,this}return Jt(a.prototype,{preventDefault:function(){this.defaultPrevented=!0;var s=this.nativeEvent;!s||(s.preventDefault?s.preventDefault():typeof s.returnValue!="unknown"&&(s.returnValue=!1),this.isDefaultPrevented=zh)},stopPropagation:function(){var s=this.nativeEvent;!s||(s.stopPropagation?s.stopPropagation():typeof s.cancelBubble!="unknown"&&(s.cancelBubble=!0),this.isPropagationStopped=zh)},persist:function(){},isPersistent:zh}),a}var tf={eventPhase:0,bubbles:0,cancelable:0,timeStamp:function(n){return n.timeStamp||Date.now()},defaultPrevented:0,isTrusted:0},Hd=Wi(tf),Wd=Jt({},tf,{view:0,detail:0}),fw=Wi(Wd),lg,ug,Gd;function dw(n){n!==Gd&&(Gd&&n.type==="mousemove"?(lg=n.screenX-Gd.screenX,ug=n.screenY-Gd.screenY):(lg=0,ug=0),Gd=n)}var Bh=Jt({},Wd,{screenX:0,screenY:0,clientX:0,clientY:0,pageX:0,pageY:0,ctrlKey:0,shiftKey:0,altKey:0,metaKey:0,getModifierState:dg,button:0,buttons:0,relatedTarget:function(n){return n.relatedTarget===void 0?n.fromElement===n.srcElement?n.toElement:n.fromElement:n.relatedTarget},movementX:function(n){return"movementX"in n?n.movementX:(dw(n),lg)},movementY:function(n){return"movementY"in n?n.movementY:ug}}),e1=Wi(Bh),pw=Jt({},Bh,{dataTransfer:0}),hw=Wi(pw),mw=Jt({},Wd,{relatedTarget:0}),cg=Wi(mw),vw=Jt({},tf,{animationName:0,elapsedTime:0,pseudoElement:0}),gw=Wi(vw),yw=Jt({},tf,{clipboardData:function(n){return"clipboardData"in n?n.clipboardData:window.clipboardData}}),bw=Wi(yw),Sw=Jt({},tf,{data:0}),t1=Wi(Sw),xw=t1,Cw={Esc:"Escape",Spacebar:" 
",Left:"ArrowLeft",Up:"ArrowUp",Right:"ArrowRight",Down:"ArrowDown",Del:"Delete",Win:"OS",Menu:"ContextMenu",Apps:"ContextMenu",Scroll:"ScrollLock",MozPrintableKey:"Unidentified"},fg={8:"Backspace",9:"Tab",12:"Clear",13:"Enter",16:"Shift",17:"Control",18:"Alt",19:"Pause",20:"CapsLock",27:"Escape",32:" ",33:"PageUp",34:"PageDown",35:"End",36:"Home",37:"ArrowLeft",38:"ArrowUp",39:"ArrowRight",40:"ArrowDown",45:"Insert",46:"Delete",112:"F1",113:"F2",114:"F3",115:"F4",116:"F5",117:"F6",118:"F7",119:"F8",120:"F9",121:"F10",122:"F11",123:"F12",144:"NumLock",145:"ScrollLock",224:"Meta"};function n1(n){if(n.key){var a=Cw[n.key]||n.key;if(a!=="Unidentified")return a}if(n.type==="keypress"){var s=Vd(n);return s===13?"Enter":String.fromCharCode(s)}return n.type==="keydown"||n.type==="keyup"?fg[n.keyCode]||"Unidentified":""}var ww={Alt:"altKey",Control:"ctrlKey",Meta:"metaKey",Shift:"shiftKey"};function _w(n){var a=this,s=a.nativeEvent;if(s.getModifierState)return s.getModifierState(n);var f=ww[n];return f?!!s[f]:!1}function dg(n){return _w}var Nw=Jt({},Wd,{key:n1,code:0,location:0,ctrlKey:0,shiftKey:0,altKey:0,metaKey:0,repeat:0,locale:0,getModifierState:dg,charCode:function(n){return n.type==="keypress"?Vd(n):0},keyCode:function(n){return n.type==="keydown"||n.type==="keyup"?n.keyCode:0},which:function(n){return n.type==="keypress"?Vd(n):n.type==="keydown"||n.type==="keyup"?n.keyCode:0}}),Ew=Wi(Nw),Tw=Jt({},Bh,{pointerId:0,width:0,height:0,pressure:0,tangentialPressure:0,tiltX:0,tiltY:0,twist:0,pointerType:0,isPrimary:0}),r1=Wi(Tw),Rw=Jt({},Wd,{touches:0,targetTouches:0,changedTouches:0,altKey:0,metaKey:0,ctrlKey:0,shiftKey:0,getModifierState:dg}),a1=Wi(Rw),Aw=Jt({},tf,{propertyName:0,elapsedTime:0,pseudoElement:0}),Uh=Wi(Aw),kw=Jt({},Bh,{deltaX:function(n){return"deltaX"in n?n.deltaX:"wheelDeltaX"in n?-n.wheelDeltaX:0},deltaY:function(n){return"deltaY"in n?n.deltaY:"wheelDeltaY"in n?-n.wheelDeltaY:"wheelDelta"in 
n?-n.wheelDelta:0},deltaZ:0,deltaMode:0}),Ow=Wi(kw),Dw=[9,13,27,32],i1=229,pg=Kt&&"CompositionEvent"in window,Yd=null;Kt&&"documentMode"in document&&(Yd=document.documentMode);var Mw=Kt&&"TextEvent"in window&&!Yd,o1=Kt&&(!pg||Yd&&Yd>8&&Yd<=11),s1=32,l1=String.fromCharCode(s1);function Pw(){Ut("onBeforeInput",["compositionend","keypress","textInput","paste"]),Ut("onCompositionEnd",["compositionend","focusout","keydown","keypress","keyup","mousedown"]),Ut("onCompositionStart",["compositionstart","focusout","keydown","keypress","keyup","mousedown"]),Ut("onCompositionUpdate",["compositionupdate","focusout","keydown","keypress","keyup","mousedown"])}var u1=!1;function Lw(n){return(n.ctrlKey||n.altKey||n.metaKey)&&!(n.ctrlKey&&n.altKey)}function Iw(n){switch(n){case"compositionstart":return"onCompositionStart";case"compositionend":return"onCompositionEnd";case"compositionupdate":return"onCompositionUpdate"}}function Fw(n,a){return n==="keydown"&&a.keyCode===i1}function c1(n,a){switch(n){case"keyup":return Dw.indexOf(a.keyCode)!==-1;case"keydown":return a.keyCode!==i1;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function hg(n){var a=n.detail;return typeof a=="object"&&"data"in a?a.data:null}function mg(n){return n.locale==="ko"}var nf=!1;function zw(n,a,s,f,p){var y,C;if(pg?y=Iw(a):nf?c1(a,f)&&(y="onCompositionEnd"):Fw(a,f)&&(y="onCompositionStart"),!y)return null;o1&&!mg(f)&&(!nf&&y==="onCompositionStart"?nf=fo(p):y==="onCompositionEnd"&&nf&&(C=Xb()));var T=rn(s,y);if(T.length>0){var O=new t1(y,a,null,f,p);if(n.push({event:O,listeners:T}),C)O.data=C;else{var z=hg(f);z!==null&&(O.data=z)}}}function Bw(n,a){switch(n){case"compositionend":return hg(a);case"keypress":var s=a.which;return s!==s1?null:(u1=!0,l1);case"textInput":var f=a.data;return f===l1&&u1?null:f;default:return null}}function Uw(n,a){if(nf){if(n==="compositionend"||!pg&&c1(n,a)){var s=Xb();return cw(),nf=!1,s}return null}switch(n){case"paste":return 
null;case"keypress":if(!Lw(a)){if(a.char&&a.char.length>1)return a.char;if(a.which)return String.fromCharCode(a.which)}return null;case"compositionend":return o1&&!mg(a)?null:a.data;default:return null}}function jh(n,a,s,f,p){var y;if(Mw?y=Bw(a,f):y=Uw(a,f),!y)return null;var C=rn(s,"onBeforeInput");if(C.length>0){var T=new xw("onBeforeInput","beforeinput",null,f,p);n.push({event:T,listeners:C}),T.data=y}}function jw(n,a,s,f,p,y,C){zw(n,a,s,f,p),jh(n,a,s,f,p)}var $w={color:!0,date:!0,datetime:!0,"datetime-local":!0,email:!0,month:!0,number:!0,password:!0,range:!0,search:!0,tel:!0,text:!0,time:!0,url:!0,week:!0};function f1(n){var a=n&&n.nodeName&&n.nodeName.toLowerCase();return a==="input"?!!$w[n.type]:a==="textarea"}/**
- * Checks if an event is supported in the current execution environment.
- *
- * NOTE: This will not work correctly for non-generic events such as `change`,
- * `reset`, `load`, `error`, and `select`.
- *
- * Borrows from Modernizr.
- *
- * @param {string} eventNameSuffix Event name, e.g. "click".
- * @return {boolean} True if the event is supported.
- * @internal
- * @license Modernizr 3.0.0pre (Custom Build) | MIT
- */function Vw(n){if(!Kt)return!1;var a="on"+n,s=a in document;if(!s){var f=document.createElement("div");f.setAttribute(a,"return;"),s=typeof f[a]=="function"}return s}function Hw(){Ut("onChange",["change","click","focusin","focusout","input","keydown","keyup","selectionchange"])}function d1(n,a,s,f){sb(f);var p=rn(a,"onChange");if(p.length>0){var y=new Hd("onChange","change",null,s,f);n.push({event:y,listeners:p})}}var qd=null,Kd=null;function Ww(n){var a=n.nodeName&&n.nodeName.toLowerCase();return a==="select"||a==="input"&&n.type==="file"}function Gw(n){var a=[];d1(a,Kd,n,xv(n)),fb(Yw,a)}function Yw(n){xg(n,0)}function Vu(n){var a=pt(n);if(W(a))return n}function qw(n,a){if(n==="change")return a}var vg=!1;Kt&&(vg=Vw("input")&&(!document.documentMode||document.documentMode>9));function p1(n,a){qd=n,Kd=a,qd.attachEvent("onpropertychange",gg)}function $h(){!qd||(qd.detachEvent("onpropertychange",gg),qd=null,Kd=null)}function gg(n){n.propertyName==="value"&&Vu(Kd)&&Gw(n)}function h1(n,a,s){n==="focusin"?($h(),p1(a,s)):n==="focusout"&&$h()}function m1(n,a){if(n==="selectionchange"||n==="keyup"||n==="keydown")return Vu(Kd)}function v1(n){var a=n.nodeName;return a&&a.toLowerCase()==="input"&&(n.type==="checkbox"||n.type==="radio")}function Kw(n,a){if(n==="click")return Vu(a)}function Zw(n,a){if(n==="input"||n==="change")return Vu(a)}function Xw(n){var a=n._wrapperState;!a||!a.controlled||n.type!=="number"||$e(n,"number",n.value)}function Vh(n,a,s,f,p,y,C){var T=s?pt(s):window,O,z;if(Ww(T)?O=qw:f1(T)?vg?O=Zw:(O=m1,z=h1):v1(T)&&(O=Kw),O){var H=O(a,s);if(H){d1(n,H,f,p);return}}z&&z(a,T,s),a==="focusout"&&Xw(T)}function Hh(){We("onMouseEnter",["mouseout","mouseover"]),We("onMouseLeave",["mouseout","mouseover"]),We("onPointerEnter",["pointerout","pointerover"]),We("onPointerLeave",["pointerout","pointerover"])}function Qw(n,a,s,f,p,y,C){var T=a==="mouseover"||a==="pointerover",O=a==="mouseout"||a==="pointerout";if(T&&!fh(f)){var 
z=f.relatedTarget||f.fromElement;if(z&&(Me(z)||Ae(z)))return}if(!(!O&&!T)){var H;if(p.window===p)H=p;else{var ee=p.ownerDocument;ee?H=ee.defaultView||ee.parentWindow:H=window}var J,he;if(O){var ve=f.relatedTarget||f.toElement;if(J=s,he=ve?Me(ve):null,he!==null){var Ne=Pu(he);(he!==Ne||he.tag!==_&&he.tag!==R)&&(he=null)}}else J=null,he=s;if(J!==he){var ct=e1,Nt="onMouseLeave",bt="onMouseEnter",Mn="mouse";(a==="pointerout"||a==="pointerover")&&(ct=r1,Nt="onPointerLeave",bt="onPointerEnter",Mn="pointer");var wn=J==null?H:pt(J),le=he==null?H:pt(he),Ee=new ct(Nt,Mn+"leave",J,f,p);Ee.target=wn,Ee.relatedTarget=le;var ue=null,He=Me(p);if(He===s){var ft=new ct(bt,Mn+"enter",he,f,p);ft.target=le,ft.relatedTarget=wn,ue=ft}v_(n,Ee,ue,J,he)}}}function Jw(n,a){return n===a&&(n!==0||1/n===1/a)||n!==n&&a!==a}var Za=typeof Object.is=="function"?Object.is:Jw;function rf(n,a){if(Za(n,a))return!0;if(typeof n!="object"||n===null||typeof a!="object"||a===null)return!1;var s=Object.keys(n),f=Object.keys(a);if(s.length!==f.length)return!1;for(var p=0;p=a)return{node:s,offset:a-f};f=p}s=g1(e_(s))}}function t_(n){var a=n.ownerDocument,s=a&&a.defaultView||window,f=s.getSelection&&s.getSelection();if(!f||f.rangeCount===0)return null;var p=f.anchorNode,y=f.anchorOffset,C=f.focusNode,T=f.focusOffset;try{p.nodeType,C.nodeType}catch{return null}return n_(n,p,y,C,T)}function n_(n,a,s,f,p){var y=0,C=-1,T=-1,O=0,z=0,H=n,ee=null;e:for(;;){for(var J=null;H===a&&(s===0||H.nodeType===hs)&&(C=y+s),H===f&&(p===0||H.nodeType===hs)&&(T=y+p),H.nodeType===hs&&(y+=H.nodeValue.length),(J=H.firstChild)!==null;)ee=H,H=J;for(;;){if(H===n)break e;if(ee===a&&++O===s&&(C=y),ee===f&&++z===p&&(T=y),(J=H.nextSibling)!==null)break;H=ee,ee=H.parentNode}H=J}return C===-1||T===-1?null:{start:C,end:T}}function r_(n,a){var s=n.ownerDocument||document,f=s&&s.defaultView||window;if(!!f.getSelection){var p=f.getSelection(),y=n.textContent.length,C=Math.min(a.start,y),T=a.end===void 0?C:Math.min(a.end,y);if(!p.extend&&C>T){var 
O=T;T=C,C=O}var z=y1(n,C),H=y1(n,T);if(z&&H){if(p.rangeCount===1&&p.anchorNode===z.node&&p.anchorOffset===z.offset&&p.focusNode===H.node&&p.focusOffset===H.offset)return;var ee=s.createRange();ee.setStart(z.node,z.offset),p.removeAllRanges(),C>T?(p.addRange(ee),p.extend(H.node,H.offset)):(ee.setEnd(H.node,H.offset),p.addRange(ee))}}}function b1(n){return n&&n.nodeType===hs}function S1(n,a){return!n||!a?!1:n===a?!0:b1(n)?!1:b1(a)?S1(n,a.parentNode):"contains"in n?n.contains(a):n.compareDocumentPosition?!!(n.compareDocumentPosition(a)&16):!1}function a_(n){return n&&n.ownerDocument&&S1(n.ownerDocument.documentElement,n)}function i_(n){try{return typeof n.contentWindow.location.href=="string"}catch{return!1}}function x1(){for(var n=window,a=Q();a instanceof n.HTMLIFrameElement;){if(i_(a))n=a.contentWindow;else return a;a=Q(n.document)}return a}function yg(n){var a=n&&n.nodeName&&n.nodeName.toLowerCase();return a&&(a==="input"&&(n.type==="text"||n.type==="search"||n.type==="tel"||n.type==="url"||n.type==="password")||a==="textarea"||n.contentEditable==="true")}function Go(){var n=x1();return{focusedElem:n,selectionRange:yg(n)?s_(n):null}}function o_(n){var a=x1(),s=n.focusedElem,f=n.selectionRange;if(a!==s&&a_(s)){f!==null&&yg(s)&&Hu(s,f);for(var p=[],y=s;y=y.parentNode;)y.nodeType===Ci&&p.push({element:y,left:y.scrollLeft,top:y.scrollTop});typeof s.focus=="function"&&s.focus();for(var C=0;C0){var C=new Hd("onSelect","select",null,a,s);n.push({event:C,listeners:y}),C.target=Qr}}}}function f_(n,a,s,f,p,y,C){var T=s?pt(s):window;switch(a){case"focusin":(f1(T)||T.contentEditable==="true")&&(Qr=T,Tr=s,Zd=null);break;case"focusout":Qr=null,Tr=null,Zd=null;break;case"mousedown":Es=!0;break;case"contextmenu":case"mouseup":case"dragend":Es=!1,C1(n,f,p);break;case"selectionchange":if(on)break;case"keydown":case"keyup":C1(n,f,p)}}function Wh(n,a){var s={};return s[n.toLowerCase()]=a.toLowerCase(),s["Webkit"+n]="webkit"+a,s["Moz"+n]="moz"+a,s}var 
$l={animationend:Wh("Animation","AnimationEnd"),animationiteration:Wh("Animation","AnimationIteration"),animationstart:Wh("Animation","AnimationStart"),transitionend:Wh("Transition","TransitionEnd")},bg={},Ts={};Kt&&(Ts=document.createElement("div").style,"AnimationEvent"in window||(delete $l.animationend.animation,delete $l.animationiteration.animation,delete $l.animationstart.animation),"TransitionEvent"in window||delete $l.transitionend.transition);function Xd(n){if(bg[n])return bg[n];if(!$l[n])return n;var a=$l[n];for(var s in a)if(a.hasOwnProperty(s)&&s in Ts)return bg[n]=a[s];return n}var Qd=Xd("animationend"),wr=Xd("animationiteration"),Rr=Xd("animationstart"),Sg=Xd("transitionend"),w1=new Map,_1=["abort","auxClick","cancel","canPlay","canPlayThrough","click","close","contextMenu","copy","cut","drag","dragEnd","dragEnter","dragExit","dragLeave","dragOver","dragStart","drop","durationChange","emptied","encrypted","ended","error","gotPointerCapture","input","invalid","keyDown","keyPress","keyUp","load","loadedData","loadedMetadata","loadStart","lostPointerCapture","mouseDown","mouseMove","mouseOut","mouseOver","mouseUp","paste","pause","play","playing","pointerCancel","pointerDown","pointerMove","pointerOut","pointerOver","pointerUp","progress","rateChange","reset","resize","seeked","seeking","stalled","submit","suspend","timeUpdate","touchCancel","touchEnd","touchStart","volumeChange","scroll","toggle","touchMove","waiting","wheel"];function Vl(n,a){w1.set(n,a),Ut(a,[n])}function d_(){for(var n=0;n<_1.length;n++){var a=_1[n],s=a.toLowerCase(),f=a[0].toUpperCase()+a.slice(1);Vl(s,"on"+f)}Vl(Qd,"onAnimationEnd"),Vl(wr,"onAnimationIteration"),Vl(Rr,"onAnimationStart"),Vl("dblclick","onDoubleClick"),Vl("focusin","onFocus"),Vl("focusout","onBlur"),Vl(Sg,"onTransitionEnd")}function p_(n,a,s,f,p,y,C){var T=w1.get(a);if(T!==void 0){var 
O=Hd,z=a;switch(a){case"keypress":if(Vd(f)===0)return;case"keydown":case"keyup":O=Ew;break;case"focusin":z="focus",O=cg;break;case"focusout":z="blur",O=cg;break;case"beforeblur":case"afterblur":O=cg;break;case"click":if(f.button===2)return;case"auxclick":case"dblclick":case"mousedown":case"mousemove":case"mouseup":case"mouseout":case"mouseover":case"contextmenu":O=e1;break;case"drag":case"dragend":case"dragenter":case"dragexit":case"dragleave":case"dragover":case"dragstart":case"drop":O=hw;break;case"touchcancel":case"touchend":case"touchmove":case"touchstart":O=a1;break;case Qd:case wr:case Rr:O=gw;break;case Sg:O=Uh;break;case"scroll":O=fw;break;case"wheel":O=Ow;break;case"copy":case"cut":case"paste":O=bw;break;case"gotpointercapture":case"lostpointercapture":case"pointercancel":case"pointerdown":case"pointermove":case"pointerout":case"pointerover":case"pointerup":O=r1;break}var H=(y&kc)!==0;{var ee=!H&&a==="scroll",J=ll(s,T,f.type,H,ee);if(J.length>0){var he=new O(T,z,null,f,p);n.push({event:he,listeners:J})}}}}d_(),Hh(),Hw(),l_(),Pw();function h_(n,a,s,f,p,y,C){p_(n,a,s,f,p,y);var T=(y&ch)===0;T&&(Qw(n,a,s,f,p),Vh(n,a,s,f,p),f_(n,a,s,f,p),jw(n,a,s,f,p))}var af=["abort","canplay","canplaythrough","durationchange","emptied","encrypted","ended","error","loadeddata","loadedmetadata","loadstart","pause","play","playing","progress","ratechange","resize","seeked","seeking","stalled","suspend","timeupdate","volumechange","waiting"],Wu=new Set(["cancel","close","invalid","load","scroll","toggle"].concat(af));function Gh(n,a,s){var f=n.type||"unknown-event";n.currentTarget=s,gd(f,a,void 0,n),n.currentTarget=null}function m_(n,a,s){var f;if(s)for(var p=a.length-1;p>=0;p--){var y=a[p],C=y.instance,T=y.currentTarget,O=y.listener;if(C!==f&&n.isPropagationStopped())return;Gh(n,O,T),f=C}else for(var z=0;z0;)s=Gu(s),p--;for(;C-p>0;)f=Gu(f),C--;for(var O=p;O--;){if(s===f||f!==null&&s===f.alternate)return s;s=Gu(s),f=Gu(f)}return null}function wg(n,a,s,f,p){for(var 
y=a._reactName,C=[],T=s;T!==null&&T!==f;){var O=T,z=O.alternate,H=O.stateNode,ee=O.tag;if(z!==null&&z===f)break;if(ee===_&&H!==null){var J=H;if(p){var he=Xs(T,y);he!=null&&C.unshift(of(T,he,J))}else if(!p){var ve=Xs(T,y);ve!=null&&C.push(of(T,ve,J))}}T=T.return}C.length!==0&&n.push({event:a,listeners:C})}function v_(n,a,s,f,p){var y=f&&p?po(f,p):null;f!==null&&wg(n,a,f,y,!1),p!==null&&s!==null&&wg(n,s,p,y,!0)}function Kn(n,a){return n+"__"+(a?"capture":"bubble")}var Ni=!1,sf="dangerouslySetInnerHTML",Yu="suppressContentEditableWarning",Wl="suppressHydrationWarning",T1="autoFocus",qu="children",Ku="style",Kh="__html",_g,Zh,ep,R1,Xh,A1,k1;_g={dialog:!0,webview:!0},Zh=function(n,a){Qn(n,a),ab(n,a),ob(n,a,{registrationNameDependencies:gt,possibleRegistrationNames:Tt})},A1=Kt&&!document.documentMode,ep=function(n,a,s){if(!Ni){var f=Qh(s),p=Qh(a);p!==f&&(Ni=!0,u("Prop `%s` did not match. Server: %s Client: %s",n,JSON.stringify(p),JSON.stringify(f)))}},R1=function(n){if(!Ni){Ni=!0;var a=[];n.forEach(function(s){a.push(s)}),u("Extra attributes from the server: %s",a)}},Xh=function(n,a){a===!1?u("Expected `%s` listener to be a function, instead got `false`.\n\nIf you used to conditionally omit it with %s={condition && value}, pass %s={condition ? value : undefined} instead.",n,n,n):u("Expected `%s` listener to be a function, instead got a value of `%s` type.",n,typeof a)},k1=function(n,a){var s=n.namespaceURI===ps?n.ownerDocument.createElement(n.tagName):n.ownerDocument.createElementNS(n.namespaceURI,n.tagName);return s.innerHTML=a,s.innerHTML};var g_=/\r\n?/g,Ng=/\u0000|\uFFFD/g;function Qh(n){Pn(n);var a=typeof n=="string"?n:""+n;return a.replace(g_,`
-`).replace(Ng,"")}function lf(n,a,s,f){var p=Qh(a),y=Qh(n);if(y!==p&&(f&&(Ni||(Ni=!0,u('Text content did not match. Server: "%s" Client: "%s"',y,p))),s&&oe))throw new Error("Text content does not match server-rendered HTML.")}function O1(n){return n.nodeType===$o?n:n.ownerDocument}function y_(){}function Jh(n){n.onclick=y_}function ca(n,a,s,f,p){for(var y in f)if(!!f.hasOwnProperty(y)){var C=f[y];if(y===Ku)C&&Object.freeze(C),re(a,C);else if(y===sf){var T=C?C[Kh]:void 0;T!=null&&rh(a,T)}else if(y===qu)if(typeof C=="string"){var O=n!=="textarea"||C!=="";O&&ud(a,C)}else typeof C=="number"&&ud(a,""+C);else y===Yu||y===Wl||y===T1||(gt.hasOwnProperty(y)?C!=null&&(typeof C!="function"&&Xh(y,C),y==="onScroll"&&Cn("scroll",a)):C!=null&&Ii(a,y,C,p))}}function Ei(n,a,s,f){for(var p=0;p is using incorrect casing. Use PascalCase for React components, or lowercase for HTML elements.",n),n==="script"){var O=y.createElement("div");O.innerHTML="
+
diff --git a/frontend/src/app/App.tsx b/frontend/src/app/App.tsx
index 354d914b68..fb6ed1ee6d 100644
--- a/frontend/src/app/App.tsx
+++ b/frontend/src/app/App.tsx
@@ -10,15 +10,13 @@ import PromptInput from '../features/options/PromptInput';
import LogViewer from '../features/system/LogViewer';
import Loading from '../Loading';
import { useAppDispatch } from './store';
-import { requestAllImages, requestSystemConfig } from './socketio/actions';
+import { requestSystemConfig } from './socketio/actions';
const App = () => {
const dispatch = useAppDispatch();
const [isReady, setIsReady] = useState(false);
- // Load images from the gallery once
useEffect(() => {
- dispatch(requestAllImages());
dispatch(requestSystemConfig());
setIsReady(true);
}, [dispatch]);
diff --git a/frontend/src/app/features.ts b/frontend/src/app/features.ts
new file mode 100644
index 0000000000..cb8455f09d
--- /dev/null
+++ b/frontend/src/app/features.ts
@@ -0,0 +1,59 @@
+type FeatureHelpInfo = {
+ text: string;
+ href: string;
+ guideImage: string;
+};
+
+export enum Feature {
+ PROMPT,
+ GALLERY,
+ OUTPUT,
+ SEED_AND_VARIATION,
+ ESRGAN,
+ FACE_CORRECTION,
+ IMAGE_TO_IMAGE,
+ SAMPLER,
+}
+
+export const FEATURES: Record = {
+ [Feature.PROMPT]: {
+ text: 'This field will take all prompt text, including both content and stylistic terms. CLI Commands will not work in the prompt.',
+ href: 'link/to/docs/feature3.html',
+ guideImage: 'asset/path.gif',
+ },
+ [Feature.GALLERY]: {
+ text: 'As new invocations are generated, files from the output directory will be displayed here. Generations have additional options to configure new generations.',
+ href: 'link/to/docs/feature3.html',
+ guideImage: 'asset/path.gif',
+ },
+ [Feature.OUTPUT]: {
+ text: 'The Height and Width of generations can be controlled here. If you experience errors, you may be generating an image too large for your system. The seamless option will more often result in repeating patterns in outputs.',
+ href: 'link/to/docs/feature3.html',
+ guideImage: 'asset/path.gif',
+ },
+ [Feature.SEED_AND_VARIATION]: {
+ text: 'Seed values provide an initial set of noise which guide the denoising process. Try a variation with an amount of between 0 and 1 to change the output image for that seed.',
+ href: 'link/to/docs/feature3.html',
+ guideImage: 'asset/path.gif',
+ },
+ [Feature.ESRGAN]: {
+ text: 'The ESRGAN setting can be used to increase the output resolution without requiring a higher width/height in the initial generation.',
+ href: 'link/to/docs/feature1.html',
+ guideImage: 'asset/path.gif',
+ },
+ [Feature.FACE_CORRECTION]: {
+ text: 'Using GFPGAN or CodeFormer, Face Correction will attempt to identify faces in outputs, and correct any defects/abnormalities. Higher values will apply a stronger corrective pressure on outputs.',
+ href: 'link/to/docs/feature2.html',
+ guideImage: 'asset/path.gif',
+ },
+ [Feature.IMAGE_TO_IMAGE]: {
+ text: 'ImageToImage allows the upload of an initial image, which InvokeAI will use to guide the generation process, along with a prompt. A lower value for this setting will more closely resemble the original image. Values between 0-1 are accepted, and a range of .25-.75 is recommended ',
+ href: 'link/to/docs/feature3.html',
+ guideImage: 'asset/path.gif',
+ },
+ [Feature.SAMPLER]: {
+ text: 'This setting allows for different denoising samplers to be used, as well as the number of denoising steps used, which will change the resulting output.',
+ href: 'link/to/docs/feature3.html',
+ guideImage: 'asset/path.gif',
+ },
+};
diff --git a/frontend/src/app/invokeai.d.ts b/frontend/src/app/invokeai.d.ts
index e32f9a0626..9e1a4b1e57 100644
--- a/frontend/src/app/invokeai.d.ts
+++ b/frontend/src/app/invokeai.d.ts
@@ -107,6 +107,7 @@ export declare type Metadata = SystemConfig & {
export declare type Image = {
uuid: string;
url: string;
+ mtime: number;
metadata: Metadata;
};
@@ -148,6 +149,7 @@ export declare type SystemConfigResponse = SystemConfig;
export declare type ImageResultResponse = {
url: string;
+ mtime: number;
metadata: Metadata;
};
@@ -157,7 +159,10 @@ export declare type ErrorResponse = {
};
export declare type GalleryImagesResponse = {
- images: Array<{ url: string; metadata: Metadata }>;
+ images: Array>;
+ nextPage: number;
+ offset: number;
+ onlyNewImages: boolean;
};
export declare type ImageUrlAndUuidResponse = {
diff --git a/frontend/src/app/socketio/actions.ts b/frontend/src/app/socketio/actions.ts
index 1bb2b0acac..0e9e408cc7 100644
--- a/frontend/src/app/socketio/actions.ts
+++ b/frontend/src/app/socketio/actions.ts
@@ -12,8 +12,11 @@ export const generateImage = createAction('socketio/generateImage');
export const runESRGAN = createAction('socketio/runESRGAN');
export const runGFPGAN = createAction('socketio/runGFPGAN');
export const deleteImage = createAction('socketio/deleteImage');
-export const requestAllImages = createAction(
- 'socketio/requestAllImages'
+export const requestImages = createAction(
+ 'socketio/requestImages'
+);
+export const requestNewImages = createAction(
+ 'socketio/requestNewImages'
);
export const cancelProcessing = createAction(
'socketio/cancelProcessing'
@@ -23,4 +26,6 @@ export const uploadInitialImage = createAction(
);
export const uploadMaskImage = createAction('socketio/uploadMaskImage');
-export const requestSystemConfig = createAction('socketio/requestSystemConfig');
+export const requestSystemConfig = createAction(
+ 'socketio/requestSystemConfig'
+);
diff --git a/frontend/src/app/socketio/emitters.ts b/frontend/src/app/socketio/emitters.ts
index 64b2d8e659..77b542c070 100644
--- a/frontend/src/app/socketio/emitters.ts
+++ b/frontend/src/app/socketio/emitters.ts
@@ -83,8 +83,17 @@ const makeSocketIOEmitters = (
const { url, uuid } = imageToDelete;
socketio.emit('deleteImage', url, uuid);
},
- emitRequestAllImages: () => {
- socketio.emit('requestAllImages');
+ emitRequestImages: () => {
+ const { nextPage, offset } = getState().gallery;
+ socketio.emit('requestImages', nextPage, offset);
+ },
+ emitRequestNewImages: () => {
+ const { nextPage, offset, images } = getState().gallery;
+ if (images.length > 0) {
+ socketio.emit('requestImages', nextPage, offset, images[0].mtime);
+ } else {
+ socketio.emit('requestImages', nextPage, offset);
+ }
},
emitCancelProcessing: () => {
socketio.emit('cancel');
@@ -96,8 +105,8 @@ const makeSocketIOEmitters = (
socketio.emit('uploadMaskImage', file, file.name);
},
emitRequestSystemConfig: () => {
- socketio.emit('requestSystemConfig')
- }
+ socketio.emit('requestSystemConfig');
+ },
};
};
diff --git a/frontend/src/app/socketio/listeners.ts b/frontend/src/app/socketio/listeners.ts
index c03317b9b9..6d5dbdb924 100644
--- a/frontend/src/app/socketio/listeners.ts
+++ b/frontend/src/app/socketio/listeners.ts
@@ -14,10 +14,10 @@ import {
} from '../../features/system/systemSlice';
import {
+ addGalleryImages,
addImage,
clearIntermediateImage,
removeImage,
- setGalleryImages,
setIntermediateImage,
} from '../../features/gallery/gallerySlice';
@@ -25,6 +25,7 @@ import {
setInitialImagePath,
setMaskPath,
} from '../../features/options/optionsSlice';
+import { requestNewImages } from './actions';
/**
* Returns an object containing listener callbacks for socketio events.
@@ -43,6 +44,7 @@ const makeSocketIOListeners = (
try {
dispatch(setIsConnected(true));
dispatch(setCurrentStatus('Connected'));
+ dispatch(requestNewImages());
} catch (e) {
console.error(e);
}
@@ -53,7 +55,6 @@ const makeSocketIOListeners = (
onDisconnect: () => {
try {
dispatch(setIsConnected(false));
- dispatch(setIsProcessing(false));
dispatch(setCurrentStatus('Disconnected'));
dispatch(
@@ -72,13 +73,14 @@ const makeSocketIOListeners = (
*/
onGenerationResult: (data: InvokeAI.ImageResultResponse) => {
try {
- const { url, metadata } = data;
+ const { url, mtime, metadata } = data;
const newUuid = uuidv4();
dispatch(
addImage({
uuid: newUuid,
url,
+ mtime,
metadata: metadata,
})
);
@@ -99,11 +101,12 @@ const makeSocketIOListeners = (
onIntermediateResult: (data: InvokeAI.ImageResultResponse) => {
try {
const uuid = uuidv4();
- const { url, metadata } = data;
+ const { url, metadata, mtime } = data;
dispatch(
setIntermediateImage({
uuid,
url,
+ mtime,
metadata,
})
);
@@ -123,12 +126,13 @@ const makeSocketIOListeners = (
*/
onESRGANResult: (data: InvokeAI.ImageResultResponse) => {
try {
- const { url, metadata } = data;
+ const { url, metadata, mtime } = data;
dispatch(
addImage({
uuid: uuidv4(),
url,
+ mtime,
metadata,
})
);
@@ -149,12 +153,13 @@ const makeSocketIOListeners = (
*/
onGFPGANResult: (data: InvokeAI.ImageResultResponse) => {
try {
- const { url, metadata } = data;
+ const { url, metadata, mtime } = data;
dispatch(
addImage({
uuid: uuidv4(),
url,
+ mtime,
metadata,
})
);
@@ -209,16 +214,26 @@ const makeSocketIOListeners = (
* Callback to run when we receive a 'galleryImages' event.
*/
onGalleryImages: (data: InvokeAI.GalleryImagesResponse) => {
- const { images } = data;
+ const { images, nextPage, offset } = data;
+
+ /**
+ * the logic here ideally would be in the reducer but we have a side effect:
+ * generating a uuid. so the logic needs to be here, outside redux.
+ */
+
+ // Generate a UUID for each image
const preparedImages = images.map((image): InvokeAI.Image => {
- const { url, metadata } = image;
+ const { url, metadata, mtime } = image;
return {
uuid: uuidv4(),
url,
+ mtime,
metadata,
};
});
- dispatch(setGalleryImages(preparedImages));
+
+ dispatch(addGalleryImages({ images: preparedImages, nextPage, offset }));
+
dispatch(
addLogEntry({
timestamp: dateFormat(new Date(), 'isoDateTime'),
diff --git a/frontend/src/app/socketio/middleware.ts b/frontend/src/app/socketio/middleware.ts
index e358449b3b..a78e3fa10c 100644
--- a/frontend/src/app/socketio/middleware.ts
+++ b/frontend/src/app/socketio/middleware.ts
@@ -24,7 +24,9 @@ import * as InvokeAI from '../invokeai';
export const socketioMiddleware = () => {
const { hostname, port } = new URL(window.location.href);
- const socketio = io(`http://${hostname}:9090`);
+ const socketio = io(`http://${hostname}:9090`, {
+ timeout: 60000,
+ });
let areListenersSet = false;
@@ -51,7 +53,8 @@ export const socketioMiddleware = () => {
emitRunESRGAN,
emitRunGFPGAN,
emitDeleteImage,
- emitRequestAllImages,
+ emitRequestImages,
+ emitRequestNewImages,
emitCancelProcessing,
emitUploadInitialImage,
emitUploadMaskImage,
@@ -140,11 +143,17 @@ export const socketioMiddleware = () => {
break;
}
- case 'socketio/requestAllImages': {
- emitRequestAllImages();
+ case 'socketio/requestImages': {
+ emitRequestImages();
break;
}
+ case 'socketio/requestNewImages': {
+ emitRequestNewImages();
+ break;
+ }
+
+
case 'socketio/cancelProcessing': {
emitCancelProcessing();
break;
diff --git a/frontend/src/common/components/GuideIcon.tsx b/frontend/src/common/components/GuideIcon.tsx
new file mode 100644
index 0000000000..2f4312ae76
--- /dev/null
+++ b/frontend/src/common/components/GuideIcon.tsx
@@ -0,0 +1,22 @@
+import { Box, forwardRef, Icon } from '@chakra-ui/react';
+import { IconType } from 'react-icons';
+import { MdHelp } from 'react-icons/md';
+import { Feature } from '../../app/features';
+import GuidePopover from './GuidePopover';
+
+type GuideIconProps = {
+ feature: Feature;
+ icon?: IconType;
+};
+
+const GuideIcon = forwardRef(
+ ({ feature, icon = MdHelp }: GuideIconProps, ref) => (
+
+
+
+
+
+ )
+);
+
+export default GuideIcon;
diff --git a/frontend/src/common/components/GuidePopover.tsx b/frontend/src/common/components/GuidePopover.tsx
new file mode 100644
index 0000000000..48a2f8d48f
--- /dev/null
+++ b/frontend/src/common/components/GuidePopover.tsx
@@ -0,0 +1,51 @@
+import {
+ Popover,
+ PopoverArrow,
+ PopoverContent,
+ PopoverTrigger,
+ PopoverHeader,
+ Flex,
+ Box,
+} from '@chakra-ui/react';
+import { SystemState } from '../../features/system/systemSlice';
+import { useAppSelector } from '../../app/store';
+import { RootState } from '../../app/store';
+import { createSelector } from '@reduxjs/toolkit';
+import { ReactElement } from 'react';
+import { Feature, FEATURES } from '../../app/features';
+
+type GuideProps = {
+ children: ReactElement;
+ feature: Feature;
+};
+
+const systemSelector = createSelector(
+ (state: RootState) => state.system,
+ (system: SystemState) => system.shouldDisplayGuides
+);
+
+const GuidePopover = ({ children, feature }: GuideProps) => {
+ const shouldDisplayGuides = useAppSelector(systemSelector);
+ const { text } = FEATURES[feature];
+ return shouldDisplayGuides ? (
+
+
+ {children}
+
+ e.preventDefault()}
+ cursor={'initial'}
+ >
+
+
+ {text}
+
+
+
+ ) : (
+ <>>
+ );
+};
+
+export default GuidePopover;
diff --git a/frontend/src/features/gallery/ImageGallery.tsx b/frontend/src/features/gallery/ImageGallery.tsx
index cdc45aedc0..c81e5b4167 100644
--- a/frontend/src/features/gallery/ImageGallery.tsx
+++ b/frontend/src/features/gallery/ImageGallery.tsx
@@ -1,5 +1,6 @@
-import { Center, Flex, Text } from '@chakra-ui/react';
-import { RootState } from '../../app/store';
+import { Button, Center, Flex, Text } from '@chakra-ui/react';
+import { requestImages } from '../../app/socketio/actions';
+import { RootState, useAppDispatch } from '../../app/store';
import { useAppSelector } from '../../app/store';
import HoverableImage from './HoverableImage';
@@ -10,7 +11,7 @@ const ImageGallery = () => {
const { images, currentImageUuid } = useAppSelector(
(state: RootState) => state.gallery
);
-
+ const dispatch = useAppDispatch();
/**
* I don't like that this needs to rerender whenever the current image is changed.
* What if we have a large number of images? I suppose pagination (planned) will
@@ -19,15 +20,22 @@ const ImageGallery = () => {
* TODO: Refactor if performance complaints, or after migrating to new API which supports pagination.
*/
+ const handleClickLoadMore = () => {
+ dispatch(requestImages());
+ };
+
return images.length ? (
-
- {[...images].reverse().map((image) => {
- const { uuid } = image;
- const isSelected = currentImageUuid === uuid;
- return (
-
- );
- })}
+
+
+ {images.map((image) => {
+ const { uuid } = image;
+ const isSelected = currentImageUuid === uuid;
+ return (
+
+ );
+ })}
+
+
) : (
diff --git a/frontend/src/features/gallery/gallerySlice.ts b/frontend/src/features/gallery/gallerySlice.ts
index 8870d90466..ba20a9780f 100644
--- a/frontend/src/features/gallery/gallerySlice.ts
+++ b/frontend/src/features/gallery/gallerySlice.ts
@@ -8,11 +8,15 @@ export interface GalleryState {
currentImageUuid: string;
images: Array;
intermediateImage?: InvokeAI.Image;
+ nextPage: number;
+ offset: number;
}
const initialState: GalleryState = {
currentImageUuid: '',
images: [],
+ nextPage: 1,
+ offset: 0,
};
export const gallerySlice = createSlice({
@@ -50,7 +54,7 @@ export const gallerySlice = createSlice({
* Clamp the new index to ensure it is valid..
*/
const newCurrentImageIndex = clamp(
- imageToDeleteIndex - 1,
+ imageToDeleteIndex,
0,
newImages.length - 1
);
@@ -67,10 +71,11 @@ export const gallerySlice = createSlice({
state.images = newImages;
},
addImage: (state, action: PayloadAction) => {
- state.images.push(action.payload);
+ state.images.unshift(action.payload);
state.currentImageUuid = action.payload.uuid;
state.intermediateImage = undefined;
state.currentImage = action.payload;
+ state.offset += 1
},
setIntermediateImage: (state, action: PayloadAction) => {
state.intermediateImage = action.payload;
@@ -78,13 +83,24 @@ export const gallerySlice = createSlice({
clearIntermediateImage: (state) => {
state.intermediateImage = undefined;
},
- setGalleryImages: (state, action: PayloadAction>) => {
- const newImages = action.payload;
- if (newImages.length) {
- const newCurrentImage = newImages[newImages.length - 1];
- state.images = newImages;
+ addGalleryImages: (
+ state,
+ action: PayloadAction<{
+ images: Array;
+ nextPage: number;
+ offset: number;
+ }>
+ ) => {
+ const { images, nextPage, offset } = action.payload;
+ if (images.length) {
+ const newCurrentImage = images[0];
+ state.images = state.images
+ .concat(images)
+ .sort((a, b) => b.mtime - a.mtime);
state.currentImage = newCurrentImage;
state.currentImageUuid = newCurrentImage.uuid;
+ state.nextPage = nextPage;
+ state.offset = offset;
}
},
},
@@ -95,7 +111,7 @@ export const {
clearIntermediateImage,
removeImage,
setCurrentImage,
- setGalleryImages,
+ addGalleryImages,
setIntermediateImage,
} = gallerySlice.actions;
diff --git a/frontend/src/features/options/OptionsAccordion.tsx b/frontend/src/features/options/OptionsAccordion.tsx
index ab60e98228..2568717090 100644
--- a/frontend/src/features/options/OptionsAccordion.tsx
+++ b/frontend/src/features/options/OptionsAccordion.tsx
@@ -31,6 +31,9 @@ import OutputOptions from './OutputOptions';
import ImageToImageOptions from './ImageToImageOptions';
import { ChangeEvent } from 'react';
+import GuideIcon from '../../common/components/GuideIcon';
+import { Feature } from '../../app/features';
+
const optionsSelector = createSelector(
(state: RootState) => state.options,
(options: OptionsState) => {
@@ -108,6 +111,7 @@ const OptionsAccordion = () => {
Seed & Variation
+
@@ -121,6 +125,7 @@ const OptionsAccordion = () => {
Sampler
+
@@ -144,6 +149,7 @@ const OptionsAccordion = () => {
onChange={handleChangeShouldRunESRGAN}
/>
+
@@ -160,13 +166,14 @@ const OptionsAccordion = () => {
width={'100%'}
mr={2}
>
- Fix Faces (GFPGAN)
+ Face Correction
+
@@ -190,6 +197,7 @@ const OptionsAccordion = () => {
onChange={handleChangeShouldUseInitImage}
/>
+
@@ -203,6 +211,7 @@ const OptionsAccordion = () => {
Output
+
diff --git a/frontend/src/features/system/SettingsModal.tsx b/frontend/src/features/system/SettingsModal.tsx
index 2f636dc44e..8c420f6871 100644
--- a/frontend/src/features/system/SettingsModal.tsx
+++ b/frontend/src/features/system/SettingsModal.tsx
@@ -20,6 +20,7 @@ import { useAppDispatch, useAppSelector } from '../../app/store';
import {
setShouldConfirmOnDelete,
setShouldDisplayInProgress,
+ setShouldDisplayGuides,
SystemState,
} from './systemSlice';
import { RootState } from '../../app/store';
@@ -31,8 +32,16 @@ import { cloneElement, ReactElement } from 'react';
const systemSelector = createSelector(
(state: RootState) => state.system,
(system: SystemState) => {
- const { shouldDisplayInProgress, shouldConfirmOnDelete } = system;
- return { shouldDisplayInProgress, shouldConfirmOnDelete };
+ const {
+ shouldDisplayInProgress,
+ shouldConfirmOnDelete,
+ shouldDisplayGuides,
+ } = system;
+ return {
+ shouldDisplayInProgress,
+ shouldConfirmOnDelete,
+ shouldDisplayGuides,
+ };
},
{
memoizeOptions: { resultEqualityCheck: isEqual },
@@ -63,8 +72,11 @@ const SettingsModal = ({ children }: SettingsModalProps) => {
onClose: onRefreshModalClose,
} = useDisclosure();
- const { shouldDisplayInProgress, shouldConfirmOnDelete } =
- useAppSelector(systemSelector);
+ const {
+ shouldDisplayInProgress,
+ shouldConfirmOnDelete,
+ shouldDisplayGuides,
+ } = useAppSelector(systemSelector);
const dispatch = useAppDispatch();
@@ -116,6 +128,19 @@ const SettingsModal = ({ children }: SettingsModalProps) => {
/>
+
+
+
+ Display help guides in configuration menus
+
+
+ dispatch(setShouldDisplayGuides(e.target.checked))
+ }
+ />
+
+
Reset Web UI
diff --git a/frontend/src/features/system/systemSlice.ts b/frontend/src/features/system/systemSlice.ts
index f3d8295b05..d041a947f2 100644
--- a/frontend/src/features/system/systemSlice.ts
+++ b/frontend/src/features/system/systemSlice.ts
@@ -1,7 +1,7 @@
import { createSlice } from '@reduxjs/toolkit';
import type { PayloadAction } from '@reduxjs/toolkit';
import { ExpandedIndex } from '@chakra-ui/react';
-import * as InvokeAI from '../../app/invokeai'
+import * as InvokeAI from '../../app/invokeai';
export type LogLevel = 'info' | 'warning' | 'error';
@@ -15,7 +15,9 @@ export interface Log {
[index: number]: LogEntry;
}
-export interface SystemState extends InvokeAI.SystemStatus, InvokeAI.SystemConfig {
+export interface SystemState
+ extends InvokeAI.SystemStatus,
+ InvokeAI.SystemConfig {
shouldDisplayInProgress: boolean;
log: Array;
shouldShowLogViewer: boolean;
@@ -31,6 +33,7 @@ export interface SystemState extends InvokeAI.SystemStatus, InvokeAI.SystemConfi
totalIterations: number;
currentStatus: string;
currentStatusHasSteps: boolean;
+ shouldDisplayGuides: boolean;
}
const initialSystemState = {
@@ -39,6 +42,7 @@ const initialSystemState = {
log: [],
shouldShowLogViewer: false,
shouldDisplayInProgress: false,
+ shouldDisplayGuides: true,
isGFPGANAvailable: true,
isESRGANAvailable: true,
socketId: '',
@@ -48,7 +52,7 @@ const initialSystemState = {
totalSteps: 0,
currentIteration: 0,
totalIterations: 0,
- currentStatus: '',
+ currentStatus: 'Disconnected',
currentStatusHasSteps: false,
model: '',
model_id: '',
@@ -104,6 +108,12 @@ export const systemSlice = createSlice({
},
setIsConnected: (state, action: PayloadAction) => {
state.isConnected = action.payload;
+ state.isProcessing = false;
+ state.currentStep = 0;
+ state.totalSteps = 0;
+ state.currentIteration = 0;
+ state.totalIterations = 0;
+ state.currentStatusHasSteps = false;
},
setSocketId: (state, action: PayloadAction) => {
state.socketId = action.payload;
@@ -117,6 +127,9 @@ export const systemSlice = createSlice({
setSystemConfig: (state, action: PayloadAction) => {
return { ...state, ...action.payload };
},
+ setShouldDisplayGuides: (state, action: PayloadAction) => {
+ state.shouldDisplayGuides = action.payload;
+ },
},
});
@@ -132,6 +145,7 @@ export const {
setSystemStatus,
setCurrentStatus,
setSystemConfig,
+ setShouldDisplayGuides,
} = systemSlice.actions;
export default systemSlice.reducer;
diff --git a/ldm/dream/args.py b/ldm/dream/args.py
index 2cb42d82a7..62ad9ccf01 100644
--- a/ldm/dream/args.py
+++ b/ldm/dream/args.py
@@ -339,6 +339,12 @@ class Args(object):
action='store_true',
help='Deprecated way to set --precision=float32',
)
+ model_group.add_argument(
+ '--free_gpu_mem',
+ dest='free_gpu_mem',
+ action='store_true',
+ help='Force free gpu memory before final decoding',
+ )
model_group.add_argument(
'--precision',
dest='precision',
@@ -400,7 +406,7 @@ class Args(object):
postprocessing_group.add_argument(
'--gfpgan_model_path',
type=str,
- default='experiments/pretrained_models/GFPGANv1.3.pth',
+ default='experiments/pretrained_models/GFPGANv1.4.pth',
help='Indicates the path to the GFPGAN model, relative to --gfpgan_dir.',
)
postprocessing_group.add_argument(
@@ -588,7 +594,7 @@ class Args(object):
'--upscale',
nargs='+',
type=float,
- help='Scale factor (2, 4) for upscaling final output followed by upscaling strength (0-1.0). If strength not specified, defaults to 0.75',
+ help='Scale factor (1, 2, 3, 4, etc..) for upscaling final output followed by upscaling strength (0-1.0). If strength not specified, defaults to 0.75',
default=None,
)
postprocessing_group.add_argument(
diff --git a/ldm/dream/conditioning.py b/ldm/dream/conditioning.py
index ed2d4ef431..fedd965a2c 100644
--- a/ldm/dream/conditioning.py
+++ b/ldm/dream/conditioning.py
@@ -79,10 +79,10 @@ def split_weighted_subprompts(text, skip_normalize=False)->list:
if weight_sum == 0:
print(
"Warning: Subprompt weights add up to zero. Discarding and using even weights instead.")
- equal_weight = 1 / len(parsed_prompts)
+ equal_weight = 1 / max(len(parsed_prompts), 1)
return [(x[0], equal_weight) for x in parsed_prompts]
return [(x[0], x[1] / weight_sum) for x in parsed_prompts]
-
+
# shows how the prompt is tokenized
# usually tokens have '' to indicate end-of-word,
# but for readability it has been replaced with ' '
diff --git a/ldm/dream/generator/embiggen.py b/ldm/dream/generator/embiggen.py
index 88ac9373fb..c85bae868c 100644
--- a/ldm/dream/generator/embiggen.py
+++ b/ldm/dream/generator/embiggen.py
@@ -4,18 +4,42 @@ and generates with ldm.dream.generator.img2img
'''
import torch
-import numpy as np
-from PIL import Image
-from ldm.dream.generator.base import Generator
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.dream.generator.img2img import Img2Img
-
+import numpy as np
+from tqdm import trange
+from PIL import Image
+from ldm.dream.generator.base import Generator
+from ldm.dream.generator.img2img import Img2Img
+from ldm.dream.devices import choose_autocast
class Embiggen(Generator):
def __init__(self, model, precision):
super().__init__(model, precision)
self.init_latent = None
+ # Replace generate because Embiggen doesn't need/use most of what it does normallly
+ def generate(self,prompt,iterations=1,seed=None,
+ image_callback=None, step_callback=None,
+ **kwargs):
+ scope = choose_autocast(self.precision)
+ make_image = self.get_make_image(
+ prompt,
+ step_callback = step_callback,
+ **kwargs
+ )
+ results = []
+ seed = seed if seed else self.new_seed()
+
+ # Noise will be generated by the Img2Img generator when called
+ with scope(self.model.device.type), self.model.ema_scope():
+ for n in trange(iterations, desc='Generating'):
+ # make_image will call Img2Img which will do the equivalent of get_noise itself
+ image = make_image()
+ results.append([image, seed])
+ if image_callback is not None:
+ image_callback(image, seed)
+ seed = self.new_seed()
+ return results
+
@torch.no_grad()
def get_make_image(
self,
@@ -151,8 +175,19 @@ class Embiggen(Generator):
# Clamp values to max 255
if distanceToLR > 255:
distanceToLR = 255
- # Place the pixel as invert of distance
- agradientC.putpixel((x, y), int(255 - distanceToLR))
+ #Place the pixel as invert of distance
+ agradientC.putpixel((x, y), round(255 - distanceToLR))
+
+ # Create alternative asymmetric diagonal corner to use on "tailing" intersections to prevent hard edges
+ # Fits for a left-fading gradient on the bottom side and full opacity on the right side.
+ agradientAsymC = Image.new('L', (256, 256))
+ for y in range(256):
+ for x in range(256):
+ value = round(max(0, x-(255-y)) * (255 / max(1,y)))
+ #Clamp values
+ value = max(0, value)
+ value = min(255, value)
+ agradientAsymC.putpixel((x, y), value)
# Create alpha layers default fully white
alphaLayerL = Image.new("L", (width, height), 255)
@@ -163,8 +198,13 @@ class Embiggen(Generator):
alphaLayerT.paste(agradientT, (0, 0))
alphaLayerLTC.paste(agradientL, (0, 0))
alphaLayerLTC.paste(agradientT, (0, 0))
- alphaLayerLTC.paste(agradientC.resize(
- (overlap_size_x, overlap_size_y)), (0, 0))
+ alphaLayerLTC.paste(agradientC.resize((overlap_size_x, overlap_size_y)), (0, 0))
+ # make masks with an asymmetric upper-right corner so when the curved transparent corner of the next tile
+ # to its right is placed it doesn't reveal a hard trailing semi-transparent edge in the overlapping space
+ alphaLayerTaC = alphaLayerT.copy()
+ alphaLayerTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0))
+ alphaLayerLTaC = alphaLayerLTC.copy()
+ alphaLayerLTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0))
if embiggen_tiles:
# Individual unconnected sides
@@ -242,7 +282,7 @@ class Embiggen(Generator):
del agradientT
del agradientC
- def make_image(x_T):
+ def make_image():
# Make main tiles -------------------------------------------------
if embiggen_tiles:
print(f'>> Making {len(embiggen_tiles)} Embiggen tiles...')
@@ -251,7 +291,20 @@ class Embiggen(Generator):
f'>> Making {(emb_tiles_x * emb_tiles_y)} Embiggen tiles ({emb_tiles_x}x{emb_tiles_y})...')
emb_tile_store = []
+ # Although we could use the same seed for every tile for determinism, at higher strengths this may
+ # produce duplicated structures for each tile and make the tiling effect more obvious
+ # instead track and iterate a local seed we pass to Img2Img
+ seed = self.seed
+ seedintlimit = np.iinfo(np.uint32).max - 1 # only retreive this one from numpy
+
for tile in range(emb_tiles_x * emb_tiles_y):
+ # Don't iterate on first tile
+ if tile != 0:
+ if seed < seedintlimit:
+ seed += 1
+ else:
+ seed = 0
+
# Determine if this is a re-run and replace
if embiggen_tiles and not tile in embiggen_tiles:
continue
@@ -294,21 +347,20 @@ class Embiggen(Generator):
tile_results = gen_img2img.generate(
prompt,
- iterations=1,
- seed=self.seed,
- sampler=sampler,
- steps=steps,
- cfg_scale=cfg_scale,
- conditioning=conditioning,
- ddim_eta=ddim_eta,
- image_callback=None, # called only after the final image is generated
- step_callback=step_callback, # called after each intermediate image is generated
- width=width,
- height=height,
- init_img=init_img, # img2img doesn't need this, but it might in the future
- init_image=newinitimage, # notice that init_image is different from init_img
- mask_image=None,
- strength=strength,
+ iterations = 1,
+ seed = seed,
+ sampler = sampler,
+ steps = steps,
+ cfg_scale = cfg_scale,
+ conditioning = conditioning,
+ ddim_eta = ddim_eta,
+ image_callback = None, # called only after the final image is generated
+ step_callback = step_callback, # called after each intermediate image is generated
+ width = width,
+ height = height,
+ init_image = newinitimage, # notice that init_image is different from init_img
+ mask_image = None,
+ strength = strength,
)
emb_tile_store.append(tile_results[0][0])
@@ -381,24 +433,24 @@ class Embiggen(Generator):
# bottom of image
elif emb_row_i == emb_tiles_y - 1:
if emb_column_i == 0:
- if (tile+1) in embiggen_tiles: # Look-ahead right
- intileimage.putalpha(alphaLayerT)
+ if (tile+1) in embiggen_tiles: # Look-ahead right
+ intileimage.putalpha(alphaLayerTaC)
else:
intileimage.putalpha(alphaLayerRTC)
elif emb_column_i == emb_tiles_x - 1:
# No tiles to look ahead to
intileimage.putalpha(alphaLayerLTC)
else:
- if (tile+1) in embiggen_tiles: # Look-ahead right
- intileimage.putalpha(alphaLayerLTC)
+ if (tile+1) in embiggen_tiles: # Look-ahead right
+ intileimage.putalpha(alphaLayerLTaC)
else:
intileimage.putalpha(alphaLayerABB)
# vertical middle of image
else:
if emb_column_i == 0:
- if (tile+1) in embiggen_tiles: # Look-ahead right
- if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down
- intileimage.putalpha(alphaLayerT)
+ if (tile+1) in embiggen_tiles: # Look-ahead right
+ if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down
+ intileimage.putalpha(alphaLayerTaC)
else:
intileimage.putalpha(alphaLayerTB)
elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only
@@ -411,9 +463,9 @@ class Embiggen(Generator):
else:
intileimage.putalpha(alphaLayerABR)
else:
- if (tile+1) in embiggen_tiles: # Look-ahead right
- if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down
- intileimage.putalpha(alphaLayerLTC)
+ if (tile+1) in embiggen_tiles: # Look-ahead right
+ if (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down
+ intileimage.putalpha(alphaLayerLTaC)
else:
intileimage.putalpha(alphaLayerABR)
elif (tile+emb_tiles_x) in embiggen_tiles: # Look-ahead down only
@@ -425,9 +477,15 @@ class Embiggen(Generator):
if emb_row_i == 0 and emb_column_i >= 1:
intileimage.putalpha(alphaLayerL)
elif emb_row_i >= 1 and emb_column_i == 0:
- intileimage.putalpha(alphaLayerT)
+ if emb_column_i + 1 == emb_tiles_x: # If we don't have anything that can be placed to the right
+ intileimage.putalpha(alphaLayerT)
+ else:
+ intileimage.putalpha(alphaLayerTaC)
else:
- intileimage.putalpha(alphaLayerLTC)
+ if emb_column_i + 1 == emb_tiles_x: # If we don't have anything that can be placed to the right
+ intileimage.putalpha(alphaLayerLTC)
+ else:
+ intileimage.putalpha(alphaLayerLTaC)
# Layer tile onto final image
outputsuperimage.alpha_composite(intileimage, (left, top))
else:
diff --git a/ldm/dream/generator/inpaint.py b/ldm/dream/generator/inpaint.py
index 248be93bdf..da5411ad64 100644
--- a/ldm/dream/generator/inpaint.py
+++ b/ldm/dream/generator/inpaint.py
@@ -34,9 +34,9 @@ class Inpaint(Img2Img):
)
sampler = DDIMSampler(self.model, device=self.model.device)
- sampler.make_schedule(
- ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
- )
+ sampler.make_schedule(
+ ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
+ )
scope = choose_autocast(self.precision)
with scope(self.model.device.type):
diff --git a/ldm/dream/generator/txt2img.py b/ldm/dream/generator/txt2img.py
index 0c77705a1c..1ab15ba7cd 100644
--- a/ldm/dream/generator/txt2img.py
+++ b/ldm/dream/generator/txt2img.py
@@ -27,6 +27,10 @@ class Txt2Img(Generator):
height // self.downsampling_factor,
width // self.downsampling_factor,
]
+
+ if self.free_gpu_mem and self.model.model.device != self.model.device:
+ self.model.model.to(self.model.device)
+
samples, _ = sampler.sample(
batch_size = 1,
S = steps,
@@ -39,6 +43,10 @@ class Txt2Img(Generator):
eta = ddim_eta,
img_callback = step_callback
)
+
+ if self.free_gpu_mem:
+ self.model.model.to("cpu")
+
return self.sample_to_image(samples)
return make_image
diff --git a/ldm/dream/restoration/base.py b/ldm/dream/restoration/base.py
index 9037bc40cb..2605c4ac4b 100644
--- a/ldm/dream/restoration/base.py
+++ b/ldm/dream/restoration/base.py
@@ -1,34 +1,38 @@
class Restoration():
- def __init__(self, gfpgan_dir='./src/gfpgan', gfpgan_model_path='experiments/pretrained_models/GFPGANv1.3.pth', esrgan_bg_tile=400) -> None:
- self.gfpgan_dir = gfpgan_dir
- self.gfpgan_model_path = gfpgan_model_path
- self.esrgan_bg_tile = esrgan_bg_tile
+ def __init__(self) -> None:
+ pass
- def load_face_restore_models(self):
+ def load_face_restore_models(self, gfpgan_dir='./src/gfpgan', gfpgan_model_path='experiments/pretrained_models/GFPGANv1.4.pth'):
# Load GFPGAN
- gfpgan = self.load_gfpgan()
+ gfpgan = self.load_gfpgan(gfpgan_dir, gfpgan_model_path)
if gfpgan.gfpgan_model_exists:
print('>> GFPGAN Initialized')
+ else:
+ print('>> GFPGAN Disabled')
+ gfpgan = None
# Load CodeFormer
codeformer = self.load_codeformer()
if codeformer.codeformer_model_exists:
print('>> CodeFormer Initialized')
+ else:
+ print('>> CodeFormer Disabled')
+ codeformer = None
return gfpgan, codeformer
# Face Restore Models
- def load_gfpgan(self):
+ def load_gfpgan(self, gfpgan_dir, gfpgan_model_path):
from ldm.dream.restoration.gfpgan import GFPGAN
- return GFPGAN(self.gfpgan_dir, self.gfpgan_model_path)
+ return GFPGAN(gfpgan_dir, gfpgan_model_path)
def load_codeformer(self):
from ldm.dream.restoration.codeformer import CodeFormerRestoration
return CodeFormerRestoration()
# Upscale Models
- def load_esrgan(self):
+ def load_esrgan(self, esrgan_bg_tile=400):
from ldm.dream.restoration.realesrgan import ESRGAN
- esrgan = ESRGAN(self.esrgan_bg_tile)
+ esrgan = ESRGAN(esrgan_bg_tile)
print('>> ESRGAN Initialized')
return esrgan;
diff --git a/ldm/dream/restoration/gfpgan.py b/ldm/dream/restoration/gfpgan.py
index 643d1e9559..473d708961 100644
--- a/ldm/dream/restoration/gfpgan.py
+++ b/ldm/dream/restoration/gfpgan.py
@@ -11,14 +11,14 @@ class GFPGAN():
def __init__(
self,
gfpgan_dir='src/gfpgan',
- gfpgan_model_path='experiments/pretrained_models/GFPGANv1.3.pth') -> None:
+ gfpgan_model_path='experiments/pretrained_models/GFPGANv1.4.pth') -> None:
self.model_path = os.path.join(gfpgan_dir, gfpgan_model_path)
self.gfpgan_model_exists = os.path.isfile(self.model_path)
if not self.gfpgan_model_exists:
- raise Exception(
- 'GFPGAN model not found at path ' + self.model_path)
+ print('## NOT FOUND: GFPGAN model not found at ' + self.model_path)
+ return None
sys.path.append(os.path.abspath(gfpgan_dir))
def model_exists(self):
@@ -50,7 +50,7 @@ class GFPGAN():
f'>> WARNING: GFPGAN not initialized.'
)
print(
- f'>> Download https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth to {self.model_path}, \nor change GFPGAN directory with --gfpgan_dir.'
+ f'>> Download https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth to {self.model_path}, \nor change GFPGAN directory with --gfpgan_dir.'
)
image = image.convert('RGB')
diff --git a/ldm/dream/restoration/realesrgan.py b/ldm/dream/restoration/realesrgan.py
index 9823a2cbf4..dc3eebd912 100644
--- a/ldm/dream/restoration/realesrgan.py
+++ b/ldm/dream/restoration/realesrgan.py
@@ -14,73 +14,53 @@ class ESRGAN():
else:
use_half_precision = True
- def load_esrgan_bg_upsampler(self, upsampler_scale):
+ def load_esrgan_bg_upsampler(self):
if not torch.cuda.is_available(): # CPU or MPS on M1
use_half_precision = False
else:
use_half_precision = True
- model_path = {
- 2: 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
- 4: 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth',
- }
+ from realesrgan.archs.srvgg_arch import SRVGGNetCompact
+ from realesrgan import RealESRGANer
- if upsampler_scale not in model_path:
- return None
- else:
- from basicsr.archs.rrdbnet_arch import RRDBNet
- from realesrgan import RealESRGANer
+ model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
+ model_path = 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth'
+ scale = 4
- if upsampler_scale == 4:
- model = RRDBNet(
- num_in_ch=3,
- num_out_ch=3,
- num_feat=64,
- num_block=23,
- num_grow_ch=32,
- scale=4,
- )
- if upsampler_scale == 2:
- model = RRDBNet(
- num_in_ch=3,
- num_out_ch=3,
- num_feat=64,
- num_block=23,
- num_grow_ch=32,
- scale=2,
- )
-
- bg_upsampler = RealESRGANer(
- scale=upsampler_scale,
- model_path=model_path[upsampler_scale],
- model=model,
- tile=self.bg_tile_size,
- tile_pad=10,
- pre_pad=0,
- half=use_half_precision,
- )
+ bg_upsampler = RealESRGANer(
+ scale=scale,
+ model_path=model_path,
+ model=model,
+ tile=self.bg_tile_size,
+ tile_pad=10,
+ pre_pad=0,
+ half=use_half_precision,
+ )
return bg_upsampler
def process(self, image, strength: float, seed: str = None, upsampler_scale: int = 2):
- if seed is not None:
- print(
- f'>> Real-ESRGAN Upscaling seed:{seed} : scale:{upsampler_scale}x'
- )
-
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=UserWarning)
try:
- upsampler = self.load_esrgan_bg_upsampler(upsampler_scale)
+ upsampler = self.load_esrgan_bg_upsampler()
except Exception:
import traceback
import sys
-
print('>> Error loading Real-ESRGAN:', file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
+ if upsampler_scale == 0:
+ print('>> Real-ESRGAN: Invalid scaling option. Image not upscaled.')
+ return image
+
+ if seed is not None:
+ print(
+ f'>> Real-ESRGAN Upscaling seed:{seed} : scale:{upsampler_scale}x'
+ )
+
output, _ = upsampler.enhance(
np.array(image, dtype=np.uint8),
outscale=upsampler_scale,
diff --git a/ldm/dream/server.py b/ldm/dream/server.py
index 03114ac9d2..bff1117fd3 100644
--- a/ldm/dream/server.py
+++ b/ldm/dream/server.py
@@ -161,7 +161,7 @@ class DreamServer(BaseHTTPRequestHandler):
# is complete. The upscaling replaces the original file, so the second
# entry should not be inserted into the image list.
# LS: This repeats code in dream.py
- def image_done(image, seed, upscaled=False):
+ def image_done(image, seed, upscaled=False, first_seed=None):
name = f'{prefix}.{seed}.png'
iter_opt = copy.copy(opt)
if opt.variation_amount > 0:
diff --git a/ldm/generate.py b/ldm/generate.py
index 7f1953a80e..8456014ec2 100644
--- a/ldm/generate.py
+++ b/ldm/generate.py
@@ -497,11 +497,8 @@ class Generate:
prompt = None
try:
args = metadata_from_png(image_path)
- if len(args) > 1:
- print("* Can't postprocess a grid")
- return
- seed = args[0].seed
- prompt = args[0].prompt
+ seed = args.seed
+ prompt = args.prompt
print(f'>> retrieved seed {seed} and prompt "{prompt}" from {image_path}')
except:
m = re.search('(\d+)\.png$',image_path)
@@ -591,8 +588,8 @@ class Generate:
def _make_images(
self,
- img_path,
- mask_path,
+ img,
+ mask,
width,
height,
fit=False,
@@ -600,11 +597,11 @@ class Generate:
):
init_image = None
init_mask = None
- if not img_path:
+ if not img:
return None, None
image = self._load_img(
- img_path,
+ img,
width,
height,
fit=fit
@@ -614,7 +611,7 @@ class Generate:
init_image = self._create_init_image(image) # this returns a torch tensor
# if image has a transparent area and no mask was provided, then try to generate mask
- if self._has_transparency(image) and not mask_path:
+ if self._has_transparency(image) and not mask:
print(
'>> Initial image has transparent areas. Will inpaint in these regions.')
if self._check_for_erasure(image):
@@ -626,13 +623,19 @@ class Generate:
# this returns a torch tensor
init_mask = self._create_init_mask(image)
- if mask_path:
+ if mask:
mask_image = self._load_img(
- mask_path, width, height, fit=fit) # this returns an Image
+ mask, width, height, fit=fit) # this returns an Image
init_mask = self._create_init_mask(mask_image)
return init_image, init_mask
+ def _make_base(self):
+ if not self.generators.get('base'):
+ from ldm.dream.generator import Generator
+ self.generators['base'] = Generator(self.model, self.precision)
+ return self.generators['base']
+
def _make_img2img(self):
if not self.generators.get('img2img'):
from ldm.dream.generator.img2img import Img2Img
@@ -649,6 +652,7 @@ class Generate:
if not self.generators.get('txt2img'):
from ldm.dream.generator.txt2img import Txt2Img
self.generators['txt2img'] = Txt2Img(self.model, self.precision)
+ self.generators['txt2img'].free_gpu_mem = self.free_gpu_mem
return self.generators['txt2img']
def _make_inpaint(self):
@@ -717,6 +721,21 @@ class Generate:
for r in image_list:
image, seed = r
try:
+ if strength > 0:
+ if self.gfpgan is not None or self.codeformer is not None:
+ if facetool == 'gfpgan':
+ if self.gfpgan is None:
+ print('>> GFPGAN not found. Face restoration is disabled.')
+ else:
+ image = self.gfpgan.process(image, strength, seed)
+ if facetool == 'codeformer':
+ if self.codeformer is None:
+ print('>> CodeFormer not found. Face restoration is disabled.')
+ else:
+ cf_device = 'cpu' if str(self.device) == 'mps' else self.device
+ image = self.codeformer.process(image=image, strength=strength, device=cf_device, seed=seed, fidelity=codeformer_fidelity)
+ else:
+ print(">> Face Restoration is disabled.")
if upscale is not None:
if self.esrgan is not None:
if len(upscale) < 2:
@@ -725,14 +744,6 @@ class Generate:
image, upscale[1], seed, int(upscale[0]))
else:
print(">> ESRGAN is disabled. Image not upscaled.")
- if strength > 0:
- if self.gfpgan is not None and self.codeformer is not None:
- if facetool == 'codeformer':
- image = self.codeformer.process(image=image, strength=strength, device=self.device, seed=seed, fidelity=codeformer_fidelity)
- else:
- image = self.gfpgan.process(image, strength, seed)
- else:
- print(">> Face Restoration is disabled.")
except Exception as e:
print(
f'>> Error running RealESRGAN or GFPGAN. Your image was not upscaled.\n{e}'
@@ -745,13 +756,7 @@ class Generate:
# to help WebGUI - front end to generator util function
def sample_to_image(self, samples):
- return self._sample_to_image(samples)
-
- def _sample_to_image(self, samples):
- if not self.base_generator:
- from ldm.dream.generator import Generator
- self.base_generator = Generator(self.model)
- return self.base_generator.sample_to_image(samples)
+ return self._make_base().sample_to_image(samples)
def _set_sampler(self):
msg = f'>> Setting Sampler to {self.sampler_name}'
@@ -828,15 +833,24 @@ class Generate:
return model
- def _load_img(self, path, width, height, fit=False):
- assert os.path.exists(path), f'>> {path}: File not found'
+ def _load_img(self, img, width, height, fit=False):
+ if isinstance(img, Image.Image):
+ image = img
+ print(
+ f'>> using provided input image of size {image.width}x{image.height}'
+ )
+ elif isinstance(img, str):
+ assert os.path.exists(img), f'>> {img}: File not found'
- # with Image.open(path) as img:
- # image = img.convert('RGBA')
- image = Image.open(path)
- print(
- f'>> loaded input image of size {image.width}x{image.height} from {path}'
- )
+ image = Image.open(img)
+ print(
+ f'>> loaded input image of size {image.width}x{image.height} from {img}'
+ )
+ else:
+ image = Image.open(img)
+ print(
+ f'>> loaded input image of size {image.width}x{image.height}'
+ )
if fit:
image = self._fit_image(image, (width, height))
else:
@@ -922,7 +936,7 @@ class Generate:
# BUG: We need to use the model's downsample factor rather than hardcoding "8"
from ldm.dream.generator.base import downsampling
image = image.resize((image.width//downsampling, image.height //
- downsampling), resample=Image.Resampling.LANCZOS)
+ downsampling), resample=Image.Resampling.NEAREST)
# print(
# f'>> DEBUG: writing the mask to mask.png'
# )
diff --git a/notebooks/Stable-Diffusion-local-Windows.ipynb b/notebooks/Stable-Diffusion-local-Windows.ipynb
index 45495d6d40..4a2ede6788 100644
--- a/notebooks/Stable-Diffusion-local-Windows.ipynb
+++ b/notebooks/Stable-Diffusion-local-Windows.ipynb
@@ -13,7 +13,7 @@
"source": [
"Note that you will need NVIDIA drivers, Python 3.10, and Git installed\n",
"beforehand - simplified\n",
- "[step-by-step instructions](https://github.com/lstein/stable-diffusion/wiki/Easy-peasy-Windows-install)\n",
+ "[step-by-step instructions](https://github.com/invoke-ai/InvokeAI/wiki/Easy-peasy-Windows-install)\n",
"are available in the wiki (you'll only need steps 1, 2, & 3 )"
]
},
@@ -40,8 +40,8 @@
"outputs": [],
"source": [
"%%cmd\n",
- "git clone https://github.com/lstein/stable-diffusion.git\n",
- "cd /content/stable-diffusion/\n",
+ "git clone https://github.com/invoke-ai/InvokeAI.git\n",
+ "cd /content/InvokeAI/\n",
"git checkout --quiet development"
]
},
@@ -52,14 +52,14 @@
"outputs": [],
"source": [
"%%cmd\n",
- "pew new --python 3.10 -r requirements-lin-win-colab-CUDA.txt --dont-activate stable-diffusion"
+ "pew new --python 3.10 -r requirements-lin-win-colab-CUDA.txt --dont-activate invoke-ai"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Switch the notebook kernel to the new 'stable-diffusion' environment!\n",
+ "# Switch the notebook kernel to the new 'invoke-ai' environment!\n",
"\n",
"## VSCode: restart VSCode and come back to this cell\n",
"\n",
@@ -67,7 +67,7 @@
"1. Type \"Select Interpreter\" and select \"Jupyter: Select Interpreter to Start Jupyter Server\"\n",
"1. VSCode will say that it needs to install packages. Click the \"Install\" button.\n",
"1. Once the install is finished, do 1 & 2 again\n",
- "1. Pick 'stable-diffusion'\n",
+ "1. Pick 'invoke-ai'\n",
"1. Run the following cell"
]
},
@@ -77,7 +77,7 @@
"metadata": {},
"outputs": [],
"source": [
- "%cd stable-diffusion"
+ "%cd InvokeAI"
]
},
{
@@ -88,7 +88,7 @@
"## Jupyter/JupyterLab\n",
"\n",
"1. Run the cell below\n",
- "1. Click on the toolbar where it says \"(ipyknel)\" ↗️. You should get a pop-up asking you to \"Select Kernel\". Pick 'stable-diffusion' from the drop-down.\n"
+ "1. Click on the toolbar where it says \"(ipyknel)\" ↗️. You should get a pop-up asking you to \"Select Kernel\". Pick 'invoke-ai' from the drop-down.\n"
]
},
{
@@ -106,9 +106,9 @@
"source": [
"# DO NOT RUN THIS CELL IF YOU ARE USING VSCODE!!\n",
"%%cmd\n",
- "pew workon stable-diffusion\n",
+ "pew workon invoke-ai\n",
"pip3 install ipykernel\n",
- "python -m ipykernel install --name=stable-diffusion"
+ "python -m ipykernel install --name=invoke-ai"
]
},
{
@@ -182,15 +182,20 @@
"\n",
"Now:\n",
"\n",
- "1. `cd` to wherever the 'stable-diffusion' directory is\n",
- "1. Run `pew workon stable-diffusion`\n",
+ "1. `cd` to wherever the 'InvokeAI' directory is\n",
+ "1. Run `pew workon invoke-ai`\n",
"1. Run `winpty python scripts\\dream.py`"
]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": []
}
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3.10.6 ('ldm')",
+ "display_name": "Python 3.10.6 64-bit",
"language": "python",
"name": "python3"
},
@@ -208,7 +213,7 @@
},
"vscode": {
"interpreter": {
- "hash": "a05e4574567b7bc2c98f7f9aa579f9ea5b8739b54844ab610ac85881c4be2659"
+ "hash": "5e164cef426134bf171f386fbddecb52046b6c1479f922ab8dfdd30df05e0e80"
}
}
},
diff --git a/requirements.txt b/requirements.txt
index d0b739f82a..7323ad66bb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -22,7 +22,7 @@ taming-transformers-rom1504
test-tube
torch-fidelity
torchmetrics
-transformers
+transformers==4.21.*
flask==2.1.3
flask_socketio==5.3.0
flask_cors==3.0.10
diff --git a/scripts/dream.py b/scripts/dream.py
index d4d66503c9..cac8c2aee4 100755
--- a/scripts/dream.py
+++ b/scripts/dream.py
@@ -47,16 +47,19 @@ def main():
# Loading Face Restoration and ESRGAN Modules
try:
gfpgan, codeformer, esrgan = None, None, None
- from ldm.dream.restoration import Restoration
- restoration = Restoration(opt.gfpgan_dir, opt.gfpgan_model_path, opt.esrgan_bg_tile)
- if opt.restore:
- gfpgan, codeformer = restoration.load_face_restore_models()
+ if opt.restore or opt.esrgan:
+ from ldm.dream.restoration import Restoration
+ restoration = Restoration()
+ if opt.restore:
+ gfpgan, codeformer = restoration.load_face_restore_models(opt.gfpgan_dir, opt.gfpgan_model_path)
+ else:
+ print('>> Face restoration disabled')
+ if opt.esrgan:
+ esrgan = restoration.load_esrgan(opt.esrgan_bg_tile)
+ else:
+ print('>> Upscaling disabled')
else:
- print('>> Face restoration disabled')
- if opt.esrgan:
- esrgan = restoration.load_esrgan()
- else:
- print('>> Upscaling disabled')
+ print('>> Face restoration and upscaling disabled')
except (ModuleNotFoundError, ImportError):
import traceback
print(traceback.format_exc(), file=sys.stderr)
@@ -105,6 +108,8 @@ def main():
# preload the model
gen.load_model()
+ # set additional option
+ gen.free_gpu_mem = opt.free_gpu_mem
if not infile:
print(
@@ -170,9 +175,10 @@ def main_loop(gen, opt, infile):
if opt.init_img:
try:
- oldargs = metadata_from_png(opt.init_img)
- opt.prompt = oldargs.prompt
- print(f'>> Retrieved old prompt "{opt.prompt}" from {opt.init_img}')
+ if not opt.prompt:
+ oldargs = metadata_from_png(opt.init_img)
+ opt.prompt = oldargs.prompt
+ print(f'>> Retrieved old prompt "{opt.prompt}" from {opt.init_img}')
except AttributeError:
pass
except KeyError:
@@ -429,7 +435,7 @@ def dream_server_loop(gen, host, port, outdir, gfpgan):
f"Point your browser at http://localhost:{port} or use the host's DNS name or IP address.")
else:
print(">> Default host address now 127.0.0.1 (localhost). Use --host 0.0.0.0 to bind any address.")
- print(f">> Point your browser at http://{host}:{port}.")
+ print(f">> Point your browser at http://{host}:{port}")
try:
dream_server.serve_forever()
diff --git a/scripts/preload_models.py b/scripts/preload_models.py
index c681ab91ad..f1475d38ec 100644
--- a/scripts/preload_models.py
+++ b/scripts/preload_models.py
@@ -49,33 +49,13 @@ except ModuleNotFoundError:
if gfpgan:
print('Loading models from RealESRGAN and facexlib')
try:
- from basicsr.archs.rrdbnet_arch import RRDBNet
+ from realesrgan.archs.srvgg_arch import SRVGGNetCompact
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
- RealESRGANer(
- scale=2,
- model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
- model=RRDBNet(
- num_in_ch=3,
- num_out_ch=3,
- num_feat=64,
- num_block=23,
- num_grow_ch=32,
- scale=2,
- ),
- )
-
RealESRGANer(
scale=4,
- model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth',
- model=RRDBNet(
- num_in_ch=3,
- num_out_ch=3,
- num_feat=64,
- num_block=23,
- num_grow_ch=32,
- scale=4,
- ),
+ model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth',
+ model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
)
FaceRestoreHelper(1, det_model='retinaface_resnet50')
@@ -87,8 +67,8 @@ if gfpgan:
try:
import urllib.request
- model_url = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth'
- model_dest = 'src/gfpgan/experiments/pretrained_models/GFPGANv1.3.pth'
+ model_url = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth'
+ model_dest = 'src/gfpgan/experiments/pretrained_models/GFPGANv1.4.pth'
if not os.path.exists(model_dest):
print('downloading gfpgan model file...')