Mirror of https://github.com/invoke-ai/InvokeAI
Synced 2024-08-30 20:32:17 +00:00

Commit 78ef946e01:
- Implement new model loader and modify invocations and embeddings.
- Finish implementing loaders for all models currently supported by InvokeAI.
- Move lora, textual_inversion, and model patching support into backend/embeddings.
- Restore support for model cache statistics collection (a little ugly, needs work).
- Fix up invocations that load and patch models.
- Move the seamless and silencewarnings utils into a better location.
16 lines · 544 B · Python
"""
|
|
Initialization file for the invokeai.backend.stable_diffusion package
|
|
"""
|
|
from .diffusers_pipeline import PipelineIntermediateState, StableDiffusionGeneratorPipeline # noqa: F401
|
|
from .diffusion import InvokeAIDiffuserComponent # noqa: F401
|
|
from .diffusion.cross_attention_map_saving import AttentionMapSaver # noqa: F401
|
|
from .seamless import set_seamless # noqa: F401
|
|
|
|
__all__ = [
|
|
"PipelineIntermediateState",
|
|
"StableDiffusionGeneratorPipeline",
|
|
"InvokeAIDiffuserComponent",
|
|
"AttentionMapSaver",
|
|
"set_seamless",
|
|
]
|
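
For context: this __init__.py re-exports the package's public symbols, and because __all__ is defined, a wildcard import from invokeai.backend.stable_diffusion exposes exactly the five names listed above. The sketch below is a hypothetical usage example, not code from the repository; it assumes set_seamless is a context manager taking a model and a list of axes (the commit only says the seamless util was moved here, so check seamless.py for the actual signature), and that the pipeline exposes its UNet as .unet as in diffusers pipelines.

# Hypothetical usage sketch (not part of the repository): how downstream code
# might consume the names re-exported by invokeai.backend.stable_diffusion.
from invokeai.backend.stable_diffusion import (
    StableDiffusionGeneratorPipeline,
    set_seamless,
)


def generate_tiling(pipeline: StableDiffusionGeneratorPipeline, **kwargs):
    # Assumption: set_seamless is a context manager that patches the model's
    # convolution padding for seamless tiling along the given axes and restores
    # the original behaviour on exit.
    with set_seamless(pipeline.unet, ["x", "y"]):
        return pipeline(**kwargs)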