Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
feat(ui): add main model trigger phrases

parent 2f0a653a7f
commit fdecb886b2
@@ -855,7 +855,8 @@
     "statusConverting": "Converting",
     "syncModels": "Sync Models",
     "syncModelsDesc": "If your models are out of sync with the backend, you can refresh them up using this option. This is generally handy in cases where you add models to the InvokeAI root folder or autoimport directory after the application has booted.",
-    "triggerPhrases": "Trigger Phrases",
+    "loraTriggerPhrases": "LoRA Trigger Phrases",
+    "mainModelTriggerPhrases": "Main Model Trigger Phrases",
     "typePhraseHere": "Type phrase here",
     "upcastAttention": "Upcast Attention",
     "uploadImage": "Upload Image",
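The hunk above (presumably the English locale file) renames the existing triggerPhrases key to loraTriggerPhrases and adds a new mainModelTriggerPhrases key, so call sites that looked up modelManager.triggerPhrases have to be updated, as the PromptTriggerSelect hunk further down does. A minimal sketch of how these keys resolve through i18next, assuming a standard setup and with the resource bundle abbreviated to only the touched keys:

import i18next from 'i18next';

// Hypothetical, abbreviated resource bundle containing only the keys
// touched by this commit; the real locale file has many more entries.
await i18next.init({
  lng: 'en',
  resources: {
    en: {
      translation: {
        modelManager: {
          loraTriggerPhrases: 'LoRA Trigger Phrases',
          mainModelTriggerPhrases: 'Main Model Trigger Phrases',
        },
      },
    },
  },
});

// These lookups are used as option-group labels in PromptTriggerSelect below.
console.log(i18next.t('modelManager.loraTriggerPhrases')); // "LoRA Trigger Phrases"
console.log(i18next.t('modelManager.mainModelTriggerPhrases')); // "Main Model Trigger Phrases"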
@@ -64,7 +64,7 @@ export const ModelView = () => {
           <DefaultSettings />
         </Box>
       )}
-      {data.type === 'lora' && (
+      {(data.type === 'main' || data.type === 'lora') && (
        <Box layerStyle="second" borderRadius="base" p={4}>
           <TriggerPhrases />
         </Box>
@@ -1,9 +1,11 @@
 import type { ChakraProps, ComboboxOnChange, ComboboxOption } from '@invoke-ai/ui-library';
 import { Combobox, FormControl } from '@invoke-ai/ui-library';
+import { skipToken } from '@reduxjs/toolkit/query';
 import { createMemoizedSelector } from 'app/store/createMemoizedSelector';
 import { useAppSelector } from 'app/store/storeHooks';
 import type { GroupBase } from 'chakra-react-select';
 import { selectLoraSlice } from 'features/lora/store/loraSlice';
+import { selectGenerationSlice } from 'features/parameters/store/generationSlice';
 import type { PromptTriggerSelectProps } from 'features/prompt/types';
 import { t } from 'i18next';
 import { flatten, map } from 'lodash-es';
@@ -13,18 +15,23 @@ import {
   loraModelsAdapterSelectors,
   textualInversionModelsAdapterSelectors,
   useGetLoRAModelsQuery,
+  useGetModelConfigQuery,
   useGetTextualInversionModelsQuery,
 } from 'services/api/endpoints/models';

 const noOptionsMessage = () => t('prompt.noMatchingTriggers');

 const selectLoRAs = createMemoizedSelector(selectLoraSlice, (loras) => loras.loras);
+const selectMainModel = createMemoizedSelector(selectGenerationSlice, (generation) => generation.model);

 export const PromptTriggerSelect = memo(({ onSelect, onClose }: PromptTriggerSelectProps) => {
   const { t } = useTranslation();

-  const currentBaseModel = useAppSelector((s) => s.generation.model?.base);
+  const mainModel = useAppSelector(selectMainModel);
   const addedLoRAs = useAppSelector(selectLoRAs);
+  const { data: mainModelConfig, isLoading: isLoadingMainModelConfig } = useGetModelConfigQuery(
+    mainModel?.key ?? skipToken
+  );
   const { data: loraModels, isLoading: isLoadingLoRAs } = useGetLoRAModelsQuery();
   const { data: tiModels, isLoading: isLoadingTIs } = useGetTextualInversionModelsQuery();

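The new query only fires once a main model is actually selected: passing RTK Query's skipToken as the argument skips the request entirely. A minimal sketch of that conditional-fetch pattern, with a stand-in hook declaration (the real hook and its return shape come from services/api/endpoints/models and may differ):

import { skipToken } from '@reduxjs/toolkit/query';

// Stand-in for the generated useGetModelConfigQuery hook; only the fields
// used in this sketch are modeled.
declare function useGetModelConfigQuery(
  arg: string | typeof skipToken
): { data?: { base: string; trigger_phrases?: string[] }; isLoading: boolean };

// While mainModel is undefined, skipToken prevents RTK Query from firing
// the request; data stays undefined until a model key is available.
function useMainModelTriggerPhrases(mainModel?: { key: string }): string[] {
  const { data: mainModelConfig } = useGetModelConfigQuery(mainModel?.key ?? skipToken);
  return mainModelConfig?.trigger_phrases ?? [];
}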
@@ -46,7 +53,7 @@ export const PromptTriggerSelect = memo(({ onSelect, onClose }: PromptTriggerSelectProps) => {
     if (tiModels) {
       const embeddingOptions = textualInversionModelsAdapterSelectors
         .selectAll(tiModels)
-        .filter((ti) => ti.base === currentBaseModel)
+        .filter((ti) => ti.base === mainModelConfig?.base)
         .map((model) => ({ label: model.name, value: `<${model.name}>` }));

       if (embeddingOptions.length > 0) {
@@ -71,18 +78,33 @@ export const PromptTriggerSelect = memo(({ onSelect, onClose }: PromptTriggerSelectProps) => {

       if (triggerPhraseOptions.length > 0) {
         _options.push({
-          label: t('modelManager.triggerPhrases'),
+          label: t('modelManager.loraTriggerPhrases'),
           options: flatten(triggerPhraseOptions),
         });
       }
     }

+    if (mainModelConfig && mainModelConfig.trigger_phrases?.length) {
+      _options.push({
+        label: t('modelManager.mainModelTriggerPhrases'),
+        options: mainModelConfig.trigger_phrases.map((triggerPhrase) => ({
+          label: triggerPhrase,
+          value: triggerPhrase,
+        })),
+      });
+    }
+
     return _options;
-  }, [tiModels, loraModels, t, currentBaseModel, addedLoRAs]);
+  }, [tiModels, loraModels, mainModelConfig, t, addedLoRAs]);

   return (
     <FormControl>
       <Combobox
-        placeholder={isLoadingLoRAs || isLoadingTIs ? t('common.loading') : t('prompt.addPromptTrigger')}
+        placeholder={
+          isLoadingLoRAs || isLoadingTIs || isLoadingMainModelConfig
+            ? t('common.loading')
+            : t('prompt.addPromptTrigger')
+        }
         defaultMenuIsOpen
         autoFocus
         value={null}
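To make the resulting data shape concrete: each main-model trigger phrase becomes one selectable option inside a new option group passed to the Combobox. A short sketch of that group, assuming a minimal ComboboxOption type (the real one is re-exported by @invoke-ai/ui-library) and a made-up model config:

import type { GroupBase } from 'chakra-react-select';

// Minimal stand-in for the ComboboxOption type from @invoke-ai/ui-library;
// only the fields used here are modeled.
type ComboboxOption = { label: string; value: string };

// Hypothetical model config with two example trigger phrases.
const mainModelConfig = { trigger_phrases: ['analog film', 'cinematic still'] };

// Each phrase maps to one option; the group label is the translated
// "Main Model Trigger Phrases" string added in this commit.
const mainModelGroup: GroupBase<ComboboxOption> = {
  label: 'Main Model Trigger Phrases',
  options: mainModelConfig.trigger_phrases.map((triggerPhrase) => ({
    label: triggerPhrase,
    value: triggerPhrase,
  })),
};

console.log(mainModelGroup.options); // [{ label: 'analog film', value: 'analog film' }, ...]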