diff --git a/mise.toml b/mise.toml
new file mode 100644
index 00000000000..126f680735f
--- /dev/null
+++ b/mise.toml
@@ -0,0 +1,2 @@
+[tools]
+node = "20"
diff --git a/packages/components/credentials/LmStudioApi.credential.ts b/packages/components/credentials/LmStudioApi.credential.ts
new file mode 100644
index 00000000000..42e10fe96d4
--- /dev/null
+++ b/packages/components/credentials/LmStudioApi.credential.ts
@@ -0,0 +1,24 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class LMStudioApi implements INodeCredential {
+    label: string
+    name: string
+    version: number
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'LM Studio API'
+        this.name = 'lmStudioApi'
+        this.version = 1.0
+        this.inputs = [
+            {
+                label: 'LM Studio Api Key',
+                name: 'lmStudioApiKey',
+                type: 'password',
+                placeholder: ''
+            }
+        ]
+    }
+}
+
+module.exports = { credClass: LMStudioApi }
diff --git a/packages/components/nodes/chatmodels/ChatLmStudio/ChatLmStudio.ts b/packages/components/nodes/chatmodels/ChatLmStudio/ChatLmStudio.ts
new file mode 100644
index 00000000000..d4a6f6d1354
--- /dev/null
+++ b/packages/components/nodes/chatmodels/ChatLmStudio/ChatLmStudio.ts
@@ -0,0 +1,149 @@
+import { ChatOpenAI as LangchainChatLmStudio, ChatOpenAIFields as ChatLmStudioFields } from '@langchain/openai'
+import { BaseCache } from '@langchain/core/caches'
+import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
+import { FlowiseChatLmStudio } from './FlowiseChatLmStudio'
+
+class ChatLmStudio_ChatModels implements INode {
+    label: string
+    name: string
+    version: number
+    type: string
+    icon: string
+    category: string
+    description: string
+    baseClasses: string[]
+    credential: INodeParams
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'Chat LMStudio'
+        this.name = 'chatLmStudio'
+        this.version = 3.0
+        this.type = 'ChatLmStudio'
+        this.icon = 'lmstudio.png'
+        this.category = 'Chat Models'
+        this.description = 'Use local LLMs using LmStudio'
+        this.baseClasses = [this.type, 'BaseChatModel', ...getBaseClasses(LangchainChatLmStudio)]
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            credentialNames: ['lmStudioApi'],
+            optional: true
+        }
+        this.inputs = [
+            {
+                label: 'Cache',
+                name: 'cache',
+                type: 'BaseCache',
+                optional: true
+            },
+            {
+                label: 'Base URL',
+                name: 'baseURL',
+                type: 'string',
+                placeholder: 'http://localhost:1234/v1'
+            },
+            {
+                label: 'Model Name',
+                name: 'modelName',
+                type: 'string',
+                placeholder: 'gpt4all-lora-quantized.bin'
+            },
+            {
+                label: 'Temperature',
+                name: 'temperature',
+                type: 'number',
+                step: 0.1,
+                default: 0.9,
+                optional: true
+            },
+            {
+                label: 'Allow Image Uploads',
+                name: 'allowImageUploads',
+                type: 'boolean',
+                description:
+                    'Allow image input. Refer to the docs for more details.',
+                default: false,
+                optional: true
+            },
+            {
+                label: 'Streaming',
+                name: 'streaming',
+                type: 'boolean',
+                default: true,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Max Tokens',
+                name: 'maxTokens',
+                type: 'number',
+                step: 1,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Top Probability',
+                name: 'topP',
+                type: 'number',
+                step: 0.1,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Timeout',
+                name: 'timeout',
+                type: 'number',
+                step: 1,
+                optional: true,
+                additionalParams: true
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const temperature = nodeData.inputs?.temperature as string
+        const modelName = nodeData.inputs?.modelName as string
+        const maxTokens = nodeData.inputs?.maxTokens as string
+        const topP = nodeData.inputs?.topP as string
+        const timeout = nodeData.inputs?.timeout as string
+        const baseURL = nodeData.inputs?.baseURL as string
+        const streaming = nodeData.inputs?.streaming as boolean
+        const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
+
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const lmStudioApiKey = getCredentialParam('lmStudioApiKey', credentialData, nodeData)
+
+        const cache = nodeData.inputs?.cache as BaseCache
+
+        const obj: ChatLmStudioFields = {
+            modelName,
+            streaming: streaming ?? true,
+            configuration: {
+                baseURL,
+                apiKey: lmStudioApiKey
+            }
+        }
+
+        if (temperature) obj.temperature = parseFloat(temperature)
+        if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
+        if (topP) obj.topP = parseFloat(topP)
+        if (timeout) obj.timeout = parseInt(timeout, 10)
+        if (cache) obj.cache = cache
+
+        const multiModalOption: IMultiModalOption = {
+            image: {
+                allowImageUploads: allowImageUploads ?? false
+            }
+        }
+
+        const model = new FlowiseChatLmStudio(nodeData.id, obj)
+        model.setMultiModalOption(multiModalOption)
+
+        return model
+    }
+}
+
+module.exports = { nodeClass: ChatLmStudio_ChatModels }
diff --git a/packages/components/nodes/chatmodels/ChatLmStudio/FlowiseChatLmStudio.ts b/packages/components/nodes/chatmodels/ChatLmStudio/FlowiseChatLmStudio.ts
new file mode 100644
index 00000000000..1b7126d6739
--- /dev/null
+++ b/packages/components/nodes/chatmodels/ChatLmStudio/FlowiseChatLmStudio.ts
@@ -0,0 +1,34 @@
+import { ChatOpenAI as LangchainChatLmStudio, ChatOpenAIFields as ChatLmStudioFields } from '@langchain/openai'
+import { IMultiModalOption, IVisionChatModal } from '../../../src'
+
+export class FlowiseChatLmStudio extends LangchainChatLmStudio implements IVisionChatModal {
+    configuredModel: string
+    configuredMaxToken?: number
+    multiModalOption: IMultiModalOption
+    builtInTools: Record<string, any>[] = []
+    id: string
+
+    constructor(id: string, fields?: ChatLmStudioFields) {
+        super(fields)
+        this.id = id
+        this.configuredModel = fields?.modelName ?? ''
+        this.configuredMaxToken = fields?.maxTokens
+    }
+
+    revertToOriginalModel(): void {
+        this.model = this.configuredModel
+        this.maxTokens = this.configuredMaxToken
+    }
+
+    setMultiModalOption(multiModalOption: IMultiModalOption): void {
+        this.multiModalOption = multiModalOption
+    }
+
+    setVisionModel(): void {
+        // pass
+    }
+
+    addBuiltInTools(builtInTool: Record<string, any>): void {
+        this.builtInTools.push(builtInTool)
+    }
+}
diff --git a/packages/components/nodes/chatmodels/ChatLmStudio/lmstudio.png b/packages/components/nodes/chatmodels/ChatLmStudio/lmstudio.png
new file mode 100644
index 00000000000..56d584053f2
Binary files /dev/null and b/packages/components/nodes/chatmodels/ChatLmStudio/lmstudio.png differ
diff --git a/packages/components/nodes/embeddings/LMStudioAIEmbedding/LmStudioEmbedding.ts b/packages/components/nodes/embeddings/LMStudioAIEmbedding/LmStudioEmbedding.ts
new file mode 100644
index 00000000000..a0719b6218d
--- /dev/null
+++ b/packages/components/nodes/embeddings/LMStudioAIEmbedding/LmStudioEmbedding.ts
@@ -0,0 +1,74 @@
+import {
+    ClientOptions,
+    OpenAIEmbeddings as LmStudioEmbeddings,
+    OpenAIEmbeddingsParams as LmStudioEmbeddingsParams
+} from '@langchain/openai'
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getCredentialData, getCredentialParam } from '../../../src/utils'
+
+class LmStudioEmbedding_Embeddings implements INode {
+    label: string
+    name: string
+    version: number
+    type: string
+    icon: string
+    category: string
+    description: string
+    baseClasses: string[]
+    credential: INodeParams
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'LMStudio Embeddings'
+        this.name = 'lmStudioEmbeddings'
+        this.version = 1.0
+        this.type = 'LmStudio Embeddings'
+        this.icon = 'lmstudio.png'
+        this.category = 'Embeddings'
+        this.description = 'Use local embeddings from LMStudio'
+        this.baseClasses = [this.type, 'Embeddings']
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            credentialNames: ['lmStudioApi'],
+            optional: true
+        }
+        this.inputs = [
+            {
+                label: 'Base URL',
+                name: 'baseURL',
+                type: 'string',
+                placeholder: 'http://localhost:1234/v1'
+            },
+            {
+                label: 'Model Name',
+                name: 'modelName',
+                type: 'string',
+                placeholder: 'text-embedding-ada-002'
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const modelName = nodeData.inputs?.modelName as string
+        const baseURL = nodeData.inputs?.baseURL as string
+
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const lmStudioApiKey = getCredentialParam('lmStudioApiKey', credentialData, nodeData)
+
+        const obj: Partial<LmStudioEmbeddingsParams> & { configuration?: ClientOptions } = {
+            modelName,
+            configuration: {
+                apiKey: lmStudioApiKey,
+                baseURL
+            }
+        }
+
+        const model = new LmStudioEmbeddings(obj)
+
+        return model
+    }
+}
+
+module.exports = { nodeClass: LmStudioEmbedding_Embeddings }
diff --git a/packages/components/nodes/embeddings/LMStudioAIEmbedding/lmstudio.png b/packages/components/nodes/embeddings/LMStudioAIEmbedding/lmstudio.png
new file mode 100644
index 00000000000..56d584053f2
Binary files /dev/null and b/packages/components/nodes/embeddings/LMStudioAIEmbedding/lmstudio.png differ
diff --git a/packages/components/nodes/llms/LmStudio/LmStudio.ts b/packages/components/nodes/llms/LmStudio/LmStudio.ts
new file mode 100644
index 00000000000..bb4c348bca7
--- /dev/null
+++ b/packages/components/nodes/llms/LmStudio/LmStudio.ts
@@ -0,0 +1,171 @@
+import { BaseCache } from '@langchain/core/caches'
+import { BaseLLMParams } from '@langchain/core/language_models/llms'
+import { ClientOptions, OpenAI as LmStudio, OpenAIInput as LmStudioInput } from '@langchain/openai'
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
+
+class LmStudio_LLMs implements INode {
+    label: string
+    name: string
+    version: number
+    type: string
+    icon: string
+    category: string
+    description: string
+    baseClasses: string[]
+    credential: INodeParams
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'LMStudio'
+        this.name = 'lmStudio'
+        this.version = 4.0
+        this.type = 'LmStudio'
+        this.icon = 'lmstudio.png'
+        this.category = 'LLMs'
+        this.description = 'Wrapper around LMStudio large language models'
+        this.baseClasses = [this.type, ...getBaseClasses(LmStudio)]
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            credentialNames: ['lmStudioApi'],
+            optional: true
+        }
+        this.inputs = [
+            {
+                label: 'Cache',
+                name: 'cache',
+                type: 'BaseCache',
+                optional: true
+            },
+            {
+                label: 'Base URL',
+                name: 'baseURL',
+                type: 'string',
+                placeholder: 'http://localhost:1234/v1'
+            },
+            {
+                label: 'Model Name',
+                name: 'modelName',
+                type: 'string',
+                placeholder: 'gpt4all-lora-quantized.bin'
+            },
+            {
+                label: 'Temperature',
+                name: 'temperature',
+                type: 'number',
+                step: 0.1,
+                default: 0.7,
+                optional: true
+            },
+            {
+                label: 'Max Tokens',
+                name: 'maxTokens',
+                type: 'number',
+                step: 1,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Top Probability',
+                name: 'topP',
+                type: 'number',
+                step: 0.1,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Best Of',
+                name: 'bestOf',
+                type: 'number',
+                step: 1,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Frequency Penalty',
+                name: 'frequencyPenalty',
+                type: 'number',
+                step: 0.1,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Presence Penalty',
+                name: 'presencePenalty',
+                type: 'number',
+                step: 0.1,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Batch Size',
+                name: 'batchSize',
+                type: 'number',
+                step: 1,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Timeout',
+                name: 'timeout',
+                type: 'number',
+                step: 1,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'BaseOptions',
+                name: 'baseOptions',
+                type: 'json',
+                optional: true,
+                additionalParams: true
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const temperature = nodeData.inputs?.temperature as string
+        const modelName = nodeData.inputs?.modelName as string
+        const maxTokens = nodeData.inputs?.maxTokens as string
+        const topP = nodeData.inputs?.topP as string
+        const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string
+        const presencePenalty = nodeData.inputs?.presencePenalty as string
+        const timeout = nodeData.inputs?.timeout as string
+        const batchSize = nodeData.inputs?.batchSize as string
+        const bestOf = nodeData.inputs?.bestOf as string
+        const streaming = nodeData.inputs?.streaming as boolean
+        const baseURL = nodeData.inputs?.baseURL as string
+
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const lmStudioApiKey = getCredentialParam('lmStudioApiKey', credentialData, nodeData)
+
+        const cache = nodeData.inputs?.cache as BaseCache
+
+        const obj: Partial<LmStudioInput> & BaseLLMParams & { configuration?: ClientOptions } = {
+            modelName,
+            streaming: streaming ?? true,
+            configuration: {
+                apiKey: lmStudioApiKey,
+                baseURL
+            }
+        }
+
+        if (temperature) obj.temperature = parseFloat(temperature)
+        if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
+        if (topP) obj.topP = parseFloat(topP)
+        if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
+        if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
+        if (timeout) obj.timeout = parseInt(timeout, 10)
+        if (batchSize) obj.batchSize = parseInt(batchSize, 10)
+        if (bestOf) obj.bestOf = parseInt(bestOf, 10)
+
+        if (cache) obj.cache = cache
+
+        const model = new LmStudio(obj)
+        return model
+    }
+}
+
+module.exports = { nodeClass: LmStudio_LLMs }
diff --git a/packages/components/nodes/llms/LmStudio/lmstudio.png b/packages/components/nodes/llms/LmStudio/lmstudio.png
new file mode 100644
index 00000000000..56d584053f2
Binary files /dev/null and b/packages/components/nodes/llms/LmStudio/lmstudio.png differ
diff --git a/packages/ui/src/views/chatmessage/ChatMessage.jsx b/packages/ui/src/views/chatmessage/ChatMessage.jsx
index d2f30d41fe3..ac954f15ff9 100644
--- a/packages/ui/src/views/chatmessage/ChatMessage.jsx
+++ b/packages/ui/src/views/chatmessage/ChatMessage.jsx
@@ -217,6 +217,8 @@ const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, previews, setP
 
     const [inputHistory] = useState(new ChatInputHistory(10))
     const inputRef = useRef(null)
+    const isKeyboardCursorAtStart = useRef(true)
+    const isKeyboardCursorAtEnd = useRef(true)
     const getChatmessageApi = useApi(chatmessageApi.getInternalChatmessageFromChatflow)
     const getAllExecutionsApi = useApi(executionsApi.getAllExecutions)
     const getIsChatflowStreamingApi = useApi(chatflowsApi.getIsChatflowStreaming)
@@ -978,16 +980,16 @@
 
         if (typeof selectedInput === 'string') {
             if (selectedInput !== undefined && selectedInput.trim() !== '') input = selectedInput
-
-            if (input.trim()) {
-                inputHistory.addToHistory(input)
-            }
         } else if (typeof selectedInput === 'object') {
             input = Object.entries(selectedInput)
                 .map(([key, value]) => `${key}: ${value}`)
                 .join('\n')
         }
 
+        if (input.trim()) {
+            inputHistory.addToHistory(input)
+        }
+
         setLoading(true)
         clearAgentflowNodeStatus()
 
@@ -1184,15 +1186,25 @@
             scrollToBottom()
         }, 100)
     }
+    // Allow cursor to navigate easily in multiline text (except at the end or start of the text)
+    const handleKeyboardCursorPosition = (e) => {
+        const targetValue = e.target.value ?? ''
+        const cursorPosition = e.target.selectionStart
+        isKeyboardCursorAtStart.current = cursorPosition === 0
+        isKeyboardCursorAtEnd.current = targetValue.length > 0 ? cursorPosition === targetValue.length : cursorPosition === 0
+    }
+
     // Prevent blank submissions and allow for multiline input
    const handleEnter = (e) => {
+        const isCursorPositionAtStart = isKeyboardCursorAtStart.current
+        const isCursorPositionAtEnd = isKeyboardCursorAtEnd.current
         // Check if IME composition is in progress
         const isIMEComposition = e.isComposing || e.keyCode === 229
-        if (e.key === 'ArrowUp' && !isIMEComposition) {
+        if (e.key === 'ArrowUp' && !isIMEComposition && isCursorPositionAtStart) {
             e.preventDefault()
             const previousInput = inputHistory.getPreviousInput(userInput)
             setUserInput(previousInput)
-        } else if (e.key === 'ArrowDown' && !isIMEComposition) {
+        } else if (e.key === 'ArrowDown' && !isIMEComposition && isCursorPositionAtEnd) {
             e.preventDefault()
             const nextInput = inputHistory.getNextInput()
             setUserInput(nextInput)
@@ -2969,6 +2981,7 @@
                     sx={{ width: '100%' }}
                     disabled={getInputDisabled()}
                     onKeyDown={handleEnter}
+                    onKeyUp={handleKeyboardCursorPosition}
                     id='userInput'
                     name='userInput'
                     placeholder={loading ? 'Waiting for response...' : 'Type your question...'}