From 5fb9d17c0006af716585fd70619a0c1f4ad314c1 Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Thu, 9 May 2024 09:38:19 -0400
Subject: [PATCH] chatllm: use a better prompt for the generated chat name
 (#2322)

Signed-off-by: Jared Van Bortel
---
 gpt4all-chat/chatllm.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp
index a6af93e0..89505ef7 100644
--- a/gpt4all-chat/chatllm.cpp
+++ b/gpt4all-chat/chatllm.cpp
@@ -782,13 +782,13 @@ void ChatLLM::generateName()
     if (!isModelLoaded())
         return;
 
-    std::string instructPrompt("### Instruction:\n%1\n### Response:\n"); // standard Alpaca
+    auto promptTemplate = MySettings::globalInstance()->modelPromptTemplate(m_modelInfo);
     auto promptFunc = std::bind(&ChatLLM::handleNamePrompt, this, std::placeholders::_1);
     auto responseFunc = std::bind(&ChatLLM::handleNameResponse, this, std::placeholders::_1, std::placeholders::_2);
     auto recalcFunc = std::bind(&ChatLLM::handleNameRecalculate, this, std::placeholders::_1);
     LLModel::PromptContext ctx = m_ctx;
-    m_llModelInfo.model->prompt("Describe response above in three words.", instructPrompt, promptFunc, responseFunc,
-                                recalcFunc, ctx);
+    m_llModelInfo.model->prompt("Describe the above conversation in three words or less.",
+        promptTemplate.toStdString(), promptFunc, responseFunc, recalcFunc, ctx);
     std::string trimmed = trim_whitespace(m_nameResponse);
     if (trimmed != m_nameResponse) {
         m_nameResponse = trimmed;
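
Note on the change: the patch swaps a hardcoded Alpaca instruct template for the per-model prompt template from MySettings. Both are templates containing a %1 placeholder that is replaced with the prompt text before the model sees it. The standalone sketch below illustrates only that substitution step, under the assumption that the template's %1 placeholder is filled with the instruction; applyTemplate and the ChatML template string are hypothetical stand-ins for this illustration, not GPT4All's actual expansion code inside LLModel::prompt().

// Minimal sketch of %1-style prompt-template substitution (hypothetical helper).
#include <iostream>
#include <string>

// Fill the first "%1" placeholder in a template with the prompt text.
static std::string applyTemplate(const std::string &tmpl, const std::string &text)
{
    std::string result = tmpl;
    if (auto pos = result.find("%1"); pos != std::string::npos)
        result.replace(pos, 2, text);
    return result;
}

int main()
{
    const std::string namePrompt = "Describe the above conversation in three words or less.";

    // Before this patch: the Alpaca template was hardcoded for every model.
    const std::string alpaca = "### Instruction:\n%1\n### Response:\n";

    // After: the template comes from the model's settings, so a model tuned on
    // another format (ChatML shown here as an assumed example) gets the markers
    // it was trained on.
    const std::string chatml = "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n";

    std::cout << applyTemplate(alpaca, namePrompt) << "\n---\n"
              << applyTemplate(chatml, namePrompt) << "\n";
    return 0;
}

The practical effect is that the chat-naming request is now wrapped in whatever template the user configured for the model, rather than always receiving Alpaca-style "### Instruction:" markers regardless of what the model expects.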