chat: join ChatLLM threads without calling destructors (#2043)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
parent d8c842263f
commit a0bd96f75d

@@ -46,6 +46,7 @@ public:
     explicit Chat(QObject *parent = nullptr);
     explicit Chat(bool isServer, QObject *parent = nullptr);
     virtual ~Chat();
+    void destroy() { m_llmodel->destroy(); }
     void connectLLM();
     QString id() const { return m_id; }

@@ -192,13 +192,8 @@ public:
     int count() const { return m_chats.size(); }
-    void clearChats() {
-        m_newChat = nullptr;
-        m_serverChat = nullptr;
-        m_currentChat = nullptr;
-        for (auto * chat: m_chats) { delete chat; }
-        m_chats.clear();
-    }
+    // stop ChatLLM threads for clean shutdown
+    void destroyChats() { for (auto *chat: m_chats) { chat->destroy(); } }
     void removeChatFile(Chat *chat) const;
     Q_INVOKABLE void saveChats();
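This hunk is the heart of the change: the removed clearChats() deleted every Chat, so ~Chat and ~ChatLLM ran at shutdown, whereas destroyChats() only joins the worker threads and frees nothing. A minimal sketch of the two paths, using a simplified stand-in Chat rather than the real class:

// Simplified stand-in for the Chat/ChatLLM pair; illustration only.
#include <QObject>
#include <QThread>
#include <QList>

class Chat : public QObject {
public:
    explicit Chat(QObject *parent = nullptr) : QObject(parent) { m_thread.start(); }
    ~Chat() override { destroy(); }                       // destructor also joins
    void destroy() { m_thread.quit(); m_thread.wait(); }  // join, never free
private:
    QThread m_thread;                                     // stand-in for m_llmThread
};

// Removed path: delete runs the destructors during shutdown.
void clearChats(QList<Chat *> &chats) {
    for (auto *chat : chats) delete chat;
    chats.clear();
}

// New path: every worker thread is joined, but the Chat objects stay alive
// and are released later through Qt's normal parent/child ownership.
void destroyChats(const QList<Chat *> &chats) {
    for (auto *chat : chats) chat->destroy();
}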

@@ -95,6 +95,10 @@ ChatLLM::ChatLLM(Chat *parent, bool isServer)
 ChatLLM::~ChatLLM()
 {
+    destroy();
+}
+
+void ChatLLM::destroy() {
     m_stopGenerating = true;
     m_llmThread.quit();
     m_llmThread.wait();
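destroy() is the standard Qt join sequence: raise the stop flag so the generation loop returns, quit() the worker thread's event loop, then wait() until the thread has actually exited; since the destructor now just calls destroy(), running it twice is harmless. A self-contained sketch of the same pattern, with names loosely modeled on the members above rather than the real ChatLLM internals:

#include <QCoreApplication>
#include <QObject>
#include <QThread>
#include <atomic>

int main(int argc, char *argv[]) {
    QCoreApplication app(argc, argv);

    QThread llmThread;                   // runs an event loop, like m_llmThread
    QObject worker;                      // stand-in for the ChatLLM worker object
    worker.moveToThread(&llmThread);
    llmThread.start();

    std::atomic<bool> stopGenerating{false};
    // Queued onto llmThread, like a generation request.
    QMetaObject::invokeMethod(&worker, [&stopGenerating] {
        while (!stopGenerating.load())
            QThread::msleep(10);         // stand-in for the token loop
    });

    stopGenerating = true;               // 1. make the loop return
    llmThread.quit();                    // 2. end the thread's event loop
    llmThread.wait();                    // 3. join: block until the thread exits
    return 0;
}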

@@ -72,6 +72,7 @@ public:
     ChatLLM(Chat *parent, bool isServer = false);
     virtual ~ChatLLM();
+    void destroy();
     bool isModelLoaded() const;
     void regenerateResponse();
     void resetResponse();

@@ -67,7 +67,7 @@ int main(int argc, char *argv[])
     // Make sure ChatLLM threads are joined before global destructors run.
     // Otherwise, we can get a heap-use-after-free inside of llama.cpp.
-    ChatListModel::globalInstance()->clearChats();
+    ChatListModel::globalInstance()->destroyChats();
     return res;
 }
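The placement matters because static destructors run only after main() returns; a ChatLLM thread still inside llama.cpp at that point can read freed global state, which is the heap-use-after-free the comment describes. A stripped-down illustration of that failure mode, with a hypothetical Backend standing in for llama.cpp's global state:

#include <thread>
#include <vector>

struct Backend { std::vector<int> weights{1, 2, 3}; };
static Backend g_backend;                 // destroyed after main() returns

int main() {
    std::thread worker([] {
        for (int i = 0; i < 1000; ++i)
            (void)g_backend.weights[0];   // safe only while g_backend is alive
    });
    // Without this join, the thread could still be reading g_backend while
    // its destructor runs: a heap-use-after-free. Joining first removes it.
    worker.join();
    return 0;                             // ~Backend() runs after this line
}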
