Merge branch 'main' of https://github.com/nomic-ai/gpt4all into model-warnings

pull/2034/head
Jared Van Bortel 3 months ago
commit 9afb2e5e01

@ -113,7 +113,8 @@ void llmodel_prompt(llmodel_model model, const char *prompt,
llmodel_response_callback response_callback,
llmodel_recalculate_callback recalculate_callback,
llmodel_prompt_context *ctx,
bool special)
bool special,
const char *fake_reply)
{
LLModelWrapper *wrapper = reinterpret_cast<LLModelWrapper*>(model);
@ -141,8 +142,13 @@ void llmodel_prompt(llmodel_model model, const char *prompt,
wrapper->promptContext.repeat_last_n = ctx->repeat_last_n;
wrapper->promptContext.contextErase = ctx->context_erase;
std::string fake_reply_str;
if (fake_reply) { fake_reply_str = fake_reply; }
auto *fake_reply_p = fake_reply ? &fake_reply_str : nullptr;
// Call the C++ prompt method
wrapper->llModel->prompt(prompt, prompt_template, prompt_func, response_func, recalc_func, wrapper->promptContext, special);
wrapper->llModel->prompt(prompt, prompt_template, prompt_func, response_func, recalc_func, wrapper->promptContext,
special, fake_reply_p);
// Update the C context by giving access to the wrappers raw pointers to std::vector data
// which involves no copies

@ -169,6 +169,7 @@ uint64_t llmodel_restore_state_data(llmodel_model model, const uint8_t *src);
* @param response_callback A callback function for handling the generated response.
* @param recalculate_callback A callback function for handling recalculation requests.
* @param special True if special tokens in the prompt should be processed, false otherwise.
* @param fake_reply A string to insert into context as the model's reply, or NULL to generate one.
* @param ctx A pointer to the llmodel_prompt_context structure.
*/
void llmodel_prompt(llmodel_model model, const char *prompt,
@ -177,7 +178,8 @@ void llmodel_prompt(llmodel_model model, const char *prompt,
llmodel_response_callback response_callback,
llmodel_recalculate_callback recalculate_callback,
llmodel_prompt_context *ctx,
bool special);
bool special,
const char *fake_reply);
/**
* Generate an embedding using the model.

@ -100,6 +100,7 @@ llmodel.llmodel_prompt.argtypes = [
RecalculateCallback,
ctypes.POINTER(LLModelPromptContext),
ctypes.c_bool,
ctypes.c_char_p,
]
llmodel.llmodel_prompt.restype = None
@ -361,6 +362,7 @@ class LLModel:
RecalculateCallback(self._recalculate_callback),
self.context,
special,
ctypes.c_char_p(),
)

@ -158,9 +158,8 @@ class GPT4All:
if model_filename == m["filename"]:
config.update(m)
config["systemPrompt"] = config["systemPrompt"].strip()
config["promptTemplate"] = config["promptTemplate"].replace(
"%1", "{0}", 1
) # change to Python-style formatting
# change to Python-style formatting
config["promptTemplate"] = config["promptTemplate"].replace("%1", "{0}", 1).replace("%2", "{1}", 1)
break
# Validate download directory
@ -347,7 +346,7 @@ class GPT4All:
self.model.prompt_model(self.current_chat_session[0]["content"], "%1",
_pyllmodel.empty_response_callback,
n_batch=n_batch, n_predict=0, special=True)
prompt_template = self._current_prompt_template.format("%1")
prompt_template = self._current_prompt_template.format("%1", "%2")
else:
warnings.warn(
"_format_chat_prompt_template is deprecated. Please use a chat session with a prompt template.",

@ -18,7 +18,7 @@ endif()
set(APP_VERSION_MAJOR 2)
set(APP_VERSION_MINOR 7)
set(APP_VERSION_PATCH 2)
set(APP_VERSION_PATCH 3)
set(APP_VERSION "${APP_VERSION_MAJOR}.${APP_VERSION_MINOR}.${APP_VERSION_PATCH}")
# Include the binary directory for the generated header file

@ -46,6 +46,7 @@ public:
explicit Chat(QObject *parent = nullptr);
explicit Chat(bool isServer, QObject *parent = nullptr);
virtual ~Chat();
void destroy() { m_llmodel->destroy(); }
void connectLLM();
QString id() const { return m_id; }

@ -86,13 +86,33 @@ void ChatGPT::prompt(const std::string &prompt,
Q_UNUSED(promptCallback);
Q_UNUSED(recalculateCallback);
Q_UNUSED(special);
Q_UNUSED(fakeReply); // FIXME(cebtenzzre): I broke ChatGPT
if (!isModelLoaded()) {
std::cerr << "ChatGPT ERROR: prompt won't work with an unloaded model!\n";
return;
}
if (!promptCtx.n_past) { m_queuedPrompts.clear(); }
Q_ASSERT(promptCtx.n_past <= m_context.size());
m_context.resize(promptCtx.n_past);
// FIXME(cebtenzzre): We're assuming people don't try to use %2 with ChatGPT. What would that even mean?
m_queuedPrompts << QString::fromStdString(promptTemplate).arg(QString::fromStdString(prompt));
if (!promptCtx.n_predict && !fakeReply) {
return; // response explicitly suppressed, queue prompt for later
}
QString formattedPrompt = m_queuedPrompts.join("");
m_queuedPrompts.clear();
if (fakeReply) {
promptCtx.n_past += 1;
m_context.append(formattedPrompt);
m_context.append(QString::fromStdString(*fakeReply));
return;
}
// FIXME: We don't set the max_tokens on purpose because in order to do so safely without encountering
// an error we need to be able to count the tokens in our prompt. The only way to do this is to use
// the OpenAI tiktokken library or to implement our own tokenization function that matches precisely
@ -104,8 +124,9 @@ void ChatGPT::prompt(const std::string &prompt,
root.insert("temperature", promptCtx.temp);
root.insert("top_p", promptCtx.top_p);
// conversation history
QJsonArray messages;
for (int i = 0; i < m_context.count() && i < promptCtx.n_past; ++i) {
for (int i = 0; i < m_context.count(); ++i) {
QJsonObject message;
message.insert("role", i % 2 == 0 ? "assistant" : "user");
message.insert("content", m_context.at(i));
@ -114,7 +135,7 @@ void ChatGPT::prompt(const std::string &prompt,
QJsonObject promptObject;
promptObject.insert("role", "user");
promptObject.insert("content", QString::fromStdString(promptTemplate).arg(QString::fromStdString(prompt)));
promptObject.insert("content", formattedPrompt);
messages.append(promptObject);
root.insert("messages", messages);
@ -138,7 +159,7 @@ void ChatGPT::prompt(const std::string &prompt,
workerThread.wait();
promptCtx.n_past += 1;
m_context.append(QString::fromStdString(prompt));
m_context.append(formattedPrompt);
m_context.append(worker.currentResponse());
m_responseCallback = nullptr;

@ -3,11 +3,14 @@
#include <stdexcept>
#include <QObject>
#include <QNetworkAccessManager>
#include <QNetworkReply>
#include <QNetworkRequest>
#include <QNetworkAccessManager>
#include <QObject>
#include <QString>
#include <QStringList>
#include <QThread>
#include "../gpt4all-backend/llmodel.h"
class ChatGPT;
@ -126,6 +129,7 @@ private:
QString m_modelName;
QString m_apiKey;
QList<QString> m_context;
QStringList m_queuedPrompts;
};
#endif // CHATGPT_H

@ -192,13 +192,8 @@ public:
int count() const { return m_chats.size(); }
void clearChats() {
m_newChat = nullptr;
m_serverChat = nullptr;
m_currentChat = nullptr;
for (auto * chat: m_chats) { delete chat; }
m_chats.clear();
}
// stop ChatLLM threads for clean shutdown
void destroyChats() { for (auto *chat: m_chats) { chat->destroy(); } }
void removeChatFile(Chat *chat) const;
Q_INVOKABLE void saveChats();

@ -95,6 +95,10 @@ ChatLLM::ChatLLM(Chat *parent, bool isServer)
ChatLLM::~ChatLLM()
{
destroy();
}
void ChatLLM::destroy() {
m_stopGenerating = true;
m_llmThread.quit();
m_llmThread.wait();
@ -487,7 +491,7 @@ void ChatLLM::resetResponse()
void ChatLLM::resetContext()
{
regenerateResponse();
resetResponse();
m_processedSystemPrompt = false;
m_ctx = LLModel::PromptContext();
}

@ -72,6 +72,7 @@ public:
ChatLLM(Chat *parent, bool isServer = false);
virtual ~ChatLLM();
void destroy();
bool isModelLoaded() const;
void regenerateResponse();
void resetResponse();

@ -130,7 +130,7 @@ void Download::downloadModel(const QString &modelFile)
ModelList::globalInstance()->updateDataByFilename(modelFile, ModelList::DownloadingRole, true);
ModelInfo info = ModelList::globalInstance()->modelInfoByFilename(modelFile);
QString url = !info.url.isEmpty() ? info.url : "http://gpt4all.io/models/gguf/" + modelFile;
QString url = !info.url().isEmpty() ? info.url() : "http://gpt4all.io/models/gguf/" + modelFile;
Network::globalInstance()->sendDownloadStarted(modelFile);
QNetworkRequest request(url);
request.setAttribute(QNetworkRequest::User, modelFile);
@ -201,6 +201,8 @@ void Download::removeModel(const QString &modelFile)
QFile file(filePath);
if (file.exists()) {
const ModelInfo info = ModelList::globalInstance()->modelInfoByFilename(modelFile);
ModelList::globalInstance()->removeInstalled(info);
Network::globalInstance()->sendRemoveModel(modelFile);
file.remove();
}
@ -364,8 +366,8 @@ HashAndSaveFile::HashAndSaveFile()
m_hashAndSaveThread.start();
}
void HashAndSaveFile::hashAndSave(const QString &expectedHash, const QString &saveFilePath,
QFile *tempFile, QNetworkReply *modelReply)
void HashAndSaveFile::hashAndSave(const QString &expectedHash, QCryptographicHash::Algorithm a,
const QString &saveFilePath, QFile *tempFile, QNetworkReply *modelReply)
{
Q_ASSERT(!tempFile->isOpen());
QString modelFilename = modelReply->request().attribute(QNetworkRequest::User).toString();
@ -379,13 +381,16 @@ void HashAndSaveFile::hashAndSave(const QString &expectedHash, const QString &sa
return;
}
QCryptographicHash hash(QCryptographicHash::Md5);
QCryptographicHash hash(a);
while(!tempFile->atEnd())
hash.addData(tempFile->read(16384));
if (hash.result().toHex() != expectedHash) {
if (hash.result().toHex() != expectedHash.toLatin1()) {
tempFile->close();
const QString error
= QString("ERROR: Download error MD5SUM did not match: %1 != %2 for %3").arg(hash.result().toHex()).arg(expectedHash).arg(modelFilename);
= QString("ERROR: Download error hash did not match: %1 != %2 for %3")
.arg(hash.result().toHex())
.arg(expectedHash.toLatin1())
.arg(modelFilename);
qWarning() << error;
tempFile->remove();
emit hashAndSaveFinished(false, error, tempFile, modelReply);
@ -472,9 +477,12 @@ void Download::handleModelDownloadFinished()
// Notify that we are calculating hash
ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::CalcHashRole, true);
QByteArray md5sum = ModelList::globalInstance()->modelInfoByFilename(modelFilename).md5sum;
QByteArray hash = ModelList::globalInstance()->modelInfoByFilename(modelFilename).hash;
ModelInfo::HashAlgorithm hashAlgorithm = ModelList::globalInstance()->modelInfoByFilename(modelFilename).hashAlgorithm;
const QString saveFilePath = MySettings::globalInstance()->modelPath() + modelFilename;
emit requestHashAndSave(md5sum, saveFilePath, tempFile, modelReply);
emit requestHashAndSave(hash,
(hashAlgorithm == ModelInfo::Md5 ? QCryptographicHash::Md5 : QCryptographicHash::Sha256),
saveFilePath, tempFile, modelReply);
}
void Download::handleHashAndSaveFinished(bool success, const QString &error,
@ -489,10 +497,14 @@ void Download::handleHashAndSaveFinished(bool success, const QString &error,
tempFile->deleteLater();
ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::DownloadingRole, false);
if (!success)
if (!success) {
ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::DownloadErrorRole, error);
else
} else {
ModelInfo info = ModelList::globalInstance()->modelInfoByFilename(modelFilename);
if (info.isDiscovered())
ModelList::globalInstance()->updateDiscoveredInstalled(info);
ModelList::globalInstance()->updateDataByFilename(modelFilename, ModelList::DownloadErrorRole, QString());
}
}
void Download::handleReadyRead()

@ -28,7 +28,7 @@ public:
HashAndSaveFile();
public Q_SLOTS:
void hashAndSave(const QString &hash, const QString &saveFilePath,
void hashAndSave(const QString &hash, QCryptographicHash::Algorithm a, const QString &saveFilePath,
QFile *tempFile, QNetworkReply *modelReply);
Q_SIGNALS:
@ -72,7 +72,7 @@ private Q_SLOTS:
Q_SIGNALS:
void releaseInfoChanged();
void hasNewerReleaseChanged();
void requestHashAndSave(const QString &hash, const QString &saveFilePath,
void requestHashAndSave(const QString &hash, QCryptographicHash::Algorithm a, const QString &saveFilePath,
QFile *tempFile, QNetworkReply *modelReply);
private:

@ -42,8 +42,11 @@ LLM::LLM()
m_compatHardware = minimal;
QNetworkInformation::loadDefaultBackend();
connect(QNetworkInformation::instance(), &QNetworkInformation::reachabilityChanged,
this, &LLM::isNetworkOnlineChanged);
auto * netinfo = QNetworkInformation::instance();
if (netinfo) {
connect(netinfo, &QNetworkInformation::reachabilityChanged,
this, &LLM::isNetworkOnlineChanged);
}
}
bool LLM::hasSettingsAccess() const
@ -108,8 +111,6 @@ QString LLM::systemTotalRAMInGBString() const
bool LLM::isNetworkOnline() const
{
if (!QNetworkInformation::instance())
return false;
return QNetworkInformation::instance()->reachability() == QNetworkInformation::Reachability::Online;
auto * netinfo = QNetworkInformation::instance();
return !netinfo || netinfo->reachability() == QNetworkInformation::Reachability::Online;
}

@ -67,7 +67,7 @@ int main(int argc, char *argv[])
// Make sure ChatLLM threads are joined before global destructors run.
// Otherwise, we can get a heap-use-after-free inside of llama.cpp.
ChatListModel::globalInstance()->clearChats();
ChatListModel::globalInstance()->destroyChats();
return res;
}

@ -1,6 +1,22 @@
[
{
"order": "a",
"md5sum": "a5f6b4eabd3992da4d7fb7f020f921eb",
"name": "Nous Hermes 2 Mistral DPO",
"filename": "Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",
"filesize": "4108928000",
"requires": "2.7.1",
"ramrequired": "8",
"parameters": "7 billion",
"quant": "q4_0",
"type": "Mistral",
"description": "<strong>Best overall fast chat model</strong><br><ul><li>Fast responses</li><li>Chat based model</li><li>Accepts system prompts in ChatML format</li><li>Trained by Mistral AI<li>Finetuned by Nous Research on the OpenHermes-2.5 dataset<li>Licensed for commercial use</ul>",
"url": "https://huggingface.co/NousResearch/Nous-Hermes-2-Mistral-7B-DPO-GGUF/resolve/main/Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",
"promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n",
"systemPrompt": ""
},
{
"order": "b",
"md5sum": "f692417a22405d80573ac10cb0cd6c6a",
"name": "Mistral OpenOrca",
"filename": "mistral-7b-openorca.gguf2.Q4_0.gguf",
@ -10,13 +26,13 @@
"parameters": "7 billion",
"quant": "q4_0",
"type": "Mistral",
"description": "<strong>Best overall fast chat model</strong><br><ul><li>Fast responses</li><li>Chat based model</li><li>Trained by Mistral AI<li>Finetuned on OpenOrca dataset curated via <a href=\"https://atlas.nomic.ai/\">Nomic Atlas</a><li>Licensed for commercial use</ul>",
"description": "<strong>Strong overall fast chat model</strong><br><ul><li>Fast responses</li><li>Chat based model</li><li>Trained by Mistral AI<li>Finetuned on OpenOrca dataset curated via <a href=\"https://atlas.nomic.ai/\">Nomic Atlas</a><li>Licensed for commercial use</ul>",
"url": "https://gpt4all.io/models/gguf/mistral-7b-openorca.gguf2.Q4_0.gguf",
"promptTemplate": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n%2<|im_end|>\n",
"systemPrompt": "<|im_start|>system\nYou are MistralOrca, a large language model trained by Alignment Lab AI. For multi-step problems, write out your reasoning for each step.\n<|im_end|>"
},
{
"order": "b",
"order": "c",
"md5sum": "97463be739b50525df56d33b26b00852",
"name": "Mistral Instruct",
"filename": "mistral-7b-instruct-v0.1.Q4_0.gguf",
@ -27,12 +43,12 @@
"quant": "q4_0",
"type": "Mistral",
"systemPrompt": " ",
"description": "<strong>Best overall fast instruction following model</strong><br><ul><li>Fast responses</li><li>Trained by Mistral AI<li>Uncensored</li><li>Licensed for commercial use</li></ul>",
"description": "<strong>Strong overall fast instruction following model</strong><br><ul><li>Fast responses</li><li>Trained by Mistral AI<li>Uncensored</li><li>Licensed for commercial use</li></ul>",
"url": "https://gpt4all.io/models/gguf/mistral-7b-instruct-v0.1.Q4_0.gguf",
"promptTemplate": "[INST] %1 [/INST]"
},
{
"order": "c",
"order": "d",
"md5sum": "c4c78adf744d6a20f05c8751e3961b84",
"name": "GPT4All Falcon",
"filename": "gpt4all-falcon-newbpe-q4_0.gguf",
@ -89,7 +105,7 @@
"quant": "q4_0",
"type": "LLaMA2",
"systemPrompt": " ",
"description": "<strong>Best overall larger model</strong><br><ul><li>Instruction based<li>Gives very long responses<li>Finetuned with only 1k of high-quality data<li>Trained by Microsoft and Peking University<li>Cannot be used commercially</ul>",
"description": "<strong>Strong overall larger model</strong><br><ul><li>Instruction based<li>Gives very long responses<li>Finetuned with only 1k of high-quality data<li>Trained by Microsoft and Peking University<li>Cannot be used commercially</ul>",
"url": "https://gpt4all.io/models/gguf/wizardlm-13b-v1.2.Q4_0.gguf"
},
{

@ -705,6 +705,25 @@
* Jared Van Bortel (Nomic AI)
* Adam Treat (Nomic AI)
* Community (beta testers, bug reporters, bindings authors)
"
},
{
"version": "2.7.2",
"notes":
"
* New support for model search/discovery using huggingface search in downloads
* Support for more model architectures for GPU acceleration
* Three different crash fixes for corner case settings
* Add a minp sampling parameter
* Fix Bert layer norm epsilon value
* Fix problem with blank lines between reply and next prompt
",
"contributors":
"
* Christopher Barrera
* Jared Van Bortel (Nomic AI)
* Adam Treat (Nomic AI)
* Community (beta testers, bug reporters, bindings authors)
"
}
]

File diff suppressed because it is too large Load Diff

@ -11,16 +11,17 @@ struct ModelInfo {
Q_PROPERTY(QString filename READ filename WRITE setFilename)
Q_PROPERTY(QString dirpath MEMBER dirpath)
Q_PROPERTY(QString filesize MEMBER filesize)
Q_PROPERTY(QByteArray md5sum MEMBER md5sum)
Q_PROPERTY(QByteArray hash MEMBER hash)
Q_PROPERTY(HashAlgorithm hashAlgorithm MEMBER hashAlgorithm)
Q_PROPERTY(bool calcHash MEMBER calcHash)
Q_PROPERTY(bool installed MEMBER installed)
Q_PROPERTY(bool isDefault MEMBER isDefault)
Q_PROPERTY(bool disableGUI MEMBER disableGUI)
Q_PROPERTY(bool isOnline MEMBER isOnline)
Q_PROPERTY(QString description MEMBER description)
Q_PROPERTY(QString description READ description WRITE setDescription)
Q_PROPERTY(QString requiresVersion MEMBER requiresVersion)
Q_PROPERTY(QString deprecatedVersion MEMBER deprecatedVersion)
Q_PROPERTY(QString url MEMBER url)
Q_PROPERTY(QString versionRemoved MEMBER versionRemoved)
Q_PROPERTY(QString url READ url WRITE setUrl)
Q_PROPERTY(qint64 bytesReceived MEMBER bytesReceived)
Q_PROPERTY(qint64 bytesTotal MEMBER bytesTotal)
Q_PROPERTY(qint64 timestamp MEMBER timestamp)
@ -31,9 +32,10 @@ struct ModelInfo {
Q_PROPERTY(QString order MEMBER order)
Q_PROPERTY(int ramrequired MEMBER ramrequired)
Q_PROPERTY(QString parameters MEMBER parameters)
Q_PROPERTY(QString quant MEMBER quant)
Q_PROPERTY(QString type MEMBER type)
Q_PROPERTY(bool isClone MEMBER isClone)
Q_PROPERTY(QString quant READ quant WRITE setQuant)
Q_PROPERTY(QString type READ type WRITE setType)
Q_PROPERTY(bool isClone READ isClone WRITE setIsClone)
Q_PROPERTY(bool isDiscovered READ isDiscovered WRITE setIsDiscovered)
Q_PROPERTY(double temperature READ temperature WRITE setTemperature)
Q_PROPERTY(double topP READ topP WRITE setTopP)
Q_PROPERTY(double minP READ minP WRITE setMinP)
@ -48,8 +50,16 @@ struct ModelInfo {
Q_PROPERTY(int repeatPenaltyTokens READ repeatPenaltyTokens WRITE setRepeatPenaltyTokens)
Q_PROPERTY(QString promptTemplate READ promptTemplate WRITE setPromptTemplate)
Q_PROPERTY(QString systemPrompt READ systemPrompt WRITE setSystemPrompt)
Q_PROPERTY(int likes READ likes WRITE setLikes)
Q_PROPERTY(int downloads READ downloads WRITE setDownloads)
Q_PROPERTY(QDateTime recency READ recency WRITE setRecency)
public:
enum HashAlgorithm {
Md5,
Sha256
};
QString id() const;
void setId(const QString &id);
@ -59,18 +69,44 @@ public:
QString filename() const;
void setFilename(const QString &name);
QString description() const;
void setDescription(const QString &d);
QString url() const;
void setUrl(const QString &u);
QString quant() const;
void setQuant(const QString &q);
QString type() const;
void setType(const QString &t);
bool isClone() const;
void setIsClone(bool b);
bool isDiscovered() const;
void setIsDiscovered(bool b);
int likes() const;
void setLikes(int l);
int downloads() const;
void setDownloads(int d);
QDateTime recency() const;
void setRecency(const QDateTime &r);
QString dirpath;
QString filesize;
QByteArray md5sum;
QByteArray hash;
HashAlgorithm hashAlgorithm;
bool calcHash = false;
bool installed = false;
bool isDefault = false;
bool isOnline = false;
bool disableGUI = false;
QString description;
QString requiresVersion;
QString deprecatedVersion;
QString url;
QString versionRemoved;
qint64 bytesReceived = 0;
qint64 bytesTotal = 0;
qint64 timestamp = 0;
@ -79,11 +115,8 @@ public:
bool isIncomplete = false;
QString downloadError;
QString order;
int ramrequired = 0;
int ramrequired = -1;
QString parameters;
QString quant;
QString type;
bool isClone = false;
bool operator==(const ModelInfo &other) const {
return m_id == other.m_id;
@ -116,10 +149,21 @@ public:
QString systemPrompt() const;
void setSystemPrompt(const QString &p);
bool shouldSaveMetadata() const;
private:
QString m_id;
QString m_name;
QString m_filename;
QString m_description;
QString m_url;
QString m_quant;
QString m_type;
bool m_isClone = false;
bool m_isDiscovered = false;
int m_likes = -1;
int m_downloads = -1;
QDateTime m_recency;
double m_temperature = 0.7;
double m_topP = 0.4;
double m_minP = 0.0;
@ -183,6 +227,8 @@ public:
bool isExpanded() const;
void setExpanded(bool expanded);
Q_INVOKABLE void discoverAndFilter(const QString &discover);
Q_SIGNALS:
void countChanged();
@ -195,6 +241,7 @@ Q_SIGNALS:
private:
bool m_expanded;
int m_limit;
QString m_discoverFilter;
};
class ModelList : public QAbstractListModel
@ -207,17 +254,30 @@ class ModelList : public QAbstractListModel
Q_PROPERTY(DownloadableModels* downloadableModels READ downloadableModels NOTIFY downloadableModelsChanged)
Q_PROPERTY(QList<QString> userDefaultModelList READ userDefaultModelList NOTIFY userDefaultModelListChanged)
Q_PROPERTY(bool asyncModelRequestOngoing READ asyncModelRequestOngoing NOTIFY asyncModelRequestOngoingChanged)
Q_PROPERTY(int discoverLimit READ discoverLimit WRITE setDiscoverLimit NOTIFY discoverLimitChanged)
Q_PROPERTY(int discoverSortDirection READ discoverSortDirection WRITE setDiscoverSortDirection NOTIFY discoverSortDirectionChanged)
Q_PROPERTY(DiscoverSort discoverSort READ discoverSort WRITE setDiscoverSort NOTIFY discoverSortChanged)
Q_PROPERTY(float discoverProgress READ discoverProgress NOTIFY discoverProgressChanged)
Q_PROPERTY(bool discoverInProgress READ discoverInProgress NOTIFY discoverInProgressChanged)
public:
static ModelList *globalInstance();
enum DiscoverSort {
Default,
Likes,
Downloads,
Recent
};
enum Roles {
IdRole = Qt::UserRole + 1,
NameRole,
FilenameRole,
DirpathRole,
FilesizeRole,
Md5sumRole,
HashRole,
HashAlgorithmRole,
CalcHashRole,
InstalledRole,
DefaultRole,
@ -225,7 +285,7 @@ public:
DisableGUIRole,
DescriptionRole,
RequiresVersionRole,
DeprecatedVersionRole,
VersionRemovedRole,
UrlRole,
BytesReceivedRole,
BytesTotalRole,
@ -240,6 +300,7 @@ public:
QuantRole,
TypeRole,
IsCloneRole,
IsDiscoveredRole,
TemperatureRole,
TopPRole,
TopKRole,
@ -252,6 +313,9 @@ public:
PromptTemplateRole,
SystemPromptRole,
MinPRole,
LikesRole,
DownloadsRole,
RecencyRole
};
QHash<int, QByteArray> roleNames() const override
@ -262,7 +326,8 @@ public:
roles[FilenameRole] = "filename";
roles[DirpathRole] = "dirpath";
roles[FilesizeRole] = "filesize";
roles[Md5sumRole] = "md5sum";
roles[HashRole] = "hash";
roles[HashAlgorithmRole] = "hashAlgorithm";
roles[CalcHashRole] = "calcHash";
roles[InstalledRole] = "installed";
roles[DefaultRole] = "isDefault";
@ -270,7 +335,7 @@ public:
roles[DisableGUIRole] = "disableGUI";
roles[DescriptionRole] = "description";
roles[RequiresVersionRole] = "requiresVersion";
roles[DeprecatedVersionRole] = "deprecatedVersion";
roles[VersionRemovedRole] = "versionRemoved";
roles[UrlRole] = "url";
roles[BytesReceivedRole] = "bytesReceived";
roles[BytesTotalRole] = "bytesTotal";
@ -285,6 +350,7 @@ public:
roles[QuantRole] = "quant";
roles[TypeRole] = "type";
roles[IsCloneRole] = "isClone";
roles[IsDiscoveredRole] = "isDiscovered";
roles[TemperatureRole] = "temperature";
roles[TopPRole] = "topP";
roles[MinPRole] = "minP";
@ -297,6 +363,9 @@ public:
roles[RepeatPenaltyTokensRole] = "repeatPenaltyTokens";
roles[PromptTemplateRole] = "promptTemplate";
roles[SystemPromptRole] = "systemPrompt";
roles[LikesRole] = "likes";
roles[DownloadsRole] = "downloads";
roles[RecencyRole] = "recency";
return roles;
}
@ -306,6 +375,7 @@ public:
QVariant dataByFilename(const QString &filename, int role) const;
void updateData(const QString &id, int role, const QVariant &value);
void updateDataByFilename(const QString &filename, int role, const QVariant &value);
void updateData(const QString &id, const QVector<QPair<int, QVariant>> &data);
int count() const { return m_models.size(); }
@ -315,7 +385,8 @@ public:
Q_INVOKABLE ModelInfo modelInfoByFilename(const QString &filename) const;
Q_INVOKABLE bool isUniqueName(const QString &name) const;
Q_INVOKABLE QString clone(const ModelInfo &model);
Q_INVOKABLE void remove(const ModelInfo &model);
Q_INVOKABLE void removeClone(const ModelInfo &model);
Q_INVOKABLE void removeInstalled(const ModelInfo &model);
ModelInfo defaultModelInfo() const;
int defaultEmbeddingModelIndex() const;
@ -345,6 +416,21 @@ public:
bool asyncModelRequestOngoing() const { return m_asyncModelRequestOngoing; }
void updateModelsFromDirectory();
void updateDiscoveredInstalled(const ModelInfo &info);
int discoverLimit() const;
void setDiscoverLimit(int limit);
int discoverSortDirection() const;
void setDiscoverSortDirection(int direction); // -1 or 1
DiscoverSort discoverSort() const;
void setDiscoverSort(DiscoverSort sort);
float discoverProgress() const;
bool discoverInProgress() const;
Q_INVOKABLE void discoverSearch(const QString &discover);
Q_SIGNALS:
void countChanged();
@ -354,22 +440,35 @@ Q_SIGNALS:
void userDefaultModelListChanged();
void asyncModelRequestOngoingChanged();
void defaultEmbeddingModelIndexChanged();
void discoverLimitChanged();
void discoverSortDirectionChanged();
void discoverSortChanged();
void discoverProgressChanged();
void discoverInProgressChanged();
private Q_SLOTS:
void resortModel();
void updateModelsFromJson();
void updateModelsFromJsonAsync();
void updateModelsFromSettings();
void updateDataForSettings();
void handleModelsJsonDownloadFinished();
void handleModelsJsonDownloadErrorOccurred(QNetworkReply::NetworkError code);
void handleDiscoveryFinished();
void handleDiscoveryErrorOccurred(QNetworkReply::NetworkError code);
void handleDiscoveryItemFinished();
void handleDiscoveryItemErrorOccurred(QNetworkReply::NetworkError code);
void handleSslErrors(QNetworkReply *reply, const QList<QSslError> &errors);
private:
void removeInternal(const ModelInfo &model);
void clearDiscoveredModels();
QString modelDirPath(const QString &modelName, bool isOnline);
int indexForModel(ModelInfo *model);
QVariant dataInternal(const ModelInfo *info, int role) const;
static bool lessThan(const ModelInfo* a, const ModelInfo* b);
static bool lessThan(const ModelInfo* a, const ModelInfo* b, DiscoverSort s, int d);
void parseModelsJsonFile(const QByteArray &jsonData, bool save);
void parseDiscoveryJsonFile(const QByteArray &jsonData);
QString uniqueModelName(const ModelInfo &model) const;
private:
@ -381,8 +480,14 @@ private:
QList<ModelInfo*> m_models;
QHash<QString, ModelInfo*> m_modelMap;
bool m_asyncModelRequestOngoing;
int m_discoverLimit;
int m_discoverSortDirection;
DiscoverSort m_discoverSort;
int m_discoverNumberOfResults;
int m_discoverResultsCompleted;
bool m_discoverInProgress;
private:
protected:
explicit ModelList();
~ModelList() {}
friend class MyModelList;

@ -140,12 +140,10 @@ void MySettings::setModelName(const ModelInfo &m, const QString &name, bool forc
return;
QSettings setting;
if ((m.m_name == name || m.m_filename == name) && !m.isClone)
if ((m.m_name == name || m.m_filename == name) && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/name");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/name", name);
if (m.isClone)
setting.setValue(QString("model-%1").arg(m.id()) + "/isClone", "true");
setting.sync();
if (!force)
emit nameChanged(m);
@ -164,7 +162,7 @@ void MySettings::setModelFilename(const ModelInfo &m, const QString &filename, b
return;
QSettings setting;
if (m.m_filename == filename && !m.isClone)
if (m.m_filename == filename && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/filename");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/filename", filename);
@ -173,6 +171,186 @@ void MySettings::setModelFilename(const ModelInfo &m, const QString &filename, b
emit filenameChanged(m);
}
/// Returns the per-model "description" override stored in QSettings,
/// falling back to the default baked into the ModelInfo.
QString MySettings::modelDescription(const ModelInfo &m) const
{
    QSettings settings;
    settings.sync(); // pick up writes made through other QSettings instances
    const QString key = QString("model-%1").arg(m.id()) + "/description";
    return settings.value(key, m.m_description).toString();
}
void MySettings::setModelDescription(const ModelInfo &m, const QString &d, bool force)
{
if ((modelDescription(m) == d || m.id().isEmpty()) && !force)
return;
QSettings setting;
if (m.m_description == d && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/description");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/description", d);
setting.sync();
}
/// Returns the per-model "url" override stored in QSettings,
/// falling back to the default baked into the ModelInfo.
QString MySettings::modelUrl(const ModelInfo &m) const
{
    QSettings settings;
    settings.sync(); // pick up writes made through other QSettings instances
    const QString key = QString("model-%1").arg(m.id()) + "/url";
    return settings.value(key, m.m_url).toString();
}
void MySettings::setModelUrl(const ModelInfo &m, const QString &u, bool force)
{
if ((modelUrl(m) == u || m.id().isEmpty()) && !force)
return;
QSettings setting;
if (m.m_url == u && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/url");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/url", u);
setting.sync();
}
/// Returns the per-model "quant" override stored in QSettings,
/// falling back to the default baked into the ModelInfo.
QString MySettings::modelQuant(const ModelInfo &m) const
{
    QSettings settings;
    settings.sync(); // pick up writes made through other QSettings instances
    const QString key = QString("model-%1").arg(m.id()) + "/quant";
    return settings.value(key, m.m_quant).toString();
}
void MySettings::setModelQuant(const ModelInfo &m, const QString &q, bool force)
{
if ((modelUrl(m) == q || m.id().isEmpty()) && !force)
return;
QSettings setting;
if (m.m_quant == q && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/quant");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/quant", q);
setting.sync();
}
/// Returns the per-model "type" override stored in QSettings,
/// falling back to the default baked into the ModelInfo.
QString MySettings::modelType(const ModelInfo &m) const
{
    QSettings settings;
    settings.sync(); // pick up writes made through other QSettings instances
    const QString key = QString("model-%1").arg(m.id()) + "/type";
    return settings.value(key, m.m_type).toString();
}
void MySettings::setModelType(const ModelInfo &m, const QString &t, bool force)
{
if ((modelType(m) == t || m.id().isEmpty()) && !force)
return;
QSettings setting;
if (m.m_type == t && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/type");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/type", t);
setting.sync();
}
bool MySettings::modelIsClone(const ModelInfo &m) const
{
QSettings setting;
setting.sync();
return setting.value(QString("model-%1").arg(m.id()) + "/isClone", m.m_isClone).toBool();
}
void MySettings::setModelIsClone(const ModelInfo &m, bool b, bool force)
{
if ((modelIsClone(m) == b || m.id().isEmpty()) && !force)
return;
QSettings setting;
if (m.m_isClone == b && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/isClone");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/isClone", b);
setting.sync();
}
bool MySettings::modelIsDiscovered(const ModelInfo &m) const
{
QSettings setting;
setting.sync();
return setting.value(QString("model-%1").arg(m.id()) + "/isDiscovered", m.m_isDiscovered).toBool();
}
void MySettings::setModelIsDiscovered(const ModelInfo &m, bool b, bool force)
{
if ((modelIsDiscovered(m) == b || m.id().isEmpty()) && !force)
return;
QSettings setting;
if (m.m_isDiscovered == b && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/isDiscovered");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/isDiscovered", b);
setting.sync();
}
int MySettings::modelLikes(const ModelInfo &m) const
{
QSettings setting;
setting.sync();
return setting.value(QString("model-%1").arg(m.id()) + "/likes", m.m_likes).toInt();
}
void MySettings::setModelLikes(const ModelInfo &m, int l, bool force)
{
if ((modelLikes(m) == l || m.id().isEmpty()) && !force)
return;
QSettings setting;
if (m.m_likes == l && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/likes");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/likes", l);
setting.sync();
}
int MySettings::modelDownloads(const ModelInfo &m) const
{
QSettings setting;
setting.sync();
return setting.value(QString("model-%1").arg(m.id()) + "/downloads", m.m_downloads).toInt();
}
void MySettings::setModelDownloads(const ModelInfo &m, int d, bool force)
{
if ((modelDownloads(m) == d || m.id().isEmpty()) && !force)
return;
QSettings setting;
if (m.m_downloads == d && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/downloads");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/downloads", d);
setting.sync();
}
QDateTime MySettings::modelRecency(const ModelInfo &m) const
{
    // Per-model override lives under the "model-<id>/recency" settings key;
    // fall back to the timestamp carried by the ModelInfo itself.
    const QString key = QString("model-%1/recency").arg(m.id());
    QSettings setting;
    setting.sync();
    return setting.value(key, m.m_recency).toDateTime();
}
void MySettings::setModelRecency(const ModelInfo &m, const QDateTime &r, bool force)
{
if ((modelRecency(m) == r || m.id().isEmpty()) && !force)
return;
QSettings setting;
if (m.m_recency == r && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/recency");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/recency", r);
setting.sync();
}
double MySettings::modelTemperature(const ModelInfo &m) const
{
QSettings setting;
@ -186,7 +364,7 @@ void MySettings::setModelTemperature(const ModelInfo &m, double t, bool force)
return;
QSettings setting;
if (m.m_temperature == t && !m.isClone)
if (m.m_temperature == t && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/temperature");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/temperature", t);
@ -215,7 +393,7 @@ void MySettings::setModelTopP(const ModelInfo &m, double p, bool force)
return;
QSettings setting;
if (m.m_topP == p && !m.isClone)
if (m.m_topP == p && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/topP");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/topP", p);
@ -230,7 +408,7 @@ void MySettings::setModelMinP(const ModelInfo &m, double p, bool force)
return;
QSettings setting;
if (m.m_minP == p && !m.isClone)
if (m.m_minP == p && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/minP");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/minP", p);
@ -252,7 +430,7 @@ void MySettings::setModelTopK(const ModelInfo &m, int k, bool force)
return;
QSettings setting;
if (m.m_topK == k && !m.isClone)
if (m.m_topK == k && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/topK");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/topK", k);
@ -274,7 +452,7 @@ void MySettings::setModelMaxLength(const ModelInfo &m, int l, bool force)
return;
QSettings setting;
if (m.m_maxLength == l && !m.isClone)
if (m.m_maxLength == l && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/maxLength");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/maxLength", l);
@ -296,7 +474,7 @@ void MySettings::setModelPromptBatchSize(const ModelInfo &m, int s, bool force)
return;
QSettings setting;
if (m.m_promptBatchSize == s && !m.isClone)
if (m.m_promptBatchSize == s && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/promptBatchSize");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/promptBatchSize", s);
@ -318,7 +496,7 @@ void MySettings::setModelContextLength(const ModelInfo &m, int l, bool force)
return;
QSettings setting;
if (m.m_contextLength == l && !m.isClone)
if (m.m_contextLength == l && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/contextLength");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/contextLength", l);
@ -340,7 +518,7 @@ void MySettings::setModelGpuLayers(const ModelInfo &m, int l, bool force)
return;
QSettings setting;
if (m.m_gpuLayers == l && !m.isClone)
if (m.m_gpuLayers == l && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/gpuLayers");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/gpuLayers", l);
@ -362,7 +540,7 @@ void MySettings::setModelRepeatPenalty(const ModelInfo &m, double p, bool force)
return;
QSettings setting;
if (m.m_repeatPenalty == p && !m.isClone)
if (m.m_repeatPenalty == p && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/repeatPenalty");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/repeatPenalty", p);
@ -384,7 +562,7 @@ void MySettings::setModelRepeatPenaltyTokens(const ModelInfo &m, int t, bool for
return;
QSettings setting;
if (m.m_repeatPenaltyTokens == t && !m.isClone)
if (m.m_repeatPenaltyTokens == t && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/repeatPenaltyTokens");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/repeatPenaltyTokens", t);
@ -406,7 +584,7 @@ void MySettings::setModelPromptTemplate(const ModelInfo &m, const QString &t, bo
return;
QSettings setting;
if (m.m_promptTemplate == t && !m.isClone)
if (m.m_promptTemplate == t && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/promptTemplate");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/promptTemplate", t);
@ -428,7 +606,7 @@ void MySettings::setModelSystemPrompt(const ModelInfo &m, const QString &p, bool
return;
QSettings setting;
if (m.m_systemPrompt == p && !m.isClone)
if (m.m_systemPrompt == p && !m.shouldSaveMetadata())
setting.remove(QString("model-%1").arg(m.id()) + "/systemPrompt");
else
setting.setValue(QString("model-%1").arg(m.id()) + "/systemPrompt", p);

@ -43,6 +43,26 @@ public:
Q_INVOKABLE void setModelName(const ModelInfo &m, const QString &name, bool force = false);
QString modelFilename(const ModelInfo &m) const;
Q_INVOKABLE void setModelFilename(const ModelInfo &m, const QString &filename, bool force = false);
QString modelDescription(const ModelInfo &m) const;
void setModelDescription(const ModelInfo &m, const QString &d, bool force = false);
QString modelUrl(const ModelInfo &m) const;
void setModelUrl(const ModelInfo &m, const QString &u, bool force = false);
QString modelQuant(const ModelInfo &m) const;
void setModelQuant(const ModelInfo &m, const QString &q, bool force = false);
QString modelType(const ModelInfo &m) const;
void setModelType(const ModelInfo &m, const QString &t, bool force = false);
bool modelIsClone(const ModelInfo &m) const;
void setModelIsClone(const ModelInfo &m, bool b, bool force = false);
bool modelIsDiscovered(const ModelInfo &m) const;
void setModelIsDiscovered(const ModelInfo &m, bool b, bool force = false);
int modelLikes(const ModelInfo &m) const;
void setModelLikes(const ModelInfo &m, int l, bool force = false);
int modelDownloads(const ModelInfo &m) const;
void setModelDownloads(const ModelInfo &m, int d, bool force = false);
QDateTime modelRecency(const ModelInfo &m) const;
void setModelRecency(const ModelInfo &m, const QDateTime &r, bool force = false);
double modelTemperature(const ModelInfo &m) const;
Q_INVOKABLE void setModelTemperature(const ModelInfo &m, double t, bool force = false);
double modelTopP(const ModelInfo &m) const;
@ -112,7 +132,6 @@ public:
bool networkUsageStatsActive() const;
void setNetworkUsageStatsActive(bool b);
QVector<QString> deviceList() const;
void setDeviceList(const QVector<QString> &deviceList);

@ -41,17 +41,222 @@ MyDialog {
Label {
id: listLabel
text: qsTr("Available Models")
visible: false
Layout.alignment: Qt.AlignLeft
text: qsTr("Discover and Download Models")
visible: true
Layout.fillWidth: true
horizontalAlignment: Qt.AlignHCenter
verticalAlignment: Qt.AlignVCenter
color: theme.titleTextColor
font.pixelSize: theme.fontSizeLarge
font.pixelSize: theme.fontSizeLargest
font.bold: true
}
Item {
height: 0 // for visible space between close button and rest of dialog
RowLayout {
Layout.fillWidth: true
Layout.alignment: Qt.AlignCenter
Layout.margins: 0
spacing: 10
MyTextField {
id: discoverField
property string textBeingSearched: ""
readOnly: ModelList.discoverInProgress
Layout.alignment: Qt.AlignCenter
Layout.preferredWidth: 720
Layout.preferredHeight: 90
font.pixelSize: theme.fontSizeLarger
placeholderText: qsTr("Discover and download models by keyword search...")
Accessible.role: Accessible.EditableText
Accessible.name: placeholderText
Accessible.description: qsTr("Text field for discovering and filtering downloadable models")
Connections {
target: ModelList
function onDiscoverInProgressChanged() {
if (ModelList.discoverInProgress) {
discoverField.textBeingSearched = discoverField.text;
discoverField.text = qsTr("Searching \u00B7 ") + discoverField.textBeingSearched;
} else {
discoverField.text = discoverField.textBeingSearched;
discoverField.textBeingSearched = "";
}
}
}
background: ProgressBar {
id: discoverProgressBar
indeterminate: ModelList.discoverInProgress && ModelList.discoverProgress === 0.0
value: ModelList.discoverProgress
background: Rectangle {
color: theme.controlBackground
radius: 10
}
contentItem: Item {
Rectangle {
visible: ModelList.discoverInProgress
anchors.bottom: parent.bottom
width: discoverProgressBar.visualPosition * parent.width
height: 10
radius: 2
color: theme.progressForeground
}
}
}
Keys.onReturnPressed: (event)=> {
if (event.modifiers & Qt.ControlModifier || event.modifiers & Qt.ShiftModifier)
event.accepted = false;
else {
editingFinished();
sendDiscovery()
}
}
function sendDiscovery() {
ModelList.downloadableModels.discoverAndFilter(discoverField.text);
}
RowLayout {
spacing: 0
anchors.right: discoverField.right
anchors.verticalCenter: discoverField.verticalCenter
anchors.rightMargin: 15
visible: !ModelList.discoverInProgress
MyMiniButton {
id: clearDiscoverButton
backgroundColor: theme.textColor
backgroundColorHovered: theme.iconBackgroundDark
visible: discoverField.text !== ""
contentItem: Text {
color: clearDiscoverButton.hovered ? theme.iconBackgroundDark : theme.textColor
text: "\u2715"
font.pixelSize: theme.fontSizeLarge
}
onClicked: {
discoverField.text = ""
discoverField.sendDiscovery() // should clear results
}
}
MyMiniButton {
backgroundColor: theme.textColor
backgroundColorHovered: theme.iconBackgroundDark
source: "qrc:/gpt4all/icons/settings.svg"
onClicked: {
discoveryTools.visible = !discoveryTools.visible
}
}
MyMiniButton {
id: sendButton
enabled: !ModelList.discoverInProgress
backgroundColor: theme.textColor
backgroundColorHovered: theme.iconBackgroundDark
source: "qrc:/gpt4all/icons/send_message.svg"
Accessible.name: qsTr("Initiate model discovery and filtering")
Accessible.description: qsTr("Triggers discovery and filtering of models")
onClicked: {
discoverField.sendDiscovery()
}
}
}
}
}
// Row of advanced discovery controls: sort key, sort direction, and result
// limit. Hidden by default (visible: false); presumably toggled by the
// settings mini-button next to the discovery field — TODO confirm toggle site.
RowLayout {
id: discoveryTools
Layout.fillWidth: true
Layout.alignment: Qt.AlignCenter
Layout.margins: 0
spacing: 20
visible: false
// Sort-key selector; index is forwarded verbatim to ModelList.discoverSort.
MyComboBox {
id: comboSort
model: [qsTr("Default"), qsTr("Likes"), qsTr("Downloads"), qsTr("Recent")]
currentIndex: ModelList.discoverSort
contentItem: Text {
anchors.horizontalCenter: parent.horizontalCenter
rightPadding: 30
color: theme.textColor
text: {
return qsTr("Sort by: ") + comboSort.displayText
}
font.pixelSize: theme.fontSizeLarger
verticalAlignment: Text.AlignVCenter
horizontalAlignment: Text.AlignHCenter
elide: Text.ElideRight
}
onActivated: function (index) {
ModelList.discoverSort = index;
}
}
// Sort-direction selector: ModelList.discoverSortDirection uses
// 1 = ascending, -1 = descending; map to combo indices 0/1 and back.
MyComboBox {
id: comboSortDirection
model: [qsTr("Asc"), qsTr("Desc")]
currentIndex: {
if (ModelList.discoverSortDirection === 1)
return 0
else
return 1;
}
contentItem: Text {
anchors.horizontalCenter: parent.horizontalCenter
rightPadding: 30
color: theme.textColor
text: {
return qsTr("Sort dir: ") + comboSortDirection.displayText
}
font.pixelSize: theme.fontSizeLarger
verticalAlignment: Text.AlignVCenter
horizontalAlignment: Text.AlignHCenter
elide: Text.ElideRight
}
onActivated: function (index) {
if (index === 0)
ModelList.discoverSortDirection = 1;
else
ModelList.discoverSortDirection = -1;
}
}
// Result-limit selector: -1 in ModelList.discoverLimit means "no limit".
// NOTE(review): currentIndex has no fallback branch — a discoverLimit value
// outside {5,10,20,50,100,-1} yields undefined; confirm that cannot occur.
MyComboBox {
id: comboLimit
model: ["5", "10", "20", "50", "100", qsTr("None")]
currentIndex: {
if (ModelList.discoverLimit === 5)
return 0;
else if (ModelList.discoverLimit === 10)
return 1;
else if (ModelList.discoverLimit === 20)
return 2;
else if (ModelList.discoverLimit === 50)
return 3;
else if (ModelList.discoverLimit === 100)
return 4;
else if (ModelList.discoverLimit === -1)
return 5;
}
contentItem: Text {
anchors.horizontalCenter: parent.horizontalCenter
rightPadding: 30
color: theme.textColor
text: {
return qsTr("Limit: ") + comboLimit.displayText
}
font.pixelSize: theme.fontSizeLarger
verticalAlignment: Text.AlignVCenter
horizontalAlignment: Text.AlignHCenter
elide: Text.ElideRight
}
onActivated: function (index) {
// Inverse of the currentIndex mapping above (index 5 -> unlimited).
switch (index) {
case 0:
ModelList.discoverLimit = 5; break;
case 1:
ModelList.discoverLimit = 10; break;
case 2:
ModelList.discoverLimit = 20; break;
case 3:
ModelList.discoverLimit = 50; break;
case 4:
ModelList.discoverLimit = 100; break;
case 5:
ModelList.discoverLimit = -1; break;
}
}
}
}
Label {
@ -60,7 +265,7 @@ MyDialog {
Layout.fillHeight: true
horizontalAlignment: Qt.AlignHCenter
verticalAlignment: Qt.AlignVCenter
text: qsTr("Network error: could not retrieve http://gpt4all.io/models/models2.json")
text: qsTr("Network error: could not retrieve http://gpt4all.io/models/models3.json")
font.pixelSize: theme.fontSizeLarge
color: theme.mutedTextColor
}
@ -213,15 +418,11 @@ MyDialog {
Layout.leftMargin: 20
textFormat: Text.StyledText
text: "<strong><font size=\"1\">"
+ (qsTr("Download size: ") + filesize)
+ "<br>"
+ (qsTr("RAM required: ") + (ramrequired > 0 ? ramrequired + " GB" : qsTr("minimal")))
+ "<br>"
+ (qsTr("Parameters: ") + parameters)
+ "<br>"
+ (qsTr("Quantization: ") + quant)
+ "<br>"
+ (qsTr("Type: ") + type)
+ (qsTr("File size: ") + filesize)
+ (ramrequired < 0 ? "" : "<br>" + (qsTr("RAM required: ") + (ramrequired > 0 ? ramrequired + " GB" : qsTr("minimal"))))
+ (parameters === "" ? "" : "<br>" + qsTr("Parameters: ") + parameters)
+ (quant === "" ? "" : "<br>" + (qsTr("Quantization: ") + quant))
+ (type === "" ? "" : "<br>" + (qsTr("Type: ") + type))
+ "</strong></font>"
color: theme.textColor
font.pixelSize: theme.fontSizeLarge
@ -350,11 +551,6 @@ MyDialog {
Accessible.role: Accessible.EditableText
Accessible.name: placeholderText
Accessible.description: qsTr("Whether the file hash is being calculated")
TextMetrics {
id: textMetrics
font: apiKey.font
text: apiKey.placeholderText
}
}
}
}

@ -82,7 +82,7 @@ MySettingsTab {
enabled: root.currentModelInfo.isClone
text: qsTr("Remove")
onClicked: {
ModelList.remove(root.currentModelInfo);
ModelList.removeClone(root.currentModelInfo);
comboBox.currentIndex = 0;
}
}
@ -453,7 +453,7 @@ MySettingsTab {
Accessible.description: ToolTip.text
}
MySettingsLabel {
id: minPLabel
id: minPLabel
text: qsTr("Min P")
Layout.row: 3
Layout.column: 0

@ -11,8 +11,8 @@ Button {
property color backgroundColorHovered: theme.iconBackgroundHovered
property alias source: image.source
property alias fillMode: image.fillMode
width: 30
height: 30
implicitWidth: 30
implicitHeight: 30
contentItem: Text {
text: myButton.text
horizontalAlignment: Text.AlignHCenter

Loading…
Cancel
Save