docs: use standard openai params (#20160)

Part of #20085
pull/20161/head
Bagatur committed by GitHub
parent e1a24d09c5
commit 5ae0e687b3
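The change is mechanical throughout: the docs switch from the provider-prefixed constructor parameters (`model_name`, `openai_api_key`) to the standard aliases that `langchain_openai` already accepts (`model`, `api_key`). A minimal before/after sketch for reference; both spellings work at the time of this commit, since the standard names are declared as field aliases:

```python
from langchain_openai import ChatOpenAI

# Before: provider-prefixed parameter names
llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key="...")

# After: standardized names used across the docs from this commit on
llm = ChatOpenAI(model="gpt-3.5-turbo", api_key="...")
```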

@@ -59,7 +59,7 @@
 },
 "outputs": [],
 "source": [
-"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=1.0)"
+"llm = ChatOpenAI(model=\"gpt-4\", temperature=1.0)"
 ]
 },
 {

@@ -84,7 +84,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
+"llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n",
 "chain = ElasticsearchDatabaseChain.from_llm(llm=llm, database=db, verbose=True)"
 ]
 },

@@ -229,7 +229,7 @@
 " prompt = hub.pull(\"rlm/rag-prompt\")\n",
 "\n",
 " # LLM\n",
-" llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0, streaming=True)\n",
+" llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0, streaming=True)\n",
 "\n",
 " # Post-processing\n",
 " def format_docs(docs):\n",

@@ -236,7 +236,7 @@
 " prompt = hub.pull(\"rlm/rag-prompt\")\n",
 "\n",
 " # LLM\n",
-" llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+" llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
 "\n",
 " # Post-processing\n",
 " def format_docs(docs):\n",

@@ -84,7 +84,7 @@
 "from langchain.retrievers import KayAiRetriever\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
-"model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
+"model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
 "retriever = KayAiRetriever.create(\n",
 " dataset_id=\"company\", data_types=[\"PressRelease\"], num_contexts=6\n",
 ")\n",

@@ -274,7 +274,7 @@
 "db = SQLDatabase.from_uri(\n",
 " CONNECTION_STRING\n",
 ") # We reconnect to db so the new columns are loaded as well.\n",
-"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
+"llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n",
 "\n",
 "sql_query_chain = (\n",
 " RunnablePassthrough.assign(schema=get_schema)\n",

@@ -3811,7 +3811,7 @@
 "from langchain.chains import ConversationalRetrievalChain\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
-"model = ChatOpenAI(model_name=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n",
+"model = ChatOpenAI(model=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n",
 "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
 ]
 },

@@ -424,7 +424,7 @@
 " DialogueAgentWithTools(\n",
 " name=name,\n",
 " system_message=SystemMessage(content=system_message),\n",
-" model=ChatOpenAI(model_name=\"gpt-4\", temperature=0.2),\n",
+" model=ChatOpenAI(model=\"gpt-4\", temperature=0.2),\n",
 " tool_names=tools,\n",
 " top_k_results=2,\n",
 " )\n",

@@ -601,7 +601,7 @@
 "source": [
 "from langchain_openai import ChatOpenAI\n",
 "\n",
-"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)"
+"llm = ChatOpenAI(model=\"gpt-4\", temperature=0)"
 ]
 },
 {

@@ -94,12 +94,12 @@ from langchain_openai import ChatOpenAI
 llm = ChatOpenAI()
 ```
-If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:
+If you'd prefer not to set an environment variable you can pass the key in directly via the `api_key` named parameter when initiating the OpenAI LLM class:
 ```python
 from langchain_openai import ChatOpenAI
-llm = ChatOpenAI(openai_api_key="...")
+llm = ChatOpenAI(api_key="...")
 ```
 </TabItem>
@@ -509,7 +509,7 @@ from langchain.agents import AgentExecutor
 # Get the prompt to use - you can modify this!
 prompt = hub.pull("hwchase17/openai-functions-agent")
-# You need to set OPENAI_API_KEY environment variable or pass it as argument `openai_api_key`.
+# You need to set OPENAI_API_KEY environment variable or pass it as argument `api_key`.
 llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
 agent = create_openai_functions_agent(llm, tools, prompt)
 agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
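For context, the executor assembled in this hunk is typically run as in the sketch below; the question string is illustrative, and `tools` is assumed to be defined earlier in the surrounding guide:

```python
# Hypothetical invocation of the executor built above;
# AgentExecutor.invoke takes an "input" key and returns a dict with "output".
result = agent_executor.invoke({"input": "How many letters are in 'LangChain'?"})
print(result["output"])
```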

@@ -27,7 +27,7 @@ Let's suppose we have a simple agent, and want to visualize the actions it takes
 from langchain.agents import AgentType, initialize_agent, load_tools
 from langchain_openai import ChatOpenAI
-llm = ChatOpenAI(model_name="gpt-4", temperature=0)
+llm = ChatOpenAI(model="gpt-4", temperature=0)
 tools = load_tools(["ddg-search", "llm-math"], llm=llm)
 agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
 ```

@@ -204,7 +204,7 @@
 " ]\n",
 ")\n",
 "# Here we're going to use a bad model name to easily create a chain that will error\n",
-"chat_model = ChatOpenAI(model_name=\"gpt-fake\")\n",
+"chat_model = ChatOpenAI(model=\"gpt-fake\")\n",
 "bad_chain = chat_prompt | chat_model | StrOutputParser()"
 ]
 },

@@ -218,7 +218,7 @@
 "source": [
 "# Build a QA chain\n",
 "qa_chain = RetrievalQA.from_chain_type(\n",
-" llm=ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0),\n",
+" llm=ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0),\n",
 " chain_type=\"stuff\",\n",
 " retriever=vectordb.as_retriever(),\n",
 ")"

@@ -30,7 +30,7 @@ messages = [
 HumanMessage(content="Ping?"),
 ]
-llm = ChatOpenAI(model_name="gpt-3.5-turbo", callbacks=[log10_callback])
+llm = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[log10_callback])
 ```
 [Log10 + Langchain + Logs docs](https://github.com/log10-io/log10/blob/main/logging.md#langchain-logger)
@@ -55,7 +55,7 @@ messages = [
 HumanMessage(content="Ping?"),
 ]
-llm = ChatOpenAI(model_name="gpt-3.5-turbo", callbacks=[log10_callback], temperature=0.5, tags=["test"])
+llm = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[log10_callback], temperature=0.5, tags=["test"])
 completion = llm.predict_messages(messages, tags=["foobar"])
 print(completion)

@@ -203,7 +203,7 @@
 "from langchain.chains import ConversationalRetrievalChain\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
-"model = ChatOpenAI(model_name=\"gpt-3.5-turbo\") # switch to 'gpt-4'\n",
+"model = ChatOpenAI(model=\"gpt-3.5-turbo\") # switch to 'gpt-4'\n",
 "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
 ]
 },

@@ -153,7 +153,7 @@
 "from langchain.chains import ConversationalRetrievalChain\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
-"model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
+"model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
 "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
 ]
 },

@@ -140,7 +140,7 @@
 "from langchain.chains import ConversationalRetrievalChain\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
-"model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
+"model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
 "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
 ]
 },

@@ -81,7 +81,7 @@
 "from langchain_community.retrievers import KayAiRetriever\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
-"model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
+"model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
 "retriever = KayAiRetriever.create(\n",
 " dataset_id=\"company\", data_types=[\"10-K\", \"10-Q\"], num_contexts=6\n",
 ")\n",

@@ -202,7 +202,7 @@
 "from langchain.chains import ConversationalRetrievalChain\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
-"model = ChatOpenAI(model_name=\"gpt-3.5-turbo\") # switch to 'gpt-4'\n",
+"model = ChatOpenAI(model=\"gpt-3.5-turbo\") # switch to 'gpt-4'\n",
 "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
 ]
 },

@@ -144,7 +144,7 @@
 "prompt = ChatPromptTemplate.from_template(template)\n",
 "\n",
 "\"\"\" Obtain a Large Language Model \"\"\"\n",
-"LLM = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+"LLM = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
 "\n",
 "\"\"\" Create a chain for the RAG flow \"\"\"\n",
 "rag_chain = (\n",

@@ -394,7 +394,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
 "qa_chain = RetrievalQA.from_chain_type(llm, retriever=vector_db.as_retriever())"
 ]
 },

@@ -437,7 +437,7 @@
 "source": [
 "from langchain.chains import ConversationalRetrievalChain\n",
 "\n",
-"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
+"llm = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
 "memory = ConversationBufferMemory(\n",
 " memory_key=\"chat_history\", output_key=\"answer\", return_messages=True\n",
 ")\n",
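The memory constructed in this hunk is typically wired into the conversational chain as below; a sketch, assuming a `retriever` is defined as in the surrounding notebook:

```python
# Sketch: pass the memory into the chain so chat history is tracked under
# "chat_history" and answers are returned under "answer".
qa = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, memory=memory)
```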

@@ -589,7 +589,7 @@
 "source": [
 "from langchain_community.chat_models import ChatOpenAI\n",
 "\n",
-"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
 "llm.predict(\"What did the president say about Justice Breyer\")"
 ]
 },
@@ -824,7 +824,7 @@
 "source": [
 "from langchain_community.chat_models import ChatOpenAI\n",
 "\n",
-"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)"
+"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)"
 ]
 },
 {

@@ -35,12 +35,12 @@ Accessing the API requires an API key, which you can get by creating an account
 export OPENAI_API_KEY="..."
 ```
-If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:
+If you'd prefer not to set an environment variable you can pass the key in directly via the `api_key` named parameter when initiating the OpenAI LLM class:
 ```python
 from langchain_openai import OpenAIEmbeddings
-embeddings_model = OpenAIEmbeddings(openai_api_key="...")
+embeddings_model = OpenAIEmbeddings(api_key="...")
 ```
 Otherwise you can initialize without any params:
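For reference, the two usual entry points on the embeddings object touched by this hunk; a minimal sketch, assuming `OPENAI_API_KEY` is set in the environment:

```python
from langchain_openai import OpenAIEmbeddings

embeddings_model = OpenAIEmbeddings()  # picks up OPENAI_API_KEY from the environment

query_vector = embeddings_model.embed_query("hello world")  # one vector
doc_vectors = embeddings_model.embed_documents(["doc 1", "doc 2"])  # one vector per document
```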

@@ -52,7 +52,7 @@
 "source": [
 "```{=mdx}\n",
 "<ChatModelTabs\n",
-" openaiParams={`model=\"gpt-3.5-turbo-0125\", openai_api_key=\"...\"`}\n",
+" openaiParams={`model=\"gpt-3.5-turbo-0125\", api_key=\"...\"`}\n",
 " anthropicParams={`model=\"claude-3-sonnet-20240229\", anthropic_api_key=\"...\"`}\n",
 " fireworksParams={`model=\"accounts/fireworks/models/mixtral-8x7b-instruct\", fireworks_api_key=\"...\"`}\n",
 " mistralParams={`model=\"mistral-large-latest\", mistral_api_key=\"...\"`}\n",

@@ -30,7 +30,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"llm = ChatOpenAI(model_name=\"gpt-4\")"
+"llm = ChatOpenAI(model=\"gpt-4\")"
 ]
 },
 {

@@ -63,11 +63,11 @@ llm = OpenAI()
 chat_model = ChatOpenAI(model="gpt-3.5-turbo-0125")
 ```
-If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:
+If you'd prefer not to set an environment variable you can pass the key in directly via the `api_key` named parameter when initiating the OpenAI LLM class:
 ```python
 from langchain_openai import ChatOpenAI
-llm = ChatOpenAI(openai_api_key="...")
+llm = ChatOpenAI(api_key="...")
 ```
 Both `llm` and `chat_model` are objects that represent configuration for a particular model.

@@ -40,7 +40,7 @@
 "export OPENAI_API_KEY=\"...\"\n",
 "```\n",
 "\n",
-"If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:\n",
+"If you'd prefer not to set an environment variable you can pass the key in directly via the `api_key` named parameter when initiating the OpenAI LLM class:\n",
 "\n"
 ]
 },
@@ -53,7 +53,7 @@
 "source": [
 "from langchain_openai import OpenAI\n",
 "\n",
-"llm = OpenAI(openai_api_key=\"...\")"
+"llm = OpenAI(api_key=\"...\")"
 ]
 },
 {

@@ -38,11 +38,11 @@ llm = OpenAI()
 chat_model = ChatOpenAI(model="gpt-3.5-turbo-0125")
 ```
-If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:
+If you'd prefer not to set an environment variable you can pass the key in directly via the `api_key` named parameter when initiating the OpenAI LLM class:
 ```python
 from langchain_openai import ChatOpenAI
-llm = ChatOpenAI(openai_api_key="...")
+llm = ChatOpenAI(api_key="...")
 ```
 </TabItem>

@@ -237,7 +237,7 @@
 "from langchain_core.prompts import ChatPromptTemplate\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
-"llm = ChatOpenAI(model_name=\"gpt-4\")\n",
+"llm = ChatOpenAI(model=\"gpt-4\")\n",
 "\n",
 "# First we need a prompt that we can pass into an LLM to generate this search query\n",
 "\n",

@@ -269,7 +269,7 @@
 "outputs": [],
 "source": [
 "# LLM\n",
-"model = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0.7)\n",
+"model = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0.7)\n",
 "chain = create_data_generation_chain(model)"
 ]
 },

@@ -151,7 +151,7 @@
 "# Retrieve and generate using the relevant snippets of the blog.\n",
 "retriever = vectorstore.as_retriever()\n",
 "prompt = hub.pull(\"rlm/rag-prompt\")\n",
-"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
 "\n",
 "\n",
 "def format_docs(docs):\n",
@@ -417,7 +417,7 @@
 "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
 "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
 "\n",
-"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
 "\n",
 "\n",
 "### Construct retriever ###\n",

@@ -143,7 +143,7 @@
 "# Retrieve and generate using the relevant snippets of the blog.\n",
 "retriever = vectorstore.as_retriever()\n",
 "prompt = hub.pull(\"rlm/rag-prompt\")\n",
-"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
 "\n",
 "\n",
 "def format_docs(docs):\n",

@@ -143,7 +143,7 @@
 "# Retrieve and generate using the relevant snippets of the blog.\n",
 "retriever = vectorstore.as_retriever()\n",
 "prompt = hub.pull(\"rlm/rag-prompt\")\n",
-"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
 "\n",
 "\n",
 "def format_docs(docs):\n",

@@ -160,7 +160,7 @@ class ChatOpenAI(BaseChatModel):
 .. code-block:: python
 from langchain_community.chat_models import ChatOpenAI
-openai = ChatOpenAI(model_name="gpt-3.5-turbo")
+openai = ChatOpenAI(model="gpt-3.5-turbo")
 """
 @property

@@ -33,7 +33,7 @@ class PromptLayerChatOpenAI(ChatOpenAI):
 .. code-block:: python
 from langchain_community.chat_models import PromptLayerChatOpenAI
-openai = PromptLayerChatOpenAI(model_name="gpt-3.5-turbo")
+openai = PromptLayerChatOpenAI(model="gpt-3.5-turbo")
 """
 pl_tags: Optional[List[str]]

@@ -60,7 +60,7 @@ def create_stuff_documents_chain(
 prompt = ChatPromptTemplate.from_messages(
     [("system", "What are everyone's favorite colors:\\n\\n{context}")]
 )
-llm = ChatOpenAI(model_name="gpt-3.5-turbo")
+llm = ChatOpenAI(model="gpt-3.5-turbo")
 chain = create_stuff_documents_chain(llm, prompt)
 docs = [

@@ -240,7 +240,7 @@ class ChatOpenAI(BaseChatModel):
 from langchain_openai import ChatOpenAI
-model = ChatOpenAI(model_name="gpt-3.5-turbo")
+model = ChatOpenAI(model="gpt-3.5-turbo")
 """
 @property

@@ -20,8 +20,8 @@ corrector_schema = [
 cypher_validation = CypherQueryCorrector(corrector_schema)
 # LLMs
-cypher_llm = ChatOpenAI(model_name="gpt-4", temperature=0.0)
-qa_llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.0)
+cypher_llm = ChatOpenAI(model="gpt-4", temperature=0.0)
+qa_llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0)
 # Extract entities from text

@@ -20,8 +20,8 @@ corrector_schema = [
 cypher_validation = CypherQueryCorrector(corrector_schema)
 # LLMs
-cypher_llm = ChatOpenAI(model_name="gpt-4", temperature=0.0)
-qa_llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.0)
+cypher_llm = ChatOpenAI(model="gpt-4", temperature=0.0)
+qa_llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0)
 def convert_messages(input: List[Dict[str, Any]]) -> ChatMessageHistory:

@@ -17,8 +17,8 @@ corrector_schema = [
 cypher_validation = CypherQueryCorrector(corrector_schema)
 # LLMs
-cypher_llm = ChatOpenAI(model_name="gpt-4", temperature=0.0)
-qa_llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.0)
+cypher_llm = ChatOpenAI(model="gpt-4", temperature=0.0)
+qa_llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0)
 # Generate Cypher statement based on natural language input
 cypher_template = """Based on the Neo4j graph schema below, write a Cypher query that would answer the user's question:

@@ -203,13 +203,13 @@ class Instruction(BaseModel):
 agent_executor = (
-    get_agent_executor(ChatOpenAI(model_name="gpt-4-1106-preview", temperature=0.0))
+    get_agent_executor(ChatOpenAI(model="gpt-4-1106-preview", temperature=0.0))
     .configurable_alternatives(
         ConfigurableField("model_name"),
         default_key="gpt4turbo",
-        gpt4=get_agent_executor(ChatOpenAI(model_name="gpt-4", temperature=0.0)),
+        gpt4=get_agent_executor(ChatOpenAI(model="gpt-4", temperature=0.0)),
         gpt35t=get_agent_executor(
-            ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.0),
+            ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0),
         ),
     )
     .with_types(input_type=Instruction, output_type=str)
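Since this hunk touches a `configurable_alternatives` block, note how the keys registered above (`gpt4turbo`, `gpt4`, `gpt35t`) are selected at runtime; a sketch using the standard `Runnable.with_config` mechanism:

```python
# Select the gpt-3.5-turbo alternative registered under the "model_name" field.
gpt35_executor = agent_executor.with_config(configurable={"model_name": "gpt35t"})
```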

@@ -47,7 +47,7 @@ prompt = ChatPromptTemplate.from_template(template)
 # RAG
-model = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
+model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
 chain = (
     RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
     | prompt

@@ -54,7 +54,7 @@ prompt = ChatPromptTemplate.from_template(template)
 # RAG Chain
-model = ChatOpenAI(model_name="gpt-3.5-turbo-16k")
+model = ChatOpenAI(model="gpt-3.5-turbo-16k")
 chain = (
     RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
     | prompt
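Both template hunks truncate the LCEL chain after `| prompt`; in these templates the pipeline typically finishes with the model and a string output parser. A sketch, assuming the standard pattern:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableParallel, RunnablePassthrough

chain = (
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()  # assumed completion; the hunk ends before this line
)
```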
