many chains (#8)

* many chains

* add summarize

* reorganize
Samantha Whitmore committed via GitHub
parent b2fe66a10e
commit ad6e2b0755

@@ -0,0 +1,29 @@
{
"memory": null,
"verbose": true,
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"input_key": "question",
"output_key": "answer",
"prompt": {
"input_variables": [
"question"
],
"output_parser": null,
"template": "If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put \"#!/bin/bash\" in your answer. Make sure to reason step by step, using this format:\n\nQuestion: \"copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'\"\n\nI need to take the following actions:\n- List all files in the directory\n- Create a new directory\n- Copy the files from the first directory into the second directory\n```bash\nls\nmkdir myNewDirectory\ncp -r target/* myNewDirectory\n```\n\nThat is the format. Begin!\n\nQuestion: {question}",
"template_format": "f-string",
"_type": "prompt"
},
"_type": "llm_bash_chain"
}
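
For reference, a minimal sketch of how this serialized `llm_bash_chain` maps onto the Python API of the time. The construction below and the `load_chain` path are assumptions inferred from the `_type` fields, since the diff as shown omits file names:

```python
from langchain.llms import OpenAI
from langchain.chains import LLMBashChain, load_chain

# Mirror the serialized "llm" block: text-davinci-003 at temperature 0.
llm = OpenAI(model_name="text-davinci-003", temperature=0.0)
bash_chain = LLMBashChain(llm=llm, verbose=True)
print(bash_chain.run("List all text files in the current directory."))

# Alternatively, deserialize the JSON itself (hypothetical path):
# bash_chain = load_chain("chains/llm-bash/chain.json")
```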

@@ -0,0 +1,57 @@
{
"memory": null,
"verbose": true,
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.7,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"create_draft_answer_prompt": {
"input_variables": [
"question"
],
"output_parser": null,
"template": "{question}\n\n",
"template_format": "f-string",
"_type": "prompt"
},
"list_assertions_prompt": {
"input_variables": [
"statement"
],
"output_parser": null,
"template": "Here is a statement:\n{statement}\nMake a bullet point list of the assumptions you made when producing the above statement.\n\n",
"template_format": "f-string",
"_type": "prompt"
},
"check_assertions_prompt": {
"input_variables": [
"assertions"
],
"output_parser": null,
"template": "Here is a bullet point list of assertions:\n{assertions}\nFor each assertion, determine whether it is true or false. If it is false, explain why.\n\n",
"template_format": "f-string",
"_type": "prompt"
},
"revised_answer_prompt": {
"input_variables": [
"checked_assertions",
"question"
],
"output_parser": null,
"template": "{checked_assertions}\n\nQuestion: In light of the above assertions and checks, how would you answer the question '{question}'?\n\nAnswer:",
"template_format": "f-string",
"_type": "prompt"
},
"input_key": "query",
"output_key": "result",
"_type": "llm_checker_chain"
}
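
The `llm_checker_chain` runs four prompts in sequence: draft an answer, list the assumptions behind it, check each assertion, then revise the answer in light of the checks. A usage sketch, assuming the stock `LLMCheckerChain`, which bundles these same prompts:

```python
from langchain.llms import OpenAI
from langchain.chains import LLMCheckerChain

# temperature 0.7 matches the serialized llm block above
llm = OpenAI(model_name="text-davinci-003", temperature=0.7)
checker_chain = LLMCheckerChain(llm=llm, verbose=True)

# input_key is "query", output_key is "result"; run() maps the single input.
print(checker_chain.run("What type of mammal lays the biggest eggs?"))
```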

@@ -0,0 +1,29 @@
{
"memory": null,
"verbose": true,
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"prompt": {
"input_variables": [
"question"
],
"output_parser": null,
"template": "You are GPT-3, and you can't do math.\n\nYou can do basic math, and your memorization abilities are impressive, but you can't do any complex calculations that a human could not do in their head. You also have an annoying tendency to just make up highly specific, but wrong, answers.\n\nSo we hooked you up to a Python 3 kernel, and now you can execute code. If anyone gives you a hard math problem, just use this format and we\u2019ll take care of the rest:\n\nQuestion: ${{Question with hard calculation.}}\n```python\n${{Code that prints what you need to know}}\n```\n```output\n${{Output of your code}}\n```\nAnswer: ${{Answer}}\n\nOtherwise, use this simpler format:\n\nQuestion: ${{Question without hard calculation}}\nAnswer: ${{Answer}}\n\nBegin.\n\nQuestion: What is 37593 * 67?\n\n```python\nprint(37593 * 67)\n```\n```output\n2518731\n```\nAnswer: 2518731\n\nQuestion: {question}\n",
"template_format": "f-string",
"_type": "prompt"
},
"input_key": "question",
"output_key": "answer",
"_type": "llm_math_chain"
}
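
The `llm_math_chain` prompt asks the model to answer hard arithmetic by emitting a Python snippet, which the chain executes and substitutes back. A sketch of the equivalent construction:

```python
from langchain.llms import OpenAI
from langchain.chains import LLMMathChain

llm = OpenAI(model_name="text-davinci-003", temperature=0.0)
llm_math = LLMMathChain(llm=llm, verbose=True)

# The chain runs the generated code block and returns its printed output.
print(llm_math.run("What is 13 raised to the .3432 power?"))
```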

@@ -0,0 +1,38 @@
{
"memory": null,
"verbose": false,
"llm_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"query",
"requests_result"
],
"output_parser": null,
"template": "Between >>> and <<< are the raw search result text from google.\nExtract the answer to the question '{query}' or say \"not found\" if the information is not contained.\nUse the format\nExtracted:<answer or \"not found\">\n>>> {requests_result} <<<\nExtracted:",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"text_length": 8000,
"requests_key": "requests_result",
"input_key": "url",
"output_key": "output",
"_type": "llm_requests_chain"
}
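
The `llm_requests_chain` wraps the inner `llm_chain`: it fetches the `url`, strips the page to text, truncates it to `text_length` (8000 characters), and injects it as `requests_result` for the extraction prompt. A usage sketch (the query and URL are illustrative; fetching needs the requests/BeautifulSoup extras):

```python
from langchain.llms import OpenAI
from langchain.chains import LLMChain, LLMRequestsChain
from langchain.prompts import PromptTemplate

# The extraction prompt serialized above.
template = (
    "Between >>> and <<< is the raw search result text from google.\n"
    "Extract the answer to the question '{query}' or say \"not found\" "
    "if the information is not contained.\n"
    "Use the format\nExtracted:<answer or \"not found\">\n"
    ">>> {requests_result} <<<\nExtracted:"
)
prompt = PromptTemplate(input_variables=["query", "requests_result"], template=template)

chain = LLMRequestsChain(llm_chain=LLMChain(llm=OpenAI(temperature=0.0), prompt=prompt))
question = "What are the three biggest countries?"
result = chain({
    "query": question,
    "url": "https://www.google.com/search?q=" + question.replace(" ", "+"),
})
print(result["output"])
```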

File diff suppressed because one or more lines are too long

@@ -0,0 +1,49 @@
{
"memory": null,
"verbose": false,
"input_key": "input_documents",
"output_key": "output_text",
"llm_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"context",
"question"
],
"output_parser": {
"regex": "(.*?)\\nScore: (.*)",
"output_keys": [
"answer",
"score"
],
"default_output_key": null,
"_type": "regex_parser"
},
"template": "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\nIn addition to giving an answer, also return a score of how fully it answered the user's question. This should be in the following format:\n\nQuestion: [question here]\nHelpful Answer: [answer here]\nScore: [score between 0 and 100]\n\nHow to determine the score:\n- Higher is a better answer\n- Better responds fully to the asked question, with sufficient level of detail\n- If you do not know the answer based on the context, that should be a score of 0\n- Don't be overconfident!\n\nExample #1\n\nContext:\n---------\nApples are red\n---------\nQuestion: what color are apples?\nHelpful Answer: red\nScore: 100\n\nExample #2\n\nContext:\n---------\nit was night and the witness forgot his glasses. he was not sure if it was a sports car or an suv\n---------\nQuestion: what type was the car?\nHelpful Answer: a sports car or an suv\nScore: 60\n\nExample #3\n\nContext:\n---------\nPears are either red or orange\n---------\nQuestion: what color are apples?\nHelpful Answer: This document does not answer the question\nScore: 0\n\nBegin!\n\nContext:\n---------\n{context}\n---------\nQuestion: {question}\nHelpful Answer:",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"document_variable_name": "context",
"rank_key": "score",
"answer_key": "answer",
"metadata_keys": null,
"return_intermediate_steps": true,
"_type": "map_rerank_documents_chain"
}
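
This is a `map_rerank` question-answering chain: every document is answered independently, the `regex_parser` splits each completion into `answer` and `score`, and the top-scoring answer is returned. A sketch using the `load_qa_chain` loader, which bundles this same prompt and parser:

```python
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document

chain = load_qa_chain(
    OpenAI(temperature=0.0),
    chain_type="map_rerank",
    return_intermediate_steps=True,  # matches the serialized flag above
)
docs = [
    Document(page_content="Apples are red"),
    Document(page_content="Pears are either red or orange"),
]
result = chain({"input_documents": docs, "question": "What color are apples?"})
print(result["output_text"])         # best-ranked answer
print(result["intermediate_steps"])  # per-document answer/score pairs
```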

@@ -0,0 +1,68 @@
{
"memory": null,
"verbose": true,
"api_request_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"api_docs",
"question"
],
"output_parser": null,
"template": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"api_answer_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"api_docs",
"question",
"api_url",
"api_response"
],
"output_parser": null,
"template": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url: {api_url}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"api_docs": "BASE URL: https://api.open-meteo.com/\n\nAPI Documentation\nThe API endpoint /v1/forecast accepts a geographical coordinate, a list of weather variables and responds with a JSON hourly weather forecast for 7 days. Time always starts at 0:00 today and contains 168 hours. All URL parameters are listed below:\n\nParameter\tFormat\tRequired\tDefault\tDescription\nlatitude, longitude\tFloating point\tYes\t\tGeographical WGS84 coordinate of the location\nhourly\tString array\tNo\t\tA list of weather variables which should be returned. Values can be comma separated, or multiple &hourly= parameter in the URL can be used.\ndaily\tString array\tNo\t\tA list of daily weather variable aggregations which should be returned. Values can be comma separated, or multiple &daily= parameter in the URL can be used. If daily weather variables are specified, parameter timezone is required.\ncurrent_weather\tBool\tNo\tfalse\tInclude current weather conditions in the JSON output.\ntemperature_unit\tString\tNo\tcelsius\tIf fahrenheit is set, all temperature values are converted to Fahrenheit.\nwindspeed_unit\tString\tNo\tkmh\tOther wind speed speed units: ms, mph and kn\nprecipitation_unit\tString\tNo\tmm\tOther precipitation amount units: inch\ntimeformat\tString\tNo\tiso8601\tIf format unixtime is selected, all time values are returned in UNIX epoch time in seconds. Please note that all timestamp are in GMT+0! For daily values with unix timestamps, please apply utc_offset_seconds again to get the correct date.\ntimezone\tString\tNo\tGMT\tIf timezone is set, all timestamps are returned as local-time and data is returned starting at 00:00 local-time. Any time zone name from the time zone database is supported. If auto is set as a time zone, the coordinates will be automatically resolved to the local time zone.\npast_days\tInteger (0-2)\tNo\t0\tIf past_days is set, yesterday or the day before yesterday data are also returned.\nstart_date\nend_date\tString (yyyy-mm-dd)\tNo\t\tThe time interval to get weather data. A day must be specified as an ISO8601 date (e.g. 2022-06-30).\nmodels\tString array\tNo\tauto\tManually select one or more weather models. Per default, the best suitable weather models will be combined.\n\nHourly Parameter Definition\nThe parameter &hourly= accepts the following values. Most weather variables are given as an instantaneous value for the indicated hour. Some variables like precipitation are calculated from the preceding hour as an average or sum.\n\nVariable\tValid time\tUnit\tDescription\ntemperature_2m\tInstant\t\u00b0C (\u00b0F)\tAir temperature at 2 meters above ground\nsnowfall\tPreceding hour sum\tcm (inch)\tSnowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g. 7 cm snow = 10 mm precipitation water equivalent\nrain\tPreceding hour sum\tmm (inch)\tRain from large scale weather systems of the preceding hour in millimeter\nshowers\tPreceding hour sum\tmm (inch)\tShowers from convective precipitation in millimeters from the preceding hour\nweathercode\tInstant\tWMO code\tWeather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details.\nsnow_depth\tInstant\tmeters\tSnow depth on the ground\nfreezinglevel_height\tInstant\tmeters\tAltitude above sea level of the 0\u00b0C level\nvisibility\tInstant\tmeters\tViewing distance in meters. Influenced by low clouds, humidity and aerosols. Maximum visibility is approximately 24 km.",
"question_key": "question",
"output_key": "output",
"_type": "api_chain"
}
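
The `api_chain` makes two LLM calls: `api_request_chain` turns the question plus `api_docs` into a request URL, and `api_answer_chain` summarizes the raw API response into an answer. Since the embedded docs are Open-Meteo's, this corresponds to the stock helper; a sketch:

```python
from langchain.llms import OpenAI
from langchain.chains import APIChain
from langchain.chains.api import open_meteo_docs

llm = OpenAI(model_name="text-davinci-003", temperature=0.0)
chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True)
print(chain.run("What is the current temperature in Munich in degrees Fahrenheit?"))
```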

@@ -0,0 +1,30 @@
{
"memory": null,
"verbose": true,
"llm": {
"model_name": "code-davinci-002",
"temperature": 0.0,
"max_tokens": 512,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"prompt": {
"input_variables": [
"question"
],
"output_parser": null,
"template": "Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\"\"\"\n money_initial = 23\n bagels = 5\n bagel_cost = 3\n money_spent = bagels * bagel_cost\n money_left = money_initial - money_spent\n result = money_left\n return result\n\n\n\n\n\nQ: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\"\"\"\n golf_balls_initial = 58\n golf_balls_lost_tuesday = 23\n golf_balls_lost_wednesday = 2\n golf_balls_left = golf_balls_initial - golf_balls_lost_tuesday - golf_balls_lost_wednesday\n result = golf_balls_left\n return result\n\n\n\n\n\nQ: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\"\"\"\n computers_initial = 9\n computers_per_day = 5\n num_days = 4 # 4 days between monday and thursday\n computers_added = computers_per_day * num_days\n computers_total = computers_initial + computers_added\n result = computers_total\n return result\n\n\n\n\n\nQ: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\"\"\"\n toys_initial = 5\n mom_toys = 2\n dad_toys = 2\n total_received = mom_toys + dad_toys\n total_toys = toys_initial + total_received\n result = total_toys\n return result\n\n\n\n\n\nQ: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\"\"\"\n jason_lollipops_initial = 20\n jason_lollipops_after = 12\n denny_lollipops = jason_lollipops_initial - jason_lollipops_after\n result = denny_lollipops\n return result\n\n\n\n\n\nQ: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"Leah had 32 chocolates and her sister had 42. 
If they ate 35, how many pieces do they have left in total?\"\"\"\n leah_chocolates = 32\n sister_chocolates = 42\n total_chocolates = leah_chocolates + sister_chocolates\n chocolates_eaten = 35\n chocolates_left = total_chocolates - chocolates_eaten\n result = chocolates_left\n return result\n\n\n\n\n\nQ: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\"\"\"\n cars_initial = 3\n cars_arrived = 2\n total_cars = cars_initial + cars_arrived\n result = total_cars\n return result\n\n\n\n\n\nQ: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\n\n# solution in Python:\n\n\ndef solution():\n \"\"\"There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\"\"\"\n trees_initial = 15\n trees_after = 21\n trees_added = trees_after - trees_initial\n result = trees_added\n return result\n\n\n\n\n\nQ: {question}\n\n# solution in Python:\n\n\n",
"template_format": "f-string",
"_type": "prompt"
},
"stop": "\n\n",
"get_answer_expr": "print(solution())",
"output_key": "result",
"_type": "pal_chain"
}
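
The `pal_chain` (Program-Aided Language models) few-shot prompt has the model write a `solution()` function; generation halts at the `stop` sequence and the chain evaluates `get_answer_expr`, i.e. `print(solution())`, to produce `result`. A sketch via the stock math-prompt constructor:

```python
from langchain.llms import OpenAI
from langchain.chains import PALChain

# code-davinci-002 with 512 max tokens, matching the serialized llm block.
llm = OpenAI(model_name="code-davinci-002", temperature=0.0, max_tokens=512)
pal_chain = PALChain.from_math_prompt(llm, verbose=True)

question = (
    "Jan has three times the number of pets as Marcia. Marcia has two more "
    "pets than Cindy. If Cindy has four pets, how many total pets do the three have?"
)
print(pal_chain.run(question))
```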

File diff suppressed because one or more lines are too long

@@ -0,0 +1,51 @@
{
"memory": null,
"verbose": false,
"input_key": "input_documents",
"output_key": "output_text",
"llm_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"context",
"question"
],
"output_parser": {
"regex": "(.*?)\\nScore: (.*)",
"output_keys": [
"answer",
"score"
],
"default_output_key": null,
"_type": "regex_parser"
},
"template": "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\nIn addition to giving an answer, also return a score of how fully it answered the user's question. This should be in the following format:\n\nQuestion: [question here]\nHelpful Answer: [answer here]\nScore: [score between 0 and 100]\n\nHow to determine the score:\n- Higher is a better answer\n- Better responds fully to the asked question, with sufficient level of detail\n- If you do not know the answer based on the context, that should be a score of 0\n- Don't be overconfident!\n\nExample #1\n\nContext:\n---------\nApples are red\n---------\nQuestion: what color are apples?\nHelpful Answer: red\nScore: 100\n\nExample #2\n\nContext:\n---------\nit was night and the witness forgot his glasses. he was not sure if it was a sports car or an suv\n---------\nQuestion: what type was the car?\nHelpful Answer: a sports car or an suv\nScore: 60\n\nExample #3\n\nContext:\n---------\nPears are either red or orange\n---------\nQuestion: what color are apples?\nHelpful Answer: This document does not answer the question\nScore: 0\n\nBegin!\n\nContext:\n---------\n{context}\n---------\nQuestion: {question}\nHelpful Answer:",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"document_variable_name": "context",
"rank_key": "score",
"answer_key": "answer",
"metadata_keys": [
"source"
],
"return_intermediate_steps": true,
"_type": "map_rerank_documents_chain"
}
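
This second `map_rerank` config differs from the earlier one only in `"metadata_keys": ["source"]`, so the winning document's `source` metadata is surfaced alongside the answer, i.e. QA with sources. A sketch, assuming the with-sources loader accepts a `map_rerank` chain type in this version:

```python
from langchain.llms import OpenAI
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.docstore.document import Document

chain = load_qa_with_sources_chain(OpenAI(temperature=0.0), chain_type="map_rerank")
docs = [
    Document(page_content="Apples are red", metadata={"source": "fruit-facts.txt"}),
    Document(page_content="Pears are either red or orange", metadata={"source": "pears.txt"}),
]
result = chain({"input_documents": docs, "question": "What color are apples?"})
print(result)  # output_text plus the top-ranked document's "source"
```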

@@ -0,0 +1,79 @@
{
"memory": null,
"verbose": false,
"input_key": "input_documents",
"output_key": "output_text",
"initial_llm_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"context_str",
"question"
],
"output_parser": null,
"template": "Context information is below. \n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge, answer the question: {question}\n",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"refine_llm_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"question",
"existing_answer",
"context_str"
],
"output_parser": null,
"template": "The original question is as follows: {question}\nWe have provided an existing answer, including sources: {existing_answer}\nWe have the opportunity to refine the existing answer(only if needed) with some more context below.\n------------\n{context_str}\n------------\nGiven the new context, refine the original answer to better answer the question. If you do update it, please update the sources as well. If the context isn't useful, return the original answer.",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"document_variable_name": "context_str",
"initial_response_name": "existing_answer",
"document_prompt": {
"input_variables": [
"page_content",
"source"
],
"output_parser": null,
"template": "Content: {page_content}\nSource: {source}",
"template_format": "f-string",
"_type": "prompt"
},
"return_intermediate_steps": false,
"_type": "refine_documents_chain"
}
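
The `refine` chain works sequentially: `initial_llm_chain` answers from the first document, then `refine_llm_chain` revises that `existing_answer` against each remaining chunk; here the `document_prompt` attaches each chunk's `source`, so sources survive the refinements. A sketch via the with-sources loader:

```python
from langchain.llms import OpenAI
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.docstore.document import Document

chain = load_qa_with_sources_chain(OpenAI(temperature=0.0), chain_type="refine")
docs = [
    Document(page_content="Apples are red", metadata={"source": "doc-1"}),
    Document(page_content="Some apples are green", metadata={"source": "doc-2"}),
]
print(chain({"input_documents": docs, "question": "What color are apples?"})["output_text"])
```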

File diff suppressed because one or more lines are too long

@@ -0,0 +1,78 @@
{
"memory": null,
"verbose": false,
"input_key": "input_documents",
"output_key": "output_text",
"initial_llm_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"context_str",
"question"
],
"output_parser": null,
"template": "Context information is below. \n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge, answer the question: {question}\n",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"refine_llm_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"question",
"existing_answer",
"context_str"
],
"output_parser": null,
"template": "The original question is as follows: {question}\nWe have provided an existing answer: {existing_answer}\nWe have the opportunity to refine the existing answer(only if needed) with some more context below.\n------------\n{context_str}\n------------\nGiven the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"document_variable_name": "context_str",
"initial_response_name": "existing_answer",
"document_prompt": {
"input_variables": [
"page_content"
],
"output_parser": null,
"template": "{page_content}",
"template_format": "f-string",
"_type": "prompt"
},
"return_intermediate_steps": false,
"_type": "refine_documents_chain"
}
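
Same refine flow without source tracking (the `document_prompt` is plain `{page_content}`). A sketch with the plain QA loader:

```python
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document

chain = load_qa_chain(OpenAI(temperature=0.0), chain_type="refine")
docs = [
    Document(page_content="Apples are red"),
    Document(page_content="Some apples are green"),
]
# Every document after the first triggers one refine pass over the running answer.
print(chain({"input_documents": docs, "question": "What color are apples?"})["output_text"])
```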

@@ -0,0 +1,46 @@
{
"memory": null,
"verbose": false,
"input_key": "input_documents",
"output_key": "output_text",
"llm_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"context",
"question"
],
"output_parser": null,
"template": "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n{context}\n\nQuestion: {question}\nHelpful Answer:",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"document_prompt": {
"input_variables": [
"page_content"
],
"output_parser": null,
"template": "{page_content}",
"template_format": "f-string",
"_type": "prompt"
},
"document_variable_name": "context",
"_type": "stuff_documents_chain"
}
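
The `stuff` chain is the simplest combiner: all documents are concatenated into the single `{context}` slot of one prompt, so it only works while everything fits in the model's context window. A sketch:

```python
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document

chain = load_qa_chain(OpenAI(temperature=0.0), chain_type="stuff")
docs = [Document(page_content="Apples are red")]
print(chain({"input_documents": docs, "question": "What color are apples?"})["output_text"])
```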

@@ -0,0 +1,83 @@
{
"memory": null,
"verbose": false,
"input_key": "input_documents",
"output_key": "output_text",
"llm_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"text"
],
"output_parser": null,
"template": "Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"combine_document_chain": {
"memory": null,
"verbose": false,
"input_key": "input_documents",
"output_key": "output_text",
"llm_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"text"
],
"output_parser": null,
"template": "Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"document_prompt": {
"input_variables": [
"page_content"
],
"output_parser": null,
"template": "{page_content}",
"template_format": "f-string",
"_type": "prompt"
},
"document_variable_name": "text",
"_type": "stuff_documents_chain"
},
"collapse_document_chain": null,
"document_variable_name": "text",
"return_intermediate_steps": false,
"_type": "map_reduce_documents_chain"
}
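
This `map_reduce_documents_chain` summarizes each document with the inner `llm_chain` (map), then the `combine_document_chain`, a stuff chain reusing the same concise-summary prompt, summarizes the summaries (reduce); `"collapse_document_chain": null` means intermediate summaries are not recursively collapsed first. A sketch via the summarize loader:

```python
from langchain.llms import OpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document

chain = load_summarize_chain(OpenAI(temperature=0.0), chain_type="map_reduce")
docs = [
    Document(page_content="First long article ..."),
    Document(page_content="Second long article ..."),
]
print(chain.run(docs))  # map: summarize each doc; reduce: summarize the summaries
```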

@@ -0,0 +1,76 @@
{
"memory": null,
"verbose": false,
"input_key": "input_documents",
"output_key": "output_text",
"initial_llm_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"text"
],
"output_parser": null,
"template": "Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"refine_llm_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"existing_answer",
"text"
],
"output_parser": null,
"template": "Your job is to produce a final summary\nWe have provided an existing summary up to a certain point: {existing_answer}\nWe have the opportunity to refine the existing summary(only if needed) with some more context below.\n------------\n{text}\n------------\nGiven the new context, refine the original summaryIf the context isn't useful, return the original summary.",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"document_variable_name": "text",
"initial_response_name": "existing_answer",
"document_prompt": {
"input_variables": [
"page_content"
],
"output_parser": null,
"template": "{page_content}",
"template_format": "f-string",
"_type": "prompt"
},
"return_intermediate_steps": false,
"_type": "refine_documents_chain"
}
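
The refine summarizer seeds a summary from the first document, then folds each remaining chunk into it with the refine prompt above. A sketch:

```python
from langchain.llms import OpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document

chain = load_summarize_chain(OpenAI(temperature=0.0), chain_type="refine")
docs = [
    Document(page_content="Chapter one ..."),
    Document(page_content="Chapter two ..."),
]
print(chain.run(docs))
```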

@@ -0,0 +1,45 @@
{
"memory": null,
"verbose": false,
"input_key": "input_documents",
"output_key": "output_text",
"llm_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"text"
],
"output_parser": null,
"template": "Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.0,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"document_prompt": {
"input_variables": [
"page_content"
],
"output_parser": null,
"template": "{page_content}",
"template_format": "f-string",
"_type": "prompt"
},
"document_variable_name": "text",
"_type": "stuff_documents_chain"
}
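
The stuff summarizer concatenates every document into the one `{text}` slot of the concise-summary prompt. Any of these serialized files can also be loaded directly; the path below is hypothetical, since the diff as shown omits file names:

```python
from langchain.chains import load_chain
from langchain.docstore.document import Document

# Hypothetical local path to the stuff-summarize JSON above.
chain = load_chain("chains/summarize/stuff/chain.json")
print(chain.run([Document(page_content="A short article to summarize ...")]))
```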

File diff suppressed because one or more lines are too long

@@ -0,0 +1,56 @@
{
"memory": null,
"verbose": false,
"k": 4,
"combine_documents_chain": {
"memory": null,
"verbose": false,
"input_key": "input_documents",
"output_key": "output_text",
"llm_chain": {
"memory": null,
"verbose": false,
"prompt": {
"input_variables": [
"context",
"question"
],
"output_parser": null,
"template": "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n{context}\n\nQuestion: {question}\nHelpful Answer:",
"template_format": "f-string",
"_type": "prompt"
},
"llm": {
"model_name": "text-davinci-003",
"temperature": 0.7,
"max_tokens": 256,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"n": 1,
"best_of": 1,
"request_timeout": null,
"logit_bias": {},
"_type": "openai"
},
"output_key": "text",
"_type": "llm_chain"
},
"document_prompt": {
"input_variables": [
"page_content"
],
"output_parser": null,
"template": "{page_content}",
"template_format": "f-string",
"_type": "prompt"
},
"document_variable_name": "context",
"_type": "stuff_documents_chain"
},
"input_key": "query",
"output_key": "result",
"return_source_documents": false,
"search_kwargs": {},
"_type": "vector_db_qa"
}
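
Finally, `vector_db_qa` puts the stuff QA chain behind a vector store: the `k: 4` most similar documents are retrieved for each query and stuffed into the prompt. A sketch assuming FAISS and OpenAI embeddings (any vector store works; the `faiss` package must be installed):

```python
from langchain.llms import OpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import VectorDBQA

# Tiny in-memory index standing in for a real document collection.
docsearch = FAISS.from_texts(
    ["Apples are red", "Pears are either red or orange"],
    OpenAIEmbeddings(),
)
qa = VectorDBQA.from_chain_type(
    llm=OpenAI(temperature=0.7),  # matches the serialized llm block
    chain_type="stuff",
    vectorstore=docsearch,
    k=4,
)
print(qa.run("What color are apples?"))
```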