docs, experimental[patch], langchain[patch], community[patch]: update storage imports (#15429)

Ran:
```bash
# "g" and "gco" are shell aliases for "git" and "git checkout";
# sed -i '' is the BSD (macOS) in-place edit syntax.
g grep -l "langchain.vectorstores" | xargs -L 1 sed -i '' "s/langchain\.vectorstores/langchain_community.vectorstores/g"
g grep -l "langchain.document_loaders" | xargs -L 1 sed -i '' "s/langchain\.document_loaders/langchain_community.document_loaders/g"
g grep -l "langchain.chat_loaders" | xargs -L 1 sed -i '' "s/langchain\.chat_loaders/langchain_community.chat_loaders/g"
g grep -l "langchain.document_transformers" | xargs -L 1 sed -i '' "s/langchain\.document_transformers/langchain_community.document_transformers/g"
g grep -l "langchain\.graphs" | xargs -L 1 sed -i '' "s/langchain\.graphs/langchain_community.graphs/g"
g grep -l "langchain\.memory\.chat_message_histories" | xargs -L 1 sed -i '' "s/langchain\.memory\.chat_message_histories/langchain_community.chat_message_histories/g"

# Restore the import/public-API unit tests, which the sed pass would
# otherwise have rewritten, to their state on master:
gco master libs/langchain/tests/unit_tests/*/test_imports.py
gco master libs/langchain/tests/unit_tests/**/test_public_api.py
```
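For downstream code, each of these moves is a one-line import change. A minimal sketch of the pattern (the class names below are ones already touched in this diff; the new paths assume `langchain-community` is installed):

```python
# Old pre-split paths, rewritten by the sed pass above:
# from langchain.vectorstores import Chroma
# from langchain.document_loaders import TextLoader
# from langchain.chat_loaders.utils import map_ai_messages

# Equivalent langchain_community paths after this change:
from langchain_community.vectorstores import Chroma
from langchain_community.document_loaders import TextLoader
from langchain_community.chat_loaders.utils import map_ai_messages
```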
Branch: pull/15433/head
Author: Bagatur, committed by GitHub 5 months ago
Parent: a33d92306c
Commit: fa5d49f2c1

@ -101,7 +101,7 @@
"If you want to use the provided folder, then simply opt for a [pdf loader](https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf) for the document:\n",
"\n",
"```\n",
"from langchain.document_loaders import PyPDFLoader\n",
"from langchain_community.document_loaders import PyPDFLoader\n",
"loader = PyPDFLoader(path + fname)\n",
"docs = loader.load()\n",
"tables = [] # Ignore w/ basic pdf loader\n",
@ -355,8 +355,8 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",

@ -93,7 +93,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import PyPDFLoader\n",
"from langchain_community.document_loaders import PyPDFLoader\n",
"\n",
"loader = PyPDFLoader(\"./cj/cj.pdf\")\n",
"docs = loader.load()\n",
@ -344,8 +344,8 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_community.embeddings import VertexAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",

@ -320,8 +320,8 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"# The vectorstore to use to index the child chunks\n",

@ -375,8 +375,8 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"# The vectorstore to use to index the child chunks\n",

@ -378,8 +378,8 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_community.embeddings import GPT4AllEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"# The vectorstore to use to index the child chunks\n",

@ -62,7 +62,7 @@
"path = \"/Users/rlm/Desktop/cpi/\"\n",
"\n",
"# Load\n",
"from langchain.document_loaders import PyPDFLoader\n",
"from langchain_community.document_loaders import PyPDFLoader\n",
"\n",
"loader = PyPDFLoader(path + \"cpi.pdf\")\n",
"pdf_pages = loader.load()\n",
@ -132,8 +132,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.vectorstores import Chroma\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma\n",
"\n",
"baseline = Chroma.from_texts(\n",
" texts=all_splits_pypdf_texts,\n",

@ -29,9 +29,9 @@
"source": [
"from langchain.chains import RetrievalQA\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.vectorstores import Chroma\n",
"\n",
"llm = OpenAI(temperature=0)"
]
@ -69,7 +69,7 @@
}
],
"source": [
"from langchain.document_loaders import TextLoader\n",
"from langchain_community.document_loaders import TextLoader\n",
"\n",
"loader = TextLoader(doc_path)\n",
"documents = loader.load()\n",
@ -99,7 +99,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import WebBaseLoader"
"from langchain_community.document_loaders import WebBaseLoader"
]
},
{

@ -62,8 +62,8 @@
"outputs": [],
"source": [
"from langchain.docstore import InMemoryDocstore\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_community.embeddings import OpenAIEmbeddings"
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS"
]
},
{
@ -167,7 +167,7 @@
},
"outputs": [],
"source": [
"from langchain.memory.chat_message_histories import FileChatMessageHistory\n",
"from langchain_community.chat_message_histories import FileChatMessageHistory\n",
"\n",
"agent = AutoGPT.from_llm_and_tools(\n",
" ai_name=\"Tom\",\n",

@ -311,8 +311,8 @@
"# Memory\n",
"import faiss\n",
"from langchain.docstore import InMemoryDocstore\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"\n",
"embeddings_model = OpenAIEmbeddings()\n",
"embedding_size = 1536\n",

@ -54,7 +54,7 @@
"outputs": [],
"source": [
"from langchain.docstore import InMemoryDocstore\n",
"from langchain.vectorstores import FAISS"
"from langchain_community.vectorstores import FAISS"
]
},
{

@ -63,7 +63,7 @@
"%pip install faiss-cpu > /dev/null\n",
"%pip install google-search-results > /dev/null\n",
"from langchain.docstore import InMemoryDocstore\n",
"from langchain.vectorstores import FAISS"
"from langchain_community.vectorstores import FAISS"
]
},
{

@ -23,9 +23,9 @@
"metadata": {},
"source": [
"1. Prepare data:\n",
" 1. Upload all python project files using the `langchain.document_loaders.TextLoader`. We will call these files the **documents**.\n",
" 1. Upload all python project files using the `langchain_community.document_loaders.TextLoader`. We will call these files the **documents**.\n",
" 2. Split all documents to chunks using the `langchain.text_splitter.CharacterTextSplitter`.\n",
" 3. Embed chunks and upload them into the DeepLake using `langchain.embeddings.openai.OpenAIEmbeddings` and `langchain.vectorstores.DeepLake`\n",
" 3. Embed chunks and upload them into the DeepLake using `langchain.embeddings.openai.OpenAIEmbeddings` and `langchain_community.vectorstores.DeepLake`\n",
"2. Question-Answering:\n",
" 1. Build a chain from `langchain.chat_models.ChatOpenAI` and `langchain.chains.ConversationalRetrievalChain`\n",
" 2. Prepare questions.\n",
@ -166,7 +166,7 @@
}
],
"source": [
"from langchain.document_loaders import TextLoader\n",
"from langchain_community.document_loaders import TextLoader\n",
"\n",
"root_dir = \"../../../../../../libs\"\n",
"\n",
@ -706,7 +706,7 @@
{
"data": {
"text/plain": [
"<langchain.vectorstores.deeplake.DeepLake at 0x7fe1b67d7a30>"
"<langchain_community.vectorstores.deeplake.DeepLake at 0x7fe1b67d7a30>"
]
},
"execution_count": 15,
@ -715,7 +715,7 @@
}
],
"source": [
"from langchain.vectorstores import DeepLake\n",
"from langchain_community.vectorstores import DeepLake\n",
"\n",
"username = \"<USERNAME_OR_ORG>\"\n",
"\n",
@ -740,7 +740,7 @@
"metadata": {},
"outputs": [],
"source": [
"# from langchain.vectorstores import DeepLake\n",
"# from langchain_community.vectorstores import DeepLake\n",
"\n",
"# db = DeepLake.from_documents(\n",
"# texts, embeddings, dataset_path=f\"hub://{<org_id>}/langchain-code\", runtime={\"tensor_db\": True}\n",

@ -115,8 +115,8 @@
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_community.embeddings import OpenAIEmbeddings"
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS"
]
},
{

@ -139,8 +139,8 @@
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_community.embeddings import OpenAIEmbeddings"
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS"
]
},
{

@ -104,8 +104,8 @@
"outputs": [],
"source": [
"from langchain.schema import Document\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_community.embeddings import OpenAIEmbeddings"
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS"
]
},
{

@ -56,9 +56,9 @@
" CharacterTextSplitter,\n",
" RecursiveCharacterTextSplitter,\n",
")\n",
"from langchain.vectorstores import DeepLake\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.vectorstores import DeepLake\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
"activeloop_token = getpass.getpass(\"Activeloop Token:\")\n",

@ -547,8 +547,8 @@
"\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.vectorstores.chroma import Chroma\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores.chroma import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",

@ -49,9 +49,9 @@
"\n",
"from langchain.docstore import InMemoryDocstore\n",
"from langchain.retrievers import TimeWeightedVectorStoreRetriever\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from termcolor import colored"
]
},

@ -172,7 +172,7 @@
"outputs": [],
"source": [
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"\n",
"with open(\"../../state_of_the_union.txt\") as f:\n",
" state_of_the_union = f.read()\n",

@ -187,7 +187,7 @@
"\n",
"import chromadb\n",
"import numpy as np\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_experimental.open_clip import OpenCLIPEmbeddings\n",
"from PIL import Image as _PILImage\n",
"\n",

@ -20,10 +20,10 @@
"outputs": [],
"source": [
"from langchain.chains import RetrievalQA\n",
"from langchain.document_loaders import TextLoader\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings"
"from langchain_community.document_loaders import TextLoader\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Chroma"
]
},
{

@ -59,11 +59,13 @@
"from baidubce.auth.bce_credentials import BceCredentials\n",
"from baidubce.bce_client_configuration import BceClientConfiguration\n",
"from langchain.chains.retrieval_qa import RetrievalQA\n",
"from langchain.document_loaders.baiducloud_bos_directory import BaiduBOSDirectoryLoader\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain.vectorstores import BESVectorStore\n",
"from langchain_community.document_loaders.baiducloud_bos_directory import (\n",
" BaiduBOSDirectoryLoader,\n",
")\n",
"from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings\n",
"from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint"
"from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint\n",
"from langchain_community.vectorstores import BESVectorStore"
]
},
{

@ -30,8 +30,8 @@
"outputs": [],
"source": [
"import pinecone\n",
"from langchain.vectorstores import Pinecone\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import Pinecone\n",
"\n",
"pinecone.init(api_key=\"...\", environment=\"...\")"
]

@ -53,10 +53,10 @@
"from langchain.prompts.base import StringPromptTemplate\n",
"from langchain.schema import AgentAction, AgentFinish\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_community.llms import BaseLLM, OpenAI\n",
"from langchain_community.vectorstores import Chroma\n",
"from pydantic import BaseModel, Field"
]
},

@ -1083,8 +1083,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.vectorstores import ElasticsearchStore\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import ElasticsearchStore\n",
"\n",
"embeddings = OpenAIEmbeddings()"
]

@ -996,7 +996,7 @@ from langchain.prompts import FewShotPromptTemplate, PromptTemplate
from langchain.chains.sql_database.prompt import _sqlite_prompt, PROMPT_SUFFIX
from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts.example_selector.semantic_similarity import SemanticSimilarityExampleSelector
from langchain.vectorstores import Chroma
from langchain_community.vectorstores import Chroma
example_prompt = PromptTemplate(
input_variables=["table_info", "input", "sql_cmd", "sql_result", "answer"],

@ -37,8 +37,8 @@
"import getpass\n",
"import os\n",
"\n",
"from langchain.vectorstores import DeepLake\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import DeepLake\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n",
"activeloop_token = getpass.getpass(\"Activeloop Token:\")\n",
@ -110,7 +110,7 @@
"source": [
"import os\n",
"\n",
"from langchain.document_loaders import TextLoader\n",
"from langchain_community.document_loaders import TextLoader\n",
"\n",
"root_dir = \"./the-algorithm\"\n",
"docs = []\n",

@ -56,5 +56,5 @@ from langchain_community.chat_models import integration_class_REPLACE_ME
See a [usage example](/docs/integrations/document_loaders/INCLUDE_REAL_NAME).
```python
from langchain.document_loaders import integration_class_REPLACE_ME
from langchain_community.document_loaders import integration_class_REPLACE_ME
```

@ -39,9 +39,9 @@
"from operator import itemgetter\n",
"\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough"
]

@ -325,9 +325,9 @@
"# pip install langchain docarray tiktoken\n",
"\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.vectorstores import DocArrayInMemorySearch\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import DocArrayInMemorySearch\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
"\n",

@ -45,9 +45,9 @@
],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
@ -129,9 +129,9 @@
"from operator import itemgetter\n",
"\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",

@ -131,8 +131,8 @@
"source": [
"from typing import Optional\n",
"\n",
"from langchain.memory.chat_message_histories import RedisChatMessageHistory\n",
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_community.chat_message_histories import RedisChatMessageHistory\n",
"from langchain_community.chat_models import ChatAnthropic\n",
"from langchain_core.chat_history import BaseChatMessageHistory\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory"

@ -98,9 +98,9 @@
],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",

@ -659,8 +659,8 @@
}
],
"source": [
"from langchain.vectorstores import FAISS\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",

@ -638,8 +638,8 @@
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"\n",
"# 2. Load the data: In our case data's already loaded\n",
"# 3. Anonymize the data before indexing\n",

@ -215,11 +215,11 @@
"source": [
"import requests\n",
"from langchain.chains import RetrievalQA\n",
"from langchain.document_loaders import TextLoader\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_community.document_loaders import TextLoader\n",
"from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
"from langchain_community.llms import OpenAI\n",
"from langchain_community.vectorstores import Chroma\n",
"\n",
"text_file_url = \"https://raw.githubusercontent.com/hwchase17/chat-your-data/master/state_of_the_union.txt\"\n",
"\n",

@ -316,8 +316,8 @@
"# os.environ[\"OPENAI_API_KEY\"] = \"YOUR_API_KEY\"\n",
"\n",
"from langchain.chains.summarize import load_summarize_chain\n",
"from langchain.document_loaders import WebBaseLoader\n",
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.document_loaders import WebBaseLoader\n",
"\n",
"# Create callback handler. This logs latency, errors, token usage, prompts, as well as prompt responses to Infino.\n",
"handler = InfinoCallbackHandler(\n",

@ -79,8 +79,8 @@
"import re\n",
"from typing import Iterator, List\n",
"\n",
"from langchain.chat_loaders import base as chat_loaders\n",
"from langchain.schema import BaseMessage, HumanMessage\n",
"from langchain_community.chat_loaders import base as chat_loaders\n",
"\n",
"logger = logging.getLogger()\n",
"\n",
@ -216,8 +216,8 @@
"source": [
"from typing import List\n",
"\n",
"from langchain.chat_loaders.base import ChatSession\n",
"from langchain.chat_loaders.utils import (\n",
"from langchain_community.chat_loaders.base import ChatSession\n",
"from langchain_community.chat_loaders.utils import (\n",
" map_ai_messages,\n",
" merge_chat_runs,\n",
")\n",

@ -106,7 +106,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_loaders.facebook_messenger import (\n",
"from langchain_community.chat_loaders.facebook_messenger import (\n",
" FolderFacebookMessengerChatLoader,\n",
" SingleFileFacebookMessengerChatLoader,\n",
")"
@ -201,7 +201,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_loaders.utils import (\n",
"from langchain_community.chat_loaders.utils import (\n",
" map_ai_messages,\n",
" merge_chat_runs,\n",
")"

@ -73,7 +73,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_loaders.gmail import GMailLoader"
"from langchain_community.chat_loaders.gmail import GMailLoader"
]
},
{
@ -125,7 +125,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_loaders.utils import (\n",
"from langchain_community.chat_loaders.utils import (\n",
" map_ai_messages,\n",
")"
]

@ -80,7 +80,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_loaders.imessage import IMessageChatLoader"
"from langchain_community.chat_loaders.imessage import IMessageChatLoader"
]
},
{
@ -116,8 +116,8 @@
"source": [
"from typing import List\n",
"\n",
"from langchain.chat_loaders.base import ChatSession\n",
"from langchain.chat_loaders.utils import (\n",
"from langchain_community.chat_loaders.base import ChatSession\n",
"from langchain_community.chat_loaders.utils import (\n",
" map_ai_messages,\n",
" merge_chat_runs,\n",
")\n",

@ -128,7 +128,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_loaders.langsmith import LangSmithDatasetChatLoader\n",
"from langchain_community.chat_loaders.langsmith import LangSmithDatasetChatLoader\n",
"\n",
"loader = LangSmithDatasetChatLoader(dataset_name=dataset_name)\n",
"\n",

@ -263,7 +263,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_loaders.langsmith import LangSmithRunChatLoader\n",
"from langchain_community.chat_loaders.langsmith import LangSmithRunChatLoader\n",
"\n",
"loader = LangSmithRunChatLoader(runs=llm_runs)\n",
"\n",

@ -53,7 +53,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_loaders.slack import SlackChatLoader"
"from langchain_community.chat_loaders.slack import SlackChatLoader"
]
},
{
@ -87,8 +87,8 @@
"source": [
"from typing import List\n",
"\n",
"from langchain.chat_loaders.base import ChatSession\n",
"from langchain.chat_loaders.utils import (\n",
"from langchain_community.chat_loaders.base import ChatSession\n",
"from langchain_community.chat_loaders.utils import (\n",
" map_ai_messages,\n",
" merge_chat_runs,\n",
")\n",

@ -102,7 +102,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_loaders.telegram import TelegramChatLoader"
"from langchain_community.chat_loaders.telegram import TelegramChatLoader"
]
},
{
@ -136,8 +136,8 @@
"source": [
"from typing import List\n",
"\n",
"from langchain.chat_loaders.base import ChatSession\n",
"from langchain.chat_loaders.utils import (\n",
"from langchain_community.chat_loaders.base import ChatSession\n",
"from langchain_community.chat_loaders.utils import (\n",
" map_ai_messages,\n",
" merge_chat_runs,\n",
")\n",

@ -78,8 +78,8 @@
"import re\n",
"from typing import Iterator, List\n",
"\n",
"from langchain.chat_loaders import base as chat_loaders\n",
"from langchain.schema import BaseMessage, HumanMessage\n",
"from langchain_community.chat_loaders import base as chat_loaders\n",
"\n",
"logger = logging.getLogger()\n",
"\n",
@ -209,8 +209,8 @@
"source": [
"from typing import List\n",
"\n",
"from langchain.chat_loaders.base import ChatSession\n",
"from langchain.chat_loaders.utils import (\n",
"from langchain_community.chat_loaders.base import ChatSession\n",
"from langchain_community.chat_loaders.utils import (\n",
" map_ai_messages,\n",
" merge_chat_runs,\n",
")\n",

@ -74,7 +74,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_loaders.whatsapp import WhatsAppChatLoader"
"from langchain_community.chat_loaders.whatsapp import WhatsAppChatLoader"
]
},
{
@ -126,8 +126,8 @@
"source": [
"from typing import List\n",
"\n",
"from langchain.chat_loaders.base import ChatSession\n",
"from langchain.chat_loaders.utils import (\n",
"from langchain_community.chat_loaders.base import ChatSession\n",
"from langchain_community.chat_loaders.utils import (\n",
" map_ai_messages,\n",
" merge_chat_runs,\n",
")\n",

@ -27,7 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import AcreomLoader"
"from langchain_community.document_loaders import AcreomLoader"
]
},
{

@ -98,7 +98,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.airbyte import AirbyteCDKLoader\n",
"from langchain_community.document_loaders.airbyte import AirbyteCDKLoader\n",
"from source_github.source import SourceGithub # plug in your own source here\n",
"\n",
"config = {\n",

@ -85,7 +85,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.airbyte import AirbyteGongLoader\n",
"from langchain_community.document_loaders.airbyte import AirbyteGongLoader\n",
"\n",
"config = {\n",
" # your gong configuration\n",

@ -87,7 +87,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.airbyte import AirbyteHubspotLoader\n",
"from langchain_community.document_loaders.airbyte import AirbyteHubspotLoader\n",
"\n",
"config = {\n",
" # your hubspot configuration\n",

@ -54,7 +54,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import AirbyteJSONLoader"
"from langchain_community.document_loaders import AirbyteJSONLoader"
]
},
{

@ -92,7 +92,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.airbyte import AirbyteSalesforceLoader\n",
"from langchain_community.document_loaders.airbyte import AirbyteSalesforceLoader\n",
"\n",
"config = {\n",
" # your salesforce configuration\n",

@ -88,7 +88,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.airbyte import AirbyteShopifyLoader\n",
"from langchain_community.document_loaders.airbyte import AirbyteShopifyLoader\n",
"\n",
"config = {\n",
" # your shopify configuration\n",

@ -85,7 +85,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.airbyte import AirbyteStripeLoader\n",
"from langchain_community.document_loaders.airbyte import AirbyteStripeLoader\n",
"\n",
"config = {\n",
" # your stripe configuration\n",

@ -88,7 +88,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.airbyte import AirbyteTypeformLoader\n",
"from langchain_community.document_loaders.airbyte import AirbyteTypeformLoader\n",
"\n",
"config = {\n",
" # your typeform configuration\n",

@ -89,7 +89,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.airbyte import AirbyteZendeskSupportLoader\n",
"from langchain_community.document_loaders.airbyte import AirbyteZendeskSupportLoader\n",
"\n",
"config = {\n",
" # your zendesk-support configuration\n",

@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import AirtableLoader"
"from langchain_community.document_loaders import AirtableLoader"
]
},
{

@ -58,7 +58,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders import MaxComputeLoader"
"from langchain_community.document_loaders import MaxComputeLoader"
]
},
{

@ -60,7 +60,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders import AmazonTextractPDFLoader\n",
"from langchain_community.document_loaders import AmazonTextractPDFLoader\n",
"\n",
"loader = AmazonTextractPDFLoader(\"example_data/alejandro_rosalez_sample-small.jpeg\")\n",
"documents = loader.load()"
@ -116,7 +116,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders import AmazonTextractPDFLoader\n",
"from langchain_community.document_loaders import AmazonTextractPDFLoader\n",
"\n",
"loader = AmazonTextractPDFLoader(\n",
" \"https://amazon-textract-public-content.s3.us-east-2.amazonaws.com/langchain/alejandro_rosalez_sample_1.jpg\"\n",

@ -40,8 +40,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import ApifyDatasetLoader\n",
"from langchain.document_loaders.base import Document"
"from langchain_community.document_loaders import ApifyDatasetLoader\n",
"from langchain_community.document_loaders.base import Document"
]
},
{
@ -101,8 +101,8 @@
"outputs": [],
"source": [
"from langchain.docstore.document import Document\n",
"from langchain.document_loaders import ApifyDatasetLoader\n",
"from langchain.indexes import VectorstoreIndexCreator"
"from langchain.indexes import VectorstoreIndexCreator\n",
"from langchain_community.document_loaders import ApifyDatasetLoader"
]
},
{

@ -7,7 +7,7 @@
"source": [
"# ArcGIS\n",
"\n",
"This notebook demonstrates the use of the `langchain.document_loaders.ArcGISLoader` class.\n",
"This notebook demonstrates the use of the `langchain_community.document_loaders.ArcGISLoader` class.\n",
"\n",
"You will need to install the ArcGIS API for Python `arcgis` and, optionally, `bs4.BeautifulSoup`.\n",
"\n",
@ -21,7 +21,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import ArcGISLoader\n",
"from langchain_community.document_loaders import ArcGISLoader\n",
"\n",
"URL = \"https://maps1.vcgov.org/arcgis/rest/services/Beaches/MapServer/7\"\n",
"loader = ArcGISLoader(URL)\n",

@ -88,7 +88,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import ArxivLoader"
"from langchain_community.document_loaders import ArxivLoader"
]
},
{

@ -53,7 +53,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import AssemblyAIAudioTranscriptLoader\n",
"from langchain_community.document_loaders import AssemblyAIAudioTranscriptLoader\n",
"\n",
"audio_file = \"https://storage.googleapis.com/aai-docs-samples/nbc.mp3\"\n",
"# or a local file path: audio_file = \"./nbc.mp3\"\n",
@ -148,7 +148,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.assemblyai import TranscriptFormat\n",
"from langchain_community.document_loaders.assemblyai import TranscriptFormat\n",
"\n",
"loader = AssemblyAIAudioTranscriptLoader(\n",
" file_path=\"./your_file.mp3\",\n",

@ -45,7 +45,7 @@
}
],
"source": [
"from langchain.document_loaders import AsyncChromiumLoader\n",
"from langchain_community.document_loaders import AsyncChromiumLoader\n",
"\n",
"urls = [\"https://www.wsj.com\"]\n",
"loader = AsyncChromiumLoader(urls)\n",
@ -71,7 +71,7 @@
}
],
"source": [
"from langchain.document_transformers import Html2TextTransformer\n",
"from langchain_community.document_transformers import Html2TextTransformer\n",
"\n",
"html2text = Html2TextTransformer()\n",
"docs_transformed = html2text.transform_documents(docs)\n",

@ -17,7 +17,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import AsyncHtmlLoader"
"from langchain_community.document_loaders import AsyncHtmlLoader"
]
},
{

@ -35,7 +35,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders import S3DirectoryLoader"
"from langchain_community.document_loaders import S3DirectoryLoader"
]
},
{

@ -21,7 +21,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import S3FileLoader"
"from langchain_community.document_loaders import S3FileLoader"
]
},
{

@ -19,7 +19,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import AZLyricsLoader"
"from langchain_community.document_loaders import AZLyricsLoader"
]
},
{

@ -39,7 +39,7 @@
"source": [
"from azure.ai.resources.client import AIClient\n",
"from azure.identity import DefaultAzureCredential\n",
"from langchain.document_loaders import AzureAIDataLoader"
"from langchain_community.document_loaders import AzureAIDataLoader"
]
},
{

@ -39,7 +39,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders import AzureBlobStorageContainerLoader"
"from langchain_community.document_loaders import AzureBlobStorageContainerLoader"
]
},
{

@ -31,7 +31,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import AzureBlobStorageFileLoader"
"from langchain_community.document_loaders import AzureBlobStorageFileLoader"
]
},
{

@ -65,7 +65,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders import BibtexLoader"
"from langchain_community.document_loaders import BibtexLoader"
]
},
{

@ -35,7 +35,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders import BiliBiliLoader"
"from langchain_community.document_loaders import BiliBiliLoader"
]
},
{

@ -23,7 +23,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import BlackboardLoader\n",
"from langchain_community.document_loaders import BlackboardLoader\n",
"\n",
"loader = BlackboardLoader(\n",
" blackboard_course_url=\"https://blackboard.example.com/webapps/blackboard/execute/announcement?method=search&context=course_entry&course_id=_123456_1\",\n",

@ -80,7 +80,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders.blockchain import (\n",
"from langchain_community.document_loaders.blockchain import (\n",
" BlockchainDocumentLoader,\n",
" BlockchainType,\n",
")\n",

@ -48,7 +48,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import BraveSearchLoader"
"from langchain_community.document_loaders import BraveSearchLoader"
]
},
{

@ -18,7 +18,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import BrowserlessLoader"
"from langchain_community.document_loaders import BrowserlessLoader"
]
},
{

@ -22,7 +22,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders.chatgpt import ChatGPTLoader"
"from langchain_community.document_loaders.chatgpt import ChatGPTLoader"
]
},
{

@ -19,7 +19,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import CollegeConfidentialLoader"
"from langchain_community.document_loaders import CollegeConfidentialLoader"
]
},
{

@ -17,7 +17,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import ConcurrentLoader"
"from langchain_community.document_loaders import ConcurrentLoader"
]
},
{

@ -67,7 +67,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import ConfluenceLoader\n",
"from langchain_community.document_loaders import ConfluenceLoader\n",
"\n",
"loader = ConfluenceLoader(\n",
" url=\"https://yoursite.atlassian.com/wiki\", username=\"me\", api_key=\"12345\"\n",
@ -93,7 +93,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import ConfluenceLoader\n",
"from langchain_community.document_loaders import ConfluenceLoader\n",
"\n",
"loader = ConfluenceLoader(url=\"https://yoursite.atlassian.com/wiki\", token=\"12345\")\n",
"documents = loader.load(\n",

@ -24,7 +24,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders import CoNLLULoader"
"from langchain_community.document_loaders import CoNLLULoader"
]
},
{

@ -45,7 +45,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.couchbase import CouchbaseLoader\n",
"from langchain_community.document_loaders.couchbase import CouchbaseLoader\n",
"\n",
"connection_string = \"couchbase://localhost\" # valid Couchbase connection string\n",
"db_username = (\n",

@ -22,7 +22,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders.csv_loader import CSVLoader"
"from langchain_community.document_loaders.csv_loader import CSVLoader"
]
},
{
@ -167,7 +167,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.csv_loader import UnstructuredCSVLoader"
"from langchain_community.document_loaders.csv_loader import UnstructuredCSVLoader"
]
},
{

@ -79,7 +79,7 @@
"outputs": [],
"source": [
"import jwt\n",
"from langchain.document_loaders import CubeSemanticLoader\n",
"from langchain_community.document_loaders import CubeSemanticLoader\n",
"\n",
"api_url = \"https://api-example.gcp-us-central1.cubecloudapp.dev/cubejs-api/v1/meta\"\n",
"cubejs_api_secret = \"api-secret-here\"\n",

@ -18,7 +18,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import DatadogLogsLoader"
"from langchain_community.document_loaders import DatadogLogsLoader"
]
},
{

@ -45,7 +45,7 @@
"source": [
"import os\n",
"\n",
"from langchain.document_loaders import DiffbotLoader\n",
"from langchain_community.document_loaders import DiffbotLoader\n",
"\n",
"loader = DiffbotLoader(urls=urls, api_token=os.environ.get(\"DIFFBOT_API_TOKEN\"))"
]

@ -52,7 +52,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.discord import DiscordChatLoader"
"from langchain_community.document_loaders.discord import DiscordChatLoader"
]
},
{

@ -56,7 +56,7 @@
"source": [
"import os\n",
"\n",
"from langchain.document_loaders import DocugamiLoader"
"from langchain_community.document_loaders import DocugamiLoader"
]
},
{
@ -211,9 +211,9 @@
"outputs": [],
"source": [
"from langchain.chains import RetrievalQA\n",
"from langchain.vectorstores.chroma import Chroma\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.llms.openai import OpenAI\n",
"from langchain_community.vectorstores.chroma import Chroma\n",
"\n",
"embedding = OpenAIEmbeddings()\n",
"vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)\n",
@ -366,7 +366,7 @@
"source": [
"from langchain.chains.query_constructor.schema import AttributeInfo\n",
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
"from langchain.vectorstores.chroma import Chroma\n",
"from langchain_community.vectorstores.chroma import Chroma\n",
"\n",
"EXCLUDE_KEYS = [\"id\", \"xpath\", \"structure\"]\n",
"metadata_field_info = [\n",
@ -471,7 +471,7 @@
"source": [
"from typing import Dict, List\n",
"\n",
"from langchain.document_loaders import DocugamiLoader\n",
"from langchain_community.document_loaders import DocugamiLoader\n",
"from langchain_core.documents import Document\n",
"\n",
"loader = DocugamiLoader(docset_id=\"zo954yqy53wp\")\n",
@ -541,8 +541,8 @@
"source": [
"from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.vectorstores.chroma import Chroma\n",
"from langchain_community.embeddings import OpenAIEmbeddings\n",
"from langchain_community.vectorstores.chroma import Chroma\n",
"\n",
"# The vectorstore to use to index the child chunks\n",
"vectorstore = Chroma(collection_name=\"big2small\", embedding_function=OpenAIEmbeddings())\n",

File diff suppressed because one or more lines are too long

@ -56,7 +56,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import DropboxLoader"
"from langchain_community.document_loaders import DropboxLoader"
]
},
{

@ -30,7 +30,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders import DuckDBLoader"
"from langchain_community.document_loaders import DuckDBLoader"
]
},
{

@ -39,7 +39,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders import UnstructuredEmailLoader"
"from langchain_community.document_loaders import UnstructuredEmailLoader"
]
},
{
@ -220,7 +220,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import OutlookMessageLoader"
"from langchain_community.document_loaders import OutlookMessageLoader"
]
},
{

@ -31,7 +31,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders import UnstructuredEPubLoader"
"from langchain_community.document_loaders import UnstructuredEPubLoader"
]
},
{

@ -87,7 +87,7 @@
"source": [
"import os\n",
"\n",
"from langchain.document_loaders import EtherscanLoader"
"from langchain_community.document_loaders import EtherscanLoader"
]
},
{

@ -51,7 +51,7 @@
}
],
"source": [
"from langchain.document_loaders import EverNoteLoader\n",
"from langchain_community.document_loaders import EverNoteLoader\n",
"\n",
"# By default all notes are combined into a single Document\n",
"loader = EverNoteLoader(\"example_data/testing.enex\")\n",

@ -28,7 +28,7 @@
},
"outputs": [],
"source": [
"from langchain.document_loaders import FacebookChatLoader"
"from langchain_community.document_loaders import FacebookChatLoader"
]
},
{

@ -35,7 +35,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.fauna import FaunaLoader\n",
"from langchain_community.document_loaders.fauna import FaunaLoader\n",
"\n",
"secret = \"<enter-valid-fauna-secret>\"\n",
"query = \"Item.all()\" # Fauna query. Assumes that the collection is called \"Item\"\n",

@ -23,14 +23,14 @@
"source": [
"import os\n",
"\n",
"from langchain.document_loaders.figma import FigmaFileLoader\n",
"from langchain.indexes import VectorstoreIndexCreator\n",
"from langchain.prompts.chat import (\n",
" ChatPromptTemplate,\n",
" HumanMessagePromptTemplate,\n",
" SystemMessagePromptTemplate,\n",
")\n",
"from langchain_community.chat_models import ChatOpenAI"
"from langchain_community.chat_models import ChatOpenAI\n",
"from langchain_community.document_loaders.figma import FigmaFileLoader"
]
},
{

Some files were not shown because too many files have changed in this diff.
