From b1fa726377e1861f8860e510472dca3e1c0b1c1f Mon Sep 17 00:00:00 2001
From: Erick Friis
Date: Sat, 6 Jan 2024 15:54:48 -0800
Subject: [PATCH] docs: langchain-openai (#15513)

Updates docs and cookbooks to import ChatOpenAI, OpenAI, and OpenAIEmbeddings
from `langchain_openai`. There are likely more.

---------

Co-authored-by: Harrison Chase
---
 cookbook/LLaMA2_sql_chat.ipynb | 4 +- cookbook/Multi_modal_RAG.ipynb | 6 +-- cookbook/Semi_Structured_RAG.ipynb | 8 ++-- .../Semi_structured_and_multi_modal_RAG.ipynb | 8 ++-- ...mi_structured_multi_modal_RAG_LLaMA2.ipynb | 4 +- cookbook/advanced_rag_eval.ipynb | 6 +-- cookbook/agent_vectorstore.ipynb | 5 +- cookbook/analyze_document.ipynb | 2 +- cookbook/autogpt/autogpt.ipynb | 8 ++-- cookbook/autogpt/marathon_times.ipynb | 4 +- cookbook/baby_agi.ipynb | 5 +- cookbook/baby_agi_with_agent.ipynb | 7 ++- cookbook/camel_role_playing.ipynb | 2 +- .../causal_program_aided_language_model.ipynb | 2 +- cookbook/code-analysis-deeplake.ipynb | 4 +- .../custom_agent_with_plugin_retrieval.ipynb | 8 ++-- ...ith_plugin_retrieval_using_plugnplai.ipynb | 8 ++-- .../custom_agent_with_tool_retrieval.ipynb | 8 ++-- cookbook/databricks_sql_db.ipynb | 2 +- .../deeplake_semantic_search_over_chat.ipynb | 3 +- cookbook/docugami_xml_kg_rag.ipynb | 6 +-- cookbook/elasticsearch_db_qa.ipynb | 2 +- cookbook/extraction_openai_tools.ipynb | 6 +-- ...oking_retrieval_augmented_generation.ipynb | 5 +- ...eractive_simulacra_of_human_behavior.ipynb | 3 +- cookbook/hugginggpt.ipynb | 2 +- cookbook/human_approval.ipynb | 2 +- .../hypothetical_document_embeddings.ipynb | 3 +- cookbook/learned_prompt_optimization.ipynb | 2 +- cookbook/llm_bash.ipynb | 2 +- cookbook/llm_checker.ipynb | 2 +- cookbook/llm_math.ipynb | 2 +- cookbook/llm_summarization_checker.ipynb | 6 +-- cookbook/llm_symbolic_math.ipynb | 2 +- cookbook/meta_prompt.ipynb | 2 +- cookbook/multi_modal_QA.ipynb | 4 +- cookbook/multi_modal_RAG_chroma.ipynb | 2 +- cookbook/multi_modal_output_agent.ipynb | 2 +- cookbook/multi_player_dnd.ipynb | 2 +- cookbook/multiagent_authoritarian.ipynb | 2 +- cookbook/multiagent_bidding.ipynb | 2 +- cookbook/myscale_vector_sql.ipynb | 6 +-- cookbook/openai_functions_retrieval_qa.ipynb | 6 +-- cookbook/openai_v1_cookbook.ipynb | 8 ++-- cookbook/petting_zoo.ipynb | 2 +- cookbook/plan_and_execute_agent.ipynb | 5 +- cookbook/press_releases.ipynb | 2 +- cookbook/program_aided_language_model.ipynb | 4 +- cookbook/qa_citations.ipynb | 2 +- cookbook/rag_fusion.ipynb | 6 +-- cookbook/retrieval_in_sql.ipynb | 8 ++-- cookbook/rewrite.ipynb | 6 +-- cookbook/sales_agent_with_context.ipynb | 5 +- ...lecting_llms_based_on_context_length.ipynb | 4 +- cookbook/self_query_hotel_search.ipynb | 4 +- cookbook/sharedmemory_for_tools.ipynb | 4 +- cookbook/smart_llm.ipynb | 4 +- cookbook/sql_db_qa.mdx | 4 +- cookbook/stepback-qa.ipynb | 6 +-- cookbook/tree_of_thought.ipynb | 2 +- ...tter-the-algorithm-analysis-deeplake.ipynb | 4 +- cookbook/two_agent_debate_tools.ipynb | 2 +- cookbook/two_player_dnd.ipynb | 2 +- cookbook/wikibase_agent.ipynb | 2 +- .../cookbook/code_writing.ipynb | 4 +- .../cookbook/embedding_router.ipynb | 3 +- .../expression_language/cookbook/memory.ipynb | 4 +- .../cookbook/moderation.ipynb | 4 +- .../cookbook/multiple_chains.ipynb | 4 +- .../cookbook/prompt_llm_parser.ipynb | 4 +- .../cookbook/prompt_size.ipynb | 6 +-- .../cookbook/retrieval.ipynb | 7 ++- .../expression_language/cookbook/sql_db.ipynb | 4 +- .../expression_language/cookbook/tools.ipynb | 6 +-- .../expression_language/get_started.ipynb
| 46 +++++++++---------- .../expression_language/how_to/binding.ipynb | 6 +-- .../how_to/configure.ipynb | 7 +-- .../how_to/decorator.ipynb | 4 +- .../how_to/fallbacks.ipynb | 9 ++-- .../how_to/functions.ipynb | 4 +- .../how_to/generators.ipynb | 2 +- .../expression_language/how_to/inspect.ipynb | 5 +- .../docs/expression_language/how_to/map.ipynb | 14 +++--- .../how_to/message_history.ipynb | 2 +- .../how_to/passthrough.ipynb | 5 +- docs/docs/expression_language/interface.ipynb | 6 +-- docs/docs/expression_language/why.ipynb | 11 +++-- docs/docs/get_started/quickstart.mdx | 24 +++++----- docs/docs/guides/debugging.md | 2 +- .../evaluation/examples/comparisons.ipynb | 2 +- .../string/scoring_eval_chain.ipynb | 2 +- .../guides/evaluation/trajectory/custom.ipynb | 2 +- .../trajectory/trajectory_eval.ipynb | 2 +- docs/docs/guides/fallbacks.ipynb | 7 +-- docs/docs/guides/model_laboratory.ipynb | 3 +- .../presidio_data_anonymization/index.ipynb | 2 +- .../qa_privacy_protection.ipynb | 6 +-- .../reversible.ipynb | 2 +- .../guides/safety/constitutional_chain.mdx | 2 +- .../hugging_face_prompt_injection.ipynb | 2 +- .../guides/safety/logical_fallacy_chain.mdx | 2 +- docs/docs/guides/safety/moderation.mdx | 2 +- .../docs/integrations/callbacks/argilla.ipynb | 6 +-- .../integrations/callbacks/confident.ipynb | 5 +- .../docs/integrations/callbacks/context.ipynb | 4 +- docs/docs/integrations/callbacks/infino.ipynb | 4 +- .../integrations/callbacks/labelstudio.ipynb | 4 +- docs/docs/integrations/callbacks/llmonitor.md | 8 ++-- .../integrations/callbacks/promptlayer.ipynb | 4 +- .../callbacks/sagemaker_tracking.ipynb | 2 +- docs/docs/integrations/callbacks/streamlit.md | 2 +- .../integrations/callbacks/trubrics.ipynb | 4 +- .../integrations/chat/azure_chat_openai.ipynb | 2 +- docs/docs/integrations/chat/fireworks.ipynb | 2 +- .../chat/google_vertex_ai_palm.ipynb | 4 +- docs/docs/integrations/chat/openai.ipynb | 2 +- docs/docs/integrations/chat/vllm.ipynb | 2 +- .../integrations/chat_loaders/discord.ipynb | 2 +- .../integrations/chat_loaders/facebook.ipynb | 4 +- .../integrations/chat_loaders/imessage.ipynb | 4 +- .../chat_loaders/langsmith_dataset.ipynb | 2 +- .../chat_loaders/langsmith_llm_runs.ipynb | 6 +-- .../integrations/chat_loaders/slack.ipynb | 2 +- .../integrations/chat_loaders/telegram.ipynb | 2 +- .../integrations/chat_loaders/wechat.ipynb | 2 +- .../integrations/chat_loaders/whatsapp.ipynb | 2 +- .../document_loaders/amazon_textract.ipynb | 2 +- .../document_loaders/docugami.ipynb | 5 +- .../integrations/document_loaders/figma.ipynb | 4 +- .../document_loaders/github.ipynb | 2 +- .../document_loaders/psychic.ipynb | 5 +- .../document_loaders/youtube_audio.ipynb | 5 +- .../openai_metadata_tagger.ipynb | 6 +-- .../docs/integrations/llms/azure_openai.ipynb | 2 +- docs/docs/integrations/llms/llm_caching.ipynb | 8 ++-- .../integrations/llms/opaqueprompts.ipynb | 3 +- docs/docs/integrations/llms/openai.ipynb | 2 +- .../integrations/memory/aws_dynamodb.ipynb | 4 +- .../memory/motorhead_memory.ipynb | 2 +- docs/docs/integrations/memory/remembrall.md | 4 +- .../memory/sql_chat_message_history.ipynb | 4 +- docs/docs/integrations/memory/sqlite.ipynb | 2 +- .../streamlit_chat_message_history.ipynb | 2 +- .../memory/xata_chat_message_history.ipynb | 4 +- .../docs/integrations/memory/zep_memory.ipynb | 2 +- .../docs/integrations/platforms/anthropic.mdx | 2 +- .../docs/integrations/platforms/microsoft.mdx | 6 +-- docs/docs/integrations/platforms/openai.mdx | 10 ++-- 
.../integrations/providers/aim_tracking.ipynb | 2 +- .../providers/arthur_tracking.ipynb | 2 +- .../providers/clearml_tracking.ipynb | 2 +- docs/docs/integrations/providers/cnosdb.mdx | 2 +- .../providers/comet_tracking.ipynb | 8 ++-- docs/docs/integrations/providers/flyte.mdx | 2 +- .../integrations/providers/google_serper.mdx | 2 +- docs/docs/integrations/providers/helicone.mdx | 4 +- .../providers/javelin_ai_gateway.mdx | 2 +- .../providers/langchain_decorators.mdx | 2 +- docs/docs/integrations/providers/log10.mdx | 8 ++-- .../providers/mlflow_tracking.ipynb | 2 +- .../integrations/providers/motherduck.mdx | 4 +- .../integrations/providers/portkey/index.md | 4 +- .../portkey/logging_tracing_portkey.ipynb | 4 +- .../integrations/providers/ray_serve.ipynb | 2 +- docs/docs/integrations/providers/rebuff.ipynb | 2 +- .../docs/integrations/providers/searchapi.mdx | 2 +- .../integrations/providers/shaleprotocol.md | 2 +- .../providers/vectara/vectara_chat.ipynb | 4 +- .../providers/vectara/vectara_summary.ipynb | 4 +- .../providers/wandb_tracing.ipynb | 2 +- .../providers/wandb_tracking.ipynb | 2 +- .../providers/whylabs_profiling.ipynb | 2 +- .../integrations/retrievers/activeloop.ipynb | 9 ++-- docs/docs/integrations/retrievers/arxiv.ipynb | 2 +- .../retrievers/cohere-reranker.ipynb | 4 +- .../retrievers/docarray_retriever.ipynb | 2 +- .../retrievers/fleet_context.ipynb | 14 +++--- .../docs/integrations/retrievers/jaguar.ipynb | 4 +- docs/docs/integrations/retrievers/kay.ipynb | 2 +- docs/docs/integrations/retrievers/knn.ipynb | 2 +- .../retrievers/merger_retriever.ipynb | 3 +- .../integrations/retrievers/outline.ipynb | 8 ++-- .../retrievers/pinecone_hybrid_search.ipynb | 2 +- .../integrations/retrievers/ragatouille.ipynb | 2 +- .../integrations/retrievers/re_phrase.ipynb | 5 +- .../integrations/retrievers/sec_filings.ipynb | 2 +- .../activeloop_deeplake_self_query.ipynb | 4 +- .../self_query/chroma_self_query.ipynb | 4 +- .../self_query/elasticsearch_self_query.ipynb | 4 +- .../self_query/milvus_self_query.ipynb | 4 +- .../retrievers/self_query/mongodb_atlas.ipynb | 4 +- .../self_query/myscale_self_query.ipynb | 4 +- .../self_query/opensearch_self_query.ipynb | 4 +- .../retrievers/self_query/pinecone.ipynb | 4 +- .../self_query/qdrant_self_query.ipynb | 4 +- .../self_query/redis_self_query.ipynb | 4 +- .../self_query/supabase_self_query.ipynb | 4 +- .../timescalevector_self_query.ipynb | 4 +- .../self_query/vectara_self_query.ipynb | 6 +-- .../self_query/weaviate_self_query.ipynb | 4 +- .../retrievers/singlestoredb.ipynb | 2 +- docs/docs/integrations/retrievers/svm.ipynb | 2 +- .../docs/integrations/retrievers/tavily.ipynb | 2 +- .../integrations/retrievers/wikipedia.ipynb | 2 +- .../retrievers/you-retriever.ipynb | 2 +- .../text_embedding/azureopenai.ipynb | 4 +- .../integrations/text_embedding/openai.ipynb | 4 +- .../integrations/toolkits/ainetwork.ipynb | 2 +- .../toolkits/airbyte_structured_qa.ipynb | 2 +- docs/docs/integrations/toolkits/amadeus.ipynb | 2 +- .../toolkits/azure_cognitive_services.ipynb | 2 +- docs/docs/integrations/toolkits/clickup.ipynb | 4 +- docs/docs/integrations/toolkits/csv.ipynb | 5 +- .../document_comparison_toolkit.ipynb | 3 +- docs/docs/integrations/toolkits/github.ipynb | 6 +-- docs/docs/integrations/toolkits/gitlab.ipynb | 4 +- docs/docs/integrations/toolkits/gmail.ipynb | 2 +- docs/docs/integrations/toolkits/jira.ipynb | 4 +- docs/docs/integrations/toolkits/json.ipynb | 4 +- docs/docs/integrations/toolkits/multion.ipynb | 2 +- 
docs/docs/integrations/toolkits/nasa.ipynb | 2 +- .../integrations/toolkits/office365.ipynb | 2 +- docs/docs/integrations/toolkits/openapi.ipynb | 10 ++-- .../integrations/toolkits/openapi_nla.ipynb | 2 +- docs/docs/integrations/toolkits/pandas.ipynb | 6 +-- docs/docs/integrations/toolkits/powerbi.ipynb | 4 +- docs/docs/integrations/toolkits/python.ipynb | 2 +- docs/docs/integrations/toolkits/slack.ipynb | 2 +- docs/docs/integrations/toolkits/spark.ipynb | 4 +- .../integrations/toolkits/spark_sql.ipynb | 4 +- .../integrations/toolkits/sql_database.ipynb | 2 +- docs/docs/integrations/toolkits/steam.ipynb | 4 +- docs/docs/integrations/toolkits/xorbits.ipynb | 6 +-- docs/docs/integrations/tools/arxiv.ipynb | 2 +- docs/docs/integrations/tools/awslambda.ipynb | 2 +- docs/docs/integrations/tools/bash.ipynb | 2 +- docs/docs/integrations/tools/bearly.ipynb | 2 +- .../integrations/tools/chatgpt_plugins.ipynb | 2 +- .../tools/dalle_image_generator.ipynb | 4 +- .../tools/e2b_data_analysis.ipynb | 2 +- .../integrations/tools/eleven_labs_tts.ipynb | 2 +- .../integrations/tools/google_drive.ipynb | 2 +- .../integrations/tools/google_finance.ipynb | 2 +- .../docs/integrations/tools/google_jobs.ipynb | 2 +- .../integrations/tools/google_serper.ipynb | 2 +- .../integrations/tools/gradio_tools.ipynb | 2 +- docs/docs/integrations/tools/graphql.ipynb | 2 +- .../docs/integrations/tools/human_tools.ipynb | 3 +- docs/docs/integrations/tools/lemonai.ipynb | 2 +- .../integrations/tools/metaphor_search.ipynb | 4 +- .../integrations/tools/openweathermap.ipynb | 2 +- .../integrations/tools/reddit_search.ipynb | 2 +- .../docs/integrations/tools/sceneXplain.ipynb | 2 +- .../integrations/tools/search_tools.ipynb | 2 +- docs/docs/integrations/tools/searchapi.ipynb | 2 +- .../integrations/tools/semanticscholar.ipynb | 2 +- .../integrations/tools/tavily_search.ipynb | 2 +- .../tools/yahoo_finance_news.ipynb | 2 +- docs/docs/integrations/tools/zapier.ipynb | 8 ++-- .../vectorstores/activeloop_deeplake.ipynb | 8 ++-- .../alibabacloud_opensearch.ipynb | 4 +- .../vectorstores/analyticdb.ipynb | 4 +- .../integrations/vectorstores/astradb.ipynb | 7 ++- .../vectorstores/azure_cosmos_db.ipynb | 2 +- .../vectorstores/azuresearch.ipynb | 4 +- .../integrations/vectorstores/chroma.ipynb | 2 +- .../vectorstores/clickhouse.ipynb | 4 +- .../databricks_vector_search.ipynb | 2 +- .../integrations/vectorstores/dingo.ipynb | 8 ++-- .../vectorstores/docarray_hnsw.ipynb | 4 +- .../vectorstores/docarray_in_memory.ipynb | 4 +- .../vectorstores/elasticsearch.ipynb | 10 ++-- .../integrations/vectorstores/epsilla.ipynb | 4 +- .../integrations/vectorstores/faiss.ipynb | 2 +- .../vectorstores/faiss_async.ipynb | 2 +- .../integrations/vectorstores/hippo.ipynb | 5 +- .../integrations/vectorstores/hologres.ipynb | 4 +- .../integrations/vectorstores/jaguar.ipynb | 8 ++-- .../integrations/vectorstores/lancedb.ipynb | 4 +- .../integrations/vectorstores/marqo.ipynb | 2 +- .../vectorstores/meilisearch.ipynb | 2 +- .../integrations/vectorstores/milvus.ipynb | 4 +- .../vectorstores/momento_vector_index.ipynb | 6 +-- .../vectorstores/mongodb_atlas.ipynb | 6 +-- .../integrations/vectorstores/myscale.ipynb | 4 +- .../vectorstores/neo4jvector.ipynb | 6 +-- .../vectorstores/opensearch.ipynb | 4 +- .../vectorstores/pgembedding.ipynb | 4 +- .../vectorstores/pgvecto_rs.ipynb | 4 +- .../integrations/vectorstores/pgvector.ipynb | 4 +- .../integrations/vectorstores/pinecone.ipynb | 4 +- .../integrations/vectorstores/qdrant.ipynb | 4 +- 
.../integrations/vectorstores/redis.ipynb | 2 +- .../integrations/vectorstores/rockset.ipynb | 2 +- .../vectorstores/singlestoredb.ipynb | 4 +- .../integrations/vectorstores/sklearn.ipynb | 2 +- .../integrations/vectorstores/starrocks.ipynb | 3 +- .../integrations/vectorstores/supabase.ipynb | 2 +- .../integrations/vectorstores/tigris.ipynb | 4 +- .../vectorstores/timescalevector.ipynb | 10 ++-- .../integrations/vectorstores/typesense.ipynb | 4 +- .../integrations/vectorstores/usearch.ipynb | 4 +- .../integrations/vectorstores/weaviate.ipynb | 12 ++--- .../docs/integrations/vectorstores/xata.ipynb | 4 +- .../vectorstores/yellowbrick.ipynb | 3 +- .../integrations/vectorstores/zilliz.ipynb | 4 +- docs/docs/langsmith/walkthrough.ipynb | 4 +- .../agents/agent_types/json_agent.ipynb | 4 +- .../agent_types/openai_functions_agent.ipynb | 4 +- .../agents/agent_types/openai_tools.ipynb | 4 +- .../modules/agents/agent_types/react.ipynb | 4 +- .../agents/agent_types/structured_chat.ipynb | 4 +- .../modules/agents/how_to/agent_iter.ipynb | 4 +- .../agents/how_to/agent_structured.ipynb | 10 ++-- .../modules/agents/how_to/custom_agent.ipynb | 4 +- .../agents/how_to/handle_parsing_errors.ipynb | 2 +- .../agents/how_to/intermediate_steps.ipynb | 2 +- .../agents/how_to/max_iterations.ipynb | 2 +- .../agents/how_to/max_time_limit.ipynb | 2 +- .../modules/agents/how_to/streaming.ipynb | 2 +- docs/docs/modules/agents/quick_start.ipynb | 4 +- .../tools/tools_as_openai_functions.ipynb | 2 +- .../modules/callbacks/async_callbacks.ipynb | 2 +- .../modules/callbacks/custom_callbacks.ipynb | 2 +- .../callbacks/filecallbackhandler.ipynb | 2 +- docs/docs/modules/callbacks/index.mdx | 2 +- .../callbacks/multiple_callbacks.ipynb | 2 +- .../modules/callbacks/token_counting.ipynb | 2 +- .../data_connection/document_loaders/pdf.mdx | 2 +- .../modules/data_connection/indexing.ipynb | 4 +- .../retrievers/MultiQueryRetriever.ipynb | 4 +- .../retrievers/contextual_compression.ipynb | 6 +-- .../data_connection/retrievers/ensemble.ipynb | 4 +- .../data_connection/retrievers/index.mdx | 4 +- .../retrievers/long_context_reorder.ipynb | 2 +- .../retrievers/multi_vector.ipynb | 12 ++--- .../parent_document_retriever.ipynb | 4 +- .../retrievers/self_query.ipynb | 4 +- .../time_weighted_vectorstore.ipynb | 4 +- .../retrievers/vectorstore.ipynb | 2 +- .../text_embedding/caching_embeddings.ipynb | 2 +- .../data_connection/text_embedding/index.mdx | 4 +- .../data_connection/vectorstores/index.mdx | 6 +-- docs/docs/modules/memory/adding_memory.ipynb | 4 +- .../adding_memory_chain_multiple_inputs.ipynb | 6 +-- .../modules/memory/agent_with_memory.ipynb | 4 +- .../memory/agent_with_memory_in_db.ipynb | 4 +- .../memory/conversational_customization.ipynb | 2 +- docs/docs/modules/memory/custom_memory.ipynb | 2 +- docs/docs/modules/memory/index.mdx | 4 +- .../docs/modules/memory/multiple_memory.ipynb | 2 +- docs/docs/modules/memory/types/buffer.mdx | 2 +- .../modules/memory/types/buffer_window.mdx | 2 +- .../memory/types/entity_summary_memory.mdx | 2 +- docs/docs/modules/memory/types/kg.ipynb | 2 +- docs/docs/modules/memory/types/summary.mdx | 4 +- .../modules/memory/types/summary_buffer.ipynb | 2 +- .../modules/memory/types/token_buffer.ipynb | 2 +- .../types/vectorstore_retriever_memory.mdx | 4 +- .../model_io/chat/chat_model_caching.ipynb | 2 +- .../modules/model_io/chat/quick_start.ipynb | 4 +- .../model_io/chat/token_usage_tracking.ipynb | 4 +- .../modules/model_io/llms/llm_caching.ipynb | 2 +- .../modules/model_io/llms/quick_start.ipynb 
| 4 +- .../modules/model_io/llms/streaming_llm.ipynb | 2 +- .../model_io/llms/token_usage_tracking.ipynb | 4 +- .../model_io/output_parsers/quick_start.ipynb | 2 +- .../model_io/output_parsers/types/csv.ipynb | 2 +- .../output_parsers/types/datetime.ipynb | 2 +- .../model_io/output_parsers/types/enum.ipynb | 2 +- .../model_io/output_parsers/types/json.ipynb | 4 +- .../types/openai_functions.ipynb | 4 +- .../output_parsers/types/output_fixing.ipynb | 4 +- .../types/pandas_dataframe.ipynb | 2 +- .../output_parsers/types/pydantic.ipynb | 4 +- .../model_io/output_parsers/types/retry.ipynb | 3 +- .../output_parsers/types/structured.ipynb | 2 +- .../model_io/output_parsers/types/yaml.ipynb | 4 +- .../model_io/prompts/composition.ipynb | 4 +- .../prompts/example_selector_types/mmr.ipynb | 2 +- .../example_selector_types/similarity.ipynb | 2 +- .../model_io/prompts/few_shot_examples.ipynb | 2 +- .../prompts/few_shot_examples_chat.ipynb | 4 +- .../model_io/prompts/quick_start.ipynb | 4 +- docs/docs/modules/model_io/quick_start.mdx | 6 +-- docs/docs/use_cases/apis.ipynb | 6 +-- docs/docs/use_cases/chatbots.ipynb | 8 ++-- docs/docs/use_cases/code_understanding.ipynb | 4 +- docs/docs/use_cases/data_generation.ipynb | 10 ++-- docs/docs/use_cases/extraction.ipynb | 6 +-- .../graph/diffbot_graphtransformer.ipynb | 2 +- .../use_cases/graph/graph_arangodb_qa.ipynb | 2 +- .../use_cases/graph/graph_cypher_qa.ipynb | 4 +- .../use_cases/graph/graph_falkordb_qa.ipynb | 4 +- .../use_cases/graph/graph_hugegraph_qa.ipynb | 4 +- docs/docs/use_cases/graph/graph_kuzu_qa.ipynb | 4 +- .../use_cases/graph/graph_memgraph_qa.ipynb | 4 +- .../use_cases/graph/graph_nebula_qa.ipynb | 4 +- .../use_cases/graph/graph_networkx_qa.ipynb | 2 +- .../use_cases/graph/graph_sparql_qa.ipynb | 4 +- .../use_cases/graph/neptune_cypher_qa.ipynb | 2 +- docs/docs/use_cases/qa_structured/sql.ipynb | 16 +++---- .../question_answering/chat_history.ipynb | 5 +- .../conversational_retrieval_agents.ipynb | 4 +- .../question_answering/per_user.ipynb | 11 ++--- .../question_answering/quickstart.ipynb | 9 ++-- .../question_answering/sources.ipynb | 5 +- .../question_answering/streaming.ipynb | 5 +- docs/docs/use_cases/summarization.ipynb | 2 +- docs/docs/use_cases/tagging.ipynb | 2 +- docs/docs/use_cases/web_scraping.ipynb | 7 ++- .../package_template/chain.py | 4 +- .../package_template/pyproject.toml | 5 +- libs/core/langchain_core/runnables/base.py | 5 +- 414 files changed, 808 insertions(+), 834 deletions(-) diff --git a/cookbook/LLaMA2_sql_chat.ipynb b/cookbook/LLaMA2_sql_chat.ipynb index f31a7fd58f..3b697f314d 100644 --- a/cookbook/LLaMA2_sql_chat.ipynb +++ b/cookbook/LLaMA2_sql_chat.ipynb @@ -149,7 +149,7 @@ ], "source": [ "# Prompt\n", - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "# Update the template based on the type of SQL Database like MySQL, Microsoft SQL Server and so on\n", "template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n", @@ -278,7 +278,7 @@ "source": [ "# Prompt\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "\n", "template = \"\"\"Given an input question, convert it to a SQL query. No pre-amble. 
Based on the table schema below, write a SQL query that would answer the user's question:\n", "{schema}\n", diff --git a/cookbook/Multi_modal_RAG.ipynb b/cookbook/Multi_modal_RAG.ipynb index c2c12ef87e..79f311328a 100644 --- a/cookbook/Multi_modal_RAG.ipynb +++ b/cookbook/Multi_modal_RAG.ipynb @@ -198,9 +198,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "\n", "# Generate summaries of text elements\n", @@ -355,9 +355,9 @@ "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", "from langchain_core.documents import Document\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "\n", "def create_multi_vector_retriever(\n", diff --git a/cookbook/Semi_Structured_RAG.ipynb b/cookbook/Semi_Structured_RAG.ipynb index 0a9117337d..2429413558 100644 --- a/cookbook/Semi_Structured_RAG.ipynb +++ b/cookbook/Semi_Structured_RAG.ipynb @@ -235,9 +235,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_core.output_parsers import StrOutputParser" + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -320,9 +320,9 @@ "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", "from langchain_core.documents import Document\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "# The vectorstore to use to index the child chunks\n", "vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n", diff --git a/cookbook/Semi_structured_and_multi_modal_RAG.ipynb b/cookbook/Semi_structured_and_multi_modal_RAG.ipynb index ffcf351d09..82ce6faf7f 100644 --- a/cookbook/Semi_structured_and_multi_modal_RAG.ipynb +++ b/cookbook/Semi_structured_and_multi_modal_RAG.ipynb @@ -211,9 +211,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_core.output_parsers import StrOutputParser" + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -375,9 +375,9 @@ "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", "from langchain_core.documents import Document\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "# The vectorstore to use to index the child chunks\n", "vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n", diff --git 
a/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb b/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb index 2a57c329bf..19b9218ae7 100644 --- a/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb +++ b/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb @@ -209,9 +209,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", "from langchain_community.chat_models import ChatOllama\n", - "from langchain_core.output_parsers import StrOutputParser" + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate" ] }, { diff --git a/cookbook/advanced_rag_eval.ipynb b/cookbook/advanced_rag_eval.ipynb index 1f8d84c41b..45d424b452 100644 --- a/cookbook/advanced_rag_eval.ipynb +++ b/cookbook/advanced_rag_eval.ipynb @@ -132,8 +132,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "baseline = Chroma.from_texts(\n", " texts=all_splits_pypdf_texts,\n", @@ -160,9 +160,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# Prompt\n", "prompt_text = \"\"\"You are an assistant tasked with summarizing tables and text for retrieval. \\\n", diff --git a/cookbook/agent_vectorstore.ipynb b/cookbook/agent_vectorstore.ipynb index 6b9a88e448..388e4702a3 100644 --- a/cookbook/agent_vectorstore.ipynb +++ b/cookbook/agent_vectorstore.ipynb @@ -29,9 +29,8 @@ "source": [ "from langchain.chains import RetrievalQA\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.llms import OpenAI\n", "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAI, OpenAIEmbeddings\n", "\n", "llm = OpenAI(temperature=0)" ] @@ -161,7 +160,7 @@ "source": [ "# Import things that are needed generically\n", "from langchain.agents import AgentType, Tool, initialize_agent\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/cookbook/analyze_document.ipynb b/cookbook/analyze_document.ipynb index 9b61507c1e..4b872d823a 100644 --- a/cookbook/analyze_document.ipynb +++ b/cookbook/analyze_document.ipynb @@ -29,7 +29,7 @@ "outputs": [], "source": [ "from langchain.chains import AnalyzeDocumentChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)" ] diff --git a/cookbook/autogpt/autogpt.ipynb b/cookbook/autogpt/autogpt.ipynb index 911c8c3c86..0d4930c483 100644 --- a/cookbook/autogpt/autogpt.ipynb +++ b/cookbook/autogpt/autogpt.ipynb @@ -62,8 +62,8 @@ "outputs": [], "source": [ "from langchain.docstore import InMemoryDocstore\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import FAISS" + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -100,8 +100,8 @@ "metadata": {}, "outputs": [], "source": [ - "from 
langchain_community.chat_models import ChatOpenAI\n", - "from langchain_experimental.autonomous_agents import AutoGPT" + "from langchain_experimental.autonomous_agents import AutoGPT\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/autogpt/marathon_times.ipynb b/cookbook/autogpt/marathon_times.ipynb index d998630b70..44f2445e64 100644 --- a/cookbook/autogpt/marathon_times.ipynb +++ b/cookbook/autogpt/marathon_times.ipynb @@ -41,8 +41,8 @@ "import pandas as pd\n", "from langchain.docstore.document import Document\n", "from langchain_community.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_experimental.autonomous_agents import AutoGPT\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# Needed since Jupyter runs an async event loop\n", "nest_asyncio.apply()" @@ -311,8 +311,8 @@ "# Memory\n", "import faiss\n", "from langchain.docstore import InMemoryDocstore\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings_model = OpenAIEmbeddings()\n", "embedding_size = 1536\n", diff --git a/cookbook/baby_agi.ipynb b/cookbook/baby_agi.ipynb index 9583eadba6..9545632a42 100644 --- a/cookbook/baby_agi.ipynb +++ b/cookbook/baby_agi.ipynb @@ -31,9 +31,8 @@ "source": [ "from typing import Optional\n", "\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_experimental.autonomous_agents import BabyAGI" + "from langchain_experimental.autonomous_agents import BabyAGI\n", + "from langchain_openai import OpenAI, OpenAIEmbeddings" ] }, { diff --git a/cookbook/baby_agi_with_agent.ipynb b/cookbook/baby_agi_with_agent.ipynb index 393b026b87..13476e5319 100644 --- a/cookbook/baby_agi_with_agent.ipynb +++ b/cookbook/baby_agi_with_agent.ipynb @@ -29,9 +29,8 @@ "\n", "from langchain.chains import LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_experimental.autonomous_agents import BabyAGI" + "from langchain_experimental.autonomous_agents import BabyAGI\n", + "from langchain_openai import OpenAI, OpenAIEmbeddings" ] }, { @@ -108,8 +107,8 @@ "source": [ "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n", "from langchain.chains import LLMChain\n", - "from langchain_community.llms import OpenAI\n", "from langchain_community.utilities import SerpAPIWrapper\n", + "from langchain_openai import OpenAI\n", "\n", "todo_prompt = PromptTemplate.from_template(\n", " \"You are a planner who is an expert at coming up with a todo list for a given objective. 
Come up with a todo list for this objective: {objective}\"\n", diff --git a/cookbook/camel_role_playing.ipynb b/cookbook/camel_role_playing.ipynb index 2feffde200..ab8f44adf9 100644 --- a/cookbook/camel_role_playing.ipynb +++ b/cookbook/camel_role_playing.ipynb @@ -46,7 +46,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/causal_program_aided_language_model.ipynb b/cookbook/causal_program_aided_language_model.ipynb index 5e5b3c0b57..0f1e5fb8c3 100644 --- a/cookbook/causal_program_aided_language_model.ipynb +++ b/cookbook/causal_program_aided_language_model.ipynb @@ -47,9 +47,9 @@ "outputs": [], "source": [ "from IPython.display import SVG\n", - "from langchain_community.llms import OpenAI\n", "from langchain_experimental.cpal.base import CPALChain\n", "from langchain_experimental.pal_chain import PALChain\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0, max_tokens=512)\n", "cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)\n", diff --git a/cookbook/code-analysis-deeplake.ipynb b/cookbook/code-analysis-deeplake.ipynb index 4b5ea3ae4c..67c1ecbe39 100644 --- a/cookbook/code-analysis-deeplake.ipynb +++ b/cookbook/code-analysis-deeplake.ipynb @@ -657,7 +657,7 @@ } ], "source": [ - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()\n", "embeddings" @@ -834,7 +834,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(\n", " model_name=\"gpt-3.5-turbo-0613\"\n", diff --git a/cookbook/custom_agent_with_plugin_retrieval.ipynb b/cookbook/custom_agent_with_plugin_retrieval.ipynb index 7046a9fba6..9131599da0 100644 --- a/cookbook/custom_agent_with_plugin_retrieval.ipynb +++ b/cookbook/custom_agent_with_plugin_retrieval.ipynb @@ -44,8 +44,8 @@ "from langchain.prompts import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", "from langchain_community.agent_toolkits import NLAToolkit\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.tools.plugin import AIPlugin" + "from langchain_community.tools.plugin import AIPlugin\n", + "from langchain_openai import OpenAI" ] }, { @@ -115,8 +115,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import FAISS" + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb b/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb index e8b3611761..30fc61712d 100644 --- a/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb +++ b/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb @@ -69,8 +69,8 @@ "from langchain.prompts import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", "from langchain_community.agent_toolkits import NLAToolkit\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.tools.plugin import AIPlugin" + "from langchain_community.tools.plugin import AIPlugin\n", + 
"from langchain_openai import OpenAI" ] }, { @@ -139,8 +139,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import FAISS" + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/cookbook/custom_agent_with_tool_retrieval.ipynb b/cookbook/custom_agent_with_tool_retrieval.ipynb index 28932f59b2..7981a13716 100644 --- a/cookbook/custom_agent_with_tool_retrieval.ipynb +++ b/cookbook/custom_agent_with_tool_retrieval.ipynb @@ -41,8 +41,8 @@ "from langchain.chains import LLMChain\n", "from langchain.prompts import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.utilities import SerpAPIWrapper" + "from langchain_community.utilities import SerpAPIWrapper\n", + "from langchain_openai import OpenAI" ] }, { @@ -104,8 +104,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import FAISS" + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/cookbook/databricks_sql_db.ipynb b/cookbook/databricks_sql_db.ipynb index 6cb5da4fe9..08faf00965 100644 --- a/cookbook/databricks_sql_db.ipynb +++ b/cookbook/databricks_sql_db.ipynb @@ -93,7 +93,7 @@ "outputs": [], "source": [ "# Creating a OpenAI Chat LLM wrapper\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0, model_name=\"gpt-4\")" ] diff --git a/cookbook/deeplake_semantic_search_over_chat.ipynb b/cookbook/deeplake_semantic_search_over_chat.ipynb index 042cdf7399..3dd2c92004 100644 --- a/cookbook/deeplake_semantic_search_over_chat.ipynb +++ b/cookbook/deeplake_semantic_search_over_chat.ipynb @@ -56,9 +56,8 @@ " CharacterTextSplitter,\n", " RecursiveCharacterTextSplitter,\n", ")\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.llms import OpenAI\n", "from langchain_community.vectorstores import DeepLake\n", + "from langchain_openai import OpenAI, OpenAIEmbeddings\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "activeloop_token = getpass.getpass(\"Activeloop Token:\")\n", diff --git a/cookbook/docugami_xml_kg_rag.ipynb b/cookbook/docugami_xml_kg_rag.ipynb index 2a9837ddec..a9c8607935 100644 --- a/cookbook/docugami_xml_kg_rag.ipynb +++ b/cookbook/docugami_xml_kg_rag.ipynb @@ -475,8 +475,8 @@ " HumanMessagePromptTemplate,\n", " SystemMessagePromptTemplate,\n", ")\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_core.output_parsers import StrOutputParser" + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -547,9 +547,9 @@ "\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores.chroma import Chroma\n", "from langchain_core.documents import Document\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "\n", "def build_retriever(text_elements, tables, table_summaries):\n", 
diff --git a/cookbook/elasticsearch_db_qa.ipynb b/cookbook/elasticsearch_db_qa.ipynb index af4bc6d71e..3a38446a30 100644 --- a/cookbook/elasticsearch_db_qa.ipynb +++ b/cookbook/elasticsearch_db_qa.ipynb @@ -39,7 +39,7 @@ "source": [ "from elasticsearch import Elasticsearch\n", "from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/extraction_openai_tools.ipynb b/cookbook/extraction_openai_tools.ipynb index fd251b35d0..dae98315f7 100644 --- a/cookbook/extraction_openai_tools.ipynb +++ b/cookbook/extraction_openai_tools.ipynb @@ -22,8 +22,8 @@ "from typing import List, Optional\n", "\n", "from langchain.chains.openai_tools import create_extraction_chain_pydantic\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_core.pydantic_v1 import BaseModel" + "from langchain_core.pydantic_v1 import BaseModel\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -153,7 +153,7 @@ "from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n", "from langchain_core.runnables import Runnable\n", "from langchain_core.pydantic_v1 import BaseModel\n", - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.messages import SystemMessage\n", "from langchain_core.language_models import BaseLanguageModel\n", "\n", diff --git a/cookbook/forward_looking_retrieval_augmented_generation.ipynb b/cookbook/forward_looking_retrieval_augmented_generation.ipynb index e7b3ecda22..0abfe0bfef 100644 --- a/cookbook/forward_looking_retrieval_augmented_generation.ipynb +++ b/cookbook/forward_looking_retrieval_augmented_generation.ipynb @@ -74,9 +74,8 @@ " CallbackManagerForRetrieverRun,\n", ")\n", "from langchain.schema import BaseRetriever, Document\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.utilities import GoogleSerperAPIWrapper" + "from langchain_community.utilities import GoogleSerperAPIWrapper\n", + "from langchain_openai import ChatOpenAI, OpenAI" ] }, { diff --git a/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb b/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb index f7570fd7f2..e2e6694405 100644 --- a/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb +++ b/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb @@ -49,9 +49,8 @@ "\n", "from langchain.docstore import InMemoryDocstore\n", "from langchain.retrievers import TimeWeightedVectorStoreRetriever\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "from termcolor import colored" ] }, diff --git a/cookbook/hugginggpt.ipynb b/cookbook/hugginggpt.ipynb index d94076cbf3..751948e88d 100644 --- a/cookbook/hugginggpt.ipynb +++ b/cookbook/hugginggpt.ipynb @@ -75,8 +75,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.llms import OpenAI\n", "from langchain_experimental.autonomous_agents import HuggingGPT\n", + "from langchain_openai import OpenAI\n", "\n", "# %env OPENAI_API_BASE=http://localhost:8000/v1" ] diff --git a/cookbook/human_approval.ipynb 
b/cookbook/human_approval.ipynb index aae360250f..59e46bbc4e 100644 --- a/cookbook/human_approval.ipynb +++ b/cookbook/human_approval.ipynb @@ -159,7 +159,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/cookbook/hypothetical_document_embeddings.ipynb b/cookbook/hypothetical_document_embeddings.ipynb index ea997869ad..58cde25fe9 100644 --- a/cookbook/hypothetical_document_embeddings.ipynb +++ b/cookbook/hypothetical_document_embeddings.ipynb @@ -22,8 +22,7 @@ "source": [ "from langchain.chains import HypotheticalDocumentEmbedder, LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI, OpenAIEmbeddings" ] }, { diff --git a/cookbook/learned_prompt_optimization.ipynb b/cookbook/learned_prompt_optimization.ipynb index 3f4d02dd46..b7894d4482 100644 --- a/cookbook/learned_prompt_optimization.ipynb +++ b/cookbook/learned_prompt_optimization.ipynb @@ -49,7 +49,7 @@ "source": [ "# pick and configure the LLM of your choice\n", "\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")" ] diff --git a/cookbook/llm_bash.ipynb b/cookbook/llm_bash.ipynb index 9a345df74f..61a56f1783 100644 --- a/cookbook/llm_bash.ipynb +++ b/cookbook/llm_bash.ipynb @@ -43,8 +43,8 @@ } ], "source": [ - "from langchain_community.llms import OpenAI\n", "from langchain_experimental.llm_bash.base import LLMBashChain\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "\n", diff --git a/cookbook/llm_checker.ipynb b/cookbook/llm_checker.ipynb index cfc5f2356a..4c128fdc2a 100644 --- a/cookbook/llm_checker.ipynb +++ b/cookbook/llm_checker.ipynb @@ -42,7 +42,7 @@ ], "source": [ "from langchain.chains import LLMCheckerChain\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0.7)\n", "\n", diff --git a/cookbook/llm_math.ipynb b/cookbook/llm_math.ipynb index e0a026ba35..6260be2f03 100644 --- a/cookbook/llm_math.ipynb +++ b/cookbook/llm_math.ipynb @@ -46,7 +46,7 @@ ], "source": [ "from langchain.chains import LLMMathChain\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "llm_math = LLMMathChain.from_llm(llm, verbose=True)\n", diff --git a/cookbook/llm_summarization_checker.ipynb b/cookbook/llm_summarization_checker.ipynb index 8501c98daf..ed3f108716 100644 --- a/cookbook/llm_summarization_checker.ipynb +++ b/cookbook/llm_summarization_checker.ipynb @@ -331,7 +331,7 @@ ], "source": [ "from langchain.chains import LLMSummarizationCheckerChain\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=2)\n", @@ -822,7 +822,7 @@ ], "source": [ "from langchain.chains import LLMSummarizationCheckerChain\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=3)\n", @@ -1096,7 +1096,7 @@ ], "source": [ "from 
langchain.chains import LLMSummarizationCheckerChain\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "checker_chain = LLMSummarizationCheckerChain.from_llm(llm, max_checks=3, verbose=True)\n", diff --git a/cookbook/llm_symbolic_math.ipynb b/cookbook/llm_symbolic_math.ipynb index 10275f83ca..69ccbaf072 100644 --- a/cookbook/llm_symbolic_math.ipynb +++ b/cookbook/llm_symbolic_math.ipynb @@ -14,8 +14,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.llms import OpenAI\n", "from langchain_experimental.llm_symbolic_math.base import LLMSymbolicMathChain\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "llm_symbolic_math = LLMSymbolicMathChain.from_llm(llm)" diff --git a/cookbook/meta_prompt.ipynb b/cookbook/meta_prompt.ipynb index f0e78ab197..746d3a4203 100644 --- a/cookbook/meta_prompt.ipynb +++ b/cookbook/meta_prompt.ipynb @@ -59,7 +59,7 @@ "from langchain.chains import LLMChain\n", "from langchain.memory import ConversationBufferWindowMemory\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/cookbook/multi_modal_QA.ipynb b/cookbook/multi_modal_QA.ipynb index 1e316cdb07..160b721116 100644 --- a/cookbook/multi_modal_QA.ipynb +++ b/cookbook/multi_modal_QA.ipynb @@ -91,8 +91,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_core.messages import HumanMessage, SystemMessage" + "from langchain_core.messages import HumanMessage, SystemMessage\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/multi_modal_RAG_chroma.ipynb b/cookbook/multi_modal_RAG_chroma.ipynb index 17d49ffe8a..0af89590bf 100644 --- a/cookbook/multi_modal_RAG_chroma.ipynb +++ b/cookbook/multi_modal_RAG_chroma.ipynb @@ -315,10 +315,10 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.messages import HumanMessage, SystemMessage\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI\n", "\n", "\n", "def prompt_func(data_dict):\n", diff --git a/cookbook/multi_modal_output_agent.ipynb b/cookbook/multi_modal_output_agent.ipynb index 8626c3bcf2..e5929ead11 100644 --- a/cookbook/multi_modal_output_agent.ipynb +++ b/cookbook/multi_modal_output_agent.ipynb @@ -44,7 +44,7 @@ "source": [ "from langchain.agents import AgentType, initialize_agent\n", "from langchain.tools import SteamshipImageGenerationTool\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/cookbook/multi_player_dnd.ipynb b/cookbook/multi_player_dnd.ipynb index c03bb6ad22..05c4d45914 100644 --- a/cookbook/multi_player_dnd.ipynb +++ b/cookbook/multi_player_dnd.ipynb @@ -32,7 +32,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/multiagent_authoritarian.ipynb b/cookbook/multiagent_authoritarian.ipynb index 8e9a82a062..893b35f7c7 100644 --- a/cookbook/multiagent_authoritarian.ipynb +++ b/cookbook/multiagent_authoritarian.ipynb @@ -41,7 +41,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from 
langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/multiagent_bidding.ipynb b/cookbook/multiagent_bidding.ipynb index 1ee6383d92..fbb9f03f53 100644 --- a/cookbook/multiagent_bidding.ipynb +++ b/cookbook/multiagent_bidding.ipynb @@ -33,7 +33,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/myscale_vector_sql.ipynb b/cookbook/myscale_vector_sql.ipynb index b02a19f723..d26ac19d73 100644 --- a/cookbook/myscale_vector_sql.ipynb +++ b/cookbook/myscale_vector_sql.ipynb @@ -32,9 +32,9 @@ "\n", "from langchain.chains import LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", "from langchain_community.utilities import SQLDatabase\n", "from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n", + "from langchain_openai import OpenAI\n", "from sqlalchemy import MetaData, create_engine\n", "\n", "MYSCALE_HOST = \"msc-4a9e710a.us-east-1.aws.staging.myscale.cloud\"\n", @@ -75,10 +75,10 @@ "outputs": [], "source": [ "from langchain.callbacks import StdOutCallbackHandler\n", - "from langchain_community.llms import OpenAI\n", "from langchain_community.utilities.sql_database import SQLDatabase\n", "from langchain_experimental.sql.prompt import MYSCALE_PROMPT\n", "from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n", + "from langchain_openai import OpenAI\n", "\n", "chain = VectorSQLDatabaseChain(\n", " llm_chain=LLMChain(\n", @@ -117,7 +117,6 @@ "outputs": [], "source": [ "from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_experimental.retrievers.vector_sql_database import (\n", " VectorSQLDatabaseChainRetriever,\n", ")\n", @@ -126,6 +125,7 @@ " VectorSQLDatabaseChain,\n", " VectorSQLRetrieveAllOutputParser,\n", ")\n", + "from langchain_openai import ChatOpenAI\n", "\n", "output_parser_retrieve_all = VectorSQLRetrieveAllOutputParser.from_embeddings(\n", " output_parser.model\n", diff --git a/cookbook/openai_functions_retrieval_qa.ipynb b/cookbook/openai_functions_retrieval_qa.ipynb index c214377e79..648b28b5e2 100644 --- a/cookbook/openai_functions_retrieval_qa.ipynb +++ b/cookbook/openai_functions_retrieval_qa.ipynb @@ -22,8 +22,8 @@ "from langchain.chains import RetrievalQA\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Chroma" + "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -53,7 +53,7 @@ "from langchain.chains import create_qa_with_sources_chain\n", "from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index 8e0b95020a..298c6c8aa3 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -28,8 +28,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", - "from 
langchain_core.messages import HumanMessage, SystemMessage" + "from langchain_core.messages import HumanMessage, SystemMessage\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -414,7 +414,7 @@ "BREAKING CHANGES:\n", "- To use Azure embeddings with OpenAI V1, you'll need to use the new `AzureOpenAIEmbeddings` instead of the existing `OpenAIEmbeddings`. `OpenAIEmbeddings` continue to work when using Azure with `openai<1`.\n", "```python\n", - "from langchain_community.embeddings import AzureOpenAIEmbeddings\n", + "from langchain_openai import AzureOpenAIEmbeddings\n", "```\n", "\n", "\n", @@ -456,8 +456,8 @@ "from typing import Literal\n", "\n", "from langchain.output_parsers.openai_tools import PydanticToolsParser\n", - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.pydantic_v1 import BaseModel, Field\n", "\n", "\n", diff --git a/cookbook/petting_zoo.ipynb b/cookbook/petting_zoo.ipynb index 5c269b1a67..c0db7653b0 100644 --- a/cookbook/petting_zoo.ipynb +++ b/cookbook/petting_zoo.ipynb @@ -52,7 +52,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/plan_and_execute_agent.ipynb b/cookbook/plan_and_execute_agent.ipynb index 2bbdcc6bb0..d710514658 100644 --- a/cookbook/plan_and_execute_agent.ipynb +++ b/cookbook/plan_and_execute_agent.ipynb @@ -30,15 +30,14 @@ "outputs": [], "source": [ "from langchain.chains import LLMMathChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.llms import OpenAI\n", "from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n", "from langchain_core.tools import Tool\n", "from langchain_experimental.plan_and_execute import (\n", " PlanAndExecute,\n", " load_agent_executor,\n", " load_chat_planner,\n", - ")" + ")\n", + "from langchain_openai import ChatOpenAI, OpenAI" ] }, { diff --git a/cookbook/press_releases.ipynb b/cookbook/press_releases.ipynb index a86927f7af..30aba0a68d 100644 --- a/cookbook/press_releases.ipynb +++ b/cookbook/press_releases.ipynb @@ -82,7 +82,7 @@ "source": [ "from langchain.chains import ConversationalRetrievalChain\n", "from langchain.retrievers import KayAiRetriever\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n", "retriever = KayAiRetriever.create(\n", diff --git a/cookbook/program_aided_language_model.ipynb b/cookbook/program_aided_language_model.ipynb index 5eed7766ea..17320ab8c0 100644 --- a/cookbook/program_aided_language_model.ipynb +++ b/cookbook/program_aided_language_model.ipynb @@ -17,8 +17,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.llms import OpenAI\n", - "from langchain_experimental.pal_chain import PALChain" + "from langchain_experimental.pal_chain import PALChain\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/cookbook/qa_citations.ipynb b/cookbook/qa_citations.ipynb index 2ca389a063..a8dbd1c613 100644 --- a/cookbook/qa_citations.ipynb +++ b/cookbook/qa_citations.ipynb @@ -27,7 +27,7 @@ ], "source": [ "from langchain.chains import create_citation_fuzzy_match_chain\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git 
a/cookbook/rag_fusion.ipynb b/cookbook/rag_fusion.ipynb index a340e97ed0..976e8cfab4 100644 --- a/cookbook/rag_fusion.ipynb +++ b/cookbook/rag_fusion.ipynb @@ -30,8 +30,8 @@ "outputs": [], "source": [ "import pinecone\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Pinecone\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "pinecone.init(api_key=\"...\", environment=\"...\")" ] @@ -86,8 +86,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_core.output_parsers import StrOutputParser" + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/retrieval_in_sql.ipynb b/cookbook/retrieval_in_sql.ipynb index 32d2384cad..998e9aa8dd 100644 --- a/cookbook/retrieval_in_sql.ipynb +++ b/cookbook/retrieval_in_sql.ipynb @@ -43,7 +43,7 @@ "outputs": [], "source": [ "from langchain.sql_database import SQLDatabase\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "CONNECTION_STRING = \"postgresql+psycopg2://postgres:test@localhost:5432/vectordb\" # Replace with your own\n", "db = SQLDatabase.from_uri(CONNECTION_STRING)" @@ -88,7 +88,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings_model = OpenAIEmbeddings()" ] @@ -219,7 +219,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "template = \"\"\"You are a Postgres expert. Given an input question, first create a syntactically correct Postgres query to run, then look at the results of the query and return the answer to the input question.\n", "Unless the user specifies in the question a specific number of examples to obtain, query for at most 5 results using the LIMIT clause as per Postgres. 
You can order the results to return the most informative data in the database.\n", @@ -267,9 +267,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI\n", "\n", "db = SQLDatabase.from_uri(\n", " CONNECTION_STRING\n", diff --git a/cookbook/rewrite.ipynb b/cookbook/rewrite.ipynb index b60ee96b95..270d7d964e 100644 --- a/cookbook/rewrite.ipynb +++ b/cookbook/rewrite.ipynb @@ -31,11 +31,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.runnables import RunnablePassthrough" + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/sales_agent_with_context.ipynb b/cookbook/sales_agent_with_context.ipynb index 11cb7afd8b..158329a5f0 100644 --- a/cookbook/sales_agent_with_context.ipynb +++ b/cookbook/sales_agent_with_context.ipynb @@ -53,10 +53,9 @@ "from langchain.prompts.base import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.llms import BaseLLM, OpenAI\n", + "from langchain_community.llms import BaseLLM\n", "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings\n", "from pydantic import BaseModel, Field" ] }, diff --git a/cookbook/selecting_llms_based_on_context_length.ipynb b/cookbook/selecting_llms_based_on_context_length.ipynb index ae885f5e0b..d4e22100a9 100644 --- a/cookbook/selecting_llms_based_on_context_length.ipynb +++ b/cookbook/selecting_llms_based_on_context_length.ipynb @@ -18,9 +18,9 @@ "outputs": [], "source": [ "from langchain.prompts import PromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompt_values import PromptValue" + "from langchain_core.prompt_values import PromptValue\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/self_query_hotel_search.ipynb b/cookbook/self_query_hotel_search.ipynb index a349bd7f9b..d38192c5a2 100644 --- a/cookbook/self_query_hotel_search.ipynb +++ b/cookbook/self_query_hotel_search.ipynb @@ -255,7 +255,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(model=\"gpt-4\")\n", "res = model.predict(\n", @@ -1083,8 +1083,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import ElasticsearchStore\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/cookbook/sharedmemory_for_tools.ipynb b/cookbook/sharedmemory_for_tools.ipynb index 9134a263ae..3b8efc7359 100644 
--- a/cookbook/sharedmemory_for_tools.ipynb +++ b/cookbook/sharedmemory_for_tools.ipynb @@ -26,8 +26,8 @@ "from langchain.chains import LLMChain\n", "from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.utilities import GoogleSearchAPIWrapper" + "from langchain_community.utilities import GoogleSearchAPIWrapper\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/cookbook/smart_llm.ipynb b/cookbook/smart_llm.ipynb index b8bb31c97e..0e617617e3 100644 --- a/cookbook/smart_llm.ipynb +++ b/cookbook/smart_llm.ipynb @@ -52,8 +52,8 @@ "outputs": [], "source": [ "from langchain.prompts import PromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_experimental.smart_llm import SmartLLMChain" + "from langchain_experimental.smart_llm import SmartLLMChain\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/sql_db_qa.mdx b/cookbook/sql_db_qa.mdx index 851fae5202..73cdd953f3 100644 --- a/cookbook/sql_db_qa.mdx +++ b/cookbook/sql_db_qa.mdx @@ -9,7 +9,7 @@ To set it up, follow the instructions on https://database.guide/2-sample-databas ```python -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI from langchain_community.utilities import SQLDatabase from langchain_experimental.sql import SQLDatabaseChain ``` @@ -200,7 +200,7 @@ result["intermediate_steps"] How to add memory to a SQLDatabaseChain: ```python -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI from langchain_community.utilities import SQLDatabase from langchain_experimental.sql import SQLDatabaseChain ``` diff --git a/cookbook/stepback-qa.ipynb b/cookbook/stepback-qa.ipynb index e06652e80e..6827b04da7 100644 --- a/cookbook/stepback-qa.ipynb +++ b/cookbook/stepback-qa.ipynb @@ -23,10 +23,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.runnables import RunnableLambda" + "from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n", + "from langchain_core.runnables import RunnableLambda\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/tree_of_thought.ipynb b/cookbook/tree_of_thought.ipynb index 6fead2e0cc..63ff323ec6 100644 --- a/cookbook/tree_of_thought.ipynb +++ b/cookbook/tree_of_thought.ipynb @@ -24,7 +24,7 @@ } ], "source": [ - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=1, max_tokens=512, model=\"gpt-3.5-turbo-instruct\")" ] diff --git a/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb b/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb index 28942b32b4..4f540fa5ab 100644 --- a/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb +++ b/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb @@ -37,8 +37,8 @@ "import getpass\n", "import os\n", "\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import DeepLake\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "activeloop_token = getpass.getpass(\"Activeloop 
Token:\")\n", @@ -3809,7 +3809,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/cookbook/two_agent_debate_tools.ipynb b/cookbook/two_agent_debate_tools.ipynb index 9fc9d1a757..b31e769dee 100644 --- a/cookbook/two_agent_debate_tools.ipynb +++ b/cookbook/two_agent_debate_tools.ipynb @@ -30,7 +30,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/two_player_dnd.ipynb b/cookbook/two_player_dnd.ipynb index c36a59b477..d90e4f9365 100644 --- a/cookbook/two_player_dnd.ipynb +++ b/cookbook/two_player_dnd.ipynb @@ -28,7 +28,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/wikibase_agent.ipynb b/cookbook/wikibase_agent.ipynb index 71c5294b8c..692193b022 100644 --- a/cookbook/wikibase_agent.ipynb +++ b/cookbook/wikibase_agent.ipynb @@ -599,7 +599,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)" ] diff --git a/docs/docs/expression_language/cookbook/code_writing.ipynb b/docs/docs/expression_language/cookbook/code_writing.ipynb index e3f59f39ea..ae3e348e69 100644 --- a/docs/docs/expression_language/cookbook/code_writing.ipynb +++ b/docs/docs/expression_language/cookbook/code_writing.ipynb @@ -20,9 +20,9 @@ "from langchain.prompts import (\n", " ChatPromptTemplate,\n", ")\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_experimental.utilities import PythonREPL" + "from langchain_experimental.utilities import PythonREPL\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/expression_language/cookbook/embedding_router.ipynb b/docs/docs/expression_language/cookbook/embedding_router.ipynb index 51e193709d..8ee6515f29 100644 --- a/docs/docs/expression_language/cookbook/embedding_router.ipynb +++ b/docs/docs/expression_language/cookbook/embedding_router.ipynb @@ -21,10 +21,9 @@ "source": [ "from langchain.prompts import PromptTemplate\n", "from langchain.utils.math import cosine_similarity\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "\n", "physics_template = \"\"\"You are a very smart physics professor. \\\n", "You are great at answering questions about physics in a concise and easy to understand manner. 
\\\n", diff --git a/docs/docs/expression_language/cookbook/memory.ipynb b/docs/docs/expression_language/cookbook/memory.ipynb index 6fd8a69264..efeb25f42d 100644 --- a/docs/docs/expression_language/cookbook/memory.ipynb +++ b/docs/docs/expression_language/cookbook/memory.ipynb @@ -20,9 +20,9 @@ "from operator import itemgetter\n", "\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI()\n", "prompt = ChatPromptTemplate.from_messages(\n", diff --git a/docs/docs/expression_language/cookbook/moderation.ipynb b/docs/docs/expression_language/cookbook/moderation.ipynb index 1ab1c117de..f18153850d 100644 --- a/docs/docs/expression_language/cookbook/moderation.ipynb +++ b/docs/docs/expression_language/cookbook/moderation.ipynb @@ -18,8 +18,8 @@ "outputs": [], "source": [ "from langchain.chains import OpenAIModerationChain\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.llms import OpenAI" + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/expression_language/cookbook/multiple_chains.ipynb b/docs/docs/expression_language/cookbook/multiple_chains.ipynb index 2aecbb9cb3..d9bb7ba07e 100644 --- a/docs/docs/expression_language/cookbook/multiple_chains.ipynb +++ b/docs/docs/expression_language/cookbook/multiple_chains.ipynb @@ -39,9 +39,9 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema import StrOutputParser\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "prompt1 = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n", "prompt2 = ChatPromptTemplate.from_template(\n", diff --git a/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb b/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb index 6abaf835b3..e8f33a5037 100644 --- a/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb +++ b/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb @@ -42,8 +42,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_template(\"tell me a joke about {foo}\")\n", "model = ChatOpenAI()\n", diff --git a/docs/docs/expression_language/cookbook/prompt_size.ipynb b/docs/docs/expression_language/cookbook/prompt_size.ipynb index b19f027829..f2a7132dfb 100644 --- a/docs/docs/expression_language/cookbook/prompt_size.ipynb +++ b/docs/docs/expression_language/cookbook/prompt_size.ipynb @@ -26,12 +26,12 @@ "from langchain.agents import AgentExecutor, load_tools\n", "from langchain.agents.format_scratchpad import format_to_openai_function_messages\n", "from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n", - "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", 
"from langchain.prompts.chat import ChatPromptValue\n", "from langchain.tools import WikipediaQueryRun\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.tools.convert_to_openai import format_tool_to_openai_function\n", - "from langchain_community.utilities import WikipediaAPIWrapper" + "from langchain_community.utilities import WikipediaAPIWrapper\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/expression_language/cookbook/retrieval.ipynb b/docs/docs/expression_language/cookbook/retrieval.ipynb index 6cca1d011a..a64a970f75 100644 --- a/docs/docs/expression_language/cookbook/retrieval.ipynb +++ b/docs/docs/expression_language/cookbook/retrieval.ipynb @@ -38,12 +38,11 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.runnables import RunnableLambda, RunnablePassthrough" + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" ] }, { diff --git a/docs/docs/expression_language/cookbook/sql_db.ipynb b/docs/docs/expression_language/cookbook/sql_db.ipynb index 039d2f26ba..f2ed565ead 100644 --- a/docs/docs/expression_language/cookbook/sql_db.ipynb +++ b/docs/docs/expression_language/cookbook/sql_db.ipynb @@ -26,7 +26,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n", "{schema}\n", @@ -93,9 +93,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI()\n", "\n", diff --git a/docs/docs/expression_language/cookbook/tools.ipynb b/docs/docs/expression_language/cookbook/tools.ipynb index 8d5ab06005..cd7fdff328 100644 --- a/docs/docs/expression_language/cookbook/tools.ipynb +++ b/docs/docs/expression_language/cookbook/tools.ipynb @@ -27,10 +27,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.tools import DuckDuckGoSearchRun\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_core.output_parsers import StrOutputParser" + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/expression_language/get_started.ipynb b/docs/docs/expression_language/get_started.ipynb index d44947de19..f4f963a62a 100644 --- a/docs/docs/expression_language/get_started.ipynb +++ b/docs/docs/expression_language/get_started.ipynb @@ -32,28 +32,28 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 1, "id": "466b65b3", "metadata": {}, "outputs": [ { "data": { "text/plain": 
[ - "\"Why did the ice cream go to therapy?\\n\\nBecause it had too many toppings and couldn't find its cone-fidence!\"" + "\"Why don't ice creams ever get invited to parties?\\n\\nBecause they always drip when things heat up!\"" ] }, - "execution_count": 7, + "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_template(\"tell me a short joke about {topic}\")\n", - "model = ChatOpenAI()\n", + "model = ChatOpenAI(model=\"gpt-4\")\n", "output_parser = StrOutputParser()\n", "\n", "chain = prompt | model | output_parser\n", @@ -89,7 +89,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 2, "id": "b8656990", "metadata": {}, "outputs": [ @@ -99,7 +99,7 @@ "ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])" ] }, - "execution_count": 8, + "execution_count": 2, "metadata": {}, "output_type": "execute_result" } @@ -111,7 +111,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 3, "id": "e6034488", "metadata": {}, "outputs": [ @@ -121,7 +121,7 @@ "[HumanMessage(content='tell me a short joke about ice cream')]" ] }, - "execution_count": 9, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -132,7 +132,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 4, "id": "60565463", "metadata": {}, "outputs": [ @@ -142,7 +142,7 @@ "'Human: tell me a short joke about ice cream'" ] }, - "execution_count": 10, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -163,17 +163,17 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 5, "id": "33cf5f72", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content=\"Why did the ice cream go to therapy? \\n\\nBecause it had too many toppings and couldn't find its cone-fidence!\")" + "AIMessage(content=\"Why don't ice creams ever get invited to parties?\\n\\nBecause they always bring a melt down!\")" ] }, - "execution_count": 11, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -193,23 +193,23 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 6, "id": "8feb05da", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'\\n\\nRobot: Why did the ice cream go to therapy? Because it had a rocky road.'" + "'\\n\\nRobot: Why did the ice cream truck break down? 
Because it had a meltdown!'" ] }, - "execution_count": 12, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from langchain_community.llms import OpenAI\n", + "from langchain_openai.llms import OpenAI\n", "\n", "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n", "llm.invoke(prompt_value)" @@ -324,12 +324,12 @@ "# Requires:\n", "# pip install langchain docarray tiktoken\n", "\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import DocArrayInMemorySearch\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n", + "from langchain_openai.chat_models import ChatOpenAI\n", + "from langchain_openai.embeddings import OpenAIEmbeddings\n", "\n", "vectorstore = DocArrayInMemorySearch.from_texts(\n", " [\"harrison worked at kensho\", \"bears like to eat honey\"],\n", @@ -486,7 +486,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1" + "version": "3.10.1" } }, "nbformat": 4, diff --git a/docs/docs/expression_language/how_to/binding.ipynb b/docs/docs/expression_language/how_to/binding.ipynb index 087850902f..f81709d627 100644 --- a/docs/docs/expression_language/how_to/binding.ipynb +++ b/docs/docs/expression_language/how_to/binding.ipynb @@ -19,10 +19,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema import StrOutputParser\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_core.runnables import RunnablePassthrough" + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/expression_language/how_to/configure.ipynb b/docs/docs/expression_language/how_to/configure.ipynb index 9e208ad25c..120ac3c54b 100644 --- a/docs/docs/expression_language/how_to/configure.ipynb +++ b/docs/docs/expression_language/how_to/configure.ipynb @@ -42,8 +42,8 @@ "outputs": [], "source": [ "from langchain.prompts import PromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.runnables import ConfigurableField\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(temperature=0).configurable_fields(\n", " temperature=ConfigurableField(\n", @@ -264,8 +264,9 @@ "outputs": [], "source": [ "from langchain.prompts import PromptTemplate\n", - "from langchain_community.chat_models import ChatAnthropic, ChatOpenAI\n", - "from langchain_core.runnables import ConfigurableField" + "from langchain_community.chat_models import ChatAnthropic\n", + "from langchain_core.runnables import ConfigurableField\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/expression_language/how_to/decorator.ipynb b/docs/docs/expression_language/how_to/decorator.ipynb index 6c7680210a..700354c8f5 100644 --- a/docs/docs/expression_language/how_to/decorator.ipynb +++ b/docs/docs/expression_language/how_to/decorator.ipynb @@ -23,10 +23,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", 
"from langchain_core.prompts import ChatPromptTemplate\n", - "from langchain_core.runnables import chain" + "from langchain_core.runnables import chain\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/expression_language/how_to/fallbacks.ipynb b/docs/docs/expression_language/how_to/fallbacks.ipynb index e736c984e4..42a94667de 100644 --- a/docs/docs/expression_language/how_to/fallbacks.ipynb +++ b/docs/docs/expression_language/how_to/fallbacks.ipynb @@ -31,7 +31,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatAnthropic, ChatOpenAI" + "from langchain_community.chat_models import ChatAnthropic\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -141,7 +142,7 @@ } ], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", @@ -241,7 +242,7 @@ "source": [ "# Now lets create a chain with the normal OpenAI model\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n", "\n", @@ -291,7 +292,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + "version": "3.11.4" } }, "nbformat": 4, diff --git a/docs/docs/expression_language/how_to/functions.ipynb b/docs/docs/expression_language/how_to/functions.ipynb index 5df7681ec8..ede22edeeb 100644 --- a/docs/docs/expression_language/how_to/functions.ipynb +++ b/docs/docs/expression_language/how_to/functions.ipynb @@ -33,9 +33,9 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnableLambda\n", + "from langchain_openai import ChatOpenAI\n", "\n", "\n", "def length_function(text):\n", diff --git a/docs/docs/expression_language/how_to/generators.ipynb b/docs/docs/expression_language/how_to/generators.ipynb index caf1bade4b..73fd2a099a 100644 --- a/docs/docs/expression_language/how_to/generators.ipynb +++ b/docs/docs/expression_language/how_to/generators.ipynb @@ -33,8 +33,8 @@ "from typing import Iterator, List\n", "\n", "from langchain.prompts.chat import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_openai import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_template(\n", " \"Write a comma-separated list of 5 animals similar to: {animal}\"\n", diff --git a/docs/docs/expression_language/how_to/inspect.ipynb b/docs/docs/expression_language/how_to/inspect.ipynb index dea66727de..e61654b1f0 100644 --- a/docs/docs/expression_language/how_to/inspect.ipynb +++ b/docs/docs/expression_language/how_to/inspect.ipynb @@ -33,10 +33,9 @@ "\n", "from langchain.prompts import ChatPromptTemplate\n", "from langchain.vectorstores import FAISS\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.runnables import RunnableLambda, RunnablePassthrough" + "from langchain_core.runnables import RunnableLambda, 
RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" ] }, { diff --git a/docs/docs/expression_language/how_to/map.ipynb b/docs/docs/expression_language/how_to/map.ipynb index dd3f15aca9..d5dbaa76ca 100644 --- a/docs/docs/expression_language/how_to/map.ipynb +++ b/docs/docs/expression_language/how_to/map.ipynb @@ -44,12 +44,11 @@ } ], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "\n", "vectorstore = FAISS.from_texts(\n", " [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n", @@ -128,12 +127,11 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "\n", "vectorstore = FAISS.from_texts(\n", " [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n", @@ -192,9 +190,9 @@ } ], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnableParallel\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI()\n", "joke_chain = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\") | model\n", diff --git a/docs/docs/expression_language/how_to/message_history.ipynb b/docs/docs/expression_language/how_to/message_history.ipynb index 929c2c53d2..55c631e82f 100644 --- a/docs/docs/expression_language/how_to/message_history.ipynb +++ b/docs/docs/expression_language/how_to/message_history.ipynb @@ -131,10 +131,10 @@ "source": [ "from typing import Optional\n", "\n", - "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "from langchain_community.chat_message_histories import RedisChatMessageHistory\n", "from langchain_community.chat_models import ChatAnthropic\n", "from langchain_core.chat_history import BaseChatMessageHistory\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "from langchain_core.runnables.history import RunnableWithMessageHistory" ] }, diff --git a/docs/docs/expression_language/how_to/passthrough.ipynb b/docs/docs/expression_language/how_to/passthrough.ipynb index 2399eb338f..dac8dd60d8 100644 --- a/docs/docs/expression_language/how_to/passthrough.ipynb +++ b/docs/docs/expression_language/how_to/passthrough.ipynb @@ -97,12 +97,11 @@ } ], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", "from langchain_core.output_parsers import 
StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "\n", "vectorstore = FAISS.from_texts(\n", " [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n", diff --git a/docs/docs/expression_language/interface.ipynb b/docs/docs/expression_language/interface.ipynb index 1d0a92c7ec..701fe9208d 100644 --- a/docs/docs/expression_language/interface.ipynb +++ b/docs/docs/expression_language/interface.ipynb @@ -57,8 +57,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI()\n", "prompt = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\")\n", @@ -659,10 +659,10 @@ } ], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "template = \"\"\"Answer the question based only on the following context:\n", "{context}\n", diff --git a/docs/docs/expression_language/why.ipynb b/docs/docs/expression_language/why.ipynb index 80f5c40e47..3c0eaf0e57 100644 --- a/docs/docs/expression_language/why.ipynb +++ b/docs/docs/expression_language/why.ipynb @@ -42,8 +42,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.output_parsers import StrOutputParser\n", "\n", "\n", @@ -389,7 +389,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n", "llm_chain = (\n", @@ -1002,8 +1002,9 @@ "source": [ "import os\n", "\n", - "from langchain_community.chat_models import ChatAnthropic, ChatOpenAI\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_community.chat_models import ChatAnthropic\n", + "from langchain_openai import ChatOpenAI\n", + "from langchain_openai import OpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnablePassthrough\n", diff --git a/docs/docs/get_started/quickstart.mdx b/docs/docs/get_started/quickstart.mdx index fd9b5b2371..3db544f446 100644 --- a/docs/docs/get_started/quickstart.mdx +++ b/docs/docs/get_started/quickstart.mdx @@ -70,10 +70,10 @@ For this getting started guide, we will provide two options: using OpenAI (a pop -First we'll need to install their Python package: +First we'll need to the LangChain <> OpenAI integration package. ```shell -pip install openai +pip install langchain_openai ``` Accessing the API requires an API key, which you can get by creating an account and heading [here](https://platform.openai.com/account/api-keys). Once we have a key we'll want to set it as an environment variable by running: @@ -85,7 +85,7 @@ export OPENAI_API_KEY="..." 
We can then initialize the model: ```python -from langchain_community.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI llm = ChatOpenAI() ``` @@ -93,7 +93,7 @@ llm = ChatOpenAI() If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initializing the OpenAI LLM class: ```python -from langchain_community.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI llm = ChatOpenAI(openai_api_key="...") ``` @@ -128,7 +128,7 @@ We can also guide its response with a prompt template. Prompt templates are used to convert raw user input to a better input to the LLM. ```python -from langchain.prompts import ChatPromptTemplate +from langchain_core.prompts import ChatPromptTemplate prompt = ChatPromptTemplate.from_messages([ ("system", "You are world class technical documentation writer."), ("user", "{input}") @@ -199,10 +199,10 @@ For embedding models, we once again provide examples for accessing via OpenAI or -Make sure you have the openai package installed an the appropriate environment variables set (these are the same as needed for the LLM). +Make sure you have the `langchain_openai` package installed and the appropriate environment variables set (these are the same as needed for the LLM). ```python -from langchain_community.embeddings import OpenAIEmbeddings +from langchain_openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() ``` @@ -416,7 +416,7 @@ pip install langchainhub Now we can use it to get a predefined prompt ```python -from langchain_community.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI from langchain import hub from langchain.agents import create_openai_functions_agent from langchain.agents import AgentExecutor @@ -479,15 +479,15 @@ To create a server for our application we'll make a `serve.py` file. 
This will c from typing import List from fastapi import FastAPI -from langchain.prompts import ChatPromptTemplate -from langchain_community.chat_models import ChatOpenAI +from langchain_core.prompts import ChatPromptTemplate +from langchain_openai import ChatOpenAI from langchain_community.document_loaders import WebBaseLoader -from langchain_community.embeddings import OpenAIEmbeddings +from langchain_openai import OpenAIEmbeddings from langchain_community.vectorstores import DocArrayInMemorySearch from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.tools.retriever import create_retriever_tool from langchain_community.tools.tavily_search import TavilySearchResults -from langchain_community.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI from langchain import hub from langchain.agents import create_openai_functions_agent from langchain.agents import AgentExecutor diff --git a/docs/docs/guides/debugging.md b/docs/docs/guides/debugging.md index 7f9572104b..e2607ad847 100644 --- a/docs/docs/guides/debugging.md +++ b/docs/docs/guides/debugging.md @@ -25,7 +25,7 @@ Let's suppose we have a simple agent, and want to visualize the actions it takes ```python from langchain.agents import AgentType, initialize_agent, load_tools -from langchain_community.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI llm = ChatOpenAI(model_name="gpt-4", temperature=0) tools = load_tools(["ddg-search", "llm-math"], llm=llm) diff --git a/docs/docs/guides/evaluation/examples/comparisons.ipynb b/docs/docs/guides/evaluation/examples/comparisons.ipynb index b49776d971..2d5105896b 100644 --- a/docs/docs/guides/evaluation/examples/comparisons.ipynb +++ b/docs/docs/guides/evaluation/examples/comparisons.ipynb @@ -99,8 +99,8 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, Tool, initialize_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.utilities import SerpAPIWrapper\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# Initialize the language model\n", "# You can add your own OpenAI API key by adding openai_api_key=\"\"\n", diff --git a/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb b/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb index f7c42d5392..88ab1685ce 100644 --- a/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb +++ b/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb @@ -25,7 +25,7 @@ "outputs": [], "source": [ "from langchain.evaluation import load_evaluator\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "evaluator = load_evaluator(\"labeled_score_string\", llm=ChatOpenAI(model=\"gpt-4\"))" ] diff --git a/docs/docs/guides/evaluation/trajectory/custom.ipynb b/docs/docs/guides/evaluation/trajectory/custom.ipynb index 11823f4035..08c706552e 100644 --- a/docs/docs/guides/evaluation/trajectory/custom.ipynb +++ b/docs/docs/guides/evaluation/trajectory/custom.ipynb @@ -26,7 +26,7 @@ "from langchain.chains import LLMChain\n", "from langchain.evaluation import AgentTrajectoryEvaluator\n", "from langchain.schema import AgentAction\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "\n", "class StepNecessityEvaluator(AgentTrajectoryEvaluator):\n", diff --git a/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb b/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb index 
8c7a04cf0c..ef5e75a6da 100644 --- a/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb +++ b/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb @@ -76,7 +76,7 @@ "\n", "from langchain.agents import AgentType, initialize_agent\n", "from langchain.tools import tool\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "from pydantic import HttpUrl\n", "\n", "\n", diff --git a/docs/docs/guides/fallbacks.ipynb b/docs/docs/guides/fallbacks.ipynb index 2f354be3c2..57a201f844 100644 --- a/docs/docs/guides/fallbacks.ipynb +++ b/docs/docs/guides/fallbacks.ipynb @@ -33,7 +33,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatAnthropic, ChatOpenAI" + "from langchain_community.chat_models import ChatAnthropic\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -143,7 +144,7 @@ } ], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", @@ -206,7 +207,7 @@ "source": [ "# Now lets create a chain with the normal OpenAI model\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n", "\n", diff --git a/docs/docs/guides/model_laboratory.ipynb b/docs/docs/guides/model_laboratory.ipynb index 538d494281..785ac8dd90 100644 --- a/docs/docs/guides/model_laboratory.ipynb +++ b/docs/docs/guides/model_laboratory.ipynb @@ -21,7 +21,8 @@ "source": [ "from langchain.model_laboratory import ModelLaboratory\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import Cohere, HuggingFaceHub, OpenAI" + "from langchain_community.llms import Cohere, HuggingFaceHub\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb index 539658b1ec..7a973beb7c 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb @@ -130,7 +130,7 @@ ], "source": [ "from langchain.prompts.prompt import PromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "anonymizer = PresidioAnonymizer()\n", "\n", diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb index 728ef65cd9..c1bd2028a9 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb @@ -638,8 +638,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "# 2. Load the data: In our case data's already loaded\n", "# 3. 
Anonymize the data before indexing\n", @@ -664,14 +664,14 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models.openai import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import (\n", " RunnableLambda,\n", " RunnableParallel,\n", " RunnablePassthrough,\n", ")\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# 6. Create anonymizer chain\n", "template = \"\"\"Answer the question based only on the following context:\n", diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb index bcc66c0829..bf49bba762 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb @@ -208,7 +208,7 @@ ], "source": [ "from langchain.prompts.prompt import PromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "anonymizer = PresidioReversibleAnonymizer()\n", "\n", diff --git a/docs/docs/guides/safety/constitutional_chain.mdx b/docs/docs/guides/safety/constitutional_chain.mdx index 38356470ad..4b98250131 100644 --- a/docs/docs/guides/safety/constitutional_chain.mdx +++ b/docs/docs/guides/safety/constitutional_chain.mdx @@ -12,7 +12,7 @@ content that may violate guidelines, be offensive, or deviate from the desired c ```python # Imports -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI from langchain.prompts import PromptTemplate from langchain.chains.llm import LLMChain from langchain.chains.constitutional_ai.base import ConstitutionalChain diff --git a/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb b/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb index ac1eff92c6..f7c30d3e1e 100644 --- a/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb +++ b/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb @@ -207,7 +207,7 @@ ], "source": [ "from langchain.agents import AgentType, initialize_agent\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "agent = initialize_agent(\n", diff --git a/docs/docs/guides/safety/logical_fallacy_chain.mdx b/docs/docs/guides/safety/logical_fallacy_chain.mdx index 108a852061..d25dd37cd3 100644 --- a/docs/docs/guides/safety/logical_fallacy_chain.mdx +++ b/docs/docs/guides/safety/logical_fallacy_chain.mdx @@ -21,7 +21,7 @@ Therefore, it is crucial that model developers proactively address logical falla ```python # Imports -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI from langchain.prompts import PromptTemplate from langchain.chains.llm import LLMChain from langchain_experimental.fallacy_removal.base import FallacyChain diff --git a/docs/docs/guides/safety/moderation.mdx b/docs/docs/guides/safety/moderation.mdx index 72353fa231..94b6a7dc64 100644 --- a/docs/docs/guides/safety/moderation.mdx +++ b/docs/docs/guides/safety/moderation.mdx @@ -22,7 +22,7 @@ We'll show: ```python -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI from langchain.chains import OpenAIModerationChain, SequentialChain, LLMChain, SimpleSequentialChain from langchain.prompts import PromptTemplate ``` 
diff --git a/docs/docs/integrations/callbacks/argilla.ipynb b/docs/docs/integrations/callbacks/argilla.ipynb index 0fed211592..8953805e76 100644 --- a/docs/docs/integrations/callbacks/argilla.ipynb +++ b/docs/docs/integrations/callbacks/argilla.ipynb @@ -215,7 +215,7 @@ ], "source": [ "from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "argilla_callback = ArgillaCallbackHandler(\n", " dataset_name=\"langchain-dataset\",\n", @@ -281,7 +281,7 @@ "from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n", "from langchain.chains import LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "argilla_callback = ArgillaCallbackHandler(\n", " dataset_name=\"langchain-dataset\",\n", @@ -363,7 +363,7 @@ "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "argilla_callback = ArgillaCallbackHandler(\n", " dataset_name=\"langchain-dataset\",\n", diff --git a/docs/docs/integrations/callbacks/confident.ipynb b/docs/docs/integrations/callbacks/confident.ipynb index 6ef644b33e..af880da0b6 100644 --- a/docs/docs/integrations/callbacks/confident.ipynb +++ b/docs/docs/integrations/callbacks/confident.ipynb @@ -152,7 +152,7 @@ } ], "source": [ - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(\n", " temperature=0,\n", @@ -217,9 +217,8 @@ "from langchain.chains import RetrievalQA\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.llms import OpenAI\n", "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAI, OpenAIEmbeddings\n", "\n", "text_file_url = \"https://raw.githubusercontent.com/hwchase17/chat-your-data/master/state_of_the_union.txt\"\n", "\n", diff --git a/docs/docs/integrations/callbacks/context.ipynb b/docs/docs/integrations/callbacks/context.ipynb index ed65e084c5..015c636633 100644 --- a/docs/docs/integrations/callbacks/context.ipynb +++ b/docs/docs/integrations/callbacks/context.ipynb @@ -104,7 +104,7 @@ " HumanMessage,\n", " SystemMessage,\n", ")\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "token = os.environ[\"CONTEXT_API_TOKEN\"]\n", "\n", @@ -162,7 +162,7 @@ " ChatPromptTemplate,\n", " HumanMessagePromptTemplate,\n", ")\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "token = os.environ[\"CONTEXT_API_TOKEN\"]\n", "\n", diff --git a/docs/docs/integrations/callbacks/infino.ipynb b/docs/docs/integrations/callbacks/infino.ipynb index a148c88234..4b318b1378 100644 --- a/docs/docs/integrations/callbacks/infino.ipynb +++ b/docs/docs/integrations/callbacks/infino.ipynb @@ -54,7 +54,7 @@ "import matplotlib.pyplot as plt\n", "from infinopy import InfinoClient\n", "from langchain.callbacks import InfinoCallbackHandler\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] 
}, { @@ -316,8 +316,8 @@ "# os.environ[\"OPENAI_API_KEY\"] = \"YOUR_API_KEY\"\n", "\n", "from langchain.chains.summarize import load_summarize_chain\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.document_loaders import WebBaseLoader\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# Create callback handler. This logs latency, errors, token usage, prompts, as well as prompt responses to Infino.\n", "handler = InfinoCallbackHandler(\n", diff --git a/docs/docs/integrations/callbacks/labelstudio.ipynb b/docs/docs/integrations/callbacks/labelstudio.ipynb index 9ca111ffd0..4e59c91dc1 100644 --- a/docs/docs/integrations/callbacks/labelstudio.ipynb +++ b/docs/docs/integrations/callbacks/labelstudio.ipynb @@ -171,7 +171,7 @@ "outputs": [], "source": [ "from langchain.callbacks import LabelStudioCallbackHandler\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(\n", " temperature=0, callbacks=[LabelStudioCallbackHandler(project_name=\"My Project\")]\n", @@ -243,7 +243,7 @@ "source": [ "from langchain.callbacks import LabelStudioCallbackHandler\n", "from langchain.schema import HumanMessage, SystemMessage\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "chat_llm = ChatOpenAI(\n", " callbacks=[\n", diff --git a/docs/docs/integrations/callbacks/llmonitor.md b/docs/docs/integrations/callbacks/llmonitor.md index ee74df6db1..266332a7e3 100644 --- a/docs/docs/integrations/callbacks/llmonitor.md +++ b/docs/docs/integrations/callbacks/llmonitor.md @@ -27,8 +27,8 @@ handler = LLMonitorCallbackHandler(app_id="...") ## Usage with LLM/Chat models ```python -from langchain_community.llms import OpenAI -from langchain_community.chat_models import ChatOpenAI +from langchain_openai import OpenAI +from langchain_openai import ChatOpenAI from langchain.callbacks import LLMonitorCallbackHandler handler = LLMonitorCallbackHandler() @@ -52,7 +52,7 @@ It is also recommended to pass `agent_name` in the metadata to be able to distin Example: ```python -from langchain_community.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI from langchain.schema import SystemMessage, HumanMessage from langchain.agents import OpenAIFunctionsAgent, AgentExecutor, tool from langchain.callbacks import LLMonitorCallbackHandler @@ -85,7 +85,7 @@ Another example: ```python from langchain.agents import load_tools, initialize_agent, AgentType -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI from langchain.callbacks import LLMonitorCallbackHandler handler = LLMonitorCallbackHandler() diff --git a/docs/docs/integrations/callbacks/promptlayer.ipynb b/docs/docs/integrations/callbacks/promptlayer.ipynb index de7f52ffd3..1b847528bc 100644 --- a/docs/docs/integrations/callbacks/promptlayer.ipynb +++ b/docs/docs/integrations/callbacks/promptlayer.ipynb @@ -79,7 +79,7 @@ "from langchain.schema import (\n", " HumanMessage,\n", ")\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "chat_llm = ChatOpenAI(\n", " temperature=0,\n", @@ -142,7 +142,7 @@ "source": [ "import promptlayer # Don't forget this 🍰\n", "from langchain.callbacks import PromptLayerCallbackHandler\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "\n", "def pl_id_callback(promptlayer_request_id):\n", diff --git 
a/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb b/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb index 82ecc3298b..9203d35351 100644 --- a/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb +++ b/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb @@ -83,7 +83,7 @@ "from langchain.callbacks import SageMakerCallbackHandler\n", "from langchain.chains import LLMChain, SimpleSequentialChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "from sagemaker.analytics import ExperimentAnalytics\n", "from sagemaker.experiments.run import Run\n", "from sagemaker.session import Session" diff --git a/docs/docs/integrations/callbacks/streamlit.md b/docs/docs/integrations/callbacks/streamlit.md index 1f425d588e..776f0f6d9c 100644 --- a/docs/docs/integrations/callbacks/streamlit.md +++ b/docs/docs/integrations/callbacks/streamlit.md @@ -44,7 +44,7 @@ agent in your Streamlit app and simply pass the `StreamlitCallbackHandler` to `a thoughts and actions live in your app. ```python -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI from langchain.agents import AgentType, initialize_agent, load_tools from langchain.callbacks import StreamlitCallbackHandler import streamlit as st diff --git a/docs/docs/integrations/callbacks/trubrics.ipynb b/docs/docs/integrations/callbacks/trubrics.ipynb index cc09ad9db7..cafb536563 100644 --- a/docs/docs/integrations/callbacks/trubrics.ipynb +++ b/docs/docs/integrations/callbacks/trubrics.ipynb @@ -149,7 +149,7 @@ "outputs": [], "source": [ "from langchain.callbacks import TrubricsCallbackHandler\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { @@ -268,7 +268,7 @@ "source": [ "from langchain.callbacks import TrubricsCallbackHandler\n", "from langchain.schema import HumanMessage, SystemMessage\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/chat/azure_chat_openai.ipynb b/docs/docs/integrations/chat/azure_chat_openai.ipynb index cce6508e16..399edd6ebf 100644 --- a/docs/docs/integrations/chat/azure_chat_openai.ipynb +++ b/docs/docs/integrations/chat/azure_chat_openai.ipynb @@ -32,7 +32,7 @@ "import os\n", "\n", "from langchain.schema import HumanMessage\n", - "from langchain_community.chat_models import AzureChatOpenAI" + "from langchain_openai import AzureChatOpenAI" ] }, { diff --git a/docs/docs/integrations/chat/fireworks.ipynb b/docs/docs/integrations/chat/fireworks.ipynb index e5a058df5b..4be0d01d78 100644 --- a/docs/docs/integrations/chat/fireworks.ipynb +++ b/docs/docs/integrations/chat/fireworks.ipynb @@ -157,8 +157,8 @@ "outputs": [], "source": [ "from langchain.memory import ConversationBufferMemory\n", - "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "from langchain_community.chat_models import ChatFireworks\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "from langchain_core.runnables import RunnablePassthrough\n", "\n", "llm = ChatFireworks(\n", diff --git a/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb b/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb index 3b46fbddc1..fa1c97fa39 100644 --- a/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb +++ b/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb @@ -50,8 +50,8 @@ "metadata": {}, "outputs": [], "source": [ - 
"from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatVertexAI" + "from langchain_community.chat_models import ChatVertexAI\n", + "from langchain_core.prompts import ChatPromptTemplate" ] }, { diff --git a/docs/docs/integrations/chat/openai.ipynb b/docs/docs/integrations/chat/openai.ipynb index 8b9619279b..bdfc034267 100644 --- a/docs/docs/integrations/chat/openai.ipynb +++ b/docs/docs/integrations/chat/openai.ipynb @@ -35,7 +35,7 @@ " SystemMessagePromptTemplate,\n", ")\n", "from langchain.schema import HumanMessage, SystemMessage\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/chat/vllm.ipynb b/docs/docs/integrations/chat/vllm.ipynb index 08bdeb6967..88333bcb01 100644 --- a/docs/docs/integrations/chat/vllm.ipynb +++ b/docs/docs/integrations/chat/vllm.ipynb @@ -37,7 +37,7 @@ " SystemMessagePromptTemplate,\n", ")\n", "from langchain.schema import HumanMessage, SystemMessage\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/chat_loaders/discord.ipynb b/docs/docs/integrations/chat_loaders/discord.ipynb index d01c5068a2..b4eb0263b1 100644 --- a/docs/docs/integrations/chat_loaders/discord.ipynb +++ b/docs/docs/integrations/chat_loaders/discord.ipynb @@ -284,7 +284,7 @@ } ], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI()\n", "\n", diff --git a/docs/docs/integrations/chat_loaders/facebook.ipynb b/docs/docs/integrations/chat_loaders/facebook.ipynb index e5047cab7c..94632851b0 100644 --- a/docs/docs/integrations/chat_loaders/facebook.ipynb +++ b/docs/docs/integrations/chat_loaders/facebook.ipynb @@ -500,7 +500,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(\n", " model=job.fine_tuned_model,\n", @@ -515,8 +515,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", diff --git a/docs/docs/integrations/chat_loaders/imessage.ipynb b/docs/docs/integrations/chat_loaders/imessage.ipynb index 62963aea12..5524844238 100644 --- a/docs/docs/integrations/chat_loaders/imessage.ipynb +++ b/docs/docs/integrations/chat_loaders/imessage.ipynb @@ -344,7 +344,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(\n", " model=job.fine_tuned_model,\n", @@ -359,8 +359,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", diff --git a/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb b/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb index 6e0fb2310a..f7d899fe3d 100644 --- a/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb +++ b/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb @@ -230,7 +230,7 @@ "model_id = 
job.fine_tuned_model\n", "\n", "# Use the fine-tuned model in LangChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(\n", " model=model_id,\n", diff --git a/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb b/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb index bc8f9c0c7e..384b085048 100644 --- a/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb +++ b/docs/docs/integrations/chat_loaders/langsmith_llm_runs.ipynb @@ -150,8 +150,8 @@ "outputs": [], "source": [ "from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", @@ -365,7 +365,7 @@ "model_id = job.fine_tuned_model\n", "\n", "# Use the fine-tuned model in LangChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(\n", " model=model_id,\n", diff --git a/docs/docs/integrations/chat_loaders/slack.ipynb b/docs/docs/integrations/chat_loaders/slack.ipynb index bae9ebf886..9aeb484f4e 100644 --- a/docs/docs/integrations/chat_loaders/slack.ipynb +++ b/docs/docs/integrations/chat_loaders/slack.ipynb @@ -133,7 +133,7 @@ } ], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI()\n", "\n", diff --git a/docs/docs/integrations/chat_loaders/telegram.ipynb b/docs/docs/integrations/chat_loaders/telegram.ipynb index 233eb297cd..b8da73aa5a 100644 --- a/docs/docs/integrations/chat_loaders/telegram.ipynb +++ b/docs/docs/integrations/chat_loaders/telegram.ipynb @@ -176,7 +176,7 @@ } ], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI()\n", "\n", diff --git a/docs/docs/integrations/chat_loaders/wechat.ipynb b/docs/docs/integrations/chat_loaders/wechat.ipynb index db0fd8b000..bb81d8cc88 100644 --- a/docs/docs/integrations/chat_loaders/wechat.ipynb +++ b/docs/docs/integrations/chat_loaders/wechat.ipynb @@ -263,7 +263,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI()\n", "\n", diff --git a/docs/docs/integrations/chat_loaders/whatsapp.ipynb b/docs/docs/integrations/chat_loaders/whatsapp.ipynb index 80215a95f9..2243a132ab 100644 --- a/docs/docs/integrations/chat_loaders/whatsapp.ipynb +++ b/docs/docs/integrations/chat_loaders/whatsapp.ipynb @@ -166,7 +166,7 @@ } ], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI()\n", "\n", diff --git a/docs/docs/integrations/document_loaders/amazon_textract.ipynb b/docs/docs/integrations/document_loaders/amazon_textract.ipynb index 781121c35f..55fde6ea4f 100644 --- a/docs/docs/integrations/document_loaders/amazon_textract.ipynb +++ b/docs/docs/integrations/document_loaders/amazon_textract.ipynb @@ -267,7 +267,7 @@ ], "source": [ "from langchain.chains.question_answering import load_qa_chain\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", 
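"# `OpenAI` now comes from the `langchain_openai` partner package; the class is\n", "# unchanged, so the map-reduce QA chain below works exactly as before.\n",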
"chain = load_qa_chain(llm=OpenAI(), chain_type=\"map_reduce\")\n", "query = [\"Who are the autors?\"]\n", diff --git a/docs/docs/integrations/document_loaders/docugami.ipynb b/docs/docs/integrations/document_loaders/docugami.ipynb index 12626778a2..94c19d93a9 100644 --- a/docs/docs/integrations/document_loaders/docugami.ipynb +++ b/docs/docs/integrations/document_loaders/docugami.ipynb @@ -211,9 +211,8 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.llms.openai import OpenAI\n", "from langchain_community.vectorstores.chroma import Chroma\n", + "from langchain_openai import OpenAI, OpenAIEmbeddings\n", "\n", "embedding = OpenAIEmbeddings()\n", "vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)\n", @@ -541,8 +540,8 @@ "source": [ "from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType\n", "from langchain.storage import InMemoryStore\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores.chroma import Chroma\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "# The vectorstore to use to index the child chunks\n", "vectorstore = Chroma(collection_name=\"big2small\", embedding_function=OpenAIEmbeddings())\n", diff --git a/docs/docs/integrations/document_loaders/figma.ipynb b/docs/docs/integrations/document_loaders/figma.ipynb index 3e078ce984..a01dfbc1f0 100644 --- a/docs/docs/integrations/document_loaders/figma.ipynb +++ b/docs/docs/integrations/document_loaders/figma.ipynb @@ -29,8 +29,8 @@ " HumanMessagePromptTemplate,\n", " SystemMessagePromptTemplate,\n", ")\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.document_loaders.figma import FigmaFileLoader" + "from langchain_community.document_loaders.figma import FigmaFileLoader\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/document_loaders/github.ipynb b/docs/docs/integrations/document_loaders/github.ipynb index 9ee4627cbf..3d9f57243f 100644 --- a/docs/docs/integrations/document_loaders/github.ipynb +++ b/docs/docs/integrations/document_loaders/github.ipynb @@ -203,7 +203,7 @@ "os.environ[\"OPENAI_API_KEY\"] = \"...\"\r\n", "\r\n", "from langchain.chains import LLMChain\r\n", - "from langchain_community.chat_models import ChatOpenAI\r\n", + "from langchain_openai import ChatOpenAI\r\n", "from langchain.prompts import PromptTemplate\r\n", "from langchain.prompts.chat import ChatPromptTemplate\r\n", "from langchain.schema import messages_from_dict\r\n", diff --git a/docs/docs/integrations/document_loaders/psychic.ipynb b/docs/docs/integrations/document_loaders/psychic.ipynb index 052ac713eb..89e8d6487c 100644 --- a/docs/docs/integrations/document_loaders/psychic.ipynb +++ b/docs/docs/integrations/document_loaders/psychic.ipynb @@ -79,9 +79,8 @@ "source": [ "from langchain.chains import RetrievalQAWithSourcesChain\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.vectorstores import Chroma" + "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAI, OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/document_loaders/youtube_audio.ipynb b/docs/docs/integrations/document_loaders/youtube_audio.ipynb index 
41a416bded..8b8fe95949 100644 --- a/docs/docs/integrations/document_loaders/youtube_audio.ipynb +++ b/docs/docs/integrations/document_loaders/youtube_audio.ipynb @@ -169,9 +169,8 @@ "source": [ "from langchain.chains import RetrievalQA\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import FAISS" + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb b/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb index d5efe1d844..11a4b2ae96 100644 --- a/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb +++ b/docs/docs/integrations/document_transformers/openai_metadata_tagger.ipynb @@ -22,10 +22,10 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.document_transformers.openai_functions import (\n", " create_metadata_tagger,\n", - ")" + ")\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -212,7 +212,7 @@ } ], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "prompt = ChatPromptTemplate.from_template(\n", " \"\"\"Extract relevant information from the following text.\n", diff --git a/docs/docs/integrations/llms/azure_openai.ipynb b/docs/docs/integrations/llms/azure_openai.ipynb index b501f91c96..3f9dd72a9d 100644 --- a/docs/docs/integrations/llms/azure_openai.ipynb +++ b/docs/docs/integrations/llms/azure_openai.ipynb @@ -124,7 +124,7 @@ "outputs": [], "source": [ "# Import Azure OpenAI\n", - "from langchain_community.llms import AzureOpenAI" + "from langchain_openai import AzureOpenAI" ] }, { diff --git a/docs/docs/integrations/llms/llm_caching.ipynb b/docs/docs/integrations/llms/llm_caching.ipynb index 6ba910f0c6..75c7cf8a64 100644 --- a/docs/docs/integrations/llms/llm_caching.ipynb +++ b/docs/docs/integrations/llms/llm_caching.ipynb @@ -18,7 +18,7 @@ "outputs": [], "source": [ "from langchain.globals import set_llm_cache\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "# To make the caching really obvious, let's use a slower model.\n", "llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)" @@ -415,7 +415,7 @@ "outputs": [], "source": [ "from langchain.cache import RedisSemanticCache\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "set_llm_cache(\n", " RedisSemanticCache(redis_url=\"redis://localhost:6379\", embedding=OpenAIEmbeddings())\n", @@ -1059,7 +1059,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embedding = OpenAIEmbeddings()" ] @@ -1267,7 +1267,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embedding = OpenAIEmbeddings()" ] diff --git a/docs/docs/integrations/llms/opaqueprompts.ipynb b/docs/docs/integrations/llms/opaqueprompts.ipynb index 1ac440c056..cdc15e680a 100644 --- 
a/docs/docs/integrations/llms/opaqueprompts.ipynb +++ b/docs/docs/integrations/llms/opaqueprompts.ipynb @@ -63,7 +63,8 @@ "from langchain.globals import set_debug, set_verbose\n", "from langchain.memory import ConversationBufferWindowMemory\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpaquePrompts, OpenAI\n", + "from langchain_community.llms import OpaquePrompts\n", + "from langchain_openai import OpenAI\n", "\n", "set_debug(True)\n", "set_verbose(True)\n", diff --git a/docs/docs/integrations/llms/openai.ipynb b/docs/docs/integrations/llms/openai.ipynb index 72ad79b143..cbaab6002f 100644 --- a/docs/docs/integrations/llms/openai.ipynb +++ b/docs/docs/integrations/llms/openai.ipynb @@ -68,7 +68,7 @@ "source": [ "from langchain.chains import LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/memory/aws_dynamodb.ipynb b/docs/docs/integrations/memory/aws_dynamodb.ipynb index bd4e49096e..fc118f08e8 100644 --- a/docs/docs/integrations/memory/aws_dynamodb.ipynb +++ b/docs/docs/integrations/memory/aws_dynamodb.ipynb @@ -249,8 +249,8 @@ "source": [ "from langchain.agents import AgentType, Tool, initialize_agent\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_experimental.utilities import PythonREPL" + "from langchain_experimental.utilities import PythonREPL\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/memory/motorhead_memory.ipynb b/docs/docs/integrations/memory/motorhead_memory.ipynb index 7468a69a94..e8c3ee989d 100644 --- a/docs/docs/integrations/memory/motorhead_memory.ipynb +++ b/docs/docs/integrations/memory/motorhead_memory.ipynb @@ -37,7 +37,7 @@ "source": [ "from langchain.chains import LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "template = \"\"\"You are a chatbot having a conversation with a human.\n", "\n", diff --git a/docs/docs/integrations/memory/remembrall.md b/docs/docs/integrations/memory/remembrall.md index dd46138d74..39581f591a 100644 --- a/docs/docs/integrations/memory/remembrall.md +++ b/docs/docs/integrations/memory/remembrall.md @@ -21,7 +21,7 @@ Any request that you send with the modified `openai_api_base` (see below) and Re In addition to setting the `openai_api_base` and Remembrall API key via `x-gp-api-key`, you should specify a UID to maintain memory for. This will usually be a unique user identifier (like email). ```python -from langchain_community.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI chat_model = ChatOpenAI(openai_api_base="https://remembrall.dev/api/openai/v1", model_kwargs={ "headers":{ @@ -40,7 +40,7 @@ print(chat_model.predict("What is my favorite color?")) First, create a document context in the [Remembrall dashboard](https://remembrall.dev/dashboard/spells). Paste in the document texts or upload documents as PDFs to be processed. Save the Document Context ID and insert it as shown below. 
```python -from langchain_community.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI chat_model = ChatOpenAI(openai_api_base="https://remembrall.dev/api/openai/v1", model_kwargs={ "headers":{ diff --git a/docs/docs/integrations/memory/sql_chat_message_history.ipynb b/docs/docs/integrations/memory/sql_chat_message_history.ipynb index 00965e7a16..8593991567 100644 --- a/docs/docs/integrations/memory/sql_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/sql_chat_message_history.ipynb @@ -139,9 +139,9 @@ "source": [ "from typing import Optional\n", "\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", - "from langchain_core.runnables.history import RunnableWithMessageHistory" + "from langchain_core.runnables.history import RunnableWithMessageHistory\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/memory/sqlite.ipynb b/docs/docs/integrations/memory/sqlite.ipynb index 8a4be17974..d868acc45e 100644 --- a/docs/docs/integrations/memory/sqlite.ipynb +++ b/docs/docs/integrations/memory/sqlite.ipynb @@ -37,7 +37,7 @@ "from langchain.memory import ConversationEntityMemory\n", "from langchain.memory.entity import SQLiteEntityStore\n", "from langchain.memory.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/memory/streamlit_chat_message_history.ipynb b/docs/docs/integrations/memory/streamlit_chat_message_history.ipynb index 32fb1bc00f..21de8c78ac 100644 --- a/docs/docs/integrations/memory/streamlit_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/streamlit_chat_message_history.ipynb @@ -84,7 +84,7 @@ "source": [ "from langchain.chains import LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "template = \"\"\"You are an AI chatbot having a conversation with a human.\n", "\n", diff --git a/docs/docs/integrations/memory/xata_chat_message_history.ipynb b/docs/docs/integrations/memory/xata_chat_message_history.ipynb index a44db96e8e..01e6cf62d6 100644 --- a/docs/docs/integrations/memory/xata_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/xata_chat_message_history.ipynb @@ -154,8 +154,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores.xata import XataVectorStore\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()\n", "\n", @@ -220,7 +220,7 @@ "source": [ "from langchain.agents import AgentType, initialize_agent\n", "from langchain.agents.agent_toolkits import create_retriever_tool\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "tool = create_retriever_tool(\n", " vector_store.as_retriever(),\n", diff --git a/docs/docs/integrations/memory/zep_memory.ipynb b/docs/docs/integrations/memory/zep_memory.ipynb index 9be30d6936..e1018e1235 100644 --- a/docs/docs/integrations/memory/zep_memory.ipynb +++ b/docs/docs/integrations/memory/zep_memory.ipynb @@ -53,8 +53,8 @@ "from langchain.memory import ZepMemory\n", "from langchain.retrievers import ZepRetriever\n", "from langchain.schema import AIMessage, HumanMessage\n", - "from langchain_community.llms 
import OpenAI\n", "from langchain_community.utilities import WikipediaAPIWrapper\n", + "from langchain_openai import OpenAI\n", "\n", "# Set this to your Zep server URL\n", "ZEP_API_URL = \"http://localhost:8000\"\n", diff --git a/docs/docs/integrations/platforms/anthropic.mdx b/docs/docs/integrations/platforms/anthropic.mdx index 31eae89cba..14b2679049 100644 --- a/docs/docs/integrations/platforms/anthropic.mdx +++ b/docs/docs/integrations/platforms/anthropic.mdx @@ -60,7 +60,7 @@ When working with ChatModels, it is preferred that you design your prompts as `C Here is an example below of doing that: ``` -from langchain.prompts import ChatPromptTemplate +from langchain_core.prompts import ChatPromptTemplate prompt = ChatPromptTemplate.from_messages([ ("system", "You are a helpful chatbot"), diff --git a/docs/docs/integrations/platforms/microsoft.mdx b/docs/docs/integrations/platforms/microsoft.mdx index 4d62f783f2..d821454e9a 100644 --- a/docs/docs/integrations/platforms/microsoft.mdx +++ b/docs/docs/integrations/platforms/microsoft.mdx @@ -26,7 +26,7 @@ See a [usage example](/docs/integrations/chat/azure_chat_openai) ```python -from langchain_community.chat_models import AzureChatOpenAI +from langchain_openai import AzureChatOpenAI ``` ## Text Embedding Models @@ -35,7 +35,7 @@ from langchain_community.chat_models import AzureChatOpenAI See a [usage example](/docs/integrations/text_embedding/azureopenai) ```python -from langchain_community.embeddings import AzureOpenAIEmbeddings +from langchain_openai import AzureOpenAIEmbeddings ``` ## LLMs @@ -44,7 +44,7 @@ from langchain_community.embeddings import AzureOpenAIEmbeddings See a [usage example](/docs/integrations/llms/azure_openai). ```python -from langchain_community.llms import AzureOpenAI +from langchain_openai import AzureOpenAI ``` ## Document loaders diff --git a/docs/docs/integrations/platforms/openai.mdx b/docs/docs/integrations/platforms/openai.mdx index b2b162f5a6..edf4d48c72 100644 --- a/docs/docs/integrations/platforms/openai.mdx +++ b/docs/docs/integrations/platforms/openai.mdx @@ -29,12 +29,12 @@ pip install tiktoken See a [usage example](/docs/integrations/llms/openai). ```python -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI ``` If you are using a model hosted on `Azure`, you should use different wrapper for that: ```python -from langchain_community.llms import AzureOpenAI +from langchain_openai import AzureOpenAI ``` For a more detailed walkthrough of the `Azure` wrapper, see [here](/docs/integrations/llms/azure_openai) @@ -44,12 +44,12 @@ For a more detailed walkthrough of the `Azure` wrapper, see [here](/docs/integra See a [usage example](/docs/integrations/chat/openai). 
```python -from langchain_community.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI ``` If you are using a model hosted on `Azure`, you should use a different wrapper for that: ```python -from langchain_community.llms import AzureChatOpenAI +from langchain_openai import AzureChatOpenAI ``` For a more detailed walkthrough of the `Azure` wrapper, see [here](/docs/integrations/chat/azure_chat_openai) @@ -59,7 +59,7 @@ For a more detailed walkthrough of the `Azure` wrapper, see [here](/docs/integra See a [usage example](/docs/integrations/text_embedding/openai) ```python -from langchain_community.embeddings import OpenAIEmbeddings +from langchain_openai import OpenAIEmbeddings ``` diff --git a/docs/docs/integrations/providers/aim_tracking.ipynb b/docs/docs/integrations/providers/aim_tracking.ipynb index 9f094db78f..cad086e7a3 100644 --- a/docs/docs/integrations/providers/aim_tracking.ipynb +++ b/docs/docs/integrations/providers/aim_tracking.ipynb @@ -66,7 +66,7 @@ "from datetime import datetime\n", "\n", "from langchain.callbacks import AimCallbackHandler, StdOutCallbackHandler\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/arthur_tracking.ipynb b/docs/docs/integrations/providers/arthur_tracking.ipynb index bbf042f000..74114875f3 100644 --- a/docs/docs/integrations/providers/arthur_tracking.ipynb +++ b/docs/docs/integrations/providers/arthur_tracking.ipynb @@ -29,7 +29,7 @@ "from langchain.callbacks import ArthurCallbackHandler\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", "from langchain.schema import HumanMessage\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/providers/clearml_tracking.ipynb b/docs/docs/integrations/providers/clearml_tracking.ipynb index 3362826e11..61d7ba57f2 100644 --- a/docs/docs/integrations/providers/clearml_tracking.ipynb +++ b/docs/docs/integrations/providers/clearml_tracking.ipynb @@ -106,7 +106,7 @@ ], "source": [ "from langchain.callbacks import StdOutCallbackHandler\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "# Setup and use the ClearML Callback\n", "clearml_callback = ClearMLCallbackHandler(\n", diff --git a/docs/docs/integrations/providers/cnosdb.mdx b/docs/docs/integrations/providers/cnosdb.mdx index cbb7dfcb07..4c5316a5e3 100644 --- a/docs/docs/integrations/providers/cnosdb.mdx +++ b/docs/docs/integrations/providers/cnosdb.mdx @@ -37,7 +37,7 @@ db = SQLDatabase.from_cnosdb() ``` ```python # Creating an OpenAI Chat LLM Wrapper -from langchain_community.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") ``` diff --git a/docs/docs/integrations/providers/comet_tracking.ipynb b/docs/docs/integrations/providers/comet_tracking.ipynb index 7fb7c97414..bfe3b0c1fc 100644 --- a/docs/docs/integrations/providers/comet_tracking.ipynb +++ b/docs/docs/integrations/providers/comet_tracking.ipynb @@ -119,7 +119,7 @@ "outputs": [], "source": [ "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "comet_callback = CometCallbackHandler(\n", " project_name=\"comet-example-langchain\",\n", @@ -152,7 +152,7 @@ "from langchain.callbacks import 
CometCallbackHandler, StdOutCallbackHandler\n", "from langchain.chains import LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "comet_callback = CometCallbackHandler(\n", " complexity_metrics=True,\n", @@ -189,7 +189,7 @@ "source": [ "from langchain.agents import initialize_agent, load_tools\n", "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "comet_callback = CometCallbackHandler(\n", " project_name=\"comet-example-langchain\",\n", @@ -249,7 +249,7 @@ "from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n", "from langchain.chains import LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "from rouge_score import rouge_scorer\n", "\n", "\n", diff --git a/docs/docs/integrations/providers/flyte.mdx b/docs/docs/integrations/providers/flyte.mdx index 04bc45cfb3..c9e727ffb0 100644 --- a/docs/docs/integrations/providers/flyte.mdx +++ b/docs/docs/integrations/providers/flyte.mdx @@ -28,7 +28,7 @@ from flytekit import ImageSpec, task from langchain.agents import AgentType, initialize_agent, load_tools from langchain.callbacks import FlyteCallbackHandler from langchain.chains import LLMChain -from langchain_community.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI from langchain.prompts import PromptTemplate from langchain.schema import HumanMessage ``` diff --git a/docs/docs/integrations/providers/google_serper.mdx b/docs/docs/integrations/providers/google_serper.mdx index ec3e4e83da..ef07f017fe 100644 --- a/docs/docs/integrations/providers/google_serper.mdx +++ b/docs/docs/integrations/providers/google_serper.mdx @@ -21,7 +21,7 @@ You can use it as part of a Self Ask chain: ```python from langchain_community.utilities import GoogleSerperAPIWrapper -from langchain_community.llms.openai import OpenAI +from langchain_openai import OpenAI from langchain.agents import initialize_agent, Tool from langchain.agents import AgentType diff --git a/docs/docs/integrations/providers/helicone.mdx b/docs/docs/integrations/providers/helicone.mdx index 7adabba080..7e81000487 100644 --- a/docs/docs/integrations/providers/helicone.mdx +++ b/docs/docs/integrations/providers/helicone.mdx @@ -23,7 +23,7 @@ Now head over to [helicone.ai](https://helicone.ai/onboarding?step=2) to create ## How to enable Helicone caching ```python -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI import openai openai.api_base = "https://oai.hconeai.com/v1" @@ -37,7 +37,7 @@ print(llm(text)) ## How to use Helicone custom properties ```python -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI import openai openai.api_base = "https://oai.hconeai.com/v1" diff --git a/docs/docs/integrations/providers/javelin_ai_gateway.mdx b/docs/docs/integrations/providers/javelin_ai_gateway.mdx index 7b1c4ede21..d4f4ceeec0 100644 --- a/docs/docs/integrations/providers/javelin_ai_gateway.mdx +++ b/docs/docs/integrations/providers/javelin_ai_gateway.mdx @@ -52,7 +52,7 @@ print(result) ```python from langchain_community.embeddings import JavelinAIGatewayEmbeddings -from langchain_community.embeddings.openai import OpenAIEmbeddings +from langchain_openai import OpenAIEmbeddings embeddings = 
JavelinAIGatewayEmbeddings( gateway_uri="http://localhost:8000", diff --git a/docs/docs/integrations/providers/langchain_decorators.mdx b/docs/docs/integrations/providers/langchain_decorators.mdx index bab28f2617..1c80bcc2bf 100644 --- a/docs/docs/integrations/providers/langchain_decorators.mdx +++ b/docs/docs/integrations/providers/langchain_decorators.mdx @@ -91,7 +91,7 @@ def write_a_complicated_code(app_idea:str)->str: 3. Define the settings **directly in the decorator** ``` python -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI @llm_prompt( llm=OpenAI(temperature=0.7), diff --git a/docs/docs/integrations/providers/log10.mdx b/docs/docs/integrations/providers/log10.mdx index b5fe4ce4f3..bd5890bb1f 100644 --- a/docs/docs/integrations/providers/log10.mdx +++ b/docs/docs/integrations/providers/log10.mdx @@ -17,7 +17,7 @@ Log10 is an [open-source](https://github.com/log10-io/log10) proxiless LLM data Integration with log10 is a simple one-line `log10_callback` integration as shown below: ```python -from langchain_community.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI from langchain.schema import HumanMessage from log10.langchain import Log10Callback @@ -40,9 +40,9 @@ llm = ChatOpenAI(model_name="gpt-3.5-turbo", callbacks=[log10_callback]) ## How to use tags with Log10 ```python -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI from langchain_community.chat_models import ChatAnthropic -from langchain_community.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI from langchain.schema import HumanMessage from log10.langchain import Log10Callback @@ -74,7 +74,7 @@ You can also intermix direct OpenAI calls and Langchain LLM calls: import os from log10.load import log10, log10_session import openai -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI log10(openai) diff --git a/docs/docs/integrations/providers/mlflow_tracking.ipynb b/docs/docs/integrations/providers/mlflow_tracking.ipynb index 4b4dffd03f..13683b309c 100644 --- a/docs/docs/integrations/providers/mlflow_tracking.ipynb +++ b/docs/docs/integrations/providers/mlflow_tracking.ipynb @@ -78,7 +78,7 @@ "outputs": [], "source": [ "from langchain.callbacks import MlflowCallbackHandler\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/motherduck.mdx b/docs/docs/integrations/providers/motherduck.mdx index 88dd7af796..5ef5c65e9c 100644 --- a/docs/docs/integrations/providers/motherduck.mdx +++ b/docs/docs/integrations/providers/motherduck.mdx @@ -26,7 +26,9 @@ conn_str = f"duckdb:///md:{token}@my_db" You can use the SQLChain to query data in your Motherduck instance in natural language. 
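The snippet below only constructs the chain; extended with an actual query, a run might look like this (a sketch assuming the `duckdb:///md:` connection string `conn_str` assembled above, with a made-up question):

```python
from langchain_community.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from langchain_openai import OpenAI

db = SQLDatabase.from_uri(conn_str)
db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db, verbose=True)

# Hypothetical natural-language question over your Motherduck tables
print(db_chain.run("How many rows does the users table have?"))
```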
``` -from langchain_community.llms import OpenAI, SQLDatabase, SQLDatabaseChain +from langchain_openai import OpenAI +from langchain_community.utilities import SQLDatabase +from langchain_experimental.sql import SQLDatabaseChain db = SQLDatabase.from_uri(conn_str) db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db, verbose=True) ``` diff --git a/docs/docs/integrations/providers/portkey/index.md b/docs/docs/integrations/providers/portkey/index.md index 73b4ed4999..aa5cfcda35 100644 --- a/docs/docs/integrations/providers/portkey/index.md +++ b/docs/docs/integrations/providers/portkey/index.md @@ -20,7 +20,7 @@ To start, get your Portkey API key by [signing up here](https://app.portkey.ai/l For OpenAI, a simple integration with logging feature would look like this: ```python -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI from langchain_community.utilities import Portkey # Add the Portkey API Key from your account @@ -39,7 +39,7 @@ A common Portkey X Langchain use case is to **trace a chain or an agent** and vi ```python from langchain.agents import AgentType, initialize_agent, load_tools -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI from langchain_community.utilities import Portkey # Add the Portkey API Key from your account diff --git a/docs/docs/integrations/providers/portkey/logging_tracing_portkey.ipynb b/docs/docs/integrations/providers/portkey/logging_tracing_portkey.ipynb index 0d3a8159c4..8ec8fae85a 100644 --- a/docs/docs/integrations/providers/portkey/logging_tracing_portkey.ipynb +++ b/docs/docs/integrations/providers/portkey/logging_tracing_portkey.ipynb @@ -27,8 +27,8 @@ "import os\n", "\n", "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.utilities import Portkey" + "from langchain_community.utilities import Portkey\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/ray_serve.ipynb b/docs/docs/integrations/providers/ray_serve.ipynb index f8988c64c0..144fb57233 100644 --- a/docs/docs/integrations/providers/ray_serve.ipynb +++ b/docs/docs/integrations/providers/ray_serve.ipynb @@ -109,7 +109,7 @@ "source": [ "from langchain.chains import LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/rebuff.ipynb b/docs/docs/integrations/providers/rebuff.ipynb index 540e68ed51..7b8c07fbf9 100644 --- a/docs/docs/integrations/providers/rebuff.ipynb +++ b/docs/docs/integrations/providers/rebuff.ipynb @@ -105,7 +105,7 @@ "source": [ "from langchain.chains import LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "# Set up the LangChain SDK with the environment variable\n", "llm = OpenAI(temperature=0)" diff --git a/docs/docs/integrations/providers/searchapi.mdx b/docs/docs/integrations/providers/searchapi.mdx index 8370d0eca5..f5e55ca390 100644 --- a/docs/docs/integrations/providers/searchapi.mdx +++ b/docs/docs/integrations/providers/searchapi.mdx @@ -21,7 +21,7 @@ You can use it as part of a Self Ask chain: ```python from langchain_community.utilities import SearchApiAPIWrapper -from langchain_community.llms.openai import OpenAI +from langchain_openai import OpenAI from langchain.agents import 
initialize_agent, Tool from langchain.agents import AgentType diff --git a/docs/docs/integrations/providers/shaleprotocol.md b/docs/docs/integrations/providers/shaleprotocol.md index bef048986d..2aced9cb63 100644 --- a/docs/docs/integrations/providers/shaleprotocol.md +++ b/docs/docs/integrations/providers/shaleprotocol.md @@ -19,7 +19,7 @@ As of June 2023, the API supports Vicuna-13B by default. We are going to support For example ```python -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI from langchain.prompts import PromptTemplate from langchain.chains import LLMChain diff --git a/docs/docs/integrations/providers/vectara/vectara_chat.ipynb b/docs/docs/integrations/providers/vectara/vectara_chat.ipynb index ff1c640beb..1f46116c42 100644 --- a/docs/docs/integrations/providers/vectara/vectara_chat.ipynb +++ b/docs/docs/integrations/providers/vectara/vectara_chat.ipynb @@ -61,8 +61,8 @@ "import os\n", "\n", "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.vectorstores import Vectara" + "from langchain_community.vectorstores import Vectara\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/vectara/vectara_summary.ipynb b/docs/docs/integrations/providers/vectara/vectara_summary.ipynb index d519102d41..dd6d2c347a 100644 --- a/docs/docs/integrations/providers/vectara/vectara_summary.ipynb +++ b/docs/docs/integrations/providers/vectara/vectara_summary.ipynb @@ -74,10 +74,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", "from langchain_community.embeddings import FakeEmbeddings\n", "from langchain_community.vectorstores import Vectara\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough" ] }, @@ -243,7 +243,7 @@ ], "source": [ "from langchain.retrievers.multi_query import MultiQueryRetriever\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0)\n", "mqr = MultiQueryRetriever.from_llm(retriever=retriever, llm=llm)\n", diff --git a/docs/docs/integrations/providers/wandb_tracing.ipynb b/docs/docs/integrations/providers/wandb_tracing.ipynb index da4cd81d4f..5d8aa57d14 100644 --- a/docs/docs/integrations/providers/wandb_tracing.ipynb +++ b/docs/docs/integrations/providers/wandb_tracing.ipynb @@ -39,7 +39,7 @@ "\n", "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.callbacks import wandb_tracing_enabled\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/wandb_tracking.ipynb b/docs/docs/integrations/providers/wandb_tracking.ipynb index 12b056b8cf..312e1718c6 100644 --- a/docs/docs/integrations/providers/wandb_tracking.ipynb +++ b/docs/docs/integrations/providers/wandb_tracking.ipynb @@ -63,7 +63,7 @@ "from datetime import datetime\n", "\n", "from langchain.callbacks import StdOutCallbackHandler, WandbCallbackHandler\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/providers/whylabs_profiling.ipynb b/docs/docs/integrations/providers/whylabs_profiling.ipynb index c8ec8ac818..08c90857d8 100644 --- 
a/docs/docs/integrations/providers/whylabs_profiling.ipynb +++ b/docs/docs/integrations/providers/whylabs_profiling.ipynb @@ -100,7 +100,7 @@ } ], "source": [ - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "whylabs = WhyLabsCallbackHandler.from_params()\n", "llm = OpenAI(temperature=0, callbacks=[whylabs])\n", diff --git a/docs/docs/integrations/retrievers/activeloop.ipynb b/docs/docs/integrations/retrievers/activeloop.ipynb index cdf1b6cec5..405f010281 100644 --- a/docs/docs/integrations/retrievers/activeloop.ipynb +++ b/docs/docs/integrations/retrievers/activeloop.ipynb @@ -73,9 +73,8 @@ "import os\n", "\n", "from langchain.chains import RetrievalQA\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.llms import OpenAIChat\n", "from langchain.vectorstores.deeplake import DeepLake\n", + "from langchain_openai import OpenAIChat, OpenAIEmbeddings\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"Enter your OpenAI API token: \")\n", "# # activeloop token is needed if you are not signed in using CLI: `activeloop login -u -p `\n", @@ -277,9 +276,9 @@ "from langchain.chains.openai_functions import (\n", " create_structured_output_chain,\n", ")\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate\n", "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "from pydantic import BaseModel, Field" ] }, @@ -336,7 +335,7 @@ "source": [ "import random\n", "\n", - "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "from tqdm import tqdm\n", "\n", "\n", diff --git a/docs/docs/integrations/retrievers/arxiv.ipynb b/docs/docs/integrations/retrievers/arxiv.ipynb index ee6d406cb2..99f4dd00b5 100644 --- a/docs/docs/integrations/retrievers/arxiv.ipynb +++ b/docs/docs/integrations/retrievers/arxiv.ipynb @@ -201,7 +201,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\") # switch to 'gpt-4'\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/docs/docs/integrations/retrievers/cohere-reranker.ipynb b/docs/docs/integrations/retrievers/cohere-reranker.ipynb index dc23c3e782..9b1fdbd6b9 100644 --- a/docs/docs/integrations/retrievers/cohere-reranker.ipynb +++ b/docs/docs/integrations/retrievers/cohere-reranker.ipynb @@ -327,8 +327,8 @@ "source": [ "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "documents = TextLoader(\"../../modules/state_of_the_union.txt\").load()\n", "text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)\n", @@ -388,7 +388,7 @@ "source": [ "from langchain.retrievers import ContextualCompressionRetriever\n", "from langchain.retrievers.document_compressors import CohereRerank\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = 
OpenAI(temperature=0)\n", "compressor = CohereRerank()\n", diff --git a/docs/docs/integrations/retrievers/docarray_retriever.ipynb b/docs/docs/integrations/retrievers/docarray_retriever.ipynb index e02d1a7743..f220cf2708 100644 --- a/docs/docs/integrations/retrievers/docarray_retriever.ipynb +++ b/docs/docs/integrations/retrievers/docarray_retriever.ipynb @@ -569,7 +569,7 @@ "source": [ "from docarray import BaseDoc, DocList\n", "from docarray.typing import NdArray\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "\n", "# define schema for your movie documents\n", diff --git a/docs/docs/integrations/retrievers/fleet_context.ipynb b/docs/docs/integrations/retrievers/fleet_context.ipynb index e7622c9318..a96c0c2345 100644 --- a/docs/docs/integrations/retrievers/fleet_context.ipynb +++ b/docs/docs/integrations/retrievers/fleet_context.ipynb @@ -35,10 +35,10 @@ "import pandas as pd\n", "from langchain.retrievers import MultiVectorRetriever\n", "from langchain.schema import Document\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", "from langchain_core.stores import BaseStore\n", "from langchain_core.vectorstores import VectorStore\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "\n", "def load_fleet_retriever(\n", @@ -191,9 +191,9 @@ { "data": { "text/plain": [ - "[Document(page_content='Vector store-backed retriever | 🦜️🔗 Langchain\\n# Vector store-backed retriever A vector store retriever is a retriever that uses a vector store to retrieve documents. It is a lightweight wrapper around the vector store class to make it conform to the retriever interface. It uses the search methods implemented by a vector store, like similarity search and MMR, to query the texts in the vector store. Once you construct a vector store, it\\'s very easy to construct a retriever. Let\\'s walk through an example.Once you construct a vector store, it\\'s very easy to construct a retriever. Let\\'s walk through an example. ``` from langchain_community.document_loaders import TextLoaderloader = TextLoader(\\'../../../state_of_the_union.txt\\') ``` ``` from langchain.text_splitter import CharacterTextSplitterfrom langchain_community.vectorstores import FAISSfrom langchain_community.embeddings import OpenAIEmbeddingsdocuments = loader.load()text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)texts = text_splitter.split_documents(documents)embeddings = OpenAIEmbeddings()db = FAISS.from_documents(texts, embeddings) ``` ``` Exiting: Cleaning up .chroma directory ``` ``` retriever = db.as_retriever() ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Maximum marginal relevance retrieval[\\u200b](#maximum-marginal-relevance-retrieval) By default, the vector store retriever uses similarity search.If the underlying vector store supports maximum marginal relevance search, you can specify that as the search type. ``` retriever = db.as_retriever(search_type=\"mmr\") ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Similarity score threshold retrieval[\\u200b](#similarity-score-threshold-retrieval) You can also a retrieval method that sets a similarity score threshold and only returns documents with a score above that threshold. 
``` retriever = db.as_retriever(search_type=\"similarity_score_threshold\", search_kwargs={\"score_threshold\": .5}) ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Specifying top k[\\u200b](#specifying-top-k) You can also specify search kwargs like `k` to use when doing retrieval.``` retriever = db.as_retriever(search_kwargs={\"k\": 1}) ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ``` len(docs) ``` ``` 1 ```', metadata={'title': 'Vector store-backed retriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore', 'id': 'c153ebd9-2611-4a43-9db6-daa1f5f214f6'}),\n", - " Document(page_content='MultiVector Retriever | 🦜️🔗 Langchain\\n# MultiVector Retriever It can often be beneficial to store multiple vectors per document. There are multiple use cases where this is beneficial. LangChain has a base `MultiVectorRetriever` which makes querying this type of setup easy. A lot of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`. The methods to create multiple vectors per document include: - Smaller chunks: split a document into smaller chunks, and embed those (this is ParentDocumentRetriever). - Summary: create a summary for each document, embed that along with (or instead of) the document. - Hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document. Note that this also enables another method of adding embeddings - manually.Note that this also enables another method of adding embeddings - manually. This is great because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control. ``` from langchain.retrievers.multi_vector import MultiVectorRetriever ``` ``` from langchain_community.vectorstores import Chromafrom langchain_community.embeddings import OpenAIEmbeddingsfrom langchain.text_splitter import RecursiveCharacterTextSplitterfrom langchain.storage import InMemoryStorefrom langchain_community.document_loaders import TextLoader ``` ``` loaders = [ TextLoader(\\'../../paul_graham_essay.txt\\'), TextLoader(\\'../../state_of_the_union.txt\\'),]docs = []for l in loaders: docs.extend(l.load())text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)docs = text_splitter.split_documents(docs) ``` ## Smaller chunks[\\u200b](#smaller-chunks) Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks.This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the `ParentDocumentRetriever` does. 
Here we show what is going on under the hood.``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)import uuiddoc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` # The splitter to use to create smaller chunkschild_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400) ``` ``` sub_docs = []for i, doc in enumerate(docs): _id = doc_ids[i] _sub_docs = child_text_splitter.split_documents([doc]) for _doc in _sub_docs: _doc.metadata[id_key] = _id sub_docs.extend(_sub_docs) ``` ``` retriever.vectorstore.add_documents(sub_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` # Vectorstore alone retrieves the small chunksretriever.vectorstore.similarity_search(\"justice breyer\")[0] ``` ``` Document(page_content=\\'Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court.Justice Breyer, thank you for your service. \\\\n\\\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\', metadata={\\'doc_id\\': \\'10e9cbc0-4ba5-4d79-a09b-c033d1ba7b01\\', \\'source\\': \\'../../state_of_the_union.txt\\'}) ``` ``` # Retriever returns larger chunkslen(retriever.get_relevant_documents(\"justice breyer\")[0].page_content) ``` ``` 9874 ``` ## Summary[\\u200b](#summary) Oftentimes a summary may be able to distill more accurately what a chunk is about, leading to better retrieval. 
Here we show how to create summaries, and then embed those.``` from langchain_community.chat_models import ChatOpenAIfrom langchain.prompts import ChatPromptTemplatefrom langchain_core.output_parsers import StrOutputParserimport uuidfrom langchain_core.documents import Document ``` ``` chain = ( {\"doc\": lambda x: x.page_content} | ChatPromptTemplate.from_template(\"Summarize the following document:\\\\n\\\\n{doc}\") | ChatOpenAI(max_retries=0) | StrOutputParser()) ``` ``` summaries = chain.batch(docs, {\"max_concurrency\": 5}) ``` ``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` summary_docs = [Document(page_content=s,metadata={id_key: doc_ids[i]}) for i, s in enumerate(summaries)] ``` ``` retriever.vectorstore.add_documents(summary_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` # # We can also add the original chunks to the vectorstore if we so want# for i, doc in enumerate(docs):# doc.metadata[id_key] = doc_ids[i]# retriever.vectorstore.add_documents(docs) ``` ``` sub_docs = vectorstore.similarity_search(\"justice breyer\") ``` ``` sub_docs[0] ``` ``` Document(page_content=\"The document is a transcript of a speech given by the President of the United States.The President discusses several important issues and initiatives, including the nomination of a Supreme Court Justice, border security and immigration reform, protecting women\\'s rights, advancing LGBTQ+ equality, bipartisan legislation, addressing the opioid epidemic and mental health, supporting veterans, investigating the health effects of burn pits on military personnel, ending cancer, and the strength and resilience of the American people. \", metadata={\\'doc_id\\': \\'79fa2e9f-28d9-4372-8af3-2caf4f1de312\\'}) ``` ``` retrieved_docs = retriever.get_relevant_documents(\"justice breyer\") ``` ``` len(retrieved_docs[0].page_content) ``` ``` 9194 ``` ## Hypothetical Queries[\\u200b](#hypothetical-queries) An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document.These questions can then be embedded ``` functions = [ { \"name\": \"hypothetical_questions\", \"description\": \"Generate hypothetical questions\", \"parameters\": { \"type\": \"object\", \"properties\": { \"questions\": { \"type\": \"array\", \"items\": { \"type\": \"string\" }, }, }, \"required\": [\"questions\"] } } ] ``` ``` from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParserchain = ( {\"doc\": lambda x: x.page_content} # Only asking for 3 hypothetical questions, but this could be adjusted | ChatPromptTemplate.from_template(\"Generate a list of 3 hypothetical questions that the below document could be used to answer:\\\\n\\\\n{doc}\") | ChatOpenAI(max_retries=0, model=\"gpt-4\").bind(functions=functions, function_call={\"name\": \"hypothetical_questions\"}) | JsonKeyOutputFunctionsParser(key_name=\"questions\")) ``` ``` chain.invoke(docs[0]) ``` ``` [\"What was the author\\'s initial impression of philosophy as a field of study, and how did it change when they got to college?\", \\'Why did the author decide to switch their focus to Artificial Intelligence (AI)? 
\\', \"What led to the author\\'s disillusionment with the field of AI as it was practiced at the time?\"]``` ``` hypothetical_questions = chain.batch(docs, {\"max_concurrency\": 5}) ``` ``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"hypo-questions\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` question_docs = []for i, question_list in enumerate(hypothetical_questions): question_docs.extend([Document(page_content=s,metadata={id_key: doc_ids[i]}) for s in question_list]) ``` ``` retriever.vectorstore.add_documents(question_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` sub_docs = vectorstore.similarity_search(\"justice breyer\") ``` ``` sub_docs ``` ``` [Document(page_content=\"What is the President\\'s stance on immigration reform?\", metadata={\\'doc_id\\': \\'505d73e3-8350-46ec-a58e-3af032f04ab3\\'}), Document(page_content=\"What is the President\\'s stance on immigration reform? \", metadata={\\'doc_id\\': \\'1c9618f0-7660-4b4f-a37c-509cbbbf6dba\\'}), Document(page_content=\"What is the President\\'s stance on immigration reform? \", metadata={\\'doc_id\\': \\'82c08209-b904-46a8-9532-edd2380950b7\\'}), Document(page_content=\\'What measures is the President proposing to protect the rights of LGBTQ+ Americans? \\', metadata={\\'doc_id\\': \\'82c08209-b904-46a8-9532-edd2380950b7\\'})] ``` ``` retrieved_docs = retriever.get_relevant_documents(\"justice breyer\") ``` ``` len(retrieved_docs[0].page_content) ``` ``` 9194 ```', metadata={'title': 'MultiVector Retriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector', 'id': 'beec5531-16a7-453c-80ab-c5628e0236ce'}),\n", - " Document(page_content='MultiQueryRetriever | 🦜️🔗 Langchain\\n# MultiQueryRetriever Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on \"distance\". But, retrieval may produce different results with subtle changes in query wording or if the embeddings do not capture the semantics of the data well. Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious. The `MultiQueryRetriever` automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. By generating multiple perspectives on the same question, the `MultiQueryRetriever` might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results.By generating multiple perspectives on the same question, the `MultiQueryRetriever` might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results. 
``` # Build a sample vectorDBfrom langchain_community.vectorstores import Chromafrom langchain_community.document_loaders import WebBaseLoaderfrom langchain_community.embeddings.openai import OpenAIEmbeddingsfrom langchain.text_splitter import RecursiveCharacterTextSplitter# Load blog postloader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")data = loader.load()# Splittext_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)splits = text_splitter.split_documents(data)# VectorDBembedding = OpenAIEmbeddings()vectordb = Chroma.from_documents(documents=splits, embedding=embedding) ``` #### Simple usage[\\u200b](#simple-usage) Specify the LLM to use for query generation, and the retriever will do the rest.``` from langchain_community.chat_models import ChatOpenAIfrom langchain.retrievers.multi_query import MultiQueryRetrieverquestion = \"What are the approaches to Task Decomposition? \"llm = ChatOpenAI(temperature=0)retriever_from_llm = MultiQueryRetriever.from_llm( retriever=vectordb.as_retriever(), llm=llm) ``` ``` # Set logging for the queriesimport logginglogging.basicConfig()logging.getLogger(\"langchain.retrievers.multi_query\").setLevel(logging.INFO) ``` ``` unique_docs = retriever_from_llm.get_relevant_documents(query=question)len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\\'1. How can Task Decomposition be approached? \\', \\'2. What are the different methods for Task Decomposition? \\', \\'3. What are the various approaches to decomposing tasks?\\'] 5 ``` #### Supplying your own prompt[\\u200b](#supplying-your-own-prompt) You can also supply a prompt along with an output parser to split the results into a list of queries. ``` from typing import Listfrom langchain.chains import LLMChainfrom pydantic import BaseModel, Fieldfrom langchain.prompts import PromptTemplatefrom langchain.output_parsers import PydanticOutputParser# Output parser will split the LLM result into a list of queriesclass LineList(BaseModel): # \"lines\" is the key (attribute name) of the parsed output lines: List[str] = Field(description=\"Lines of text\")class LineListOutputParser(PydanticOutputParser): def __init__(self) -> None: super().__init__(pydantic_object=LineList) def parse(self, text: str) -> LineList: lines = text.strip().split(\"\\\\n\") return LineList(lines=lines)output_parser = LineListOutputParser()QUERY_PROMPT = PromptTemplate( input_variables=[\"question\"], template=\"\"\"You are an AI language model assistant.Your task is to generate five different versions of the given user question to retrieve relevant documents from a vector database. By generating multiple perspectives on the user question, your goal is to help the user overcome some of the limitations of the distance-based similarity search. Provide these alternative questions separated by newlines. 
Original question: {question}\"\"\",)llm = ChatOpenAI(temperature=0)# Chainllm_chain = LLMChain(llm=llm, prompt=QUERY_PROMPT, output_parser=output_parser)# Other inputsquestion = \"What are the approaches to Task Decomposition?\" ``` ``` # Runretriever = MultiQueryRetriever( retriever=vectordb.as_retriever(), llm_chain=llm_chain, parser_key=\"lines\") # \"lines\" is the key (attribute name) of the parsed output# Resultsunique_docs = retriever.get_relevant_documents( query=\"What does the course say about regression? \")len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\"1. What is the course\\'s perspective on regression? \", \\'2. Can you provide information on regression as discussed in the course? \\', \\'3. How does the course cover the topic of regression? \\', \"4. What are the course\\'s teachings on regression? \", \\'5. In relation to the course, what is mentioned about regression?\\'] 11 ```', metadata={'title': 'MultiQueryRetriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever', 'id': 'f7c20633-6a60-4ca3-96b1-13fee66e321d'}),\n", + "[Document(page_content='Vector store-backed retriever | 🦜️🔗 Langchain\\n# Vector store-backed retriever A vector store retriever is a retriever that uses a vector store to retrieve documents. It is a lightweight wrapper around the vector store class to make it conform to the retriever interface. It uses the search methods implemented by a vector store, like similarity search and MMR, to query the texts in the vector store. Once you construct a vector store, it\\'s very easy to construct a retriever. Let\\'s walk through an example. ``` from langchain_community.document_loaders import TextLoaderloader = TextLoader(\\'../../../state_of_the_union.txt\\') ``` ``` from langchain.text_splitter import CharacterTextSplitterfrom langchain_community.vectorstores import FAISSfrom langchain_openai import OpenAIEmbeddingsdocuments = loader.load()text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)texts = text_splitter.split_documents(documents)embeddings = OpenAIEmbeddings()db = FAISS.from_documents(texts, embeddings) ``` ``` Exiting: Cleaning up .chroma directory ``` ``` retriever = db.as_retriever() ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Maximum marginal relevance retrieval[\\u200b](#maximum-marginal-relevance-retrieval) By default, the vector store retriever uses similarity search.If the underlying vector store supports maximum marginal relevance search, you can specify that as the search type. ``` retriever = db.as_retriever(search_type=\"mmr\") ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Similarity score threshold retrieval[\\u200b](#similarity-score-threshold-retrieval) You can also use a retrieval method that sets a similarity score threshold and only returns documents with a score above that threshold. 
``` retriever = db.as_retriever(search_type=\"similarity_score_threshold\", search_kwargs={\"score_threshold\": .5}) ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ## Specifying top k[\\u200b](#specifying-top-k) You can also specify search kwargs like `k` to use when doing retrieval.``` retriever = db.as_retriever(search_kwargs={\"k\": 1}) ``` ``` docs = retriever.get_relevant_documents(\"what did he say about ketanji brown jackson\") ``` ``` len(docs) ``` ``` 1 ```', metadata={'title': 'Vector store-backed retriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/vectorstore', 'id': 'c153ebd9-2611-4a43-9db6-daa1f5f214f6'}),\n", + " Document(page_content='MultiVector Retriever | 🦜️🔗 Langchain\\n# MultiVector Retriever It can often be beneficial to store multiple vectors per document. There are multiple use cases where this is beneficial. LangChain has a base `MultiVectorRetriever` which makes querying this type of setup easy. A lot of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`. The methods to create multiple vectors per document include: - Smaller chunks: split a document into smaller chunks, and embed those (this is ParentDocumentRetriever). - Summary: create a summary for each document, embed that along with (or instead of) the document. - Hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document. Note that this also enables another method of adding embeddings - manually. This is great because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control. ``` from langchain.retrievers.multi_vector import MultiVectorRetriever ``` ``` from langchain_community.vectorstores import Chromafrom langchain_openai import OpenAIEmbeddingsfrom langchain.text_splitter import RecursiveCharacterTextSplitterfrom langchain.storage import InMemoryStorefrom langchain_community.document_loaders import TextLoader ``` ``` loaders = [ TextLoader(\\'../../paul_graham_essay.txt\\'), TextLoader(\\'../../state_of_the_union.txt\\'),]docs = []for l in loaders: docs.extend(l.load())text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)docs = text_splitter.split_documents(docs) ``` ## Smaller chunks[\\u200b](#smaller-chunks) Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks.This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the `ParentDocumentRetriever` does. 
Here we show what is going on under the hood.``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)import uuiddoc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` # The splitter to use to create smaller chunkschild_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400) ``` ``` sub_docs = []for i, doc in enumerate(docs): _id = doc_ids[i] _sub_docs = child_text_splitter.split_documents([doc]) for _doc in _sub_docs: _doc.metadata[id_key] = _id sub_docs.extend(_sub_docs) ``` ``` retriever.vectorstore.add_documents(sub_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` # Vectorstore alone retrieves the small chunksretriever.vectorstore.similarity_search(\"justice breyer\")[0] ``` ``` Document(page_content=\\'Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court.Justice Breyer, thank you for your service. \\\\n\\\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\', metadata={\\'doc_id\\': \\'10e9cbc0-4ba5-4d79-a09b-c033d1ba7b01\\', \\'source\\': \\'../../state_of_the_union.txt\\'}) ``` ``` # Retriever returns larger chunkslen(retriever.get_relevant_documents(\"justice breyer\")[0].page_content) ``` ``` 9874 ``` ## Summary[\\u200b](#summary) Oftentimes a summary may be able to distill more accurately what a chunk is about, leading to better retrieval. 
Here we show how to create summaries, and then embed those.``` from langchain_openai import ChatOpenAIfrom langchain_core.prompts import ChatPromptTemplatefrom langchain_core.output_parsers import StrOutputParserimport uuidfrom langchain_core.documents import Document ``` ``` chain = ( {\"doc\": lambda x: x.page_content} | ChatPromptTemplate.from_template(\"Summarize the following document:\\\\n\\\\n{doc}\") | ChatOpenAI(max_retries=0) | StrOutputParser()) ``` ``` summaries = chain.batch(docs, {\"max_concurrency\": 5}) ``` ``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` summary_docs = [Document(page_content=s,metadata={id_key: doc_ids[i]}) for i, s in enumerate(summaries)] ``` ``` retriever.vectorstore.add_documents(summary_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` # # We can also add the original chunks to the vectorstore if we so want# for i, doc in enumerate(docs):# doc.metadata[id_key] = doc_ids[i]# retriever.vectorstore.add_documents(docs) ``` ``` sub_docs = vectorstore.similarity_search(\"justice breyer\") ``` ``` sub_docs[0] ``` ``` Document(page_content=\"The document is a transcript of a speech given by the President of the United States.The President discusses several important issues and initiatives, including the nomination of a Supreme Court Justice, border security and immigration reform, protecting women\\'s rights, advancing LGBTQ+ equality, bipartisan legislation, addressing the opioid epidemic and mental health, supporting veterans, investigating the health effects of burn pits on military personnel, ending cancer, and the strength and resilience of the American people. \", metadata={\\'doc_id\\': \\'79fa2e9f-28d9-4372-8af3-2caf4f1de312\\'}) ``` ``` retrieved_docs = retriever.get_relevant_documents(\"justice breyer\") ``` ``` len(retrieved_docs[0].page_content) ``` ``` 9194 ``` ## Hypothetical Queries[\\u200b](#hypothetical-queries) An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document.These questions can then be embedded ``` functions = [ { \"name\": \"hypothetical_questions\", \"description\": \"Generate hypothetical questions\", \"parameters\": { \"type\": \"object\", \"properties\": { \"questions\": { \"type\": \"array\", \"items\": { \"type\": \"string\" }, }, }, \"required\": [\"questions\"] } } ] ``` ``` from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParserchain = ( {\"doc\": lambda x: x.page_content} # Only asking for 3 hypothetical questions, but this could be adjusted | ChatPromptTemplate.from_template(\"Generate a list of 3 hypothetical questions that the below document could be used to answer:\\\\n\\\\n{doc}\") | ChatOpenAI(max_retries=0, model=\"gpt-4\").bind(functions=functions, function_call={\"name\": \"hypothetical_questions\"}) | JsonKeyOutputFunctionsParser(key_name=\"questions\")) ``` ``` chain.invoke(docs[0]) ``` ``` [\"What was the author\\'s initial impression of philosophy as a field of study, and how did it change when they got to college?\", \\'Why did the author decide to switch their focus to Artificial Intelligence (AI)? 
\\', \"What led to the author\\'s disillusionment with the field of AI as it was practiced at the time?\"]``` ``` hypothetical_questions = chain.batch(docs, {\"max_concurrency\": 5}) ``` ``` # The vectorstore to use to index the child chunksvectorstore = Chroma( collection_name=\"hypo-questions\", embedding_function=OpenAIEmbeddings())# The storage layer for the parent documentsstore = InMemoryStore()id_key = \"doc_id\"# The retriever (empty to start)retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key,)doc_ids = [str(uuid.uuid4()) for _ in docs] ``` ``` question_docs = []for i, question_list in enumerate(hypothetical_questions): question_docs.extend([Document(page_content=s,metadata={id_key: doc_ids[i]}) for s in question_list]) ``` ``` retriever.vectorstore.add_documents(question_docs)retriever.docstore.mset(list(zip(doc_ids, docs))) ``` ``` sub_docs = vectorstore.similarity_search(\"justice breyer\") ``` ``` sub_docs ``` ``` [Document(page_content=\"What is the President\\'s stance on immigration reform?\", metadata={\\'doc_id\\': \\'505d73e3-8350-46ec-a58e-3af032f04ab3\\'}), Document(page_content=\"What is the President\\'s stance on immigration reform? \", metadata={\\'doc_id\\': \\'1c9618f0-7660-4b4f-a37c-509cbbbf6dba\\'}), Document(page_content=\"What is the President\\'s stance on immigration reform? \", metadata={\\'doc_id\\': \\'82c08209-b904-46a8-9532-edd2380950b7\\'}), Document(page_content=\\'What measures is the President proposing to protect the rights of LGBTQ+ Americans? \\', metadata={\\'doc_id\\': \\'82c08209-b904-46a8-9532-edd2380950b7\\'})] ``` ``` retrieved_docs = retriever.get_relevant_documents(\"justice breyer\") ``` ``` len(retrieved_docs[0].page_content) ``` ``` 9194 ```', metadata={'title': 'MultiVector Retriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector', 'id': 'beec5531-16a7-453c-80ab-c5628e0236ce'}),\n", + " Document(page_content='MultiQueryRetriever | 🦜️🔗 Langchain\\n# MultiQueryRetriever Distance-based vector database retrieval embeds (represents) queries in high-dimensional space and finds similar embedded documents based on \"distance\". But, retrieval may produce different results with subtle changes in query wording or if the embeddings do not capture the semantics of the data well. Prompt engineering / tuning is sometimes done to manually address these problems, but can be tedious. The `MultiQueryRetriever` automates the process of prompt tuning by using an LLM to generate multiple queries from different perspectives for a given user input query. For each query, it retrieves a set of relevant documents and takes the unique union across all queries to get a larger set of potentially relevant documents. By generating multiple perspectives on the same question, the `MultiQueryRetriever` might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results.By generating multiple perspectives on the same question, the `MultiQueryRetriever` might be able to overcome some of the limitations of the distance-based retrieval and get a richer set of results. 
``` # Build a sample vectorDBfrom langchain_community.vectorstores import Chromafrom langchain_community.document_loaders import WebBaseLoaderfrom langchain_openai import OpenAIEmbeddingsfrom langchain.text_splitter import RecursiveCharacterTextSplitter# Load blog postloader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")data = loader.load()# Splittext_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)splits = text_splitter.split_documents(data)# VectorDBembedding = OpenAIEmbeddings()vectordb = Chroma.from_documents(documents=splits, embedding=embedding) ``` #### Simple usage[\\u200b](#simple-usage) Specify the LLM to use for query generation, and the retriever will do the rest.``` from langchain_openai import ChatOpenAIfrom langchain.retrievers.multi_query import MultiQueryRetrieverquestion = \"What are the approaches to Task Decomposition? \"llm = ChatOpenAI(temperature=0)retriever_from_llm = MultiQueryRetriever.from_llm( retriever=vectordb.as_retriever(), llm=llm) ``` ``` # Set logging for the queriesimport logginglogging.basicConfig()logging.getLogger(\"langchain.retrievers.multi_query\").setLevel(logging.INFO) ``` ``` unique_docs = retriever_from_llm.get_relevant_documents(query=question)len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\\'1. How can Task Decomposition be approached? \\', \\'2. What are the different methods for Task Decomposition? \\', \\'3. What are the various approaches to decomposing tasks?\\'] 5 ``` #### Supplying your own prompt[\\u200b](#supplying-your-own-prompt) You can also supply a prompt along with an output parser to split the results into a list of queries. ``` from typing import Listfrom langchain.chains import LLMChainfrom pydantic import BaseModel, Fieldfrom langchain.prompts import PromptTemplatefrom langchain.output_parsers import PydanticOutputParser# Output parser will split the LLM result into a list of queriesclass LineList(BaseModel): # \"lines\" is the key (attribute name) of the parsed output lines: List[str] = Field(description=\"Lines of text\")class LineListOutputParser(PydanticOutputParser): def __init__(self) -> None: super().__init__(pydantic_object=LineList) def parse(self, text: str) -> LineList: lines = text.strip().split(\"\\\\n\") return LineList(lines=lines)output_parser = LineListOutputParser()QUERY_PROMPT = PromptTemplate( input_variables=[\"question\"], template=\"\"\"You are an AI language model assistant.Your task is to generate five different versions of the given user question to retrieve relevant documents from a vector database. By generating multiple perspectives on the user question, your goal is to help the user overcome some of the limitations of the distance-based similarity search. Provide these alternative questions separated by newlines. Original question: {question}\"\"\",)llm = ChatOpenAI(temperature=0)# Chainllm_chain = LLMChain(llm=llm, prompt=QUERY_PROMPT, output_parser=output_parser)# Other inputsquestion = \"What are the approaches to Task Decomposition?\" ``` ``` # Runretriever = MultiQueryRetriever( retriever=vectordb.as_retriever(), llm_chain=llm_chain, parser_key=\"lines\") # \"lines\" is the key (attribute name) of the parsed output# Resultsunique_docs = retriever.get_relevant_documents( query=\"What does the course say about regression? 
\")len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\"1.\")len(unique_docs) ``` ``` INFO:langchain.retrievers.multi_query:Generated queries: [\"1. What is the course\\'s perspective on regression? \", \\'2. Can you provide information on regression as discussed in the course? \\', \\'3. How does the course cover the topic of regression? \\', \"4. What are the course\\'s teachings on regression? \", \\'5. In relation to the course, what is mentioned about regression?\\'] 11 ```', metadata={'title': 'MultiQueryRetriever | 🦜️🔗 Langchain', 'type': None, 'url': 'https://python.langchain.com/docs/modules/data_connection/retrievers/MultiQueryRetriever', 'id': 'f7c20633-6a60-4ca3-96b1-13fee66e321d'}),\n", " Document(page_content='langchain.retrievers.multi_vector.MultiVectorRetriever — 🦜🔗 LangChain 0.0.322\\n# `langchain.retrievers.multi_vector`.MultiVectorRetriever[¶](#langchain-retrievers-multi-vector-multivectorretriever) *class *langchain.retrievers.multi_vector.MultiVectorRetriever[[source]](../_modules/langchain/retrievers/multi_vector.html#MultiVectorRetriever)[¶](#langchain.retrievers.multi_vector.MultiVectorRetriever) # Examples using MultiVectorRetriever[¶](#langchain-retrievers-multi-vector-multivectorretriever) - [MultiVector Retriever](https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector)', metadata={'title': 'langchain.retrievers.multi_vector.MultiVectorRetriever — 🦜🔗 LangChain 0.0.322', 'type': None, 'url': 'https://api.python.langchain.com/en/latest/retrievers/langchain.retrievers.multi_vector.MultiVectorRetriever.html#langchain-retrievers-multi-vector-multivectorretriever', 'id': '1820c44d-7783-4846-a11c-106b18da015d'})]" ] }, @@ -223,10 +223,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema import StrOutputParser\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", @@ -275,7 +275,7 @@ "To create a FAISS vector store retriever that returns 10 documents per search query, you can use the following code:\n", "\n", "```python\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", "\n", "# Assuming you have already loaded and split your documents\n", diff --git a/docs/docs/integrations/retrievers/jaguar.ipynb b/docs/docs/integrations/retrievers/jaguar.ipynb index b7b1d8a00f..35b89bdb33 100644 --- a/docs/docs/integrations/retrievers/jaguar.ipynb +++ b/docs/docs/integrations/retrievers/jaguar.ipynb @@ -54,8 +54,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores.jaguar import Jaguar\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "\"\"\" \n", "Load a text file into a set of documents \n", @@ -147,8 +147,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores.jaguar import Jaguar\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "# Instantiate a Jaguar 
vector store object\n", "url = \"http://192.168.3.88:8080/fwww/\"\n", diff --git a/docs/docs/integrations/retrievers/kay.ipynb b/docs/docs/integrations/retrievers/kay.ipynb index b0f6237591..57dd0e2b9b 100644 --- a/docs/docs/integrations/retrievers/kay.ipynb +++ b/docs/docs/integrations/retrievers/kay.ipynb @@ -151,7 +151,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/docs/docs/integrations/retrievers/knn.ipynb b/docs/docs/integrations/retrievers/knn.ipynb index 89c31026d0..d1582f66ff 100644 --- a/docs/docs/integrations/retrievers/knn.ipynb +++ b/docs/docs/integrations/retrievers/knn.ipynb @@ -22,7 +22,7 @@ "outputs": [], "source": [ "from langchain.retrievers import KNNRetriever\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/retrievers/merger_retriever.ipynb b/docs/docs/integrations/retrievers/merger_retriever.ipynb index 8ae9ccbf19..85f3741e93 100644 --- a/docs/docs/integrations/retrievers/merger_retriever.ipynb +++ b/docs/docs/integrations/retrievers/merger_retriever.ipynb @@ -30,8 +30,9 @@ " EmbeddingsClusteringFilter,\n", " EmbeddingsRedundantFilter,\n", ")\n", - "from langchain_community.embeddings import HuggingFaceEmbeddings, OpenAIEmbeddings\n", + "from langchain_community.embeddings import HuggingFaceEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "# Get 3 diff embeddings.\n", "all_mini = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n", diff --git a/docs/docs/integrations/retrievers/outline.ipynb b/docs/docs/integrations/retrievers/outline.ipynb index e0360571ec..be0fd0dcba 100644 --- a/docs/docs/integrations/retrievers/outline.ipynb +++ b/docs/docs/integrations/retrievers/outline.ipynb @@ -89,9 +89,9 @@ { "data": { "text/plain": [ - "[Document(page_content='This walkthrough demonstrates how to use an agent optimized for conversation. Other agents are often optimized for using tools to figure out the best response, which is not ideal in a conversational setting where you may want the agent to be able to chat with the user as well.\\n\\nIf we compare it to the standard ReAct agent, the main difference is the prompt. 
We want it to be much more conversational.\\n\\nfrom langchain.agents import AgentType, Tool, initialize_agent\\n\\nfrom langchain_community.llms import OpenAI\\n\\nfrom langchain.memory import ConversationBufferMemory\\n\\nfrom langchain_community.utilities import SerpAPIWrapper\\n\\nsearch = SerpAPIWrapper() tools = \\\\[ Tool( name=\"Current Search\", func=search.run, description=\"useful for when you need to answer questions about current events or the current state of the world\", ), \\\\]\\n\\n\\\\\\nllm = OpenAI(temperature=0)\\n\\nUsing LCEL\\n\\nWe will first show how to create this agent using LCEL\\n\\nfrom langchain import hub\\n\\nfrom langchain.agents.format_scratchpad import format_log_to_str\\n\\nfrom langchain.agents.output_parsers import ReActSingleInputOutputParser\\n\\nfrom langchain.tools.render import render_text_description\\n\\nprompt = hub.pull(\"hwchase17/react-chat\")\\n\\nprompt = prompt.partial( tools=render_text_description(tools), tool_names=\", \".join(\\\\[[t.name](http://t.name) for t in tools\\\\]), )\\n\\nllm_with_stop = llm.bind(stop=\\\\[\"\\\\nObservation\"\\\\])\\n\\nagent = ( { \"input\": lambda x: x\\\\[\"input\"\\\\], \"agent_scratchpad\": lambda x: format_log_to_str(x\\\\[\"intermediate_steps\"\\\\]), \"chat_history\": lambda x: x\\\\[\"chat_history\"\\\\], } | prompt | llm_with_stop | ReActSingleInputOutputParser() )\\n\\nfrom langchain.agents import AgentExecutor\\n\\nmemory = ConversationBufferMemory(memory_key=\"chat_history\") agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)\\n\\nagent_executor.invoke({\"input\": \"hi, i am bob\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? No\\nFinal Answer: Hi Bob, nice to meet you! How can I help you today?\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'Hi Bob, nice to meet you! How can I help you today?\\'\\n\\nagent_executor.invoke({\"input\": \"whats my name?\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? No\\nFinal Answer: Your name is Bob.\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'Your name is Bob.\\'\\n\\nagent_executor.invoke({\"input\": \"what are some movies showing 9/21/2023?\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? Yes\\nAction: Current Search\\nAction Input: Movies showing 9/21/2023[\\'September 2023 Movies: The Creator • Dumb Money • Expend4bles • The Kill Room • The Inventor • The Equalizer 3 • PAW Patrol: The Mighty Movie, ...\\'] Do I need to use a tool? No\\nFinal Answer: According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\\'\\n\\n\\\\\\nUse the off-the-shelf agent\\n\\nWe can also create this agent using the off-the-shelf agent class\\n\\nagent_executor = initialize_agent( tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory, )\\n\\nUse a chat model\\n\\nWe can also use a chat model here. 
The main difference here is in the prompts used.\\n\\nfrom langchain import hub\\n\\nfrom langchain_community.chat_models import ChatOpenAI\\n\\nprompt = hub.pull(\"hwchase17/react-chat-json\") chat_model = ChatOpenAI(temperature=0, model=\"gpt-4\")\\n\\nprompt = prompt.partial( tools=render_text_description(tools), tool_names=\", \".join(\\\\[[t.name](http://t.name) for t in tools\\\\]), )\\n\\nchat_model_with_stop = chat_model.bind(stop=\\\\[\"\\\\nObservation\"\\\\])\\n\\nfrom langchain.agents.format_scratchpad import format_log_to_messages\\n\\nfrom langchain.agents.output_parsers import JSONAgentOutputParser\\n\\n# We need some extra steering, or the c', metadata={'title': 'Conversational', 'source': 'https://d01.getoutline.com/doc/conversational-B5dBkUgQ4b'}),\n", - " Document(page_content='Quickstart\\n\\nIn this quickstart we\\'ll show you how to:\\n\\nGet setup with LangChain, LangSmith and LangServe\\n\\nUse the most basic and common components of LangChain: prompt templates, models, and output parsers\\n\\nUse LangChain Expression Language, the protocol that LangChain is built on and which facilitates component chaining\\n\\nBuild a simple application with LangChain\\n\\nTrace your application with LangSmith\\n\\nServe your application with LangServe\\n\\nThat\\'s a fair amount to cover! Let\\'s dive in.\\n\\nSetup\\n\\nInstallation\\n\\nTo install LangChain run:\\n\\nPip\\n\\nConda\\n\\npip install langchain\\n\\nFor more details, see our Installation guide.\\n\\nEnvironment\\n\\nUsing LangChain will usually require integrations with one or more model providers, data stores, APIs, etc. For this example, we\\'ll use OpenAI\\'s model APIs.\\n\\nFirst we\\'ll need to install their Python package:\\n\\npip install openai\\n\\nAccessing the API requires an API key, which you can get by creating an account and heading here. Once we have a key we\\'ll want to set it as an environment variable by running:\\n\\nexport OPENAI_API_KEY=\"...\"\\n\\nIf you\\'d prefer not to set an environment variable you can pass the key in directly via the openai_api_key named parameter when initiating the OpenAI LLM class:\\n\\nfrom langchain_community.chat_models import ChatOpenAI\\n\\nllm = ChatOpenAI(openai_api_key=\"...\")\\n\\nLangSmith\\n\\nMany of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with LangSmith.\\n\\nNote that LangSmith is not needed, but it is helpful. If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\\n\\nexport LANGCHAIN_TRACING_V2=\"true\" export LANGCHAIN_API_KEY=...\\n\\nLangServe\\n\\nLangServe helps developers deploy LangChain chains as a REST API. You do not need to use LangServe to use LangChain, but in this guide we\\'ll show how you can deploy your app with LangServe.\\n\\nInstall with:\\n\\npip install \"langserve\\\\[all\\\\]\"\\n\\nBuilding with LangChain\\n\\nLangChain provides many modules that can be used to build language model applications. Modules can be used as standalones in simple applications and they can be composed for more complex use cases. 
Composition is powered by LangChain Expression Language (LCEL), which defines a unified Runnable interface that many modules implement, making it possible to seamlessly chain components.\\n\\nThe simplest and most common chain contains three things:\\n\\nLLM/Chat Model: The language model is the core reasoning engine here. In order to work with LangChain, you need to understand the different types of language models and how to work with them. Prompt Template: This provides instructions to the language model. This controls what the language model outputs, so understanding how to construct prompts and different prompting strategies is crucial. Output Parser: These translate the raw response from the language model to a more workable format, making it easy to use the output downstream. In this guide we\\'ll cover those three components individually, and then go over how to combine them. Understanding these concepts will set you up well for being able to use and customize LangChain applications. Most LangChain applications allow you to configure the model and/or the prompt, so knowing how to take advantage of this will be a big enabler.\\n\\nLLM / Chat Model\\n\\nThere are two types of language models:\\n\\nLLM: underlying model takes a string as input and returns a string\\n\\nChatModel: underlying model takes a list of messages as input and returns a message\\n\\nStrings are simple, but what exactly are messages? The base message interface is defined by BaseMessage, which has two required attributes:\\n\\ncontent: The content of the message. Usually a string. role: The entity from which the BaseMessage is coming. LangChain provides several ob', metadata={'title': 'Quick Start', 'source': 'https://d01.getoutline.com/doc/quick-start-jGuGGGOTuL'}),\n", - " Document(page_content='This walkthrough showcases using an agent to implement the [ReAct](https://react-lm.github.io/) logic.\\n\\n```javascript\\nfrom langchain.agents import AgentType, initialize_agent, load_tools\\nfrom langchain_community.llms import OpenAI\\n```\\n\\nFirst, let\\'s load the language model we\\'re going to use to control the agent.\\n\\n```javascript\\nllm = OpenAI(temperature=0)\\n```\\n\\nNext, let\\'s load some tools to use. 
Note that the llm-math tool uses an LLM, so we need to pass that in.\\n\\n```javascript\\ntools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\\n```\\n\\n## Using LCEL[\\u200b](https://python.langchain.com/docs/modules/agents/agent_types/react#using-lcel \"Direct link to Using LCEL\")\\n\\nWe will first show how to create the agent using LCEL\\n\\n```javascript\\nfrom langchain import hub\\nfrom langchain.agents.format_scratchpad import format_log_to_str\\nfrom langchain.agents.output_parsers import ReActSingleInputOutputParser\\nfrom langchain.tools.render import render_text_description\\n```\\n\\n```javascript\\nprompt = hub.pull(\"hwchase17/react\")\\nprompt = prompt.partial(\\n tools=render_text_description(tools),\\n tool_names=\", \".join([t.name for t in tools]),\\n)\\n```\\n\\n```javascript\\nllm_with_stop = llm.bind(stop=[\"\\\\nObservation\"])\\n```\\n\\n```javascript\\nagent = (\\n {\\n \"input\": lambda x: x[\"input\"],\\n \"agent_scratchpad\": lambda x: format_log_to_str(x[\"intermediate_steps\"]),\\n }\\n | prompt\\n | llm_with_stop\\n | ReActSingleInputOutputParser()\\n)\\n```\\n\\n```javascript\\nfrom langchain.agents import AgentExecutor\\n```\\n\\n```javascript\\nagent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)\\n```\\n\\n```javascript\\nagent_executor.invoke(\\n {\\n \"input\": \"Who is Leo DiCaprio\\'s girlfriend? What is her current age raised to the 0.43 power?\"\\n }\\n)\\n```\\n\\n```javascript\\n \\n \\n > Entering new AgentExecutor chain...\\n I need to find out who Leo DiCaprio\\'s girlfriend is and then calculate her age raised to the 0.43 power.\\n Action: Search\\n Action Input: \"Leo DiCaprio girlfriend\"model Vittoria Ceretti I need to find out Vittoria Ceretti\\'s age\\n Action: Search\\n Action Input: \"Vittoria Ceretti age\"25 years I need to calculate 25 raised to the 0.43 power\\n Action: Calculator\\n Action Input: 25^0.43Answer: 3.991298452658078 I now know the final answer\\n Final Answer: Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\\n \\n > Finished chain.\\n\\n\\n\\n\\n\\n {\\'input\\': \"Who is Leo DiCaprio\\'s girlfriend? What is her current age raised to the 0.43 power?\",\\n \\'output\\': \"Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\"}\\n```\\n\\n## Using ZeroShotReactAgent[\\u200b](https://python.langchain.com/docs/modules/agents/agent_types/react#using-zeroshotreactagent \"Direct link to Using ZeroShotReactAgent\")\\n\\nWe will now show how to use the agent with an off-the-shelf agent implementation\\n\\n```javascript\\nagent_executor = initialize_agent(\\n tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\\n)\\n```\\n\\n```javascript\\nagent_executor.invoke(\\n {\\n \"input\": \"Who is Leo DiCaprio\\'s girlfriend? 
What is her current age raised to the 0.43 power?\"\\n }\\n)\\n```\\n\\n```javascript\\n \\n \\n > Entering new AgentExecutor chain...\\n I need to find out who Leo DiCaprio\\'s girlfriend is and then calculate her age raised to the 0.43 power.\\n Action: Search\\n Action Input: \"Leo DiCaprio girlfriend\"\\n Observation: model Vittoria Ceretti\\n Thought: I need to find out Vittoria Ceretti\\'s age\\n Action: Search\\n Action Input: \"Vittoria Ceretti age\"\\n Observation: 25 years\\n Thought: I need to calculate 25 raised to the 0.43 power\\n Action: Calculator\\n Action Input: 25^0.43\\n Observation: Answer: 3.991298452658078\\n Thought: I now know the final answer\\n Final Answer: Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\\n \\n > Finished chain.\\n\\n\\n\\n\\n\\n {\\'input\\': \"Who is L', metadata={'title': 'ReAct', 'source': 'https://d01.getoutline.com/doc/react-d6rxRS1MHk'})]" + "[Document(page_content='This walkthrough demonstrates how to use an agent optimized for conversation. Other agents are often optimized for using tools to figure out the best response, which is not ideal in a conversational setting where you may want the agent to be able to chat with the user as well.\\n\\nIf we compare it to the standard ReAct agent, the main difference is the prompt. We want it to be much more conversational.\\n\\nfrom langchain.agents import AgentType, Tool, initialize_agent\\n\\nfrom langchain_openai import OpenAI\\n\\nfrom langchain.memory import ConversationBufferMemory\\n\\nfrom langchain_community.utilities import SerpAPIWrapper\\n\\nsearch = SerpAPIWrapper() tools = \\\\[ Tool( name=\"Current Search\", func=search.run, description=\"useful for when you need to answer questions about current events or the current state of the world\", ), \\\\]\\n\\n\\\\\\nllm = OpenAI(temperature=0)\\n\\nUsing LCEL\\n\\nWe will first show how to create this agent using LCEL\\n\\nfrom langchain import hub\\n\\nfrom langchain.agents.format_scratchpad import format_log_to_str\\n\\nfrom langchain.agents.output_parsers import ReActSingleInputOutputParser\\n\\nfrom langchain.tools.render import render_text_description\\n\\nprompt = hub.pull(\"hwchase17/react-chat\")\\n\\nprompt = prompt.partial( tools=render_text_description(tools), tool_names=\", \".join(\\\\[[t.name](http://t.name) for t in tools\\\\]), )\\n\\nllm_with_stop = llm.bind(stop=\\\\[\"\\\\nObservation\"\\\\])\\n\\nagent = ( { \"input\": lambda x: x\\\\[\"input\"\\\\], \"agent_scratchpad\": lambda x: format_log_to_str(x\\\\[\"intermediate_steps\"\\\\]), \"chat_history\": lambda x: x\\\\[\"chat_history\"\\\\], } | prompt | llm_with_stop | ReActSingleInputOutputParser() )\\n\\nfrom langchain.agents import AgentExecutor\\n\\nmemory = ConversationBufferMemory(memory_key=\"chat_history\") agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)\\n\\nagent_executor.invoke({\"input\": \"hi, i am bob\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? No\\nFinal Answer: Hi Bob, nice to meet you! How can I help you today?\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'Hi Bob, nice to meet you! How can I help you today?\\'\\n\\nagent_executor.invoke({\"input\": \"whats my name?\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? 
No\\nFinal Answer: Your name is Bob.\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'Your name is Bob.\\'\\n\\nagent_executor.invoke({\"input\": \"what are some movies showing 9/21/2023?\"})\\\\[\"output\"\\\\]\\n\\n```\\n> Entering new AgentExecutor chain...\\n\\nThought: Do I need to use a tool? Yes\\nAction: Current Search\\nAction Input: Movies showing 9/21/2023[\\'September 2023 Movies: The Creator • Dumb Money • Expend4bles • The Kill Room • The Inventor • The Equalizer 3 • PAW Patrol: The Mighty Movie, ...\\'] Do I need to use a tool? No\\nFinal Answer: According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\\n\\n> Finished chain.\\n```\\n\\n\\\\\\n\\'According to current search, some movies showing on 9/21/2023 are The Creator, Dumb Money, Expend4bles, The Kill Room, The Inventor, The Equalizer 3, and PAW Patrol: The Mighty Movie.\\'\\n\\n\\\\\\nUse the off-the-shelf agent\\n\\nWe can also create this agent using the off-the-shelf agent class\\n\\nagent_executor = initialize_agent( tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory, )\\n\\nUse a chat model\\n\\nWe can also use a chat model here. The main difference here is in the prompts used.\\n\\nfrom langchain import hub\\n\\nfrom langchain_openai import ChatOpenAI\\n\\nprompt = hub.pull(\"hwchase17/react-chat-json\") chat_model = ChatOpenAI(temperature=0, model=\"gpt-4\")\\n\\nprompt = prompt.partial( tools=render_text_description(tools), tool_names=\", \".join(\\\\[[t.name](http://t.name) for t in tools\\\\]), )\\n\\nchat_model_with_stop = chat_model.bind(stop=\\\\[\"\\\\nObservation\"\\\\])\\n\\nfrom langchain.agents.format_scratchpad import format_log_to_messages\\n\\nfrom langchain.agents.output_parsers import JSONAgentOutputParser\\n\\n# We need some extra steering, or the c', metadata={'title': 'Conversational', 'source': 'https://d01.getoutline.com/doc/conversational-B5dBkUgQ4b'}),\n", + " Document(page_content='Quickstart\\n\\nIn this quickstart we\\'ll show you how to:\\n\\nGet setup with LangChain, LangSmith and LangServe\\n\\nUse the most basic and common components of LangChain: prompt templates, models, and output parsers\\n\\nUse LangChain Expression Language, the protocol that LangChain is built on and which facilitates component chaining\\n\\nBuild a simple application with LangChain\\n\\nTrace your application with LangSmith\\n\\nServe your application with LangServe\\n\\nThat\\'s a fair amount to cover! Let\\'s dive in.\\n\\nSetup\\n\\nInstallation\\n\\nTo install LangChain run:\\n\\nPip\\n\\nConda\\n\\npip install langchain\\n\\nFor more details, see our Installation guide.\\n\\nEnvironment\\n\\nUsing LangChain will usually require integrations with one or more model providers, data stores, APIs, etc. For this example, we\\'ll use OpenAI\\'s model APIs.\\n\\nFirst we\\'ll need to install their Python package:\\n\\npip install openai\\n\\nAccessing the API requires an API key, which you can get by creating an account and heading here. 
Once we have a key we\\'ll want to set it as an environment variable by running:\\n\\nexport OPENAI_API_KEY=\"...\"\\n\\nIf you\\'d prefer not to set an environment variable you can pass the key in directly via the openai_api_key named parameter when initiating the OpenAI LLM class:\\n\\nfrom langchain_openai import ChatOpenAI\\n\\nllm = ChatOpenAI(openai_api_key=\"...\")\\n\\nLangSmith\\n\\nMany of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with LangSmith.\\n\\nNote that LangSmith is not needed, but it is helpful. If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\\n\\nexport LANGCHAIN_TRACING_V2=\"true\" export LANGCHAIN_API_KEY=...\\n\\nLangServe\\n\\nLangServe helps developers deploy LangChain chains as a REST API. You do not need to use LangServe to use LangChain, but in this guide we\\'ll show how you can deploy your app with LangServe.\\n\\nInstall with:\\n\\npip install \"langserve\\\\[all\\\\]\"\\n\\nBuilding with LangChain\\n\\nLangChain provides many modules that can be used to build language model applications. Modules can be used as standalones in simple applications and they can be composed for more complex use cases. Composition is powered by LangChain Expression Language (LCEL), which defines a unified Runnable interface that many modules implement, making it possible to seamlessly chain components.\\n\\nThe simplest and most common chain contains three things:\\n\\nLLM/Chat Model: The language model is the core reasoning engine here. In order to work with LangChain, you need to understand the different types of language models and how to work with them. Prompt Template: This provides instructions to the language model. This controls what the language model outputs, so understanding how to construct prompts and different prompting strategies is crucial. Output Parser: These translate the raw response from the language model to a more workable format, making it easy to use the output downstream. In this guide we\\'ll cover those three components individually, and then go over how to combine them. Understanding these concepts will set you up well for being able to use and customize LangChain applications. Most LangChain applications allow you to configure the model and/or the prompt, so knowing how to take advantage of this will be a big enabler.\\n\\nLLM / Chat Model\\n\\nThere are two types of language models:\\n\\nLLM: underlying model takes a string as input and returns a string\\n\\nChatModel: underlying model takes a list of messages as input and returns a message\\n\\nStrings are simple, but what exactly are messages? The base message interface is defined by BaseMessage, which has two required attributes:\\n\\ncontent: The content of the message. Usually a string. role: The entity from which the BaseMessage is coming. 
LangChain provides several ob', metadata={'title': 'Quick Start', 'source': 'https://d01.getoutline.com/doc/quick-start-jGuGGGOTuL'}),\n", + " Document(page_content='This walkthrough showcases using an agent to implement the [ReAct](https://react-lm.github.io/) logic.\\n\\n```javascript\\nfrom langchain.agents import AgentType, initialize_agent, load_tools\\nfrom langchain_openai import OpenAI\\n```\\n\\nFirst, let\\'s load the language model we\\'re going to use to control the agent.\\n\\n```javascript\\nllm = OpenAI(temperature=0)\\n```\\n\\nNext, let\\'s load some tools to use. Note that the llm-math tool uses an LLM, so we need to pass that in.\\n\\n```javascript\\ntools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\\n```\\n\\n## Using LCEL[\\u200b](https://python.langchain.com/docs/modules/agents/agent_types/react#using-lcel \"Direct link to Using LCEL\")\\n\\nWe will first show how to create the agent using LCEL\\n\\n```javascript\\nfrom langchain import hub\\nfrom langchain.agents.format_scratchpad import format_log_to_str\\nfrom langchain.agents.output_parsers import ReActSingleInputOutputParser\\nfrom langchain.tools.render import render_text_description\\n```\\n\\n```javascript\\nprompt = hub.pull(\"hwchase17/react\")\\nprompt = prompt.partial(\\n tools=render_text_description(tools),\\n tool_names=\", \".join([t.name for t in tools]),\\n)\\n```\\n\\n```javascript\\nllm_with_stop = llm.bind(stop=[\"\\\\nObservation\"])\\n```\\n\\n```javascript\\nagent = (\\n {\\n \"input\": lambda x: x[\"input\"],\\n \"agent_scratchpad\": lambda x: format_log_to_str(x[\"intermediate_steps\"]),\\n }\\n | prompt\\n | llm_with_stop\\n | ReActSingleInputOutputParser()\\n)\\n```\\n\\n```javascript\\nfrom langchain.agents import AgentExecutor\\n```\\n\\n```javascript\\nagent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)\\n```\\n\\n```javascript\\nagent_executor.invoke(\\n {\\n \"input\": \"Who is Leo DiCaprio\\'s girlfriend? What is her current age raised to the 0.43 power?\"\\n }\\n)\\n```\\n\\n```javascript\\n \\n \\n > Entering new AgentExecutor chain...\\n I need to find out who Leo DiCaprio\\'s girlfriend is and then calculate her age raised to the 0.43 power.\\n Action: Search\\n Action Input: \"Leo DiCaprio girlfriend\"model Vittoria Ceretti I need to find out Vittoria Ceretti\\'s age\\n Action: Search\\n Action Input: \"Vittoria Ceretti age\"25 years I need to calculate 25 raised to the 0.43 power\\n Action: Calculator\\n Action Input: 25^0.43Answer: 3.991298452658078 I now know the final answer\\n Final Answer: Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\\n \\n > Finished chain.\\n\\n\\n\\n\\n\\n {\\'input\\': \"Who is Leo DiCaprio\\'s girlfriend? What is her current age raised to the 0.43 power?\",\\n \\'output\\': \"Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\"}\\n```\\n\\n## Using ZeroShotReactAgent[\\u200b](https://python.langchain.com/docs/modules/agents/agent_types/react#using-zeroshotreactagent \"Direct link to Using ZeroShotReactAgent\")\\n\\nWe will now show how to use the agent with an off-the-shelf agent implementation\\n\\n```javascript\\nagent_executor = initialize_agent(\\n tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\\n)\\n```\\n\\n```javascript\\nagent_executor.invoke(\\n {\\n \"input\": \"Who is Leo DiCaprio\\'s girlfriend? 
What is her current age raised to the 0.43 power?\"\\n }\\n)\\n```\\n\\n```javascript\\n \\n \\n > Entering new AgentExecutor chain...\\n I need to find out who Leo DiCaprio\\'s girlfriend is and then calculate her age raised to the 0.43 power.\\n Action: Search\\n Action Input: \"Leo DiCaprio girlfriend\"\\n Observation: model Vittoria Ceretti\\n Thought: I need to find out Vittoria Ceretti\\'s age\\n Action: Search\\n Action Input: \"Vittoria Ceretti age\"\\n Observation: 25 years\\n Thought: I need to calculate 25 raised to the 0.43 power\\n Action: Calculator\\n Action Input: 25^0.43\\n Observation: Answer: 3.991298452658078\\n Thought: I now know the final answer\\n Final Answer: Leo DiCaprio\\'s girlfriend is Vittoria Ceretti and her current age raised to the 0.43 power is 3.991298452658078.\\n \\n > Finished chain.\\n\\n\\n\\n\\n\\n {\\'input\\': \"Who is L', metadata={'title': 'ReAct', 'source': 'https://d01.getoutline.com/doc/react-d6rxRS1MHk'})]" ] }, "execution_count": 4, @@ -129,7 +129,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb b/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb index 32937b7962..39ba19b841 100644 --- a/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb +++ b/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb @@ -180,7 +180,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/docs/docs/integrations/retrievers/ragatouille.ipynb b/docs/docs/integrations/retrievers/ragatouille.ipynb index 9b4240b63a..1747a4479f 100644 --- a/docs/docs/integrations/retrievers/ragatouille.ipynb +++ b/docs/docs/integrations/retrievers/ragatouille.ipynb @@ -413,8 +413,8 @@ "source": [ "from langchain.chains import create_retrieval_chain\n", "from langchain.chains.combine_documents import create_stuff_documents_chain\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_template(\n", " \"\"\"Answer the following question based only on the provided context:\n", diff --git a/docs/docs/integrations/retrievers/re_phrase.ipynb b/docs/docs/integrations/retrievers/re_phrase.ipynb index 7c43568bb6..630ac943b8 100644 --- a/docs/docs/integrations/retrievers/re_phrase.ipynb +++ b/docs/docs/integrations/retrievers/re_phrase.ipynb @@ -29,10 +29,9 @@ "\n", "from langchain.retrievers import RePhraseQueryRetriever\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Chroma" + "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/retrievers/sec_filings.ipynb b/docs/docs/integrations/retrievers/sec_filings.ipynb 
index 43958a3286..e46fffd921 100644 --- a/docs/docs/integrations/retrievers/sec_filings.ipynb +++ b/docs/docs/integrations/retrievers/sec_filings.ipynb @@ -79,7 +79,7 @@ "source": [ "from langchain.chains import ConversationalRetrievalChain\n", "from langchain.retrievers import KayAiRetriever\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n", "retriever = KayAiRetriever.create(\n", diff --git a/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb index e222082a59..eb49b42dfa 100644 --- a/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/activeloop_deeplake_self_query.ipynb @@ -84,8 +84,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import DeepLake\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -194,7 +194,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb index 59728052e9..3d1e2f9891 100644 --- a/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/chroma_self_query.ipynb @@ -88,8 +88,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -165,7 +165,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb index 9cb67704f4..f86e21148c 100644 --- a/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/elasticsearch_self_query.ipynb @@ -61,8 +61,8 @@ "import os\n", "\n", "from langchain.schema import Document\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import ElasticsearchStore\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "\n", @@ -137,7 +137,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "metadata_field_info = 
[\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb index 31fe42b7ce..94cc863f59 100644 --- a/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/milvus_self_query.ipynb @@ -68,8 +68,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Milvus\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -130,7 +130,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/mongodb_atlas.ipynb b/docs/docs/integrations/retrievers/self_query/mongodb_atlas.ipynb index 3119494393..284ce72a47 100644 --- a/docs/docs/integrations/retrievers/self_query/mongodb_atlas.ipynb +++ b/docs/docs/integrations/retrievers/self_query/mongodb_atlas.ipynb @@ -58,8 +58,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import MongoDBAtlasVectorSearch\n", + "from langchain_openai import OpenAIEmbeddings\n", "from pymongo import MongoClient\n", "\n", "CONNECTION_STRING = \"Use your MongoDB Atlas connection string\"\n", @@ -162,7 +162,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb index 43ee52cb2c..ace025c285 100644 --- a/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/myscale_self_query.ipynb @@ -79,8 +79,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import MyScale\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -162,7 +162,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb index 9a8f0416cd..f470f70fd4 100644 --- a/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/opensearch_self_query.ipynb @@ -60,8 +60,8 @@ "import os\n", "\n", "from langchain.schema import Document\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", 
"from langchain_community.vectorstores import OpenSearchVectorSearch\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "\n", @@ -136,7 +136,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/pinecone.ipynb b/docs/docs/integrations/retrievers/self_query/pinecone.ipynb index 725df33737..52e02bc7c4 100644 --- a/docs/docs/integrations/retrievers/self_query/pinecone.ipynb +++ b/docs/docs/integrations/retrievers/self_query/pinecone.ipynb @@ -78,8 +78,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Pinecone\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()\n", "# create new index\n", @@ -147,7 +147,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb index 5e70a1db75..146364950f 100644 --- a/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/qdrant_self_query.ipynb @@ -71,8 +71,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Qdrant\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -146,7 +146,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb index 26299da969..031059d3ab 100644 --- a/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/redis_self_query.ipynb @@ -68,8 +68,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Redis\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -195,7 +195,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb 
b/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb index 82405ae695..d345eb7f73 100644 --- a/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/supabase_self_query.ipynb @@ -218,8 +218,8 @@ "import os\n", "\n", "from langchain.schema import Document\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import SupabaseVectorStore\n", + "from langchain_openai import OpenAIEmbeddings\n", "from supabase.client import Client, create_client\n", "\n", "supabase_url = os.environ.get(\"SUPABASE_URL\")\n", @@ -307,7 +307,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb index 64d939b6a0..7a095eda27 100644 --- a/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/timescalevector_self_query.ipynb @@ -144,8 +144,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores.timescalevector import TimescaleVector\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -247,7 +247,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "# Give LLM info about the metadata fields\n", "metadata_field_info = [\n", diff --git a/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb index ef0c1f0b30..d8171be1ca 100644 --- a/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/vectara_self_query.ipynb @@ -93,8 +93,8 @@ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.embeddings import FakeEmbeddings\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.vectorstores import Vectara" + "from langchain_community.vectorstores import Vectara\n", + "from langchain_openai import OpenAI" ] }, { @@ -167,7 +167,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb b/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb index d0fcb4cdf1..cc38b2cc46 100644 --- a/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb +++ b/docs/docs/integrations/retrievers/self_query/weaviate_self_query.ipynb @@ -46,8 +46,8 @@ "outputs": [], "source": [ "from 
langchain.schema import Document\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Weaviate\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] @@ -117,7 +117,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/integrations/retrievers/singlestoredb.ipynb b/docs/docs/integrations/retrievers/singlestoredb.ipynb index a2949b8a4f..a8d7bb692f 100644 --- a/docs/docs/integrations/retrievers/singlestoredb.ipynb +++ b/docs/docs/integrations/retrievers/singlestoredb.ipynb @@ -52,8 +52,8 @@ "\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import SingleStoreDB\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/retrievers/svm.ipynb b/docs/docs/integrations/retrievers/svm.ipynb index 9704aba73e..cac73ed81c 100644 --- a/docs/docs/integrations/retrievers/svm.ipynb +++ b/docs/docs/integrations/retrievers/svm.ipynb @@ -81,7 +81,7 @@ "outputs": [], "source": [ "from langchain.retrievers import SVMRetriever\n", - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/retrievers/tavily.ipynb b/docs/docs/integrations/retrievers/tavily.ipynb index 99d4699d28..9f7896b818 100644 --- a/docs/docs/integrations/retrievers/tavily.ipynb +++ b/docs/docs/integrations/retrievers/tavily.ipynb @@ -99,10 +99,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_template(\n", " \"\"\"Answer the question based only on the context provided.\n", diff --git a/docs/docs/integrations/retrievers/wikipedia.ipynb b/docs/docs/integrations/retrievers/wikipedia.ipynb index 8efd5809d6..0a4cad6db9 100644 --- a/docs/docs/integrations/retrievers/wikipedia.ipynb +++ b/docs/docs/integrations/retrievers/wikipedia.ipynb @@ -200,7 +200,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\") # switch to 'gpt-4'\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/docs/docs/integrations/retrievers/you-retriever.ipynb b/docs/docs/integrations/retrievers/you-retriever.ipynb index 36d3137826..447111de5a 100644 --- a/docs/docs/integrations/retrievers/you-retriever.ipynb +++ b/docs/docs/integrations/retrievers/you-retriever.ipynb @@ -20,7 +20,7 @@ "source": [ "from langchain.chains import RetrievalQA\n", "from langchain.retrievers.you_retriever import 
YouRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "yr = YouRetriever()\n", "qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type=\"map_reduce\", retriever=yr)" diff --git a/docs/docs/integrations/text_embedding/azureopenai.ipynb b/docs/docs/integrations/text_embedding/azureopenai.ipynb index 162eee99dd..4f9cb5e824 100644 --- a/docs/docs/integrations/text_embedding/azureopenai.ipynb +++ b/docs/docs/integrations/text_embedding/azureopenai.ipynb @@ -40,7 +40,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import AzureOpenAIEmbeddings\n", + "from langchain_openai import AzureOpenAIEmbeddings\n", "\n", "embeddings = AzureOpenAIEmbeddings(\n", " azure_deployment=\"\",\n", @@ -134,7 +134,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings(deployment=\"your-embeddings-deployment-name\")" ] diff --git a/docs/docs/integrations/text_embedding/openai.ipynb b/docs/docs/integrations/text_embedding/openai.ipynb index 6410a61bb7..d44de992a6 100644 --- a/docs/docs/integrations/text_embedding/openai.ipynb +++ b/docs/docs/integrations/text_embedding/openai.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings" + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -125,7 +125,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings.openai import OpenAIEmbeddings" + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/toolkits/ainetwork.ipynb b/docs/docs/integrations/toolkits/ainetwork.ipynb index 30ea069e2d..bc94c11065 100644 --- a/docs/docs/integrations/toolkits/ainetwork.ipynb +++ b/docs/docs/integrations/toolkits/ainetwork.ipynb @@ -131,7 +131,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0)\n", "agent = initialize_agent(\n", diff --git a/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb b/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb index 737f24d53d..a916301f33 100644 --- a/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb +++ b/docs/docs/integrations/toolkits/airbyte_structured_qa.ipynb @@ -29,8 +29,8 @@ "\n", "import pandas as pd\n", "from langchain.agents import AgentType, create_pandas_dataframe_agent\n", - "from langchain_community.chat_models.openai import ChatOpenAI\n", "from langchain_community.document_loaders.airbyte import AirbyteStripeLoader\n", + "from langchain_openai import ChatOpenAI\n", "\n", "stream_name = \"customers\"\n", "config = {\n", diff --git a/docs/docs/integrations/toolkits/amadeus.ipynb b/docs/docs/integrations/toolkits/amadeus.ipynb index cba9f016d1..e73fa6bf94 100644 --- a/docs/docs/integrations/toolkits/amadeus.ipynb +++ b/docs/docs/integrations/toolkits/amadeus.ipynb @@ -87,7 +87,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/azure_cognitive_services.ipynb b/docs/docs/integrations/toolkits/azure_cognitive_services.ipynb index d0ac001ed1..2414bb5372 100644 --- 
a/docs/docs/integrations/toolkits/azure_cognitive_services.ipynb +++ b/docs/docs/integrations/toolkits/azure_cognitive_services.ipynb @@ -108,7 +108,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/clickup.ipynb b/docs/docs/integrations/toolkits/clickup.ipynb index 700c5ba42f..fa55dfa63e 100644 --- a/docs/docs/integrations/toolkits/clickup.ipynb +++ b/docs/docs/integrations/toolkits/clickup.ipynb @@ -23,8 +23,8 @@ "\n", "from langchain.agents import AgentType, initialize_agent\n", "from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.utilities.clickup import ClickupAPIWrapper" + "from langchain_community.utilities.clickup import ClickupAPIWrapper\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/csv.ipynb b/docs/docs/integrations/toolkits/csv.ipynb index 7e766e2df6..0046ab35ac 100644 --- a/docs/docs/integrations/toolkits/csv.ipynb +++ b/docs/docs/integrations/toolkits/csv.ipynb @@ -21,9 +21,8 @@ "outputs": [], "source": [ "from langchain.agents.agent_types import AgentType\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_experimental.agents.agent_toolkits import create_csv_agent" + "from langchain_experimental.agents.agent_toolkits import create_csv_agent\n", + "from langchain_openai import ChatOpenAI, OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb b/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb index 51966e24bc..ad8634a07d 100644 --- a/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb +++ b/docs/docs/integrations/toolkits/document_comparison_toolkit.ipynb @@ -22,10 +22,9 @@ "from langchain.agents import Tool\n", "from langchain.chains import RetrievalQA\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.document_loaders import PyPDFLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "from pydantic import BaseModel, Field" ] }, diff --git a/docs/docs/integrations/toolkits/github.ipynb b/docs/docs/integrations/toolkits/github.ipynb index fc0af1a813..6890cee43c 100644 --- a/docs/docs/integrations/toolkits/github.ipynb +++ b/docs/docs/integrations/toolkits/github.ipynb @@ -108,8 +108,8 @@ "\n", "from langchain.agents import AgentType, initialize_agent\n", "from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.utilities.github import GitHubAPIWrapper" + "from langchain_community.utilities.github import GitHubAPIWrapper\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -703,7 +703,7 @@ "source": [ "from langchain.agents import Tool\n", "from langchain.tools import DuckDuckGoSearchRun\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "tools = []\n", "unwanted_tools = [\"Get Issue\", \"Delete File\", \"Create File\", \"Create Pull Request\"]\n", diff --git 
a/docs/docs/integrations/toolkits/gitlab.ipynb b/docs/docs/integrations/toolkits/gitlab.ipynb index 8d8d9afde3..3b1a3c5f85 100644 --- a/docs/docs/integrations/toolkits/gitlab.ipynb +++ b/docs/docs/integrations/toolkits/gitlab.ipynb @@ -102,8 +102,8 @@ "\n", "from langchain.agents import AgentType, initialize_agent\n", "from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.utilities.gitlab import GitLabAPIWrapper" + "from langchain_community.utilities.gitlab import GitLabAPIWrapper\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/gmail.ipynb b/docs/docs/integrations/toolkits/gmail.ipynb index 3880711031..870a5d6260 100644 --- a/docs/docs/integrations/toolkits/gmail.ipynb +++ b/docs/docs/integrations/toolkits/gmail.ipynb @@ -169,7 +169,7 @@ "source": [ "from langchain import hub\n", "from langchain.agents import AgentExecutor, create_openai_functions_agent\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/jira.ipynb b/docs/docs/integrations/toolkits/jira.ipynb index dd2ff47aab..080a8e5bca 100644 --- a/docs/docs/integrations/toolkits/jira.ipynb +++ b/docs/docs/integrations/toolkits/jira.ipynb @@ -51,8 +51,8 @@ "\n", "from langchain.agents import AgentType, initialize_agent\n", "from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.utilities.jira import JiraAPIWrapper" + "from langchain_community.utilities.jira import JiraAPIWrapper\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/json.ipynb b/docs/docs/integrations/toolkits/json.ipynb index 92b55925f2..972149326e 100644 --- a/docs/docs/integrations/toolkits/json.ipynb +++ b/docs/docs/integrations/toolkits/json.ipynb @@ -35,8 +35,8 @@ "import yaml\n", "from langchain.agents import create_json_agent\n", "from langchain_community.agent_toolkits import JsonToolkit\n", - "from langchain_community.llms.openai import OpenAI\n", - "from langchain_community.tools.json.tool import JsonSpec" + "from langchain_community.tools.json.tool import JsonSpec\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/multion.ipynb b/docs/docs/integrations/toolkits/multion.ipynb index 7e45205642..28d17f9e45 100644 --- a/docs/docs/integrations/toolkits/multion.ipynb +++ b/docs/docs/integrations/toolkits/multion.ipynb @@ -80,7 +80,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "from langchain_community.agent_toolkits import MultionToolkit\n", diff --git a/docs/docs/integrations/toolkits/nasa.ipynb b/docs/docs/integrations/toolkits/nasa.ipynb index 66786a6b30..0e3b73c4cc 100644 --- a/docs/docs/integrations/toolkits/nasa.ipynb +++ b/docs/docs/integrations/toolkits/nasa.ipynb @@ -31,8 +31,8 @@ "source": [ "from langchain.agents import AgentType, initialize_agent\n", "from langchain_community.agent_toolkits.nasa.toolkit import NasaToolkit\n", - "from langchain_community.llms import OpenAI\n", "from langchain_community.utilities.nasa import NasaAPIWrapper\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0, 
openai_api_key=\"\")\n", "nasa = NasaAPIWrapper()\n", diff --git a/docs/docs/integrations/toolkits/office365.ipynb b/docs/docs/integrations/toolkits/office365.ipynb index afebf423ea..ab5ab975cd 100644 --- a/docs/docs/integrations/toolkits/office365.ipynb +++ b/docs/docs/integrations/toolkits/office365.ipynb @@ -100,7 +100,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/openapi.ipynb b/docs/docs/integrations/toolkits/openapi.ipynb index 129fd32136..75875ac456 100644 --- a/docs/docs/integrations/toolkits/openapi.ipynb +++ b/docs/docs/integrations/toolkits/openapi.ipynb @@ -252,16 +252,16 @@ "name": "stderr", "output_type": "stream", "text": [ - "/Users/jeremywelborn/src/langchain/langchain/llms/openai.py:169: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain_community.chat_models import ChatOpenAI`\n", + "/Users/jeremywelborn/src/langchain/langchain/llms/openai.py:169: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain_openai import ChatOpenAI`\n", " warnings.warn(\n", - "/Users/jeremywelborn/src/langchain/langchain/llms/openai.py:608: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain_community.chat_models import ChatOpenAI`\n", + "/Users/jeremywelborn/src/langchain/langchain/llms/openai.py:608: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain_openai import ChatOpenAI`\n", " warnings.warn(\n" ] } ], "source": [ "from langchain_community.agent_toolkits.openapi import planner\n", - "from langchain_community.llms.openai import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(model_name=\"gpt-4\", temperature=0.0)" ] @@ -584,8 +584,8 @@ "source": [ "from langchain.agents import create_openapi_agent\n", "from langchain_community.agent_toolkits import OpenAPIToolkit\n", - "from langchain_community.llms.openai import OpenAI\n", - "from langchain_community.tools.json.tool import JsonSpec" + "from langchain_community.tools.json.tool import JsonSpec\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/openapi_nla.ipynb b/docs/docs/integrations/toolkits/openapi_nla.ipynb index 97120fe0b6..16c07c27aa 100644 --- a/docs/docs/integrations/toolkits/openapi_nla.ipynb +++ b/docs/docs/integrations/toolkits/openapi_nla.ipynb @@ -26,7 +26,7 @@ "from langchain.agents import AgentType, initialize_agent\n", "from langchain.requests import Requests\n", "from langchain_community.agent_toolkits import NLAToolkit\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/pandas.ipynb b/docs/docs/integrations/toolkits/pandas.ipynb index 312e88ec6c..5d4cf3e6fc 100644 --- a/docs/docs/integrations/toolkits/pandas.ipynb +++ b/docs/docs/integrations/toolkits/pandas.ipynb @@ -20,8 +20,8 @@ "outputs": [], "source": [ "from langchain.agents.agent_types import AgentType\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent" + "from 
langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -32,7 +32,7 @@ "outputs": [], "source": [ "import pandas as pd\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "df = pd.read_csv(\"titanic.csv\")" ] diff --git a/docs/docs/integrations/toolkits/powerbi.ipynb b/docs/docs/integrations/toolkits/powerbi.ipynb index fc90997e41..13a28bc105 100644 --- a/docs/docs/integrations/toolkits/powerbi.ipynb +++ b/docs/docs/integrations/toolkits/powerbi.ipynb @@ -39,8 +39,8 @@ "source": [ "from azure.identity import DefaultAzureCredential\n", "from langchain_community.agent_toolkits import PowerBIToolkit, create_pbi_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.utilities.powerbi import PowerBIDataset" + "from langchain_community.utilities.powerbi import PowerBIDataset\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/python.ipynb b/docs/docs/integrations/toolkits/python.ipynb index e3328b5ae8..5949c1a6db 100644 --- a/docs/docs/integrations/toolkits/python.ipynb +++ b/docs/docs/integrations/toolkits/python.ipynb @@ -70,7 +70,7 @@ "outputs": [], "source": [ "from langchain.agents import create_openai_functions_agent\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/slack.ipynb b/docs/docs/integrations/toolkits/slack.ipynb index a211fce6a3..22f2ea1099 100644 --- a/docs/docs/integrations/toolkits/slack.ipynb +++ b/docs/docs/integrations/toolkits/slack.ipynb @@ -75,7 +75,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/spark.ipynb b/docs/docs/integrations/toolkits/spark.ipynb index d5fe6b1cf8..f36a6bc1ba 100644 --- a/docs/docs/integrations/toolkits/spark.ipynb +++ b/docs/docs/integrations/toolkits/spark.ipynb @@ -79,8 +79,8 @@ } ], "source": [ - "from langchain_community.llms import OpenAI\n", "from langchain_experimental.agents.agent_toolkits import create_spark_dataframe_agent\n", + "from langchain_openai import OpenAI\n", "from pyspark.sql import SparkSession\n", "\n", "spark = SparkSession.builder.getOrCreate()\n", @@ -335,7 +335,7 @@ "import os\n", "\n", "from langchain.agents import create_spark_dataframe_agent\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = \"...input your openai api key here...\"\n", "\n", diff --git a/docs/docs/integrations/toolkits/spark_sql.ipynb b/docs/docs/integrations/toolkits/spark_sql.ipynb index f6dfa6daa6..d4d57aab3b 100644 --- a/docs/docs/integrations/toolkits/spark_sql.ipynb +++ b/docs/docs/integrations/toolkits/spark_sql.ipynb @@ -26,8 +26,8 @@ "source": [ "from langchain.agents import create_spark_sql_agent\n", "from langchain_community.agent_toolkits import SparkSQLToolkit\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.utilities.spark_sql import SparkSQL" + "from langchain_community.utilities.spark_sql import SparkSQL\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/sql_database.ipynb b/docs/docs/integrations/toolkits/sql_database.ipynb index 
8df121c90f..e5bc16cd95 100644 --- a/docs/docs/integrations/toolkits/sql_database.ipynb +++ b/docs/docs/integrations/toolkits/sql_database.ipynb @@ -38,7 +38,7 @@ "from langchain.agents.agent_types import AgentType\n", "from langchain.sql_database import SQLDatabase\n", "from langchain_community.agent_toolkits import SQLDatabaseToolkit\n", - "from langchain_community.llms.openai import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/steam.ipynb b/docs/docs/integrations/toolkits/steam.ipynb index b85c8b1031..fedab952b8 100644 --- a/docs/docs/integrations/toolkits/steam.ipynb +++ b/docs/docs/integrations/toolkits/steam.ipynb @@ -76,8 +76,8 @@ "source": [ "from langchain.agents import AgentType, initialize_agent\n", "from langchain_community.agent_toolkits.steam.toolkit import SteamToolkit\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.utilities.steam import SteamWebAPIWrapper" + "from langchain_community.utilities.steam import SteamWebAPIWrapper\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/toolkits/xorbits.ipynb b/docs/docs/integrations/toolkits/xorbits.ipynb index 44fc61496d..50e88e16a1 100644 --- a/docs/docs/integrations/toolkits/xorbits.ipynb +++ b/docs/docs/integrations/toolkits/xorbits.ipynb @@ -35,8 +35,8 @@ "outputs": [], "source": [ "import xorbits.pandas as pd\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_experimental.agents.agent_toolkits import create_xorbits_agent" + "from langchain_experimental.agents.agent_toolkits import create_xorbits_agent\n", + "from langchain_openai import OpenAI" ] }, { @@ -381,7 +381,7 @@ "source": [ "import xorbits.numpy as np\n", "from langchain.agents import create_xorbits_agent\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "arr = np.array([1, 2, 3, 4, 5, 6])\n", "agent = create_xorbits_agent(OpenAI(temperature=0), arr, verbose=True)" diff --git a/docs/docs/integrations/tools/arxiv.ipynb b/docs/docs/integrations/tools/arxiv.ipynb index 5b57135ce6..402858c59e 100644 --- a/docs/docs/integrations/tools/arxiv.ipynb +++ b/docs/docs/integrations/tools/arxiv.ipynb @@ -37,7 +37,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0.0)\n", "tools = load_tools(\n", diff --git a/docs/docs/integrations/tools/awslambda.ipynb b/docs/docs/integrations/tools/awslambda.ipynb index d88083eec3..77e54b3e83 100644 --- a/docs/docs/integrations/tools/awslambda.ipynb +++ b/docs/docs/integrations/tools/awslambda.ipynb @@ -62,7 +62,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "\n", diff --git a/docs/docs/integrations/tools/bash.ipynb b/docs/docs/integrations/tools/bash.ipynb index 5b3d311040..e031012a53 100644 --- a/docs/docs/integrations/tools/bash.ipynb +++ b/docs/docs/integrations/tools/bash.ipynb @@ -145,7 +145,7 @@ ], "source": [ "from langchain.agents import AgentType, initialize_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0)\n", "\n", diff --git 
a/docs/docs/integrations/tools/bearly.ipynb b/docs/docs/integrations/tools/bearly.ipynb index 1580ff6c3f..1bc163eb7d 100644 --- a/docs/docs/integrations/tools/bearly.ipynb +++ b/docs/docs/integrations/tools/bearly.ipynb @@ -29,7 +29,7 @@ "source": [ "from langchain.agents import AgentType, initialize_agent\n", "from langchain.tools import BearlyInterpreterTool\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/tools/chatgpt_plugins.ipynb b/docs/docs/integrations/tools/chatgpt_plugins.ipynb index 4d4b8ee0de..ea7e4ab9de 100644 --- a/docs/docs/integrations/tools/chatgpt_plugins.ipynb +++ b/docs/docs/integrations/tools/chatgpt_plugins.ipynb @@ -23,7 +23,7 @@ "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.tools import AIPluginTool\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/tools/dalle_image_generator.ipynb b/docs/docs/integrations/tools/dalle_image_generator.ipynb index 939f6fb461..6237272367 100644 --- a/docs/docs/integrations/tools/dalle_image_generator.ipynb +++ b/docs/docs/integrations/tools/dalle_image_generator.ipynb @@ -32,7 +32,7 @@ "source": [ "import os\n", "\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = \"\"" ] @@ -53,8 +53,8 @@ "source": [ "from langchain.chains import LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", "from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0.9)\n", "prompt = PromptTemplate(\n", diff --git a/docs/docs/integrations/tools/e2b_data_analysis.ipynb b/docs/docs/integrations/tools/e2b_data_analysis.ipynb index a37d93d98f..408b66b951 100644 --- a/docs/docs/integrations/tools/e2b_data_analysis.ipynb +++ b/docs/docs/integrations/tools/e2b_data_analysis.ipynb @@ -59,7 +59,7 @@ "\n", "from langchain.agents import AgentType, initialize_agent\n", "from langchain.tools import E2BDataAnalysisTool\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "os.environ[\"E2B_API_KEY\"] = \"\"\n", "os.environ[\"OPENAI_API_KEY\"] = \"\"" diff --git a/docs/docs/integrations/tools/eleven_labs_tts.ipynb b/docs/docs/integrations/tools/eleven_labs_tts.ipynb index adc1af099b..5fbd39c4cb 100644 --- a/docs/docs/integrations/tools/eleven_labs_tts.ipynb +++ b/docs/docs/integrations/tools/eleven_labs_tts.ipynb @@ -127,7 +127,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/integrations/tools/google_drive.ipynb b/docs/docs/integrations/tools/google_drive.ipynb index 7f87b4b8a9..c77faab78d 100644 --- a/docs/docs/integrations/tools/google_drive.ipynb +++ b/docs/docs/integrations/tools/google_drive.ipynb @@ -172,7 +172,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "agent = initialize_agent(\n", diff --git a/docs/docs/integrations/tools/google_finance.ipynb 
b/docs/docs/integrations/tools/google_finance.ipynb index 7e3be7f944..b178a80e5a 100644 --- a/docs/docs/integrations/tools/google_finance.ipynb +++ b/docs/docs/integrations/tools/google_finance.ipynb @@ -75,7 +75,7 @@ "import os\n", "\n", "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = \"\"\n", "os.environ[\"SERP_API_KEY\"] = \"\"\n", diff --git a/docs/docs/integrations/tools/google_jobs.ipynb b/docs/docs/integrations/tools/google_jobs.ipynb index a64289fcb5..0a97096a57 100644 --- a/docs/docs/integrations/tools/google_jobs.ipynb +++ b/docs/docs/integrations/tools/google_jobs.ipynb @@ -194,7 +194,7 @@ "import os\n", "\n", "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "OpenAI.api_key = os.environ[\"OPENAI_API_KEY\"]\n", "llm = OpenAI()\n", diff --git a/docs/docs/integrations/tools/google_serper.ipynb b/docs/docs/integrations/tools/google_serper.ipynb index f476019505..d93ce03449 100644 --- a/docs/docs/integrations/tools/google_serper.ipynb +++ b/docs/docs/integrations/tools/google_serper.ipynb @@ -159,8 +159,8 @@ ], "source": [ "from langchain.agents import AgentType, Tool, initialize_agent\n", - "from langchain_community.llms.openai import OpenAI\n", "from langchain_community.utilities import GoogleSerperAPIWrapper\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "search = GoogleSerperAPIWrapper()\n", diff --git a/docs/docs/integrations/tools/gradio_tools.ipynb b/docs/docs/integrations/tools/gradio_tools.ipynb index 26410ca106..0bee3896f2 100644 --- a/docs/docs/integrations/tools/gradio_tools.ipynb +++ b/docs/docs/integrations/tools/gradio_tools.ipynb @@ -185,7 +185,7 @@ ")\n", "from langchain.agents import initialize_agent\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n", diff --git a/docs/docs/integrations/tools/graphql.ipynb b/docs/docs/integrations/tools/graphql.ipynb index 09cba1a022..a33ad9b232 100644 --- a/docs/docs/integrations/tools/graphql.ipynb +++ b/docs/docs/integrations/tools/graphql.ipynb @@ -44,7 +44,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "\n", diff --git a/docs/docs/integrations/tools/human_tools.ipynb b/docs/docs/integrations/tools/human_tools.ipynb index ae69520dc3..1e89a0b91b 100644 --- a/docs/docs/integrations/tools/human_tools.ipynb +++ b/docs/docs/integrations/tools/human_tools.ipynb @@ -19,8 +19,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import ChatOpenAI, OpenAI\n", "\n", "llm = ChatOpenAI(temperature=0.0)\n", "math_llm = OpenAI(temperature=0.0)\n", diff --git a/docs/docs/integrations/tools/lemonai.ipynb b/docs/docs/integrations/tools/lemonai.ipynb index fffdda472a..d169c62a97 100644 --- a/docs/docs/integrations/tools/lemonai.ipynb +++ 
b/docs/docs/integrations/tools/lemonai.ipynb @@ -126,7 +126,7 @@ "source": [ "import os\n", "\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "from lemonai import execute_workflow" ] }, diff --git a/docs/docs/integrations/tools/metaphor_search.ipynb b/docs/docs/integrations/tools/metaphor_search.ipynb index 3da3b28b92..3b8d7631c1 100644 --- a/docs/docs/integrations/tools/metaphor_search.ipynb +++ b/docs/docs/integrations/tools/metaphor_search.ipynb @@ -124,7 +124,7 @@ "source": [ "from langchain.agents import AgentExecutor, OpenAIFunctionsAgent\n", "from langchain.schema import SystemMessage\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0)\n", "\n", @@ -311,7 +311,7 @@ "source": [ "from langchain.agents import AgentExecutor, OpenAIFunctionsAgent\n", "from langchain.schema import SystemMessage\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0, model=\"gpt-4\")\n", "\n", diff --git a/docs/docs/integrations/tools/openweathermap.ipynb b/docs/docs/integrations/tools/openweathermap.ipynb index 9f81f50cbd..0535fe6e0c 100644 --- a/docs/docs/integrations/tools/openweathermap.ipynb +++ b/docs/docs/integrations/tools/openweathermap.ipynb @@ -84,7 +84,7 @@ "import os\n", "\n", "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = \"\"\n", "os.environ[\"OPENWEATHERMAP_API_KEY\"] = \"\"\n", diff --git a/docs/docs/integrations/tools/reddit_search.ipynb b/docs/docs/integrations/tools/reddit_search.ipynb index 5de2743334..14ba150b54 100644 --- a/docs/docs/integrations/tools/reddit_search.ipynb +++ b/docs/docs/integrations/tools/reddit_search.ipynb @@ -173,9 +173,9 @@ "from langchain.chains import LLMChain\n", "from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.tools.reddit_search.tool import RedditSearchRun\n", "from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# Provide keys for Reddit\n", "client_id = \"\"\n", diff --git a/docs/docs/integrations/tools/sceneXplain.ipynb b/docs/docs/integrations/tools/sceneXplain.ipynb index ae5e6dc2e9..ee91d47b59 100644 --- a/docs/docs/integrations/tools/sceneXplain.ipynb +++ b/docs/docs/integrations/tools/sceneXplain.ipynb @@ -96,7 +96,7 @@ "source": [ "from langchain.agents import initialize_agent\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n", diff --git a/docs/docs/integrations/tools/search_tools.ipynb b/docs/docs/integrations/tools/search_tools.ipynb index 6e1ad5dfe7..98dede42ed 100644 --- a/docs/docs/integrations/tools/search_tools.ipynb +++ b/docs/docs/integrations/tools/search_tools.ipynb @@ -22,7 +22,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff 
--git a/docs/docs/integrations/tools/searchapi.ipynb b/docs/docs/integrations/tools/searchapi.ipynb index e48e3014b4..acd5af2c25 100644 --- a/docs/docs/integrations/tools/searchapi.ipynb +++ b/docs/docs/integrations/tools/searchapi.ipynb @@ -125,8 +125,8 @@ ], "source": [ "from langchain.agents import AgentType, Tool, initialize_agent\n", - "from langchain_community.llms.openai import OpenAI\n", "from langchain_community.utilities import SearchApiAPIWrapper\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "search = SearchApiAPIWrapper()\n", diff --git a/docs/docs/integrations/tools/semanticscholar.ipynb b/docs/docs/integrations/tools/semanticscholar.ipynb index 0749799711..50a9128b21 100644 --- a/docs/docs/integrations/tools/semanticscholar.ipynb +++ b/docs/docs/integrations/tools/semanticscholar.ipynb @@ -27,7 +27,7 @@ "source": [ "from langchain import hub\n", "from langchain.agents import AgentExecutor, create_openai_functions_agent\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/tools/tavily_search.ipynb b/docs/docs/integrations/tools/tavily_search.ipynb index 79b611a62e..f5789e4271 100644 --- a/docs/docs/integrations/tools/tavily_search.ipynb +++ b/docs/docs/integrations/tools/tavily_search.ipynb @@ -150,7 +150,7 @@ "source": [ "from langchain import hub\n", "from langchain.agents import AgentExecutor, create_openai_functions_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "instructions = \"\"\"You are an assistant.\"\"\"\n", "base_prompt = hub.pull(\"langchain-ai/openai-functions-template\")\n", diff --git a/docs/docs/integrations/tools/yahoo_finance_news.ipynb b/docs/docs/integrations/tools/yahoo_finance_news.ipynb index 291079504b..d6afd4251b 100644 --- a/docs/docs/integrations/tools/yahoo_finance_news.ipynb +++ b/docs/docs/integrations/tools/yahoo_finance_news.ipynb @@ -53,8 +53,8 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0.0)\n", "tools = [YahooFinanceNewsTool()]\n", diff --git a/docs/docs/integrations/tools/zapier.ipynb b/docs/docs/integrations/tools/zapier.ipynb index 4629ee2344..5ef000319f 100644 --- a/docs/docs/integrations/tools/zapier.ipynb +++ b/docs/docs/integrations/tools/zapier.ipynb @@ -62,8 +62,8 @@ "source": [ "from langchain.agents import AgentType, initialize_agent\n", "from langchain_community.agent_toolkits import ZapierToolkit\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.utilities.zapier import ZapierNLAWrapper" + "from langchain_community.utilities.zapier import ZapierNLAWrapper\n", + "from langchain_openai import OpenAI" ] }, { @@ -162,9 +162,9 @@ "source": [ "from langchain.chains import LLMChain, SimpleSequentialChain, TransformChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", "from langchain_community.tools.zapier.tool import ZapierNLARunAction\n", - "from langchain_community.utilities.zapier import ZapierNLAWrapper" + "from langchain_community.utilities.zapier import ZapierNLAWrapper\n", + "from langchain_openai import OpenAI" ] }, { diff --git 
a/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb b/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb index 6253598c63..9c9d692d9d 100644 --- a/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb +++ b/docs/docs/integrations/vectorstores/activeloop_deeplake.ipynb @@ -52,8 +52,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import DeepLake" + "from langchain_community.vectorstores import DeepLake\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -227,14 +227,14 @@ "name": "stderr", "output_type": "stream", "text": [ - "/home/ubuntu/langchain_activeloop/langchain/libs/langchain/langchain/llms/openai.py:786: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain_community.chat_models import ChatOpenAI`\n", + "/home/ubuntu/langchain_activeloop/langchain/libs/langchain/langchain/llms/openai.py:786: UserWarning: You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain_openai import ChatOpenAI`\n", " warnings.warn(\n" ] } ], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain_community.llms import OpenAIChat\n", + "from langchain_openai import ChatOpenAI\n", "\n", "qa = RetrievalQA.from_chain_type(\n", - " llm=OpenAIChat(model=\"gpt-3.5-turbo\"),\n", + " llm=ChatOpenAI(model=\"gpt-3.5-turbo\"),\n", diff --git a/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb b/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb index 1ae7f88b79..fad2ce77d2 100644 --- a/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb +++ b/docs/docs/integrations/vectorstores/alibabacloud_opensearch.ipynb @@ -130,11 +130,11 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import (\n", " AlibabaCloudOpenSearch,\n", " AlibabaCloudOpenSearchSettings,\n", - ")" + ")\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/analyticdb.ipynb b/docs/docs/integrations/vectorstores/analyticdb.ipynb index 331fc23cff..84a2480ddf 100644 --- a/docs/docs/integrations/vectorstores/analyticdb.ipynb +++ b/docs/docs/integrations/vectorstores/analyticdb.ipynb @@ -24,8 +24,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import AnalyticDB" + "from langchain_community.vectorstores import AnalyticDB\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/astradb.ipynb b/docs/docs/integrations/vectorstores/astradb.ipynb index ace4c990db..37340560c4 100644 --- a/docs/docs/integrations/vectorstores/astradb.ipynb +++ b/docs/docs/integrations/vectorstores/astradb.ipynb @@ -60,14 +60,13 @@ "from datasets import (\n", " load_dataset,\n", ")\n", - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema import Document\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.document_loaders import PyPDFLoader\n", - "from 
langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.runnables import RunnablePassthrough" + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb b/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb index 7fed5acdb9..533d670a84 100644 --- a/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb +++ b/docs/docs/integrations/vectorstores/azure_cosmos_db.ipynb @@ -132,11 +132,11 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores.azure_cosmos_db_vector_search import (\n", " AzureCosmosDBVectorSearch,\n", " CosmosDBSimilarityType,\n", ")\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "SOURCE_FILE_NAME = \"../../modules/state_of_the_union.txt\"\n", "\n", diff --git a/docs/docs/integrations/vectorstores/azuresearch.ipynb b/docs/docs/integrations/vectorstores/azuresearch.ipynb index a4d2d29955..1882f4cb37 100644 --- a/docs/docs/integrations/vectorstores/azuresearch.ipynb +++ b/docs/docs/integrations/vectorstores/azuresearch.ipynb @@ -45,8 +45,8 @@ "source": [ "import os\n", "\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores.azuresearch import AzureSearch" + "from langchain_community.vectorstores.azuresearch import AzureSearch\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/chroma.ipynb b/docs/docs/integrations/vectorstores/chroma.ipynb index 5fd390ed37..aa97faac82 100644 --- a/docs/docs/integrations/vectorstores/chroma.ipynb +++ b/docs/docs/integrations/vectorstores/chroma.ipynb @@ -361,7 +361,7 @@ "\n", "from getpass import getpass\n", "\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "OPENAI_API_KEY = getpass()" ] diff --git a/docs/docs/integrations/vectorstores/clickhouse.ipynb b/docs/docs/integrations/vectorstores/clickhouse.ipynb index 3b639da02a..540229016b 100644 --- a/docs/docs/integrations/vectorstores/clickhouse.ipynb +++ b/docs/docs/integrations/vectorstores/clickhouse.ipynb @@ -102,8 +102,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Clickhouse, ClickhouseSettings" + "from langchain_community.vectorstores import Clickhouse, ClickhouseSettings\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/databricks_vector_search.ipynb b/docs/docs/integrations/vectorstores/databricks_vector_search.ipynb index adef3bda51..eec9614252 100644 --- a/docs/docs/integrations/vectorstores/databricks_vector_search.ipynb +++ b/docs/docs/integrations/vectorstores/databricks_vector_search.ipynb @@ -61,7 +61,7 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_openai import 
OpenAIEmbeddings\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/dingo.ipynb b/docs/docs/integrations/vectorstores/dingo.ipynb index bd23461639..b2a4d3888e 100644 --- a/docs/docs/integrations/vectorstores/dingo.ipynb +++ b/docs/docs/integrations/vectorstores/dingo.ipynb @@ -70,8 +70,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Dingo" + "from langchain_community.vectorstores import Dingo\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -132,8 +132,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Dingo" + "from langchain_community.vectorstores import Dingo\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb b/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb index 0b3213bd98..f8e59d293d 100644 --- a/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb +++ b/docs/docs/integrations/vectorstores/docarray_hnsw.ipynb @@ -75,8 +75,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import DocArrayHnswSearch" + "from langchain_community.vectorstores import DocArrayHnswSearch\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb b/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb index 93ce327b9a..bd6511bb5a 100644 --- a/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb +++ b/docs/docs/integrations/vectorstores/docarray_in_memory.ipynb @@ -72,8 +72,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import DocArrayInMemorySearch" + "from langchain_community.vectorstores import DocArrayInMemorySearch\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/elasticsearch.ipynb b/docs/docs/integrations/vectorstores/elasticsearch.ipynb index 72726384f8..3e119994d3 100644 --- a/docs/docs/integrations/vectorstores/elasticsearch.ipynb +++ b/docs/docs/integrations/vectorstores/elasticsearch.ipynb @@ -65,7 +65,7 @@ "Example:\n", "```python\n", " from langchain_community.vectorstores.elasticsearch import ElasticsearchStore\n", - " from langchain_community.embeddings.openai import OpenAIEmbeddings\n", + " from langchain_openai import OpenAIEmbeddings\n", "\n", " embedding = OpenAIEmbeddings()\n", " elastic_vector_search = ElasticsearchStore(\n", @@ -80,7 +80,7 @@ "Example:\n", "```python\n", " from langchain_community.vectorstores import ElasticsearchStore\n", - " from langchain_community.embeddings import OpenAIEmbeddings\n", + " from langchain_openai import OpenAIEmbeddings\n", "\n", " embedding = 
OpenAIEmbeddings()\n", " elastic_vector_search = ElasticsearchStore(\n", @@ -116,7 +116,7 @@ "Example:\n", "```python\n", " from langchain_community.vectorstores.elasticsearch import ElasticsearchStore\n", - " from langchain_community.embeddings import OpenAIEmbeddings\n", + " from langchain_openai import OpenAIEmbeddings\n", "\n", " embedding = OpenAIEmbeddings()\n", " elastic_vector_search = ElasticsearchStore(\n", @@ -180,8 +180,8 @@ }, "outputs": [], "source": [ - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import ElasticsearchStore" + "from langchain_community.vectorstores import ElasticsearchStore\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/epsilla.ipynb b/docs/docs/integrations/vectorstores/epsilla.ipynb index 07aa440664..8cc4795ec1 100644 --- a/docs/docs/integrations/vectorstores/epsilla.ipynb +++ b/docs/docs/integrations/vectorstores/epsilla.ipynb @@ -57,8 +57,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Epsilla" + "from langchain_community.vectorstores import Epsilla\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/faiss.ipynb b/docs/docs/integrations/vectorstores/faiss.ipynb index e277be120f..6c4a693688 100644 --- a/docs/docs/integrations/vectorstores/faiss.ipynb +++ b/docs/docs/integrations/vectorstores/faiss.ipynb @@ -90,8 +90,8 @@ "\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/faiss_async.ipynb b/docs/docs/integrations/vectorstores/faiss_async.ipynb index dde2b6a3a4..13e269a9e9 100644 --- a/docs/docs/integrations/vectorstores/faiss_async.ipynb +++ b/docs/docs/integrations/vectorstores/faiss_async.ipynb @@ -58,8 +58,8 @@ "\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "loader = TextLoader(\"../../../extras/modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/hippo.ipynb b/docs/docs/integrations/vectorstores/hippo.ipynb index ac119ddc6c..fa412cea52 100644 --- a/docs/docs/integrations/vectorstores/hippo.ipynb +++ b/docs/docs/integrations/vectorstores/hippo.ipynb @@ -98,10 +98,9 @@ "import os\n", "\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores.hippo import Hippo" + "from langchain_community.vectorstores.hippo import Hippo\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/hologres.ipynb 
b/docs/docs/integrations/vectorstores/hologres.ipynb index 1de91e14ce..58fa58bd34 100644 --- a/docs/docs/integrations/vectorstores/hologres.ipynb +++ b/docs/docs/integrations/vectorstores/hologres.ipynb @@ -34,8 +34,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Hologres" + "from langchain_community.vectorstores import Hologres\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/jaguar.ipynb b/docs/docs/integrations/vectorstores/jaguar.ipynb index 83be4ace3f..3cf75bd9e0 100644 --- a/docs/docs/integrations/vectorstores/jaguar.ipynb +++ b/docs/docs/integrations/vectorstores/jaguar.ipynb @@ -53,15 +53,13 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQAWithSourcesChain\n", - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.llms import OpenAI\n", "from langchain_community.vectorstores.jaguar import Jaguar\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings\n", "\n", "\"\"\" \n", "Load a text file into a set of documents \n", @@ -172,8 +170,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores.jaguar import Jaguar\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "# Instantiate a Jaguar vector store object\n", "url = \"http://192.168.3.88:8080/fwww/\"\n", diff --git a/docs/docs/integrations/vectorstores/lancedb.ipynb b/docs/docs/integrations/vectorstores/lancedb.ipynb index c7fa077ed9..f74fced435 100644 --- a/docs/docs/integrations/vectorstores/lancedb.ipynb +++ b/docs/docs/integrations/vectorstores/lancedb.ipynb @@ -64,8 +64,8 @@ }, "outputs": [], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import LanceDB" + "from langchain_community.vectorstores import LanceDB\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/marqo.ipynb b/docs/docs/integrations/vectorstores/marqo.ipynb index 9b9da57c7b..6bb539253e 100644 --- a/docs/docs/integrations/vectorstores/marqo.ipynb +++ b/docs/docs/integrations/vectorstores/marqo.ipynb @@ -477,7 +477,7 @@ "import os\n", "\n", "from langchain.chains import RetrievalQAWithSourcesChain\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" ] diff --git a/docs/docs/integrations/vectorstores/meilisearch.ipynb b/docs/docs/integrations/vectorstores/meilisearch.ipynb index d1c1130dbe..aa73def8ff 100644 --- a/docs/docs/integrations/vectorstores/meilisearch.ipynb +++ b/docs/docs/integrations/vectorstores/meilisearch.ipynb @@ -127,8 +127,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from 
langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Meilisearch\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/docs/docs/integrations/vectorstores/milvus.ipynb b/docs/docs/integrations/vectorstores/milvus.ipynb index adc5dd2d86..45a2699188 100644 --- a/docs/docs/integrations/vectorstores/milvus.ipynb +++ b/docs/docs/integrations/vectorstores/milvus.ipynb @@ -68,8 +68,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Milvus" + "from langchain_community.vectorstores import Milvus\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/momento_vector_index.ipynb b/docs/docs/integrations/vectorstores/momento_vector_index.ipynb index e003a2179e..174b352245 100644 --- a/docs/docs/integrations/vectorstores/momento_vector_index.ipynb +++ b/docs/docs/integrations/vectorstores/momento_vector_index.ipynb @@ -145,8 +145,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import MomentoVectorIndex" + "from langchain_community.vectorstores import MomentoVectorIndex\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -376,7 +376,7 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb b/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb index 150b7f5100..57ea3f05fe 100644 --- a/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb +++ b/docs/docs/integrations/vectorstores/mongodb_atlas.ipynb @@ -194,8 +194,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import MongoDBAtlasVectorSearch\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "# insert the documents in MongoDB Atlas with their embedding\n", "vector_search = MongoDBAtlasVectorSearch.from_documents(\n", @@ -243,8 +243,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import MongoDBAtlasVectorSearch\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "vector_search = MongoDBAtlasVectorSearch.from_connection_string(\n", " MONGODB_ATLAS_CLUSTER_URI,\n", @@ -389,7 +389,7 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "qa = RetrievalQA.from_chain_type(\n", " llm=OpenAI(),\n", diff --git a/docs/docs/integrations/vectorstores/myscale.ipynb b/docs/docs/integrations/vectorstores/myscale.ipynb index 78abda403b..72a958b078 100644 --- a/docs/docs/integrations/vectorstores/myscale.ipynb +++ b/docs/docs/integrations/vectorstores/myscale.ipynb @@ -100,8 +100,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", 
- "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import MyScale" + "from langchain_community.vectorstores import MyScale\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/neo4jvector.ipynb b/docs/docs/integrations/vectorstores/neo4jvector.ipynb index 755bb9bb59..0aebe3bc36 100644 --- a/docs/docs/integrations/vectorstores/neo4jvector.ipynb +++ b/docs/docs/integrations/vectorstores/neo4jvector.ipynb @@ -75,8 +75,8 @@ "from langchain.docstore.document import Document\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Neo4jVector" + "from langchain_community.vectorstores import Neo4jVector\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -480,7 +480,7 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQAWithSourcesChain\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/integrations/vectorstores/opensearch.ipynb b/docs/docs/integrations/vectorstores/opensearch.ipynb index f8bd48ddb9..24e8c2e20f 100644 --- a/docs/docs/integrations/vectorstores/opensearch.ipynb +++ b/docs/docs/integrations/vectorstores/opensearch.ipynb @@ -70,8 +70,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import OpenSearchVectorSearch" + "from langchain_community.vectorstores import OpenSearchVectorSearch\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/pgembedding.ipynb b/docs/docs/integrations/vectorstores/pgembedding.ipynb index 56a6268552..048ecad985 100644 --- a/docs/docs/integrations/vectorstores/pgembedding.ipynb +++ b/docs/docs/integrations/vectorstores/pgembedding.ipynb @@ -84,8 +84,8 @@ "from langchain.docstore.document import Document\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import PGEmbedding" + "from langchain_community.vectorstores import PGEmbedding\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb b/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb index 7210b276e6..e216d530fc 100644 --- a/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb +++ b/docs/docs/integrations/vectorstores/pgvecto_rs.ipynb @@ -32,8 +32,8 @@ "from langchain.docstore.document import Document\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores.pgvecto_rs import PGVecto_rs" + "from langchain_community.vectorstores.pgvecto_rs import PGVecto_rs\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/pgvector.ipynb b/docs/docs/integrations/vectorstores/pgvector.ipynb index b02ae38d52..1481dcc0d3 100644 
--- a/docs/docs/integrations/vectorstores/pgvector.ipynb +++ b/docs/docs/integrations/vectorstores/pgvector.ipynb @@ -103,8 +103,8 @@ "from langchain.docstore.document import Document\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores.pgvector import PGVector" + "from langchain_community.vectorstores.pgvector import PGVector\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/pinecone.ipynb b/docs/docs/integrations/vectorstores/pinecone.ipynb index 4de6d019cd..3346d3535f 100644 --- a/docs/docs/integrations/vectorstores/pinecone.ipynb +++ b/docs/docs/integrations/vectorstores/pinecone.ipynb @@ -81,8 +81,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Pinecone" + "from langchain_community.vectorstores import Pinecone\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/qdrant.ipynb b/docs/docs/integrations/vectorstores/qdrant.ipynb index 14d4b7bdd6..73c7f1eaf2 100644 --- a/docs/docs/integrations/vectorstores/qdrant.ipynb +++ b/docs/docs/integrations/vectorstores/qdrant.ipynb @@ -80,8 +80,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Qdrant" + "from langchain_community.vectorstores import Qdrant\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/redis.ipynb b/docs/docs/integrations/vectorstores/redis.ipynb index 0e1ccd85d8..a66cf20f0f 100644 --- a/docs/docs/integrations/vectorstores/redis.ipynb +++ b/docs/docs/integrations/vectorstores/redis.ipynb @@ -163,7 +163,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/docs/docs/integrations/vectorstores/rockset.ipynb b/docs/docs/integrations/vectorstores/rockset.ipynb index be6f06ba79..f5afbb405a 100644 --- a/docs/docs/integrations/vectorstores/rockset.ipynb +++ b/docs/docs/integrations/vectorstores/rockset.ipynb @@ -110,8 +110,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Rockset\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/singlestoredb.ipynb b/docs/docs/integrations/vectorstores/singlestoredb.ipynb index 70e5e5ef0b..9de88f2ab0 100644 --- a/docs/docs/integrations/vectorstores/singlestoredb.ipynb +++ b/docs/docs/integrations/vectorstores/singlestoredb.ipynb @@ -48,8 +48,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import 
TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import SingleStoreDB" + "from langchain_community.vectorstores import SingleStoreDB\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/sklearn.ipynb b/docs/docs/integrations/vectorstores/sklearn.ipynb index 516ecae361..6b34ab01e1 100644 --- a/docs/docs/integrations/vectorstores/sklearn.ipynb +++ b/docs/docs/integrations/vectorstores/sklearn.ipynb @@ -62,8 +62,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import SKLearnVectorStore\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n", "documents = loader.load()\n", diff --git a/docs/docs/integrations/vectorstores/starrocks.ipynb b/docs/docs/integrations/vectorstores/starrocks.ipynb index 88d0566653..432678e719 100644 --- a/docs/docs/integrations/vectorstores/starrocks.ipynb +++ b/docs/docs/integrations/vectorstores/starrocks.ipynb @@ -63,10 +63,9 @@ " DirectoryLoader,\n", " UnstructuredMarkdownLoader,\n", ")\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.llms import OpenAI\n", "from langchain_community.vectorstores import StarRocks\n", "from langchain_community.vectorstores.starrocks import StarRocksSettings\n", + "from langchain_openai import OpenAI, OpenAIEmbeddings\n", "\n", "update_vectordb = False" ] diff --git a/docs/docs/integrations/vectorstores/supabase.ipynb b/docs/docs/integrations/vectorstores/supabase.ipynb index 7b84f595a0..ef188472cc 100644 --- a/docs/docs/integrations/vectorstores/supabase.ipynb +++ b/docs/docs/integrations/vectorstores/supabase.ipynb @@ -155,8 +155,8 @@ "source": [ "import os\n", "\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import SupabaseVectorStore\n", + "from langchain_openai import OpenAIEmbeddings\n", "from supabase.client import Client, create_client\n", "\n", "supabase_url = os.environ.get(\"SUPABASE_URL\")\n", diff --git a/docs/docs/integrations/vectorstores/tigris.ipynb b/docs/docs/integrations/vectorstores/tigris.ipynb index 0ac07a4a28..e2027f158a 100644 --- a/docs/docs/integrations/vectorstores/tigris.ipynb +++ b/docs/docs/integrations/vectorstores/tigris.ipynb @@ -87,8 +87,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Tigris" + "from langchain_community.vectorstores import Tigris\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/timescalevector.ipynb b/docs/docs/integrations/vectorstores/timescalevector.ipynb index be4a9693cd..84d3d37028 100644 --- a/docs/docs/integrations/vectorstores/timescalevector.ipynb +++ b/docs/docs/integrations/vectorstores/timescalevector.ipynb @@ -126,8 +126,8 @@ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", "from langchain_community.document_loaders.json_loader import JSONLoader\n", - "from 
langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores.timescalevector import TimescaleVector" + "from langchain_community.vectorstores.timescalevector import TimescaleVector\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -356,7 +356,7 @@ "outputs": [], "source": [ "# Initialize GPT3.5 model\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0.1, model=\"gpt-3.5-turbo-16k\")\n", "\n", @@ -1050,7 +1050,7 @@ } ], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0.1, model=\"gpt-3.5-turbo-16k\")\n", "\n", @@ -1263,7 +1263,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "# Give LLM info about the metadata fields\n", "metadata_field_info = [\n", diff --git a/docs/docs/integrations/vectorstores/typesense.ipynb b/docs/docs/integrations/vectorstores/typesense.ipynb index d7ecd59d1f..0e2be56daa 100644 --- a/docs/docs/integrations/vectorstores/typesense.ipynb +++ b/docs/docs/integrations/vectorstores/typesense.ipynb @@ -86,8 +86,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Typesense" + "from langchain_community.vectorstores import Typesense\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/usearch.ipynb b/docs/docs/integrations/vectorstores/usearch.ipynb index daf9661595..fc4bbdcbb9 100644 --- a/docs/docs/integrations/vectorstores/usearch.ipynb +++ b/docs/docs/integrations/vectorstores/usearch.ipynb @@ -57,8 +57,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import USearch" + "from langchain_community.vectorstores import USearch\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/weaviate.ipynb b/docs/docs/integrations/vectorstores/weaviate.ipynb index b486ee777e..cd821d0649 100644 --- a/docs/docs/integrations/vectorstores/weaviate.ipynb +++ b/docs/docs/integrations/vectorstores/weaviate.ipynb @@ -121,8 +121,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Weaviate" + "from langchain_community.vectorstores import Weaviate\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -368,7 +368,7 @@ } ], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n", "llm.predict(\"What did the president say about Justice Breyer\")" @@ -398,7 +398,7 @@ "outputs": [], "source": [ "from langchain.chains import 
RetrievalQAWithSourcesChain\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { @@ -521,7 +521,7 @@ } ], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "template = \"\"\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\n", "Question: {question} \n", @@ -540,7 +540,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)" ] diff --git a/docs/docs/integrations/vectorstores/xata.ipynb b/docs/docs/integrations/vectorstores/xata.ipynb index eb20b76458..b4f707fbe1 100644 --- a/docs/docs/integrations/vectorstores/xata.ipynb +++ b/docs/docs/integrations/vectorstores/xata.ipynb @@ -109,8 +109,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores.xata import XataVectorStore" + "from langchain_community.vectorstores.xata import XataVectorStore\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/integrations/vectorstores/yellowbrick.ipynb b/docs/docs/integrations/vectorstores/yellowbrick.ipynb index 7518af2d3e..e3b6a6fa69 100644 --- a/docs/docs/integrations/vectorstores/yellowbrick.ipynb +++ b/docs/docs/integrations/vectorstores/yellowbrick.ipynb @@ -100,9 +100,8 @@ "from langchain.chains import LLMChain, RetrievalQAWithSourcesChain\n", "from langchain.docstore.document import Document\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Yellowbrick\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "\n", "# Establish connection parameters to Yellowbrick. 
If you've signed up for Sandbox, fill in the information from your welcome mail here:\n", "yellowbrick_connection_string = (\n", diff --git a/docs/docs/integrations/vectorstores/zilliz.ipynb b/docs/docs/integrations/vectorstores/zilliz.ipynb index fa97283669..362ac0b51b 100644 --- a/docs/docs/integrations/vectorstores/zilliz.ipynb +++ b/docs/docs/integrations/vectorstores/zilliz.ipynb @@ -78,8 +78,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Milvus" + "from langchain_community.vectorstores import Milvus\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/langsmith/walkthrough.ipynb b/docs/docs/langsmith/walkthrough.ipynb index 0f7eeef1ea..a3b109eef9 100644 --- a/docs/docs/langsmith/walkthrough.ipynb +++ b/docs/docs/langsmith/walkthrough.ipynb @@ -143,8 +143,8 @@ "from langchain.agents.format_scratchpad import format_to_openai_function_messages\n", "from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n", "from langchain.tools import DuckDuckGoSearchResults\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.tools.convert_to_openai import format_tool_to_openai_function\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# Fetches the latest version of this prompt\n", "prompt = hub.pull(\"wfh/langsmith-agent-prompt:latest\")\n", @@ -337,8 +337,8 @@ "from langchain.agents import AgentExecutor, AgentType, initialize_agent, load_tools\n", "from langchain.agents.format_scratchpad import format_to_openai_function_messages\n", "from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.tools.convert_to_openai import format_tool_to_openai_function\n", + "from langchain_openai import ChatOpenAI\n", "\n", "\n", "# Since chains can be stateful (e.g. 
they can have memory), we provide\n", diff --git a/docs/docs/modules/agents/agent_types/json_agent.ipynb b/docs/docs/modules/agents/agent_types/json_agent.ipynb index f9dda27d0d..eede046685 100644 --- a/docs/docs/modules/agents/agent_types/json_agent.ipynb +++ b/docs/docs/modules/agents/agent_types/json_agent.ipynb @@ -29,8 +29,8 @@ "source": [ "from langchain import hub\n", "from langchain.agents import AgentExecutor, create_json_chat_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.tools.tavily_search import TavilySearchResults" + "from langchain_community.tools.tavily_search import TavilySearchResults\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/modules/agents/agent_types/openai_functions_agent.ipynb b/docs/docs/modules/agents/agent_types/openai_functions_agent.ipynb index d60a4b607a..d37f4bcccd 100644 --- a/docs/docs/modules/agents/agent_types/openai_functions_agent.ipynb +++ b/docs/docs/modules/agents/agent_types/openai_functions_agent.ipynb @@ -53,8 +53,8 @@ "source": [ "from langchain import hub\n", "from langchain.agents import AgentExecutor, create_openai_functions_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.tools.tavily_search import TavilySearchResults" + "from langchain_community.tools.tavily_search import TavilySearchResults\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/modules/agents/agent_types/openai_tools.ipynb b/docs/docs/modules/agents/agent_types/openai_tools.ipynb index 7192fdd8da..4c171aa178 100644 --- a/docs/docs/modules/agents/agent_types/openai_tools.ipynb +++ b/docs/docs/modules/agents/agent_types/openai_tools.ipynb @@ -39,8 +39,8 @@ "source": [ "from langchain import hub\n", "from langchain.agents import AgentExecutor, create_openai_tools_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.tools.tavily_search import TavilySearchResults" + "from langchain_community.tools.tavily_search import TavilySearchResults\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/modules/agents/agent_types/react.ipynb b/docs/docs/modules/agents/agent_types/react.ipynb index e78f7b6590..2a8e604d48 100644 --- a/docs/docs/modules/agents/agent_types/react.ipynb +++ b/docs/docs/modules/agents/agent_types/react.ipynb @@ -29,8 +29,8 @@ "source": [ "from langchain import hub\n", "from langchain.agents import AgentExecutor, create_react_agent\n", - "from langchain_community.llms import OpenAI\n", - "from langchain_community.tools.tavily_search import TavilySearchResults" + "from langchain_community.tools.tavily_search import TavilySearchResults\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/modules/agents/agent_types/structured_chat.ipynb b/docs/docs/modules/agents/agent_types/structured_chat.ipynb index b6d4cb2ec6..2ed2a9586e 100644 --- a/docs/docs/modules/agents/agent_types/structured_chat.ipynb +++ b/docs/docs/modules/agents/agent_types/structured_chat.ipynb @@ -29,8 +29,8 @@ "source": [ "from langchain import hub\n", "from langchain.agents import AgentExecutor, create_structured_chat_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.tools.tavily_search import TavilySearchResults" + "from langchain_community.tools.tavily_search import TavilySearchResults\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/modules/agents/how_to/agent_iter.ipynb 
b/docs/docs/modules/agents/how_to/agent_iter.ipynb index 697c044ac7..81af02145a 100644 --- a/docs/docs/modules/agents/how_to/agent_iter.ipynb +++ b/docs/docs/modules/agents/how_to/agent_iter.ipynb @@ -26,9 +26,9 @@ "source": [ "from langchain.agents import AgentType, initialize_agent\n", "from langchain.chains import LLMMathChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.pydantic_v1 import BaseModel, Field\n", - "from langchain_core.tools import Tool" + "from langchain_core.tools import Tool\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/modules/agents/how_to/agent_structured.ipynb b/docs/docs/modules/agents/how_to/agent_structured.ipynb index d7f5c0f1a1..d2451f2a07 100644 --- a/docs/docs/modules/agents/how_to/agent_structured.ipynb +++ b/docs/docs/modules/agents/how_to/agent_structured.ipynb @@ -55,8 +55,8 @@ "source": [ "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Chroma" + "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -231,9 +231,9 @@ "source": [ "from langchain.agents import AgentExecutor\n", "from langchain.agents.format_scratchpad import format_to_openai_function_messages\n", - "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.tools.convert_to_openai import format_tool_to_openai_function" + "from langchain_community.tools.convert_to_openai import format_tool_to_openai_function\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/modules/agents/how_to/custom_agent.ipynb b/docs/docs/modules/agents/how_to/custom_agent.ipynb index a2426e4386..665db61683 100644 --- a/docs/docs/modules/agents/how_to/custom_agent.ipynb +++ b/docs/docs/modules/agents/how_to/custom_agent.ipynb @@ -42,7 +42,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)" ] @@ -96,7 +96,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", diff --git a/docs/docs/modules/agents/how_to/handle_parsing_errors.ipynb b/docs/docs/modules/agents/how_to/handle_parsing_errors.ipynb index 399299fd4f..e3251a5a89 100644 --- a/docs/docs/modules/agents/how_to/handle_parsing_errors.ipynb +++ b/docs/docs/modules/agents/how_to/handle_parsing_errors.ipynb @@ -39,9 +39,9 @@ "source": [ "from langchain import hub\n", "from langchain.agents import AgentExecutor, create_react_agent\n", - "from langchain_community.llms import OpenAI\n", "from langchain_community.tools import WikipediaQueryRun\n", "from langchain_community.utilities import WikipediaAPIWrapper\n", + "from langchain_openai import OpenAI\n", "\n", "api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)\n", "tool = WikipediaQueryRun(api_wrapper=api_wrapper)\n", diff --git 
a/docs/docs/modules/agents/how_to/intermediate_steps.ipynb b/docs/docs/modules/agents/how_to/intermediate_steps.ipynb index 2fd4b8f89b..c1956f4f0d 100644 --- a/docs/docs/modules/agents/how_to/intermediate_steps.ipynb +++ b/docs/docs/modules/agents/how_to/intermediate_steps.ipynb @@ -29,9 +29,9 @@ "source": [ "from langchain import hub\n", "from langchain.agents import AgentExecutor, create_openai_functions_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.tools import WikipediaQueryRun\n", "from langchain_community.utilities import WikipediaAPIWrapper\n", + "from langchain_openai import ChatOpenAI\n", "\n", "api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)\n", "tool = WikipediaQueryRun(api_wrapper=api_wrapper)\n", diff --git a/docs/docs/modules/agents/how_to/max_iterations.ipynb b/docs/docs/modules/agents/how_to/max_iterations.ipynb index 4855e30518..afdf3762e3 100644 --- a/docs/docs/modules/agents/how_to/max_iterations.ipynb +++ b/docs/docs/modules/agents/how_to/max_iterations.ipynb @@ -19,9 +19,9 @@ "source": [ "from langchain import hub\n", "from langchain.agents import AgentExecutor, create_react_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.tools import WikipediaQueryRun\n", "from langchain_community.utilities import WikipediaAPIWrapper\n", + "from langchain_openai import ChatOpenAI\n", "\n", "api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)\n", "tool = WikipediaQueryRun(api_wrapper=api_wrapper)\n", diff --git a/docs/docs/modules/agents/how_to/max_time_limit.ipynb b/docs/docs/modules/agents/how_to/max_time_limit.ipynb index 9c23149ee9..99cf4ef7d4 100644 --- a/docs/docs/modules/agents/how_to/max_time_limit.ipynb +++ b/docs/docs/modules/agents/how_to/max_time_limit.ipynb @@ -29,9 +29,9 @@ "source": [ "from langchain import hub\n", "from langchain.agents import AgentExecutor, create_react_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.tools import WikipediaQueryRun\n", "from langchain_community.utilities import WikipediaAPIWrapper\n", + "from langchain_openai import ChatOpenAI\n", "\n", "api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)\n", "tool = WikipediaQueryRun(api_wrapper=api_wrapper)\n", diff --git a/docs/docs/modules/agents/how_to/streaming.ipynb b/docs/docs/modules/agents/how_to/streaming.ipynb index 98c9b90842..deb3e35891 100644 --- a/docs/docs/modules/agents/how_to/streaming.ipynb +++ b/docs/docs/modules/agents/how_to/streaming.ipynb @@ -66,7 +66,7 @@ "source": [ "from langchain import hub\n", "from langchain.agents import AgentExecutor, create_openai_functions_agent\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# Get the prompt to use - you can modify this!\n", "# If you want to see the prompt in full, you can at: https://smith.langchain.com/hub/hwchase17/openai-functions-agent\n", diff --git a/docs/docs/modules/agents/quick_start.ipynb b/docs/docs/modules/agents/quick_start.ipynb index ba8d5d4071..65e5e2aa30 100644 --- a/docs/docs/modules/agents/quick_start.ipynb +++ b/docs/docs/modules/agents/quick_start.ipynb @@ -117,8 +117,8 @@ "source": [ "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from 
langchain_community.vectorstores import DocArrayInMemorySearch\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "loader = WebBaseLoader(\"https://docs.smith.langchain.com/overview\")\n", "docs = loader.load()\n", @@ -221,7 +221,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)" ] diff --git a/docs/docs/modules/agents/tools/tools_as_openai_functions.ipynb b/docs/docs/modules/agents/tools/tools_as_openai_functions.ipynb index a8c9437b60..c7043243c1 100644 --- a/docs/docs/modules/agents/tools/tools_as_openai_functions.ipynb +++ b/docs/docs/modules/agents/tools/tools_as_openai_functions.ipynb @@ -18,7 +18,7 @@ "outputs": [], "source": [ "from langchain.schema import HumanMessage\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/modules/callbacks/async_callbacks.ipynb b/docs/docs/modules/callbacks/async_callbacks.ipynb index 29ae822ad5..244fc1e776 100644 --- a/docs/docs/modules/callbacks/async_callbacks.ipynb +++ b/docs/docs/modules/callbacks/async_callbacks.ipynb @@ -63,7 +63,7 @@ "\n", "from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler\n", "from langchain.schema import HumanMessage, LLMResult\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "\n", "class MyCustomSyncHandler(BaseCallbackHandler):\n", diff --git a/docs/docs/modules/callbacks/custom_callbacks.ipynb b/docs/docs/modules/callbacks/custom_callbacks.ipynb index 239d163cf2..185a249dd9 100644 --- a/docs/docs/modules/callbacks/custom_callbacks.ipynb +++ b/docs/docs/modules/callbacks/custom_callbacks.ipynb @@ -54,7 +54,7 @@ "source": [ "from langchain.callbacks.base import BaseCallbackHandler\n", "from langchain.schema import HumanMessage\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "\n", "class MyCustomHandler(BaseCallbackHandler):\n", diff --git a/docs/docs/modules/callbacks/filecallbackhandler.ipynb b/docs/docs/modules/callbacks/filecallbackhandler.ipynb index faa263efa7..e8fa5c6b31 100644 --- a/docs/docs/modules/callbacks/filecallbackhandler.ipynb +++ b/docs/docs/modules/callbacks/filecallbackhandler.ipynb @@ -48,7 +48,7 @@ "from langchain.callbacks import FileCallbackHandler\n", "from langchain.chains import LLMChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "from loguru import logger\n", "\n", "logfile = \"output.log\"\n", diff --git a/docs/docs/modules/callbacks/index.mdx b/docs/docs/modules/callbacks/index.mdx index fe9e4b9e1f..9d0a1c78c7 100644 --- a/docs/docs/modules/callbacks/index.mdx +++ b/docs/docs/modules/callbacks/index.mdx @@ -86,7 +86,7 @@ LangChain provides a few built-in handlers that you can use to get started. 
Thes ```python from langchain.callbacks import StdOutCallbackHandler from langchain.chains import LLMChain -from langchain_community.llms import OpenAI +from langchain_openai import OpenAI from langchain.prompts import PromptTemplate handler = StdOutCallbackHandler() diff --git a/docs/docs/modules/callbacks/multiple_callbacks.ipynb b/docs/docs/modules/callbacks/multiple_callbacks.ipynb index ad7a90b3df..208c529225 100644 --- a/docs/docs/modules/callbacks/multiple_callbacks.ipynb +++ b/docs/docs/modules/callbacks/multiple_callbacks.ipynb @@ -129,7 +129,7 @@ "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.callbacks.base import BaseCallbackHandler\n", "from langchain.schema import AgentAction\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "\n", "# First, define custom callback handler implementations\n", diff --git a/docs/docs/modules/callbacks/token_counting.ipynb b/docs/docs/modules/callbacks/token_counting.ipynb index 26aa8b828e..8bc7a4f091 100644 --- a/docs/docs/modules/callbacks/token_counting.ipynb +++ b/docs/docs/modules/callbacks/token_counting.ipynb @@ -19,7 +19,7 @@ "import asyncio\n", "\n", "from langchain.callbacks import get_openai_callback\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "with get_openai_callback() as cb:\n", diff --git a/docs/docs/modules/data_connection/document_loaders/pdf.mdx b/docs/docs/modules/data_connection/document_loaders/pdf.mdx index ab7672b031..d335aab7c5 100644 --- a/docs/docs/modules/data_connection/document_loaders/pdf.mdx +++ b/docs/docs/modules/data_connection/document_loaders/pdf.mdx @@ -61,7 +61,7 @@ os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:') ```python from langchain_community.vectorstores import FAISS -from langchain_community.embeddings.openai import OpenAIEmbeddings +from langchain_openai import OpenAIEmbeddings faiss_index = FAISS.from_documents(pages, OpenAIEmbeddings()) docs = faiss_index.similarity_search("How will the community be engaged?", k=2) diff --git a/docs/docs/modules/data_connection/indexing.ipynb b/docs/docs/modules/data_connection/indexing.ipynb index 0e850f9f46..6298847217 100644 --- a/docs/docs/modules/data_connection/indexing.ipynb +++ b/docs/docs/modules/data_connection/indexing.ipynb @@ -92,8 +92,8 @@ "source": [ "from langchain.indexes import SQLRecordManager, index\n", "from langchain.schema import Document\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import ElasticsearchStore" + "from langchain_community.vectorstores import ElasticsearchStore\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb b/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb index 9eb0c369c9..76c8b0cbd4 100644 --- a/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb +++ b/docs/docs/modules/data_connection/retrievers/MultiQueryRetriever.ipynb @@ -22,8 +22,8 @@ "# Build a sample vectorDB\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "# Load blog post\n", "loader 
= WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n", @@ -56,7 +56,7 @@ "outputs": [], "source": [ "from langchain.retrievers.multi_query import MultiQueryRetriever\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "question = \"What are the approaches to Task Decomposition?\"\n", "llm = ChatOpenAI(temperature=0)\n", diff --git a/docs/docs/modules/data_connection/retrievers/contextual_compression.ipynb b/docs/docs/modules/data_connection/retrievers/contextual_compression.ipynb index ede56e2b65..3cf335d931 100644 --- a/docs/docs/modules/data_connection/retrievers/contextual_compression.ipynb +++ b/docs/docs/modules/data_connection/retrievers/contextual_compression.ipynb @@ -120,8 +120,8 @@ "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "documents = TextLoader(\"../../state_of_the_union.txt\").load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", @@ -176,7 +176,7 @@ "source": [ "from langchain.retrievers import ContextualCompressionRetriever\n", "from langchain.retrievers.document_compressors import LLMChainExtractor\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "compressor = LLMChainExtractor.from_llm(llm)\n", @@ -313,7 +313,7 @@ ], "source": [ "from langchain.retrievers.document_compressors import EmbeddingsFilter\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()\n", "embeddings_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)\n", diff --git a/docs/docs/modules/data_connection/retrievers/ensemble.ipynb b/docs/docs/modules/data_connection/retrievers/ensemble.ipynb index 0e27e91d25..788a950289 100644 --- a/docs/docs/modules/data_connection/retrievers/ensemble.ipynb +++ b/docs/docs/modules/data_connection/retrievers/ensemble.ipynb @@ -29,8 +29,8 @@ "outputs": [], "source": [ "from langchain.retrievers import BM25Retriever, EnsembleRetriever\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import FAISS" + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/modules/data_connection/retrievers/index.mdx b/docs/docs/modules/data_connection/retrievers/index.mdx index e741d00c57..4f5ca07284 100644 --- a/docs/docs/modules/data_connection/retrievers/index.mdx +++ b/docs/docs/modules/data_connection/retrievers/index.mdx @@ -47,8 +47,8 @@ LangChain also integrates with many third-party retrieval services. 
For a full l Since retrievers are `Runnable`'s, we can easily compose them with other `Runnable` objects: ```python -from langchain_community.chat_models import ChatOpenAI -from langchain.prompts import ChatPromptTemplate +from langchain_openai import ChatOpenAI +from langchain_core.prompts import ChatPromptTemplate from langchain.schema import StrOutputParser from langchain_core.runnables import RunnablePassthrough diff --git a/docs/docs/modules/data_connection/retrievers/long_context_reorder.ipynb b/docs/docs/modules/data_connection/retrievers/long_context_reorder.ipynb index 52d161ee1c..a1c80387ad 100644 --- a/docs/docs/modules/data_connection/retrievers/long_context_reorder.ipynb +++ b/docs/docs/modules/data_connection/retrievers/long_context_reorder.ipynb @@ -57,8 +57,8 @@ " LongContextReorder,\n", ")\n", "from langchain_community.embeddings import HuggingFaceEmbeddings\n", - "from langchain_community.llms import OpenAI\n", "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAI\n", "\n", "# Get embeddings.\n", "embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n", diff --git a/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb b/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb index 8dad4fb50b..77ad852d88 100644 --- a/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb +++ b/docs/docs/modules/data_connection/retrievers/multi_vector.ipynb @@ -39,8 +39,8 @@ "from langchain.storage import InMemoryByteStore\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Chroma" + "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -230,10 +230,10 @@ "source": [ "import uuid\n", "\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.documents import Document\n", - "from langchain_core.output_parsers import StrOutputParser" + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -609,7 +609,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.1" + "version": "3.11.4" } }, "nbformat": 4, diff --git a/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb b/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb index 753e9d41c2..42963d39c1 100644 --- a/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb +++ b/docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb @@ -45,8 +45,8 @@ "from langchain.storage import InMemoryStore\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Chroma" + "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/modules/data_connection/retrievers/self_query.ipynb b/docs/docs/modules/data_connection/retrievers/self_query.ipynb index a9a9d0a08a..d73e88b68b 100644 --- 
a/docs/docs/modules/data_connection/retrievers/self_query.ipynb +++ b/docs/docs/modules/data_connection/retrievers/self_query.ipynb @@ -41,8 +41,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "docs = [\n", " Document(\n", @@ -97,7 +97,7 @@ "source": [ "from langchain.chains.query_constructor.base import AttributeInfo\n", "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "metadata_field_info = [\n", " AttributeInfo(\n", diff --git a/docs/docs/modules/data_connection/retrievers/time_weighted_vectorstore.ipynb b/docs/docs/modules/data_connection/retrievers/time_weighted_vectorstore.ipynb index 61b5997f7b..3f739f0f30 100644 --- a/docs/docs/modules/data_connection/retrievers/time_weighted_vectorstore.ipynb +++ b/docs/docs/modules/data_connection/retrievers/time_weighted_vectorstore.ipynb @@ -31,8 +31,8 @@ "from langchain.docstore import InMemoryDocstore\n", "from langchain.retrievers import TimeWeightedVectorStoreRetriever\n", "from langchain.schema import Document\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import FAISS" + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/modules/data_connection/retrievers/vectorstore.ipynb b/docs/docs/modules/data_connection/retrievers/vectorstore.ipynb index ea736209bc..751f935c8b 100644 --- a/docs/docs/modules/data_connection/retrievers/vectorstore.ipynb +++ b/docs/docs/modules/data_connection/retrievers/vectorstore.ipynb @@ -43,8 +43,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "documents = loader.load()\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", diff --git a/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb b/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb index e812a9acbd..930ee2459a 100644 --- a/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb +++ b/docs/docs/modules/data_connection/text_embedding/caching_embeddings.ipynb @@ -73,8 +73,8 @@ "from langchain.storage import LocalFileStore\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain_community.document_loaders import TextLoader\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "underlying_embeddings = OpenAIEmbeddings()\n", "\n", diff --git a/docs/docs/modules/data_connection/text_embedding/index.mdx b/docs/docs/modules/data_connection/text_embedding/index.mdx index 921f06414f..44701055d6 100644 --- a/docs/docs/modules/data_connection/text_embedding/index.mdx +++ b/docs/docs/modules/data_connection/text_embedding/index.mdx @@ -32,14 +32,14 @@ export OPENAI_API_KEY="..." 
 If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:
 ```python
-from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_openai import OpenAIEmbeddings
 embeddings_model = OpenAIEmbeddings(openai_api_key="...")
 ```
 Otherwise you can initialize without any params:
 ```python
-from langchain_community.embeddings import OpenAIEmbeddings
+from langchain_openai import OpenAIEmbeddings
 embeddings_model = OpenAIEmbeddings()
 ```
diff --git a/docs/docs/modules/data_connection/vectorstores/index.mdx b/docs/docs/modules/data_connection/vectorstores/index.mdx
index d5eabdf69f..034f67b31a 100644
--- a/docs/docs/modules/data_connection/vectorstores/index.mdx
+++ b/docs/docs/modules/data_connection/vectorstores/index.mdx
@@ -44,7 +44,7 @@ os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')
 ```python
 from langchain_community.document_loaders import TextLoader
-from langchain_community.embeddings.openai import OpenAIEmbeddings
+from langchain_openai import OpenAIEmbeddings
 from langchain.text_splitter import CharacterTextSplitter
 from langchain_community.vectorstores import Chroma
@@ -76,7 +76,7 @@ os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')
 ```python
 from langchain_community.document_loaders import TextLoader
-from langchain_community.embeddings.openai import OpenAIEmbeddings
+from langchain_openai import OpenAIEmbeddings
 from langchain.text_splitter import CharacterTextSplitter
 from langchain_community.vectorstores import FAISS
@@ -108,7 +108,7 @@ os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')
 ```python
 from langchain_community.document_loaders import TextLoader
-from langchain_community.embeddings.openai import OpenAIEmbeddings
+from langchain_openai import OpenAIEmbeddings
 from langchain.text_splitter import CharacterTextSplitter
 from langchain_community.vectorstores import LanceDB
diff --git a/docs/docs/modules/memory/adding_memory.ipynb b/docs/docs/modules/memory/adding_memory.ipynb
index 8997ccae21..e68e8437cc 100644
--- a/docs/docs/modules/memory/adding_memory.ipynb
+++ b/docs/docs/modules/memory/adding_memory.ipynb
@@ -26,7 +26,7 @@
 "from langchain.chains import LLMChain\n",
 "from langchain.memory import ConversationBufferMemory\n",
 "from langchain.prompts import PromptTemplate\n",
- "from langchain_community.llms import OpenAI"
+ "from langchain_openai import OpenAI"
 ]
 },
 {
@@ -182,7 +182,7 @@
 " MessagesPlaceholder,\n",
 ")\n",
 "from langchain.schema import SystemMessage\n",
- "from langchain_community.chat_models import ChatOpenAI"
+ "from langchain_openai import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb b/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb
index d873f0efcf..f8a6d89476 100644
--- a/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb
+++ b/docs/docs/modules/memory/adding_memory_chain_multiple_inputs.ipynb
@@ -18,8 +18,8 @@
 "outputs": [],
 "source": [
 "from langchain.text_splitter import CharacterTextSplitter\n",
- "from langchain_community.embeddings.openai import OpenAIEmbeddings\n",
- "from langchain_community.vectorstores import Chroma"
+ "from langchain_community.vectorstores import Chroma\n",
+ "from langchain_openai import OpenAIEmbeddings"
 ]
 },
 {
@@ -79,7 +79,7 @@
 "from langchain.chains.question_answering import load_qa_chain\n",
 "from langchain.memory import ConversationBufferMemory\n",
 "from langchain.prompts import PromptTemplate\n",
- "from langchain_community.llms import OpenAI"
+ "from langchain_openai import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/modules/memory/agent_with_memory.ipynb b/docs/docs/modules/memory/agent_with_memory.ipynb
index 8911149eba..4060b3b980 100644
--- a/docs/docs/modules/memory/agent_with_memory.ipynb
+++ b/docs/docs/modules/memory/agent_with_memory.ipynb
@@ -30,8 +30,8 @@
 "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n",
 "from langchain.chains import LLMChain\n",
 "from langchain.memory import ConversationBufferMemory\n",
- "from langchain_community.llms import OpenAI\n",
- "from langchain_community.utilities import GoogleSearchAPIWrapper"
+ "from langchain_community.utilities import GoogleSearchAPIWrapper\n",
+ "from langchain_openai import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/modules/memory/agent_with_memory_in_db.ipynb b/docs/docs/modules/memory/agent_with_memory_in_db.ipynb
index 9b60a056bb..21f3de84b7 100644
--- a/docs/docs/modules/memory/agent_with_memory_in_db.ipynb
+++ b/docs/docs/modules/memory/agent_with_memory_in_db.ipynb
@@ -37,8 +37,8 @@
 "from langchain.chains import LLMChain\n",
 "from langchain.memory import ConversationBufferMemory\n",
 "from langchain_community.chat_message_histories import RedisChatMessageHistory\n",
- "from langchain_community.llms import OpenAI\n",
- "from langchain_community.utilities import GoogleSearchAPIWrapper"
+ "from langchain_community.utilities import GoogleSearchAPIWrapper\n",
+ "from langchain_openai import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/modules/memory/conversational_customization.ipynb b/docs/docs/modules/memory/conversational_customization.ipynb
index 247cf8677d..159938b4a0 100644
--- a/docs/docs/modules/memory/conversational_customization.ipynb
+++ b/docs/docs/modules/memory/conversational_customization.ipynb
@@ -19,7 +19,7 @@
 "source": [
 "from langchain.chains import ConversationChain\n",
 "from langchain.memory import ConversationBufferMemory\n",
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "llm = OpenAI(temperature=0)"
 ]
diff --git a/docs/docs/modules/memory/custom_memory.ipynb b/docs/docs/modules/memory/custom_memory.ipynb
index cfd9f8429c..f61e4dd4db 100644
--- a/docs/docs/modules/memory/custom_memory.ipynb
+++ b/docs/docs/modules/memory/custom_memory.ipynb
@@ -29,7 +29,7 @@
 "\n",
 "from langchain.chains import ConversationChain\n",
 "from langchain.schema import BaseMemory\n",
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "from pydantic import BaseModel"
 ]
 },
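Taken together, the memory hunks above all land on the same import shape: chain, memory, and prompt classes stay where they were, and only the model class moves to `langchain_openai`. For orientation, a post-migration snippet assembled from the context lines above would look roughly like this (the prompt template and `temperature=0` are illustrative, not taken from the diff):

```python
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI  # moved here from langchain_community.llms

# The memory feeds prior turns back into the prompt via {chat_history}.
template = """You are a chatbot having a conversation with a human.

{chat_history}
Human: {human_input}
Chatbot:"""
prompt = PromptTemplate(
    input_variables=["chat_history", "human_input"], template=template
)
memory = ConversationBufferMemory(memory_key="chat_history")

llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt, memory=memory)
llm_chain.predict(human_input="Hi there, my friend")
```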
diff --git a/docs/docs/modules/memory/index.mdx b/docs/docs/modules/memory/index.mdx
index e4e2583cfa..144afe7508 100644
--- a/docs/docs/modules/memory/index.mdx
+++ b/docs/docs/modules/memory/index.mdx
@@ -163,7 +163,7 @@ We'll use an `LLMChain`, and show working with both an LLM and a ChatModel.
 ```python
-from langchain_community.llms import OpenAI
+from langchain_openai import OpenAI
 from langchain.prompts import PromptTemplate
 from langchain.chains import LLMChain
 from langchain.memory import ConversationBufferMemory
@@ -200,7 +200,7 @@ conversation({"question": "hi"})
 ```python
-from langchain_community.chat_models import ChatOpenAI
+from langchain_openai import ChatOpenAI
 from langchain.prompts import (
 ChatPromptTemplate,
 MessagesPlaceholder,
diff --git a/docs/docs/modules/memory/multiple_memory.ipynb b/docs/docs/modules/memory/multiple_memory.ipynb
index 7fa86d6e59..9a1e420c2c 100644
--- a/docs/docs/modules/memory/multiple_memory.ipynb
+++ b/docs/docs/modules/memory/multiple_memory.ipynb
@@ -24,7 +24,7 @@
 " ConversationSummaryMemory,\n",
 ")\n",
 "from langchain.prompts import PromptTemplate\n",
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "conv_memory = ConversationBufferMemory(\n",
 " memory_key=\"chat_history_lines\", input_key=\"input\"\n",
diff --git a/docs/docs/modules/memory/types/buffer.mdx b/docs/docs/modules/memory/types/buffer.mdx
index fc5a23695b..df174f8f68 100644
--- a/docs/docs/modules/memory/types/buffer.mdx
+++ b/docs/docs/modules/memory/types/buffer.mdx
@@ -54,7 +54,7 @@ Finally, let's take a look at using this in a chain (setting `verbose=True` so w
 ```python
-from langchain_community.llms import OpenAI
+from langchain_openai import OpenAI
 from langchain.chains import ConversationChain
diff --git a/docs/docs/modules/memory/types/buffer_window.mdx b/docs/docs/modules/memory/types/buffer_window.mdx
index 08ba8b8765..7f29402345 100644
--- a/docs/docs/modules/memory/types/buffer_window.mdx
+++ b/docs/docs/modules/memory/types/buffer_window.mdx
@@ -56,7 +56,7 @@ Let's walk through an example, again setting `verbose=True` so we can see the pr
 ```python
-from langchain_community.llms import OpenAI
+from langchain_openai import OpenAI
 from langchain.chains import ConversationChain
 conversation_with_summary = ConversationChain(
 llm=OpenAI(temperature=0),
diff --git a/docs/docs/modules/memory/types/entity_summary_memory.mdx b/docs/docs/modules/memory/types/entity_summary_memory.mdx
index 41bd6a2449..4a320cc2f6 100644
--- a/docs/docs/modules/memory/types/entity_summary_memory.mdx
+++ b/docs/docs/modules/memory/types/entity_summary_memory.mdx
@@ -5,7 +5,7 @@ Entity memory remembers given facts about specific entities in a conversation. I
 Let's first walk through using this functionality.
 ```python
-from langchain_community.llms import OpenAI
+from langchain_openai import OpenAI
 from langchain.memory import ConversationEntityMemory
 llm = OpenAI(temperature=0)
 ```
diff --git a/docs/docs/modules/memory/types/kg.ipynb b/docs/docs/modules/memory/types/kg.ipynb
index 8cac93ae12..8dd648cde9 100644
--- a/docs/docs/modules/memory/types/kg.ipynb
+++ b/docs/docs/modules/memory/types/kg.ipynb
@@ -26,7 +26,7 @@
 "outputs": [],
 "source": [
 "from langchain.memory import ConversationKGMemory\n",
- "from langchain_community.llms import OpenAI"
+ "from langchain_openai import OpenAI"
 ]
 },
 {
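The buffer, window, and entity memory pages above follow the same pattern with `ConversationChain`. A minimal sketch of the post-migration code (the `k=2` window and the input text are illustrative assumptions; only the `OpenAI` import line actually changes in the patch):

```python
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferWindowMemory
from langchain_openai import OpenAI  # moved here from langchain_community.llms

conversation_with_summary = ConversationChain(
    llm=OpenAI(temperature=0),
    # Keep only the last k=2 exchanges in the prompt window.
    memory=ConversationBufferWindowMemory(k=2),
    verbose=True,  # print the assembled prompt on each turn
)
conversation_with_summary.predict(input="Hi, what's up?")
```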
diff --git a/docs/docs/modules/memory/types/summary.mdx b/docs/docs/modules/memory/types/summary.mdx
index cb08afbf37..c45d5df558 100644
--- a/docs/docs/modules/memory/types/summary.mdx
+++ b/docs/docs/modules/memory/types/summary.mdx
@@ -6,7 +6,7 @@ Let's first explore the basic functionality of this type of memory.
 ```python
 from langchain.memory import ConversationSummaryMemory, ChatMessageHistory
-from langchain_community.llms import OpenAI
+from langchain_openai import OpenAI
 ```
@@ -115,7 +115,7 @@ Let's walk through an example of using this in a chain, again setting `verbose=T
 ```python
-from langchain_community.llms import OpenAI
+from langchain_openai import OpenAI
 from langchain.chains import ConversationChain
 llm = OpenAI(temperature=0)
 conversation_with_summary = ConversationChain(
diff --git a/docs/docs/modules/memory/types/summary_buffer.ipynb b/docs/docs/modules/memory/types/summary_buffer.ipynb
index 26219422b2..bc4a302fed 100644
--- a/docs/docs/modules/memory/types/summary_buffer.ipynb
+++ b/docs/docs/modules/memory/types/summary_buffer.ipynb
@@ -29,7 +29,7 @@
 "outputs": [],
 "source": [
 "from langchain.memory import ConversationSummaryBufferMemory\n",
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "llm = OpenAI()"
 ]
diff --git a/docs/docs/modules/memory/types/token_buffer.ipynb b/docs/docs/modules/memory/types/token_buffer.ipynb
index d902e440fe..2363507580 100644
--- a/docs/docs/modules/memory/types/token_buffer.ipynb
+++ b/docs/docs/modules/memory/types/token_buffer.ipynb
@@ -28,7 +28,7 @@
 "outputs": [],
 "source": [
 "from langchain.memory import ConversationTokenBufferMemory\n",
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "llm = OpenAI()"
 ]
diff --git a/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx b/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx
index 0c509eb1fb..a4d8213312 100644
--- a/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx
+++ b/docs/docs/modules/memory/types/vectorstore_retriever_memory.mdx
@@ -8,8 +8,8 @@ In this case, the "docs" are previous conversation snippets. This can be useful
 ```python
 from datetime import datetime
-from langchain_community.embeddings.openai import OpenAIEmbeddings
-from langchain_community.llms import OpenAI
+from langchain_openai import OpenAIEmbeddings
+from langchain_openai import OpenAI
 from langchain.memory import VectorStoreRetrieverMemory
 from langchain.chains import ConversationChain
 from langchain.prompts import PromptTemplate
diff --git a/docs/docs/modules/model_io/chat/chat_model_caching.ipynb b/docs/docs/modules/model_io/chat/chat_model_caching.ipynb
index 69aae9aabf..ec4c413b72 100644
--- a/docs/docs/modules/model_io/chat/chat_model_caching.ipynb
+++ b/docs/docs/modules/model_io/chat/chat_model_caching.ipynb
@@ -20,7 +20,7 @@
 "outputs": [],
 "source": [
 "from langchain.globals import set_llm_cache\n",
- "from langchain_community.chat_models import ChatOpenAI\n",
+ "from langchain_openai import ChatOpenAI\n",
 "\n",
 "llm = ChatOpenAI()"
 ]
diff --git a/docs/docs/modules/model_io/chat/quick_start.ipynb b/docs/docs/modules/model_io/chat/quick_start.ipynb
index e641bf8521..c0307fca1c 100644
--- a/docs/docs/modules/model_io/chat/quick_start.ipynb
+++ b/docs/docs/modules/model_io/chat/quick_start.ipynb
@@ -45,7 +45,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain_community.chat_models import ChatOpenAI\n",
+ "from langchain_openai import ChatOpenAI\n",
 "\n",
 "chat = ChatOpenAI(openai_api_key=\"...\")"
 ]
@@ -65,7 +65,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain_community.chat_models import ChatOpenAI\n",
+ "from langchain_openai import ChatOpenAI\n",
 "\n",
 "chat = ChatOpenAI()"
 ]
diff --git a/docs/docs/modules/model_io/chat/token_usage_tracking.ipynb b/docs/docs/modules/model_io/chat/token_usage_tracking.ipynb
index 8966d0daf4..d334fb8705 100644
--- a/docs/docs/modules/model_io/chat/token_usage_tracking.ipynb
+++ b/docs/docs/modules/model_io/chat/token_usage_tracking.ipynb
@@ -20,7 +20,7 @@
 "outputs": [],
 "source": [
 "from langchain.callbacks import get_openai_callback\n",
- "from langchain_community.chat_models import ChatOpenAI"
+ "from langchain_openai import ChatOpenAI"
 ]
 },
 {
@@ -102,7 +102,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent, load_tools\n",
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n",
 "agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True)"
diff --git a/docs/docs/modules/model_io/llms/llm_caching.ipynb b/docs/docs/modules/model_io/llms/llm_caching.ipynb
index f75fe56625..da384ec602 100644
--- a/docs/docs/modules/model_io/llms/llm_caching.ipynb
+++ b/docs/docs/modules/model_io/llms/llm_caching.ipynb
@@ -20,7 +20,7 @@
 "outputs": [],
 "source": [
 "from langchain.globals import set_llm_cache\n",
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "# To make the caching really obvious, lets use a slower model.\n",
 "llm = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", n=2, best_of=2)"
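In the two caching notebooks only the model import moves; wiring up the cache is untouched. A runnable sketch of the result (the hunks above only show the `set_llm_cache` import, so the `InMemoryCache` backend here is our illustrative choice):

```python
from langchain.cache import InMemoryCache
from langchain.globals import set_llm_cache
from langchain_openai import ChatOpenAI  # moved here from langchain_community.chat_models

set_llm_cache(InMemoryCache())  # assumption: in-memory backend for demonstration

llm = ChatOpenAI()
llm.invoke("Tell me a joke")  # first call hits the API
llm.invoke("Tell me a joke")  # identical prompt is answered from the cache
```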
diff --git a/docs/docs/modules/model_io/llms/quick_start.ipynb b/docs/docs/modules/model_io/llms/quick_start.ipynb
index 4da76c35a4..03703381e7 100644
--- a/docs/docs/modules/model_io/llms/quick_start.ipynb
+++ b/docs/docs/modules/model_io/llms/quick_start.ipynb
@@ -51,7 +51,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "llm = OpenAI(openai_api_key=\"...\")"
 ]
@@ -73,7 +73,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "llm = OpenAI()"
 ]
diff --git a/docs/docs/modules/model_io/llms/streaming_llm.ipynb b/docs/docs/modules/model_io/llms/streaming_llm.ipynb
index a3857bfc47..21637e1f96 100644
--- a/docs/docs/modules/model_io/llms/streaming_llm.ipynb
+++ b/docs/docs/modules/model_io/llms/streaming_llm.ipynb
@@ -71,7 +71,7 @@
 }
 ],
 "source": [
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\", temperature=0, max_tokens=512)\n",
 "for chunk in llm.stream(\"Write me a song about sparkling water.\"):\n",
diff --git a/docs/docs/modules/model_io/llms/token_usage_tracking.ipynb b/docs/docs/modules/model_io/llms/token_usage_tracking.ipynb
index 6834979425..5bf73d3cea 100644
--- a/docs/docs/modules/model_io/llms/token_usage_tracking.ipynb
+++ b/docs/docs/modules/model_io/llms/token_usage_tracking.ipynb
@@ -20,7 +20,7 @@
 "outputs": [],
 "source": [
 "from langchain.callbacks import get_openai_callback\n",
- "from langchain_community.llms import OpenAI"
+ "from langchain_openai import OpenAI"
 ]
 },
 {
@@ -102,7 +102,7 @@
 "outputs": [],
 "source": [
 "from langchain.agents import AgentType, initialize_agent, load_tools\n",
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "llm = OpenAI(temperature=0)\n",
 "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n",
diff --git a/docs/docs/modules/model_io/output_parsers/quick_start.ipynb b/docs/docs/modules/model_io/output_parsers/quick_start.ipynb
index af3940833e..55e4cc16c6 100644
--- a/docs/docs/modules/model_io/output_parsers/quick_start.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/quick_start.ipynb
@@ -52,8 +52,8 @@
 "source": [
 "from langchain.output_parsers import PydanticOutputParser\n",
 "from langchain.prompts import PromptTemplate\n",
- "from langchain_community.llms import OpenAI\n",
 "from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "model = OpenAI(model_name=\"gpt-3.5-turbo-instruct\", temperature=0.0)\n",
 "\n",
diff --git a/docs/docs/modules/model_io/output_parsers/types/csv.ipynb b/docs/docs/modules/model_io/output_parsers/types/csv.ipynb
index f74112530f..0dbbd732cc 100644
--- a/docs/docs/modules/model_io/output_parsers/types/csv.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/types/csv.ipynb
@@ -19,7 +19,7 @@
 "source": [
 "from langchain.output_parsers import CommaSeparatedListOutputParser\n",
 "from langchain.prompts import PromptTemplate\n",
- "from langchain_community.chat_models import ChatOpenAI\n",
+ "from langchain_openai import ChatOpenAI\n",
 "\n",
 "output_parser = CommaSeparatedListOutputParser()\n",
 "\n",
diff --git a/docs/docs/modules/model_io/output_parsers/types/datetime.ipynb b/docs/docs/modules/model_io/output_parsers/types/datetime.ipynb
index 7154365114..b53d80a298 100644
--- a/docs/docs/modules/model_io/output_parsers/types/datetime.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/types/datetime.ipynb
@@ -19,7 +19,7 @@
 "source": [
 "from langchain.output_parsers import DatetimeOutputParser\n",
 "from langchain.prompts import PromptTemplate\n",
- "from langchain_community.llms import OpenAI"
+ "from langchain_openai import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/modules/model_io/output_parsers/types/enum.ipynb b/docs/docs/modules/model_io/output_parsers/types/enum.ipynb
index d901600dfe..963b67299a 100644
--- a/docs/docs/modules/model_io/output_parsers/types/enum.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/types/enum.ipynb
@@ -53,8 +53,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain_community.chat_models import ChatOpenAI\n",
 "from langchain_core.prompts import PromptTemplate\n",
+ "from langchain_openai import ChatOpenAI\n",
 "\n",
 "prompt = PromptTemplate.from_template(\n",
 " \"\"\"What color eyes does this person have?\n",
diff --git a/docs/docs/modules/model_io/output_parsers/types/json.ipynb b/docs/docs/modules/model_io/output_parsers/types/json.ipynb
index 83ec4c8e26..f0ed6ce8dd 100644
--- a/docs/docs/modules/model_io/output_parsers/types/json.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/types/json.ipynb
@@ -23,9 +23,9 @@
 "from typing import List\n",
 "\n",
 "from langchain.prompts import PromptTemplate\n",
- "from langchain_community.chat_models import ChatOpenAI\n",
 "from langchain_core.output_parsers import JsonOutputParser\n",
- "from langchain_core.pydantic_v1 import BaseModel, Field"
+ "from langchain_core.pydantic_v1 import BaseModel, Field\n",
+ "from langchain_openai import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/modules/model_io/output_parsers/types/openai_functions.ipynb b/docs/docs/modules/model_io/output_parsers/types/openai_functions.ipynb
index ace7fb2aef..fd2f3c024c 100644
--- a/docs/docs/modules/model_io/output_parsers/types/openai_functions.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/types/openai_functions.ipynb
@@ -22,12 +22,12 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain_community.chat_models import ChatOpenAI\n",
 "from langchain_community.utils.openai_functions import (\n",
 " convert_pydantic_to_openai_function,\n",
 ")\n",
 "from langchain_core.prompts import ChatPromptTemplate\n",
- "from langchain_core.pydantic_v1 import BaseModel, Field, validator"
+ "from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
+ "from langchain_openai import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/modules/model_io/output_parsers/types/output_fixing.ipynb b/docs/docs/modules/model_io/output_parsers/types/output_fixing.ipynb
index 3c41470987..2813ed4fae 100644
--- a/docs/docs/modules/model_io/output_parsers/types/output_fixing.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/types/output_fixing.ipynb
@@ -24,8 +24,8 @@
 "from typing import List\n",
 "\n",
 "from langchain.output_parsers import PydanticOutputParser\n",
- "from langchain_community.chat_models import ChatOpenAI\n",
- "from langchain_core.pydantic_v1 import BaseModel, Field"
+ "from langchain_core.pydantic_v1 import BaseModel, Field\n",
+ "from langchain_openai import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/modules/model_io/output_parsers/types/pandas_dataframe.ipynb b/docs/docs/modules/model_io/output_parsers/types/pandas_dataframe.ipynb
index 9daf18b7c6..75810acd97 100644
--- a/docs/docs/modules/model_io/output_parsers/types/pandas_dataframe.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/types/pandas_dataframe.ipynb
@@ -25,7 +25,7 @@
 "import pandas as pd\n",
 "from langchain.output_parsers import PandasDataFrameOutputParser\n",
 "from langchain.prompts import PromptTemplate\n",
- "from langchain_community.chat_models import ChatOpenAI"
+ "from langchain_openai import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/modules/model_io/output_parsers/types/pydantic.ipynb b/docs/docs/modules/model_io/output_parsers/types/pydantic.ipynb
index 0dbea9e950..9c3675cf2f 100644
--- a/docs/docs/modules/model_io/output_parsers/types/pydantic.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/types/pydantic.ipynb
@@ -24,8 +24,8 @@
 "\n",
 "from langchain.output_parsers import PydanticOutputParser\n",
 "from langchain.prompts import PromptTemplate\n",
- "from langchain_community.chat_models import ChatOpenAI\n",
- "from langchain_core.pydantic_v1 import BaseModel, Field, validator"
+ "from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
+ "from langchain_openai import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/modules/model_io/output_parsers/types/retry.ipynb b/docs/docs/modules/model_io/output_parsers/types/retry.ipynb
index 3773c57b9c..8703a19b64 100644
--- a/docs/docs/modules/model_io/output_parsers/types/retry.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/types/retry.ipynb
@@ -24,8 +24,7 @@
 "from langchain.prompts import (\n",
 " PromptTemplate,\n",
 ")\n",
- "from langchain_community.chat_models import ChatOpenAI\n",
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import ChatOpenAI, OpenAI\n",
 "from pydantic import BaseModel, Field"
 ]
 },
diff --git a/docs/docs/modules/model_io/output_parsers/types/structured.ipynb b/docs/docs/modules/model_io/output_parsers/types/structured.ipynb
index c78e4f783b..d9fb0cbeff 100644
--- a/docs/docs/modules/model_io/output_parsers/types/structured.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/types/structured.ipynb
@@ -19,7 +19,7 @@
 "source": [
 "from langchain.output_parsers import ResponseSchema, StructuredOutputParser\n",
 "from langchain.prompts import PromptTemplate\n",
- "from langchain_community.chat_models import ChatOpenAI"
+ "from langchain_openai import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/modules/model_io/output_parsers/types/yaml.ipynb b/docs/docs/modules/model_io/output_parsers/types/yaml.ipynb
index 6621713c94..02c0d35d64 100644
--- a/docs/docs/modules/model_io/output_parsers/types/yaml.ipynb
+++ b/docs/docs/modules/model_io/output_parsers/types/yaml.ipynb
@@ -24,8 +24,8 @@
 "\n",
 "from langchain.output_parsers import YamlOutputParser\n",
 "from langchain.prompts import PromptTemplate\n",
- "from langchain_community.chat_models import ChatOpenAI\n",
- "from langchain_core.pydantic_v1 import BaseModel, Field"
+ "from langchain_core.pydantic_v1 import BaseModel, Field\n",
+ "from langchain_openai import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/modules/model_io/prompts/composition.ipynb b/docs/docs/modules/model_io/prompts/composition.ipynb
index 0e07dc72ba..0069ca72a7 100644
--- a/docs/docs/modules/model_io/prompts/composition.ipynb
+++ b/docs/docs/modules/model_io/prompts/composition.ipynb
@@ -102,7 +102,7 @@
 "outputs": [],
 "source": [
 "from langchain.chains import LLMChain\n",
- "from langchain_community.chat_models import ChatOpenAI"
+ "from langchain_openai import ChatOpenAI"
 ]
 },
 {
@@ -259,7 +259,7 @@
 "outputs": [],
 "source": [
 "from langchain.chains import LLMChain\n",
- "from langchain_community.chat_models import ChatOpenAI"
+ "from langchain_openai import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/modules/model_io/prompts/example_selector_types/mmr.ipynb b/docs/docs/modules/model_io/prompts/example_selector_types/mmr.ipynb
index 7d0323f68f..4a56b13d83 100644
--- a/docs/docs/modules/model_io/prompts/example_selector_types/mmr.ipynb
+++ b/docs/docs/modules/model_io/prompts/example_selector_types/mmr.ipynb
@@ -22,8 +22,8 @@
 " MaxMarginalRelevanceExampleSelector,\n",
 " SemanticSimilarityExampleSelector,\n",
 ")\n",
- "from langchain_community.embeddings import OpenAIEmbeddings\n",
 "from langchain_community.vectorstores import FAISS\n",
+ "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
 "example_prompt = PromptTemplate(\n",
 " input_variables=[\"input\", \"output\"],\n",
diff --git a/docs/docs/modules/model_io/prompts/example_selector_types/similarity.ipynb b/docs/docs/modules/model_io/prompts/example_selector_types/similarity.ipynb
index e2d5806508..0d9be3b77b 100644
--- a/docs/docs/modules/model_io/prompts/example_selector_types/similarity.ipynb
+++ b/docs/docs/modules/model_io/prompts/example_selector_types/similarity.ipynb
@@ -19,8 +19,8 @@
 "source": [
 "from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n",
 "from langchain.prompts.example_selector import SemanticSimilarityExampleSelector\n",
- "from langchain_community.embeddings import OpenAIEmbeddings\n",
 "from langchain_community.vectorstores import Chroma\n",
+ "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
 "example_prompt = PromptTemplate(\n",
 " input_variables=[\"input\", \"output\"],\n",
diff --git a/docs/docs/modules/model_io/prompts/few_shot_examples.ipynb b/docs/docs/modules/model_io/prompts/few_shot_examples.ipynb
index b0642a0ce0..1fd69e1a29 100644
--- a/docs/docs/modules/model_io/prompts/few_shot_examples.ipynb
+++ b/docs/docs/modules/model_io/prompts/few_shot_examples.ipynb
@@ -244,8 +244,8 @@
 ],
 "source": [
 "from langchain.prompts.example_selector import SemanticSimilarityExampleSelector\n",
- "from langchain_community.embeddings import OpenAIEmbeddings\n",
 "from langchain_community.vectorstores import Chroma\n",
+ "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
 "example_selector = SemanticSimilarityExampleSelector.from_examples(\n",
 " # This is the list of examples available to select from.\n",
diff --git a/docs/docs/modules/model_io/prompts/few_shot_examples_chat.ipynb b/docs/docs/modules/model_io/prompts/few_shot_examples_chat.ipynb
index 7ce7cdffd5..909e43c3d9 100644
--- a/docs/docs/modules/model_io/prompts/few_shot_examples_chat.ipynb
+++ b/docs/docs/modules/model_io/prompts/few_shot_examples_chat.ipynb
@@ -192,8 +192,8 @@
 "outputs": [],
 "source": [
 "from langchain.prompts import SemanticSimilarityExampleSelector\n",
- "from langchain_community.embeddings import OpenAIEmbeddings\n",
- "from langchain_community.vectorstores import Chroma"
+ "from langchain_community.vectorstores import Chroma\n",
+ "from langchain_openai import OpenAIEmbeddings"
 ]
 },
 {
diff --git a/docs/docs/modules/model_io/prompts/quick_start.ipynb b/docs/docs/modules/model_io/prompts/quick_start.ipynb
index a6acec3357..cb3887db32 100644
--- a/docs/docs/modules/model_io/prompts/quick_start.ipynb
+++ b/docs/docs/modules/model_io/prompts/quick_start.ipynb
@@ -120,7 +120,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.prompts import ChatPromptTemplate\n",
+ "from langchain_core.prompts import ChatPromptTemplate\n",
 "\n",
 "chat_template = ChatPromptTemplate.from_messages(\n",
 " [\n",
@@ -161,8 +161,8 @@
 ],
 "source": [
 "from langchain.prompts import HumanMessagePromptTemplate\n",
- "from langchain_community.chat_models import ChatOpenAI\n",
 "from langchain_core.messages import SystemMessage\n",
+ "from langchain_openai import ChatOpenAI\n",
 "\n",
 "chat_template = ChatPromptTemplate.from_messages(\n",
 " [\n",
diff --git a/docs/docs/modules/model_io/quick_start.mdx b/docs/docs/modules/model_io/quick_start.mdx
index 1bee32bff7..931e6d0785 100644
--- a/docs/docs/modules/model_io/quick_start.mdx
+++ b/docs/docs/modules/model_io/quick_start.mdx
@@ -31,8 +31,8 @@ export OPENAI_API_KEY="..."
 We can then initialize the model:
 ```python
-from langchain_community.chat_models import ChatOpenAI
-from langchain_community.llms import OpenAI
+from langchain_openai import ChatOpenAI
+from langchain_openai import OpenAI
 llm = OpenAI()
 chat_model = ChatOpenAI()
@@ -41,7 +41,7 @@ chat_model = ChatOpenAI()
 If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:
 ```python
-from langchain_community.chat_models import ChatOpenAI
+from langchain_openai import ChatOpenAI
 llm = ChatOpenAI(openai_api_key="...")
 ```
diff --git a/docs/docs/use_cases/apis.ipynb b/docs/docs/use_cases/apis.ipynb
index dc3207ff89..a752d70793 100644
--- a/docs/docs/use_cases/apis.ipynb
+++ b/docs/docs/use_cases/apis.ipynb
@@ -236,7 +236,7 @@
 "source": [
 "from langchain.chains import APIChain\n",
 "from langchain.chains.api import open_meteo_docs\n",
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "llm = OpenAI(temperature=0)\n",
 "chain = APIChain.from_llm_and_api_docs(\n",
@@ -345,7 +345,7 @@
 "\n",
 "from langchain.chains import APIChain\n",
 "from langchain.chains.api import podcast_docs\n",
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "listen_api_key = \"xxx\" # Get api key here: https://www.listennotes.com/api/pricing/\n",
 "llm = OpenAI(temperature=0)\n",
@@ -381,7 +381,7 @@
 "source": [
 "from langchain.chains import LLMChain, LLMRequestsChain\n",
 "from langchain.prompts import PromptTemplate\n",
- "from langchain_community.llms import OpenAI"
+ "from langchain_openai import OpenAI"
 ]
 },
 {
diff --git a/docs/docs/use_cases/chatbots.ipynb b/docs/docs/use_cases/chatbots.ipynb
index f26456a28f..0848be6f4d 100644
--- a/docs/docs/use_cases/chatbots.ipynb
+++ b/docs/docs/use_cases/chatbots.ipynb
@@ -95,7 +95,7 @@
 ],
 "source": [
 "from langchain.schema import HumanMessage, SystemMessage\n",
- "from langchain_community.chat_models import ChatOpenAI\n",
+ "from langchain_openai import ChatOpenAI\n",
 "\n",
 "chat = ChatOpenAI()\n",
 "chat(\n",
@@ -311,7 +311,7 @@
 "outputs": [],
 "source": [
 "from langchain.memory import ConversationSummaryMemory\n",
- "from langchain_community.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "llm = OpenAI(temperature=0)\n",
 "memory = ConversationSummaryMemory(llm=llm)\n",
@@ -615,8 +615,8 @@
 "text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)\n",
 "all_splits = text_splitter.split_documents(data)\n",
 "\n",
- "from langchain_community.embeddings import OpenAIEmbeddings\n",
 "from langchain_community.vectorstores import Chroma\n",
+ "from langchain_openai import OpenAIEmbeddings\n",
 "\n",
 "vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())"
 ]
@@ -649,7 +649,7 @@
 "outputs": [],
 "source": [
 "from langchain.chains import ConversationalRetrievalChain\n",
- "from langchain_community.chat_models import ChatOpenAI\n",
+ "from langchain_openai import ChatOpenAI\n",
 "\n",
 "llm = ChatOpenAI()\n",
 "retriever = vectorstore.as_retriever()\n",
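The output-parser notebooks touched above keep their parsers in `langchain.output_parsers` and pair them with the relocated `ChatOpenAI`. Sketched end to end (the subject and the template wording are illustrative, not lifted from the diff):

```python
from langchain.output_parsers import CommaSeparatedListOutputParser
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI  # moved here from langchain_community.chat_models

output_parser = CommaSeparatedListOutputParser()

prompt = PromptTemplate(
    template="List five {subject}.\n{format_instructions}",
    input_variables=["subject"],
    # The parser supplies formatting instructions for the model.
    partial_variables={"format_instructions": output_parser.get_format_instructions()},
)

chain = prompt | ChatOpenAI(temperature=0) | output_parser
chain.invoke({"subject": "ice cream flavors"})  # -> ['vanilla', 'chocolate', ...]
```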
"source": [ - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "db = Chroma.from_documents(texts, OpenAIEmbeddings(disallowed_special=()))\n", "retriever = db.as_retriever(\n", @@ -233,7 +233,7 @@ "source": [ "from langchain.chains import ConversationalRetrievalChain\n", "from langchain.memory import ConversationSummaryMemory\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model_name=\"gpt-4\")\n", "memory = ConversationSummaryMemory(\n", diff --git a/docs/docs/use_cases/data_generation.ipynb b/docs/docs/use_cases/data_generation.ipynb index f126739518..07d6acba48 100644 --- a/docs/docs/use_cases/data_generation.ipynb +++ b/docs/docs/use_cases/data_generation.ipynb @@ -65,7 +65,6 @@ "# dotenv.load_dotenv()\n", "\n", "from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_core.pydantic_v1 import BaseModel\n", "from langchain_experimental.tabular_synthetic_data.openai import (\n", " OPENAI_TEMPLATE,\n", @@ -74,7 +73,8 @@ "from langchain_experimental.tabular_synthetic_data.prompts import (\n", " SYNTHETIC_FEW_SHOT_PREFIX,\n", " SYNTHETIC_FEW_SHOT_SUFFIX,\n", - ")" + ")\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -252,11 +252,11 @@ }, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_experimental.synthetic_data import (\n", " DatasetGenerator,\n", " create_data_generation_chain,\n", - ")" + ")\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -493,7 +493,7 @@ "from langchain.chains import create_extraction_chain_pydantic\n", "from langchain.output_parsers import PydanticOutputParser\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "from pydantic import BaseModel, Field" ] }, diff --git a/docs/docs/use_cases/extraction.ipynb b/docs/docs/use_cases/extraction.ipynb index 9c58dcd13e..d3b53ec04b 100644 --- a/docs/docs/use_cases/extraction.ipynb +++ b/docs/docs/use_cases/extraction.ipynb @@ -105,7 +105,7 @@ ], "source": [ "from langchain.chains import create_extraction_chain\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# Schema\n", "schema = {\n", @@ -452,7 +452,7 @@ "from langchain.prompts import (\n", " PromptTemplate,\n", ")\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "from pydantic import BaseModel, Field, validator\n", "\n", "\n", @@ -531,7 +531,7 @@ "from langchain.prompts import (\n", " PromptTemplate,\n", ")\n", - "from langchain_community.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "from pydantic import BaseModel, Field, validator\n", "\n", "\n", diff --git a/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb b/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb index eae3b9f7d1..518e593582 100644 --- a/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb +++ b/docs/docs/use_cases/graph/diffbot_graphtransformer.ipynb @@ -186,7 +186,7 @@ "outputs": [], "source": [ "from langchain.chains import GraphCypherQAChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "chain = 
GraphCypherQAChain.from_llm(\n", " cypher_llm=ChatOpenAI(temperature=0, model_name=\"gpt-4\"),\n", diff --git a/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb b/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb index 3a10578b22..81d5be2c69 100644 --- a/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_arangodb_qa.ipynb @@ -426,7 +426,7 @@ "outputs": [], "source": [ "from langchain.chains import ArangoGraphQAChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "chain = ArangoGraphQAChain.from_llm(\n", " ChatOpenAI(temperature=0), graph=graph, verbose=True\n", diff --git a/docs/docs/use_cases/graph/graph_cypher_qa.ipynb b/docs/docs/use_cases/graph/graph_cypher_qa.ipynb index 8c86bbfbf1..6cf8c168c2 100644 --- a/docs/docs/use_cases/graph/graph_cypher_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_cypher_qa.ipynb @@ -39,8 +39,8 @@ "outputs": [], "source": [ "from langchain.chains import GraphCypherQAChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.graphs import Neo4jGraph" + "from langchain_community.graphs import Neo4jGraph\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb b/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb index 57361ac98a..346f8be3f7 100644 --- a/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_falkordb_qa.ipynb @@ -29,8 +29,8 @@ "outputs": [], "source": [ "from langchain.chains import FalkorDBQAChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.graphs import FalkorDBGraph" + "from langchain_community.graphs import FalkorDBGraph\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb b/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb index bcc5f12424..9101cd3492 100644 --- a/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_hugegraph_qa.ipynb @@ -156,8 +156,8 @@ "outputs": [], "source": [ "from langchain.chains import HugeGraphQAChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.graphs import HugeGraph" + "from langchain_community.graphs import HugeGraph\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb b/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb index 6c50c8354d..560d897c8b 100644 --- a/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_kuzu_qa.ipynb @@ -131,8 +131,8 @@ "outputs": [], "source": [ "from langchain.chains import KuzuQAChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.graphs import KuzuGraph" + "from langchain_community.graphs import KuzuGraph\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb b/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb index 7a5612d4b3..29b6489b62 100644 --- a/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_memgraph_qa.ipynb @@ -69,8 +69,8 @@ "from gqlalchemy import Memgraph\n", "from langchain.chains import GraphCypherQAChain\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.graphs import MemgraphGraph" + "from 
langchain_community.graphs import MemgraphGraph\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/graph_nebula_qa.ipynb b/docs/docs/use_cases/graph/graph_nebula_qa.ipynb index d1b1d5b91a..198de0d63b 100644 --- a/docs/docs/use_cases/graph/graph_nebula_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_nebula_qa.ipynb @@ -122,8 +122,8 @@ "outputs": [], "source": [ "from langchain.chains import NebulaGraphQAChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.graphs import NebulaGraph" + "from langchain_community.graphs import NebulaGraph\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/graph_networkx_qa.ipynb b/docs/docs/use_cases/graph/graph_networkx_qa.ipynb index e7e95db808..eb2c676ae2 100644 --- a/docs/docs/use_cases/graph/graph_networkx_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_networkx_qa.ipynb @@ -50,7 +50,7 @@ "outputs": [], "source": [ "from langchain.indexes import GraphIndexCreator\n", - "from langchain_community.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/graph_sparql_qa.ipynb b/docs/docs/use_cases/graph/graph_sparql_qa.ipynb index 714181967c..b7541f4388 100644 --- a/docs/docs/use_cases/graph/graph_sparql_qa.ipynb +++ b/docs/docs/use_cases/graph/graph_sparql_qa.ipynb @@ -31,8 +31,8 @@ "outputs": [], "source": [ "from langchain.chains import GraphSparqlQAChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.graphs import RdfGraph" + "from langchain_community.graphs import RdfGraph\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb b/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb index 7e026b371a..0d1218e307 100644 --- a/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb +++ b/docs/docs/use_cases/graph/neptune_cypher_qa.ipynb @@ -41,7 +41,7 @@ ], "source": [ "from langchain.chains import NeptuneOpenCypherQAChain\n", - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0, model=\"gpt-4\")\n", "\n", diff --git a/docs/docs/use_cases/qa_structured/sql.ipynb b/docs/docs/use_cases/qa_structured/sql.ipynb index 6e0a469105..c189c8c87e 100644 --- a/docs/docs/use_cases/qa_structured/sql.ipynb +++ b/docs/docs/use_cases/qa_structured/sql.ipynb @@ -85,9 +85,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.llms import OpenAI\n", "from langchain_community.utilities import SQLDatabase\n", "from langchain_experimental.sql import SQLDatabaseChain\n", + "from langchain_openai import OpenAI\n", "\n", "db = SQLDatabase.from_uri(\"sqlite:///Chinook.db\")\n", "llm = OpenAI(temperature=0, verbose=True)\n", @@ -161,7 +161,7 @@ "outputs": [], "source": [ "from langchain.chains import create_sql_query_chain\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { @@ -322,8 +322,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.llms import OpenAI\n", "from langchain_experimental.sql import SQLDatabaseChain\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0, verbose=True)\n", "db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)" @@ -783,8 +783,8 @@ "outputs": [], "source": [ "from langchain.schema import Document\n", - "from langchain_community.embeddings.openai 
import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()\n", "\n", @@ -837,8 +837,8 @@ "source": [ "from langchain.agents import AgentType, create_sql_agent\n", "from langchain_community.agent_toolkits import SQLDatabaseToolkit\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.utilities import SQLDatabase\n", + "from langchain_openai import ChatOpenAI\n", "\n", "db = SQLDatabase.from_uri(\"sqlite:///Chinook.db\")\n", "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n", @@ -970,8 +970,8 @@ "outputs": [], "source": [ "from langchain_community.agent_toolkits import create_retriever_tool\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "texts = artists + albums\n", "\n", @@ -996,8 +996,8 @@ "source": [ "from langchain.agents import AgentType, create_sql_agent\n", "from langchain_community.agent_toolkits import SQLDatabaseToolkit\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.utilities import SQLDatabase\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# db = SQLDatabase.from_uri(\"sqlite:///Chinook.db\")\n", "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n", @@ -1160,7 +1160,7 @@ "source": [ "from elasticsearch import Elasticsearch\n", "from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/question_answering/chat_history.ipynb b/docs/docs/use_cases/question_answering/chat_history.ipynb index 0394bdc90f..a83346da0b 100644 --- a/docs/docs/use_cases/question_answering/chat_history.ipynb +++ b/docs/docs/use_cases/question_answering/chat_history.ipynb @@ -119,12 +119,11 @@ "import bs4\n", "from langchain import hub\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.runnables import RunnablePassthrough" + "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" ] }, { diff --git a/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb b/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb index 5fea5317a4..afcf7b0d20 100644 --- a/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb +++ b/docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb @@ -53,8 +53,8 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", "texts = text_splitter.split_documents(documents)\n", @@ -146,7 +146,7 @@ "metadata": {}, "outputs": [], "source": [ - "from 
langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0)" ] diff --git a/docs/docs/use_cases/question_answering/per_user.ipynb b/docs/docs/use_cases/question_answering/per_user.ipynb index cb2b9c32a1..cfed58857e 100644 --- a/docs/docs/use_cases/question_answering/per_user.ipynb +++ b/docs/docs/use_cases/question_answering/per_user.ipynb @@ -55,8 +55,8 @@ ], "source": [ "import pinecone\n", - "from langchain_community.embeddings.openai import OpenAIEmbeddings\n", - "from langchain_community.vectorstores import Pinecone" + "from langchain_community.vectorstores import Pinecone\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -159,16 +159,15 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_community.chat_models import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import (\n", " ConfigurableField,\n", " RunnableBinding,\n", " RunnableLambda,\n", " RunnablePassthrough,\n", - ")" + ")\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" ] }, { diff --git a/docs/docs/use_cases/question_answering/quickstart.ipynb b/docs/docs/use_cases/question_answering/quickstart.ipynb index b24cd49225..43e3729c30 100644 --- a/docs/docs/use_cases/question_answering/quickstart.ipynb +++ b/docs/docs/use_cases/question_answering/quickstart.ipynb @@ -152,12 +152,11 @@ "import bs4\n", "from langchain import hub\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.runnables import RunnablePassthrough" + "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" ] }, { @@ -479,8 +478,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())" ] @@ -615,7 +614,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)" ] diff --git a/docs/docs/use_cases/question_answering/sources.ipynb b/docs/docs/use_cases/question_answering/sources.ipynb index 25f9dd587d..30ff195962 100644 --- a/docs/docs/use_cases/question_answering/sources.ipynb +++ b/docs/docs/use_cases/question_answering/sources.ipynb @@ -114,12 +114,11 @@ "import bs4\n", "from langchain import hub\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", "from langchain_core.output_parsers 
import StrOutputParser\n", - "from langchain_core.runnables import RunnablePassthrough" + "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" ] }, { diff --git a/docs/docs/use_cases/question_answering/streaming.ipynb b/docs/docs/use_cases/question_answering/streaming.ipynb index 0e3504ffde..7ac099b630 100644 --- a/docs/docs/use_cases/question_answering/streaming.ipynb +++ b/docs/docs/use_cases/question_answering/streaming.ipynb @@ -114,12 +114,11 @@ "import bs4\n", "from langchain import hub\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.document_loaders import WebBaseLoader\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.vectorstores import Chroma\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.runnables import RunnableParallel, RunnablePassthrough" + "from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" ] }, { diff --git a/docs/docs/use_cases/summarization.ipynb b/docs/docs/use_cases/summarization.ipynb index f0e1799317..01885029c1 100644 --- a/docs/docs/use_cases/summarization.ipynb +++ b/docs/docs/use_cases/summarization.ipynb @@ -206,8 +206,8 @@ ], "source": [ "from langchain.chains.summarize import load_summarize_chain\n", - "from langchain_community.chat_models import ChatOpenAI\n", "from langchain_community.document_loaders import WebBaseLoader\n", + "from langchain_openai import ChatOpenAI\n", "\n", "loader = WebBaseLoader(\"https://lilianweng.github.io/posts/2023-06-23-agent/\")\n", "docs = loader.load()\n", diff --git a/docs/docs/use_cases/tagging.ipynb b/docs/docs/use_cases/tagging.ipynb index 758c364ada..9a4e34d6d5 100644 --- a/docs/docs/use_cases/tagging.ipynb +++ b/docs/docs/use_cases/tagging.ipynb @@ -64,7 +64,7 @@ "outputs": [], "source": [ "from langchain.chains import create_tagging_chain, create_tagging_chain_pydantic\n", - "from langchain_community.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/use_cases/web_scraping.ipynb b/docs/docs/use_cases/web_scraping.ipynb index 327d4bb71a..154c3386ba 100644 --- a/docs/docs/use_cases/web_scraping.ipynb +++ b/docs/docs/use_cases/web_scraping.ipynb @@ -263,7 +263,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\")" ] @@ -480,10 +480,9 @@ "outputs": [], "source": [ "from langchain.retrievers.web_research import WebResearchRetriever\n", - "from langchain_community.chat_models.openai import ChatOpenAI\n", - "from langchain_community.embeddings import OpenAIEmbeddings\n", "from langchain_community.utilities import GoogleSearchAPIWrapper\n", - "from langchain_community.vectorstores import Chroma" + "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings" ] }, { diff --git a/libs/cli/langchain_cli/package_template/package_template/chain.py b/libs/cli/langchain_cli/package_template/package_template/chain.py index 61d4022a40..f5a8ebc690 100644 --- a/libs/cli/langchain_cli/package_template/package_template/chain.py +++ b/libs/cli/langchain_cli/package_template/package_template/chain.py @@ 
-1,5 +1,5 @@ -from langchain_community.chat_models import ChatOpenAI -from langchain.prompts import ChatPromptTemplate +from langchain_core.prompts import ChatPromptTemplate +from langchain_openai import ChatOpenAI _prompt = ChatPromptTemplate.from_messages( [ diff --git a/libs/cli/langchain_cli/package_template/pyproject.toml b/libs/cli/langchain_cli/package_template/pyproject.toml index 937c4a16a2..0c665e9feb 100644 --- a/libs/cli/langchain_cli/package_template/pyproject.toml +++ b/libs/cli/langchain_cli/package_template/pyproject.toml @@ -7,8 +7,9 @@ readme = "README.md" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" -langchain = ">=0.0.313, <0.1" -openai = "^0.28.1" +langchain-core = ">=0.1.5" +langchain-openai = ">=0.0.1" + [tool.poetry.group.dev.dependencies] langchain-cli = ">=0.0.4" diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index 5438dd7f04..8243b9df2e 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -1523,7 +1523,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]): .. code-block:: python from langchain_core.output_parsers.json import SimpleJsonOutputParser - from langchain.chat_models.openai import ChatOpenAI + from langchain_openai import ChatOpenAI prompt = PromptTemplate.from_template( 'In JSON format, give me a list of {topic} and their ' @@ -2985,6 +2985,7 @@ class RunnableLambda(Runnable[Input, Output]): except TypeError: output = chunk return cast(Output, output) + else: def func( @@ -3940,7 +3941,7 @@ def chain( from langchain_core.runnables import chain from langchain_core.prompts import PromptTemplate - from langchain.llms import OpenAI + from langchain_openai import OpenAI @chain def my_func(fields):