You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
94 lines
2.3 KiB
Python
94 lines
2.3 KiB
Python
2 months ago
|
#!/usr/bin/env python3
|
||
|
# vim: set expandtab sw=4 ts=4 sts=4 foldmethod=indent filetype=python:
|
||
|
|
||
|
import asyncio
|
||
|
import os
|
||
|
from langchain_core.output_parsers import StrOutputParser
|
||
|
from langchain_core.runnables import RunnablePassthrough
|
||
|
from langchain_openai import ChatOpenAI
|
||
|
|
||
|
# LocalAI endpoint configuration for the langchain OpenAI client.
#
# SECURITY(review): a credential is hard-coded in source. Even though this
# targets a LAN-local server (localai.srvlan), secrets belong in the
# environment or an untracked .env file, not in version control — rotate
# this key and read it with os.environ.get(...) / python-dotenv instead.
os.environ['OPENAI_API_KEY'] = '2708b7c21129e408899d5a38e6d1af8d'
os.environ['OPENAI_API_BASE'] = 'http://localai.srvlan:8080'
|
||
|
|
||
|
|
||
|
#|%%--%%| <9871Wi18GN|qMj6mA5jLr>
r"""°°°
# Basic invokation
°°°"""
#|%%--%%| <qMj6mA5jLr|LxJxvWuF27>

# Chat model served by the LocalAI endpoint configured above.
# Low temperature keeps the answer focused; replies are capped at 100 tokens.
llm = ChatOpenAI(model="dolphin-mixtral", max_tokens=100, temperature=0.2)

#|%%--%%| <LxJxvWuF27|eTnVsVGPqG>

# Simplest possible call: a bare string becomes a single user message.
llm.invoke("how can you help me ?")
|
||
|
|
||
|
#|%%--%%| <eTnVsVGPqG|ChQEPJ4k0e>
r"""°°°
# Using prompt templates
°°°"""
#|%%--%%| <ChQEPJ4k0e|Tl3tcWLlrF>

from langchain_core.prompts import ChatPromptTemplate

# The system turn pins the assistant persona; the user turn is a template
# slot filled from the "input" key at invoke time.
messages = [
    ("system", "You are a genius DIY maker assistant."),
    ("user", "{input}"),
]
prompt = ChatPromptTemplate.from_messages(messages)

# Piping a prompt into a model produces a runnable chain.
chain = prompt | llm

chain.invoke({"input": "what are the best adhesives to bind metal to wood ?"})
|
||
|
|
||
|
|
||
|
#|%%--%%| <Tl3tcWLlrF|z6Tf8SVZ6W>
r"""°°°
# Creating a chain

- Every element in chain implements Runnable interface (invoke, stream, ainvoke, batch ...)
°°°"""
#|%%--%%| <z6Tf8SVZ6W|qMHVOmTbk0>

# The passthrough feeds invoke()'s raw argument into the prompt's {topic}
# slot, and the parser turns the model's message back into a plain string.
output_parser = StrOutputParser()
prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")
llm = ChatOpenAI(model="dolphin-mixtral", max_tokens=100, temperature=0.6)
chain = {"topic": RunnablePassthrough()} | prompt | llm | output_parser

chain.invoke("ice cream")
|
||
|
|
||
|
#|%%--%%| <qMHVOmTbk0|RV24rYEjeh>

# NOTE(review): this cell is an exact repeat of the previous one — kept so
# the notebook still runs cell-by-cell, but it is a candidate for deletion.
prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")
output_parser = StrOutputParser()
llm = ChatOpenAI(model="dolphin-mixtral", temperature=0.6, max_tokens=100)

# Build the pipeline stepwise: source -> prompt, then model -> parser.
chain = {"topic": RunnablePassthrough()} | prompt
chain = chain | llm | output_parser

chain.invoke("ice cream")
|
||
|
|
||
|
#|%%--%%| <RV24rYEjeh|RDvrxXtUVe>
r"""°°°
# Streaming the response
https://python.langchain.com/docs/expression_language/streaming
°°°"""
#|%%--%%| <RDvrxXtUVe|arP98HJHVd>

# Stream the answer token-by-token instead of waiting for the full reply.
#
# Fixes to the original cell:
#  * `async for` at module level is a SyntaxError in a plain .py file, so
#    the loop is wrapped in a coroutine driven by asyncio.run() — this is
#    what the (previously unused) `import asyncio` at the top is for.
#  * the active chain ends with StrOutputParser, so each streamed chunk is
#    already a plain str — `chunk.content` would raise AttributeError.
#  * the chain's RunnablePassthrough expects the raw topic value, not a
#    dict with an "input" key, so the question is passed as a bare string.
async def _stream_demo() -> None:
    """Print the chain's streamed output chunks as they arrive."""
    async for chunk in chain.astream("how can I bind metal to plastic ?"):
        print(chunk, end="", flush=True)

asyncio.run(_stream_demo())