참조 : https://python.langchain.com/docs/modules/model_io/output_parsers/
CSV parser
from langchain.output_parsers import CommaSeparatedListOutputParser
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

# Parser that turns the model's comma-separated reply into a Python list.
csv_parser = CommaSeparatedListOutputParser()

# Canned instructions telling the model to answer as comma-separated values.
csv_instructions = csv_parser.get_format_instructions()

# Pre-fill {format_instructions}; callers only supply {subject}.
prompt = PromptTemplate(
    template="List five {subject}.\n{format_instructions}",
    input_variables=["subject"],
    partial_variables={"format_instructions": csv_instructions},
)

llm = ChatOpenAI(temperature=0)

# Pipeline: prompt -> chat model -> list-of-strings parser.
chain = prompt | llm | csv_parser

result = chain.invoke({"subject": "ice cream flavors"})
print('Output>', result)

# Streaming emits a partial parse for each chunk as tokens arrive.
print('Output>')
for chunk in chain.stream({"subject": "ice cream flavors"}):
    print(chunk)
Output> ['Vanilla', 'Chocolate', 'Strawberry', 'Mint Chocolate Chip', 'Cookies and Cream']
Output>
['Vanilla']
['Chocolate']
['Strawberry']
['Mint Chocolate Chip']
['Cookies and Cream']
Datetime parser
from langchain.output_parsers import DatetimeOutputParser
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI

# Parser that instructs the model to reply with a bare datetime string
# and converts that reply into a datetime object.
dt_parser = DatetimeOutputParser()

template = """Answer the users question:
{question}
{format_instructions}"""

# Pre-fill the parser's format instructions; only {question} remains.
prompt = PromptTemplate.from_template(
    template,
    partial_variables={"format_instructions": dt_parser.get_format_instructions()},
)
print('Output>', prompt)

# Pipeline: prompt -> completion-style LLM (not chat) -> datetime parser.
chain = prompt | OpenAI() | dt_parser

answer = chain.invoke({"question": "when was bitcoin founded?"})
print('Output>', answer)
Output> input_variables=['question'] partial_variables={'format_instructions': "Write a datetime string that matches the following pattern: '%Y-%m-%dT%H:%M:%S.%fZ'.\n\nExamples: 1725-03-05T17:26:45.931549Z, 854-01-11T20:07:28.505493Z, 1304-03-29T21:09:39.872406Z\n\nReturn ONLY this string, no other words!"} template='Answer the users question:\n\n{question}\n\n{format_instructions}'
Output> 2009-01-03 18:15:05
Json parser
# NOTE: removed unused `from typing import List` — nothing in this
# example refers to List.
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI

model = ChatOpenAI(temperature=0)


# Define your desired data structure.
class Joke(BaseModel):
    """Schema for the JSON object the model is asked to return."""

    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")


# And a query intended to prompt a language model to populate the data structure.
joke_query = "Tell me a joke."

# Set up a parser + inject instructions into the prompt template.
# The parser derives its format instructions from the Joke schema.
parser = JsonOutputParser(pydantic_object=Joke)

prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)

# Pipeline: prompt -> chat model -> JSON parser (returns a plain dict).
chain = prompt | model | parser
result = chain.invoke({"query": joke_query})
print('Output>', result)
Output> {'setup': "Why couldn't the bicycle stand up by itself?", 'punchline': 'Because it was two tired!'}
OpenAI Functions
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
# `convert_pydantic_to_openai_function` was deprecated in LangChain 0.1.16
# (see the warning in the captured output below); the documented replacement
# is `convert_to_openai_function`. The unused `validator` import was removed.
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain_openai import ChatOpenAI
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser


class Joke(BaseModel):
    """Joke to tell user."""

    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")


# Expose the Joke schema to the model as an OpenAI function definition.
openai_functions = [convert_to_openai_function(Joke)]

model = ChatOpenAI(temperature=0)
prompt = ChatPromptTemplate.from_messages(
    [("system", "You are helpful assistant"), ("user", "{input}")]
)

# Parse the arguments of the function call the model emits into a dict.
parser = JsonOutputFunctionsParser()
chain = prompt | model.bind(functions=openai_functions) | parser

result = chain.invoke({"input": "tell me a joke"})
print('Output>', result)

# Streaming yields progressively more complete JSON objects.
print('Output>')
for s in chain.stream({"input": "tell me a joke"}):
    print(s)
/DATA/venv/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:117: LangChainDeprecationWarning: The function `convert_pydantic_to_openai_function` was deprecated in LangChain 0.1.16 and will be removed in 0.2.0. Use langchain_core.utils.function_calling.convert_to_openai_function() instead.
warn_deprecated(
Output> {'setup': "Why couldn't the bicycle stand up by itself?", 'punchline': 'It was two tired!'}
Output>
{}
{'setup': ''}
{'setup': 'Why'}
{'setup': 'Why couldn'}
{'setup': "Why couldn't"}
{'setup': "Why couldn't the"}
{'setup': "Why couldn't the bicycle"}
{'setup': "Why couldn't the bicycle stand"}
{'setup': "Why couldn't the bicycle stand up"}
{'setup': "Why couldn't the bicycle stand up by"}
{'setup': "Why couldn't the bicycle stand up by itself"}
{'setup': "Why couldn't the bicycle stand up by itself?"}
{'setup': "Why couldn't the bicycle stand up by itself?", 'punchline': ''}
{'setup': "Why couldn't the bicycle stand up by itself?", 'punchline': 'It'}
{'setup': "Why couldn't the bicycle stand up by itself?", 'punchline': 'It was'}
{'setup': "Why couldn't the bicycle stand up by itself?", 'punchline': 'It was two'}
{'setup': "Why couldn't the bicycle stand up by itself?", 'punchline': 'It was two tired'}
{'setup': "Why couldn't the bicycle stand up by itself?", 'punchline': 'It was two tired!'}
'ML&DL and LLM' 카테고리의 다른 글
LangChain - 2.2 Document loaders (0) | 2024.04.01 |
---|---|
LangChain - 2.1 Retrieval concept (1) | 2024.03.29 |
LangChain - 1.3.1 LLM QuickStart (0) | 2024.03.28 |
LangChain - 1.2.5 MessagePromptTemplate (0) | 2024.03.28 |
LangChain - 1.2.4 Few-shot prompt template (0) | 2024.03.28 |