- Libraries.io link: https://libraries.io/pypi/langchain-decorators
- Let's try its demo on Colab
- This really does feel like a handy library
- Remembering the pain of controlling output formats purely by tweaking prompts, this is a huge help (see the minimal sketch below)
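A minimal sketch of the core pattern, assuming langchain-decorators is installed and a default LLM (e.g. an OpenAI key) is configured; the function name and prompt wording are made up for illustration, but the docstring-as-prompt-template and return-type-driven output parsing follow the library's documented usage.

```python
from langchain_decorators import llm_prompt

# The docstring is the prompt template; {topic} and {count} are filled from the arguments.
# The `-> list` return annotation makes the decorator attach a list output parser,
# so you get a parsed Python list back instead of hand-parsing raw completion text.
@llm_prompt
def brainstorm_titles(topic: str, count: int = 5) -> list:
    """
    Suggest {count} catchy blog post titles about {topic}.
    """
    return

titles = brainstorm_titles(topic="prompt engineering")
print(titles)  # a parsed list of title strings, no manual output-format wrangling
```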
- cool~
- The ReAct-style agent example at the bottom is really fun, let's take a look!
````python
from typing import List
from textwrap import dedent
import json

from langchain.agents import load_tools
from langchain.tools.base import BaseTool

from langchain_decorators import llm_prompt, PromptTypes, GlobalSettings
from langchain_decorators.output_parsers import JsonOutputParser

tools = load_tools(["llm-math"], llm=GlobalSettings.get_current_settings().default_llm)

# you may, or may not use pydantic as your base class... totally up to you
class MultilingualAgent:

    def __init__(self, tools: List[BaseTool], result_language: str = None) -> None:
        self.tools = tools
        # we can refer to our fields in all our prompts
        self.result_language = result_language
        self.agent_scratchpad = ""  # we initialize our scratchpad
        self.feedback = ""          # we initialize our feedback if we get some error
        # other settings
        self.iterations = 10
        self.agent_format_instructions = dedent("""\
            # Reasoning
            ... write your reasoning here ...

            # Tool
            ```json
            {{
                "tool": name of the tool to use,
                "tool_input": the input for the tool
            }}
            ```

            # Observation
            output from the tool

            ... repeat this # Reasoning, # Tool, # Observation sequence multiple times until you know the final answer, when you write:

            # Final answer
            ... write the final answer
            """)

    @property
    def tools_description(self) -> str:
        # we can refer to properties in our prompts too
        return "\n".join([f" - {tool.name}: {tool.description}" for tool in self.tools])

    # we set the prompt type here, a markdown output parser, and a stop token
    # so the model stops before writing its own "Observation"
    @llm_prompt(prompt_type=PromptTypes.AGENT_REASONING, output_parser="markdown", stop_tokens=["Observation"], verbose=True)
    def reason(self, question: str) -> dict:
        """
        The system prompt:
        ```<prompt:system>
        You are an assistant that uses reasoning and tools to help user.
        You use tools for the task the tool is designed to.
        Before answering the question and/or using the tool, you should write down the explanation.

        Here is the list of tools available:
        {tools_description}

        Use this format:
        {agent_format_instructions}{? in {result_language}?} here ...{? Make sure to write the final answer in {result_language}!?}
        ```

        User question:
        ```<prompt:user>
        {question}
        ```

        Scratchpad:
        ```<prompt:assistant>
        {agent_scratchpad}
        ```

        ```<prompt:user>
        {feedback}
        ```
        """
        return

    def act(self, tool_name: str, tool_input: str) -> str:
        tool = next((tool for tool in self.tools if tool.name.lower() == tool_name.lower()), None)
        if tool is None:
            self.feedback = f"Tool {tool_name} is not available. Available tools are: {self.tools_description}"
            return
        else:
            try:
                result = tool.run(tool_input)
            except Exception as e:
                if self.feedback:
                    # we've already experienced an error, so we are not going to try forever... let's raise this one
                    raise e
                self.feedback = f"Tool {tool_name} failed with error: {e}.\nLet's fix it and try again."
                return
            tool_instructions = json.dumps({"tool": tool.name, "tool_input": tool_input})
            self.agent_scratchpad += f"# Tool\n```json\n{tool_instructions}\n```\n# Observation\n\nResult from tool {tool_name}:\n\t{result}\n"

    def run(self, question):
        for i in range(self.iterations):
            reasoning = self.reason(question=question)
            if reasoning.get("Final answer") is not None:
                return reasoning.get("Final answer")
            else:
                tool_info = reasoning.get("Tool")
                tool_name, tool_input = (None, None)
                if tool_info:
                    tool_info_parsed = JsonOutputParser().parse(tool_info)
                    tool_name = tool_info_parsed.get("tool")
                    tool_input = tool_info_parsed.get("tool_input")
                if tool_name is None or tool_input is None:
                    self.feedback = "Your response was not in the expected format. Please make sure to respond in the correct format:\n" + self.agent_format_instructions
                    continue
                self.act(tool_name, tool_input)
        raise Exception(f"Failed to answer the question after {self.iterations} iterations. Last result: {reasoning}")


agent = MultilingualAgent(tools=tools, result_language="German")
result = agent.run("What is the surface of a sphere with radius with diameter of 100km?")
print("\n\nHere is the agent's answer:", result)
````
- Below is the output
- This package really does bring a lot of convenience~