How to call tools in parallel
In the Chains with multiple tools guide we saw how to build function-calling chains that select between multiple tools. Some models, like the OpenAI models released in Fall 2023, also support parallel function calling, which lets a single model call request multiple function invocations (or the same function several times) at once. The chain from the multiple tools guide already supports this.
Setup
We'll need to install the following packages for this guide:
%pip install --upgrade --quiet langchain-core
If you'd like to trace your runs in LangSmith, uncomment and set the following environment variables:
import getpass
import os
# os.environ["LANGCHAIN_TRACING_V2"] = "true"
# os.environ["LANGCHAIN_API_KEY"] = getpass.getpass()
Tools
from langchain_core.tools import tool


@tool
def multiply(first_int: int, second_int: int) -> int:
    """Multiply two integers together."""
    return first_int * second_int


@tool
def add(first_int: int, second_int: int) -> int:
    """Add two integers."""
    return first_int + second_int


@tool
def exponentiate(base: int, exponent: int) -> int:
    """Exponentiate the base to the exponent power."""
    return base**exponent
API Reference: tool
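Tools created with the @tool decorator can be invoked directly, which is a handy sanity check before wiring them into a chain. A quick illustration (the argument values here are just examples, not part of the original guide):

multiply.invoke({"first_int": 4, "second_int": 5})
# -> 20
exponentiate.invoke({"base": 2, "exponent": 10})
# -> 1024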
Chain
Select a chat model provider below and run its setup; each block installs the provider's integration package, sets its API key, and creates the llm used in the rest of the guide.
OpenAI
pip install -qU langchain-openai
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
Anthropic
pip install -qU langchain-anthropic
import getpass
import os
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_anthropic import ChatAnthropic
llm = ChatAnthropic(model="claude-3-sonnet-20240229")
Cohere
pip install -qU langchain-cohere
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass()
from langchain_cohere import ChatCohere
llm = ChatCohere(model="command-r")
FireworksAI
pip install -qU langchain-fireworks
import getpass
import os
os.environ["FIREWORKS_API_KEY"] = getpass.getpass()
from langchain_fireworks import ChatFireworks
llm = ChatFireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
MistralAI
pip install -qU langchain-mistralai
import getpass
import os
os.environ["MISTRAL_API_KEY"] = getpass.getpass()
from langchain_mistralai import ChatMistralAI
llm = ChatMistralAI(model="mistral-large-latest")
TogetherAI
pip install -qU langchain-openai
import getpass
import os
os.environ["TOGETHER_API_KEY"] = getpass.getpass()
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(
    base_url="https://api.together.xyz/v1",
    api_key=os.environ["TOGETHER_API_KEY"],
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
)
from typing import Dict, List

from langchain_core.messages import AIMessage
tools = [multiply, exponentiate, add]
llm_with_tools = llm.bind_tools(tools)
tool_map = {tool.name: tool for tool in tools}


def call_tools(msg: AIMessage) -> List[Dict]:
    """Simple sequential tool calling helper."""
    tool_calls = msg.tool_calls.copy()
    for tool_call in tool_calls:
        # Look up each requested tool by name and invoke it with the model-provided args.
        tool_call["output"] = tool_map[tool_call["name"]].invoke(tool_call["args"])
    return tool_calls
chain = llm_with_tools | call_tools
chain.invoke(
    "What's 23 times 7, and what's five times 18 and add a million plus a billion and cube thirty-seven"
)
[{'name': 'multiply',
  'args': {'first_int': 23, 'second_int': 7},
  'id': 'call_22tgOrsVLyLMsl2RLbUhtycw',
  'output': 161},
 {'name': 'multiply',
  'args': {'first_int': 5, 'second_int': 18},
  'id': 'call_EbKHEG3TjqBhEwb7aoxUtgzf',
  'output': 90},
 {'name': 'add',
  'args': {'first_int': 1000000, 'second_int': 1000000000},
  'id': 'call_LUhu2IT3vINxlTc5fCVY6Nhi',
  'output': 1001000000},
 {'name': 'exponentiate',
  'args': {'base': 37, 'exponent': 3},
  'id': 'call_bnCZIXelOKkmcyd4uGXId9Ct',
  'output': 50653}]
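Note that call_tools still executes the requested tool calls one after another; the parallelism above is the model requesting several calls in a single response. If your tools do slow I/O, you could also run the calls concurrently. Below is a minimal sketch using Python's standard ThreadPoolExecutor; the call_tools_parallel name is just illustrative and not part of the guide:

from concurrent.futures import ThreadPoolExecutor


def call_tools_parallel(msg: AIMessage) -> List[Dict]:
    """Run the model's requested tool calls in a thread pool instead of a loop."""
    tool_calls = msg.tool_calls.copy()

    def run_one(tool_call):
        # Look up the tool by name and invoke it with the model-provided args.
        return tool_map[tool_call["name"]].invoke(tool_call["args"])

    with ThreadPoolExecutor() as executor:
        outputs = list(executor.map(run_one, tool_calls))
    for tool_call, output in zip(tool_calls, outputs):
        tool_call["output"] = output
    return tool_calls


parallel_chain = llm_with_tools | call_tools_parallel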