Cohere Tools on LangChain (Integration Guide)

Cohere supports various integrations with LangChain, a large language model (LLM) framework that lets you quickly build applications on top of Cohere’s models. This guide walks through how to leverage Cohere tools with LangChain.

Prerequisites

Running Cohere tools with LangChain doesn’t require many prerequisites; consult the top-level document for more information.
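
At a minimum you’ll need the langchain-cohere package and a Cohere API key. A minimal setup sketch (the key can also be passed directly via the cohere_api_key parameter, as the examples below do):

PYTHON
# pip install langchain-cohere
import os

# Make the key available to any client that reads it from the environment
os.environ["COHERE_API_KEY"] = "<your-cohere-api-key>"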

Multi-Step Tool Use

Multi-step tool use is enabled by default. Here’s an example of using it to put together a simple agent:

PYTHON
from langchain.agents import AgentExecutor
from langchain_cohere.react_multi_hop.agent import (
    create_cohere_react_agent,
)
from langchain_core.prompts import ChatPromptTemplate
from langchain_cohere import ChatCohere

from langchain_community.tools.tavily_search import (
    TavilySearchResults,
)
from pydantic import BaseModel, Field
import os

os.environ["TAVILY_API_KEY"] = "TAVILY_API_KEY"

internet_search = TavilySearchResults()
internet_search.name = "internet_search"
internet_search.description = "Returns a list of relevant document snippets for a textual query retrieved from the internet."


class TavilySearchInput(BaseModel):
    query: str = Field(
        description="Query to search the internet with"
    )


internet_search.args_schema = TavilySearchInput

# Define the Cohere LLM
llm = ChatCohere(
    cohere_api_key="COHERE_API_KEY",
    model="command-r-plus-08-2024",
    temperature=0,
)

# Preamble
preamble = """
You are an expert who answers the user's question with the most relevant datasource. You are equipped with an internet search tool.
"""

# Prompt template
prompt = ChatPromptTemplate.from_template("{input}")

# Create the ReAct agent
agent = create_cohere_react_agent(
    llm=llm,
    tools=[internet_search],
    prompt=prompt,
)

agent_executor = AgentExecutor(
    agent=agent, tools=[internet_search], verbose=True
)

response = agent_executor.invoke(
    {
        "input": "Who is the mayor of the capital of Ontario?",
        "preamble": preamble,
    }
)

print(response["output"])
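
Because verbose=True, the AgentExecutor prints each intermediate reasoning step and tool call as the agent works through the multi-hop question before producing its final answer.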

Single-Step Tool Use

To use single-step mode, set force_single_step=True. Here’s an example of using it to answer a few questions:

PYTHON
from langchain_cohere import ChatCohere
from langchain_core.messages import HumanMessage
from pydantic import BaseModel, Field


# Data model
class web_search(BaseModel):
    """
    The internet. Use web_search for questions that are related to anything other than agents, prompt engineering, and adversarial attacks.
    """

    query: str = Field(
        description="The query to use when searching the internet."
    )


class vectorstore(BaseModel):
    """
    A vectorstore containing documents related to agents, prompt engineering, and adversarial attacks. Use the vectorstore for questions on these topics.
    """

    query: str = Field(
        description="The query to use when searching the vectorstore."
    )


# Preamble
preamble = """You are an expert at routing a user question to a vectorstore or web search.
The vectorstore contains documents related to agents, prompt engineering, and adversarial attacks.
Use the vectorstore for questions on these topics. Otherwise, use web_search."""

# Define the Cohere LLM
llm = ChatCohere(
    cohere_api_key="COHERE_API_KEY", model="command-r-plus-08-2024"
)

# Bind the tools and the preamble to the LLM
llm_with_tools = llm.bind_tools(
    tools=[web_search, vectorstore], preamble=preamble
)

messages = [
    HumanMessage("Who will the Bears draft first in the NFL draft?")
]
response = llm_with_tools.invoke(messages, force_single_step=True)
print(response.response_metadata["tool_calls"])

messages = [HumanMessage("What are the types of agent memory?")]
response = llm_with_tools.invoke(messages, force_single_step=True)
print(response.response_metadata["tool_calls"])

messages = [HumanMessage("Hi, how are you?")]
response = llm_with_tools.invoke(messages, force_single_step=True)
print("tool_calls" in response.response_metadata)
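
If the routing works as the preamble instructs, the first query should produce a web_search tool call and the second a vectorstore call, while the greeting should come back as a plain chat response with no tool_calls entry in its metadata.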

SQL Agent

LangChain’s SQL Agent abstraction provides a flexible way of interacting with SQL databases. This can be accessed via the create_sql_agent constructor.

PYTHON
from langchain_cohere import ChatCohere, create_sql_agent
from langchain_community.utilities import SQLDatabase
import urllib.request

# Download the Chinook SQLite database
url = "https://github.com/lerocha/chinook-database/raw/master/ChinookDatabase/DataSources/Chinook_Sqlite.sqlite"
urllib.request.urlretrieve(url, "Chinook.db")
print("Chinook database downloaded successfully.")

db = SQLDatabase.from_uri("sqlite:///Chinook.db")
print(db.dialect)
print(db.get_usable_table_names())
db.run("SELECT * FROM Artist LIMIT 10;")

# Define the Cohere LLM
llm = ChatCohere(
    cohere_api_key="COHERE_API_KEY",
    model="command-r-plus-08-2024",
    temperature=0,
)

agent_executor = create_sql_agent(llm, db=db, verbose=True)

resp = agent_executor.invoke(
    "Show me the first 5 rows of the Album table."
)
print(resp)
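
Behind the scenes the agent uses LangChain’s SQL toolkit to inspect the available tables and schemas, write a query, run it against the database, and summarize the result; with verbose=True each of these steps is printed as it happens.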

CSV Agent

LangChain’s CSV Agent abstraction enables building agents that can interact with CSV files. This can be accessed via the create_csv_agent constructor.

PYTHON
from langchain_cohere import ChatCohere, create_csv_agent

# Define the Cohere LLM
llm = ChatCohere(
    cohere_api_key="COHERE_API_KEY",
    model="command-r-plus-08-2024",
    temperature=0,
)

agent_executor = create_csv_agent(
    llm,
    "titanic.csv",  # https://github.com/langchain-ai/langchain/blob/master/templates/csv-agent/titanic.csv
)

resp = agent_executor.invoke(
    {"input": "How many people were on the titanic?"}
)
print(resp.get("output"))
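
Note that the CSV agent typically works by loading the file into a pandas DataFrame and letting the model run Python code against it, so pandas needs to be installed in your environment.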

Streaming for Tool Calling

When tools are called in a streaming context, message chunks will be populated with tool call chunk objects in a list via the .tool_call_chunks attribute.

PYTHON
from langchain_core.tools import tool
from langchain_cohere import ChatCohere


@tool
def add(a: int, b: int) -> int:
    """Adds a and b."""
    return a + b


@tool
def multiply(a: int, b: int) -> int:
    """Multiplies a and b."""
    return a * b


tools = [add, multiply]

# Define the Cohere LLM
llm = ChatCohere(
    cohere_api_key="COHERE_API_KEY",
    model="command-r-plus-08-2024",
    temperature=0,
)

llm_with_tools = llm.bind_tools(tools)

query = "What is 3 * 12? Also, what is 11 + 49?"

for chunk in llm_with_tools.stream(query):
    if chunk.tool_call_chunks:
        print(chunk.tool_call_chunks)
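
Message chunks are additive: summing them merges the partial tool call chunks into complete tool calls, which you can then read from the accumulated message’s .tool_calls attribute. A minimal sketch, reusing llm_with_tools and query from the example above:

PYTHON
# Accumulate the streamed chunks; AIMessageChunk supports "+", which
# merges partial tool call chunks into complete tool calls
gathered = None
for chunk in llm_with_tools.stream(query):
    gathered = chunk if gathered is None else gathered + chunk

print(gathered.tool_calls)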

LangGraph Agents

LangGraph is a stateful orchestration framework that brings added control to agent workflows.

To use LangGraph with Cohere, install the LangGraph package by running pip install langgraph.

Basic Chatbot

This simple chatbot example will illustrate the core concepts of building with LangGraph.

PYTHON
from typing import Annotated
from typing_extensions import TypedDict
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langchain_cohere import ChatCohere


# Create a state graph
class State(TypedDict):
    messages: Annotated[list, add_messages]


graph_builder = StateGraph(State)

# Define the Cohere LLM
llm = ChatCohere(
    cohere_api_key="COHERE_API_KEY", model="command-r-plus-08-2024"
)


# Add nodes
def chatbot(state: State):
    return {"messages": [llm.invoke(state["messages"])]}


graph_builder.add_node("chatbot", chatbot)
graph_builder.add_edge(START, "chatbot")
graph_builder.add_edge("chatbot", END)

# Compile the graph
graph = graph_builder.compile()

# Run the chatbot
while True:
    user_input = input("User: ")
    if user_input.lower() in ["quit", "exit", "q"]:
        print("Goodbye!")
        break
    for event in graph.stream({"messages": [("user", user_input)]}):
        for value in event.values():
            print("Assistant:", value["messages"][-1].content)
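
The add_messages reducer on the messages field is what makes this work: each node’s returned messages are appended to the conversation history rather than overwriting it, so the full dialogue is carried through the graph.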

Enhancing the Chatbot with Tools

To handle queries our chatbot can’t answer “from memory”, we’ll integrate a web search tool. Our bot can use this tool to find relevant information and provide better responses.

PYTHON
from langchain_community.tools.tavily_search import (
    TavilySearchResults,
)
from langchain_cohere import ChatCohere
from langgraph.graph import StateGraph, START
from langgraph.graph.message import add_messages
from langchain_core.messages import ToolMessage
from langchain_core.messages import BaseMessage
from typing import Annotated, Literal
from typing_extensions import TypedDict
import json


# Create a tool
tool = TavilySearchResults(max_results=2)
tools = [tool]


# Create a state graph
class State(TypedDict):
    messages: Annotated[list, add_messages]


graph_builder = StateGraph(State)

# Define the LLM
llm = ChatCohere(
    cohere_api_key="COHERE_API_KEY", model="command-r-plus-08-2024"
)

# Bind the tools to the LLM
llm_with_tools = llm.bind_tools(tools)


# Add nodes
def chatbot(state: State):
    return {"messages": [llm_with_tools.invoke(state["messages"])]}


graph_builder.add_node("chatbot", chatbot)


class BasicToolNode:
    """A node that runs the tools requested in the last AIMessage."""

    def __init__(self, tools: list) -> None:
        self.tools_by_name = {tool.name: tool for tool in tools}

    def __call__(self, inputs: dict):
        if messages := inputs.get("messages", []):
            message = messages[-1]
        else:
            raise ValueError("No message found in input")
        outputs = []
        for tool_call in message.tool_calls:
            tool_result = self.tools_by_name[
                tool_call["name"]
            ].invoke(tool_call["args"])
            outputs.append(
                ToolMessage(
                    content=json.dumps(tool_result),
                    name=tool_call["name"],
                    tool_call_id=tool_call["id"],
                )
            )
        return {"messages": outputs}


tool_node = BasicToolNode(tools=[tool])
graph_builder.add_node("tools", tool_node)


def route_tools(
    state: State,
) -> Literal["tools", "__end__"]:
    """
    Use in the conditional_edge to route to the ToolNode if the last message
    has tool calls. Otherwise, route to the end.
    """
    if isinstance(state, list):
        ai_message = state[-1]
    elif messages := state.get("messages", []):
        ai_message = messages[-1]
    else:
        raise ValueError(
            f"No messages found in input state to tool_edge: {state}"
        )
    if (
        hasattr(ai_message, "tool_calls")
        and len(ai_message.tool_calls) > 0
    ):
        return "tools"
    return "__end__"


graph_builder.add_conditional_edges(
    "chatbot",
    route_tools,
    {"tools": "tools", "__end__": "__end__"},
)
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge(START, "chatbot")

# Compile the graph
graph = graph_builder.compile()

# Run the chatbot
while True:
    user_input = input("User: ")
    if user_input.lower() in ["quit", "exit", "q"]:
        print("Goodbye!")
        break
    for event in graph.stream({"messages": [("user", user_input)]}):
        for value in event.values():
            if isinstance(value["messages"][-1], BaseMessage):
                print("Assistant:", value["messages"][-1].content)
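
LangGraph also ships prebuilt components that replace the hand-rolled pieces above. A sketch of the same wiring, reusing the State, chatbot node, and tools defined earlier (the prebuilt ToolNode stands in for BasicToolNode, and tools_condition for route_tools):

PYTHON
from langgraph.prebuilt import ToolNode, tools_condition

graph_builder = StateGraph(State)
graph_builder.add_node("chatbot", chatbot)
# ToolNode executes whichever tools the last AI message requested
graph_builder.add_node("tools", ToolNode(tools=tools))

# tools_condition routes to "tools" when the last message contains
# tool calls, and otherwise ends the run
graph_builder.add_conditional_edges("chatbot", tools_condition)
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge(START, "chatbot")
graph = graph_builder.compile()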