
Tools on LangChain

Cohere supports various integrations with LangChain, a large language model (LLM) framework that allows you to quickly create applications based on Cohere’s models. This doc will guide you through how to leverage Cohere tools with LangChain.

Prerequisites

Running Cohere tools with LangChain doesn’t require many prerequisites; consult the top-level document for more information.
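
As a minimal setup, you typically just install the langchain-cohere package (pip install langchain-cohere) and make your Cohere API key available. A sketch, assuming you have a valid key:

PYTHON
import os
from langchain_cohere import ChatCohere

# ChatCohere picks up the key from the COHERE_API_KEY environment variable.
os.environ["COHERE_API_KEY"] = "COHERE_API_KEY"  # replace with your actual key

llm = ChatCohere(model="command-r-plus-08-2024")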

Multi-Step Tool Use

Multi-step tool use is enabled by default. Here’s an example of using it to put together a simple agent:

PYTHON
import os

from langchain.agents import AgentExecutor
from langchain_cohere.react_multi_hop.agent import create_cohere_react_agent
from langchain_core.prompts import ChatPromptTemplate
from langchain_cohere import ChatCohere

from langchain_community.tools.tavily_search import TavilySearchResults
from pydantic import BaseModel, Field

os.environ["TAVILY_API_KEY"] = "TAVILY_API_KEY"

internet_search = TavilySearchResults()
internet_search.name = "internet_search"
internet_search.description = "Returns a list of relevant document snippets for a textual query retrieved from the internet."

class TavilySearchInput(BaseModel):
    query: str = Field(description="Query to search the internet with")

internet_search.args_schema = TavilySearchInput

# Define the Cohere LLM
llm = ChatCohere(cohere_api_key="COHERE_API_KEY",
                 model="command-r-plus-08-2024",
                 temperature=0)

# Preamble
preamble = """
You are an expert who answers the user's question with the most relevant datasource. You are equipped with an internet search tool and a special vectorstore of information about how to write good essays.
"""

# Prompt template
prompt = ChatPromptTemplate.from_template("{input}")

# Create the ReAct agent
agent = create_cohere_react_agent(
    llm=llm,
    tools=[internet_search],
    prompt=prompt,
)

agent_executor = AgentExecutor(agent=agent, tools=[internet_search], verbose=True)

response = agent_executor.invoke({
    "input": "Who is the mayor of the capital of Ontario",
    "preamble": preamble,
})

print(response['output'])
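
If you also want to inspect the agent’s intermediate tool calls rather than just its final answer, AgentExecutor accepts a return_intermediate_steps flag. A minimal sketch, reusing the agent defined above:

PYTHON
# Ask the executor to surface the agent's intermediate steps.
agent_executor = AgentExecutor(
    agent=agent,
    tools=[internet_search],
    verbose=True,
    return_intermediate_steps=True,
)

response = agent_executor.invoke({
    "input": "Who is the mayor of the capital of Ontario",
    "preamble": preamble,
})

# Each step pairs the agent's chosen action with the tool's observation.
for action, observation in response["intermediate_steps"]:
    print(action.tool, action.tool_input)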

Single-Step Tool Use

To use single-step mode, set force_single_step=True. Here’s an example of using it to answer a few questions:

PYTHON
from langchain_cohere import ChatCohere
from langchain_core.messages import HumanMessage
from pydantic import BaseModel, Field

# Data model
class web_search(BaseModel):
    """
    The internet. Use web_search for questions that are related to anything other than agents, prompt engineering, and adversarial attacks.
    """
    query: str = Field(description="The query to use when searching the internet.")

class vectorstore(BaseModel):
    """
    A vectorstore containing documents related to agents, prompt engineering, and adversarial attacks. Use the vectorstore for questions on these topics.
    """
    query: str = Field(description="The query to use when searching the vectorstore.")

# Preamble
preamble = """You are an expert at routing a user question to a vectorstore or web search.
The vectorstore contains documents related to agents, prompt engineering, and adversarial attacks.
Use the vectorstore for questions on these topics. Otherwise, use web-search."""

# Define the Cohere LLM with tool use and the preamble
llm = ChatCohere(cohere_api_key="COHERE_API_KEY",
                 model="command-r-plus-08-2024")

llm_with_tools = llm.bind_tools(tools=[web_search, vectorstore], preamble=preamble)

messages = [HumanMessage("Who will the Bears draft first in the NFL draft?")]
response = llm_with_tools.invoke(messages, force_single_step=True)
print(response.response_metadata['tool_calls'])

messages = [HumanMessage("What are the types of agent memory?")]
response = llm_with_tools.invoke(messages, force_single_step=True)
print(response.response_metadata['tool_calls'])

messages = [HumanMessage("Hi, how are you?")]
response = llm_with_tools.invoke(messages, force_single_step=True)
print('tool_calls' in response.response_metadata)
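
Once the model has routed the question, you can dispatch on the chosen tool. A minimal sketch, using the standardized .tool_calls attribute that LangChain populates on the response (the name and args keys shown are LangChain's standard tool-call fields):

PYTHON
# Dispatch on the routed tool call (illustrative only).
messages = [HumanMessage("What are the types of agent memory?")]
response = llm_with_tools.invoke(messages, force_single_step=True)

if response.tool_calls:
    call = response.tool_calls[0]
    print(f"Route: {call['name']}, query: {call['args']['query']}")
else:
    # No tool call means the model answered directly.
    print(response.content)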

SQL Agent

LangChain’s SQL Agent abstraction provides a flexible way of interacting with SQL databases. It can be accessed via the create_sql_agent constructor.

PYTHON
from langchain_cohere import ChatCohere, create_sql_agent
from langchain_community.utilities import SQLDatabase
import urllib.request

# Download the Chinook SQLite database
url = "https://github.com/lerocha/chinook-database/raw/master/ChinookDatabase/DataSources/Chinook_Sqlite.sqlite"
urllib.request.urlretrieve(url, "Chinook.db")
print("Chinook database downloaded successfully.")

db = SQLDatabase.from_uri("sqlite:///Chinook.db")
print(db.dialect)
print(db.get_usable_table_names())
db.run("SELECT * FROM Artist LIMIT 10;")

# Define the Cohere LLM
llm = ChatCohere(cohere_api_key="COHERE_API_KEY",
                 model="command-r-plus-08-2024",
                 temperature=0)

agent_executor = create_sql_agent(llm, db=db, verbose=True)

resp = agent_executor.invoke("Show me the first 5 rows of the Album table.")
print(resp)
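
To keep the agent’s context small, SQLDatabase can expose only a subset of tables via its include_tables parameter. A minimal sketch, reusing the Chinook database and LLM from above:

PYTHON
# Restrict the agent to the Album and Artist tables.
db = SQLDatabase.from_uri(
    "sqlite:///Chinook.db",
    include_tables=["Album", "Artist"],
)

agent_executor = create_sql_agent(llm, db=db, verbose=True)
resp = agent_executor.invoke("Which artist has the most albums?")
print(resp)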

CSV Agent

LangChain’s CSV Agent abstraction enables building agents that can interact with CSV files. This can be accessed via the create_csv_agent constructor.

PYTHON
from langchain_cohere import ChatCohere, create_csv_agent

# Define the Cohere LLM
llm = ChatCohere(cohere_api_key="COHERE_API_KEY",
                 model="command-r-plus-08-2024",
                 temperature=0)

agent_executor = create_csv_agent(
    llm,
    "titanic.csv"  # https://github.com/langchain-ai/langchain/blob/master/templates/csv-agent/titanic.csv
)

resp = agent_executor.invoke({"input": "How many people were on the titanic?"})
print(resp.get("output"))
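
The same executor handles follow-up analytical questions over the file; for example (a hypothetical query, assuming the standard Titanic columns):

PYTHON
# A second, more analytical question against the same CSV.
resp = agent_executor.invoke(
    {"input": "What was the average age of the passengers?"}
)
print(resp.get("output"))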

Streaming for Tool Calling

When tools are called in a streaming context, message chunks will be populated with tool call chunk objects in a list via the .tool_call_chunks attribute.

PYTHON
from langchain_core.tools import tool
from langchain_cohere import ChatCohere

@tool
def add(a: int, b: int) -> int:
    """Adds a and b."""
    return a + b


@tool
def multiply(a: int, b: int) -> int:
    """Multiplies a and b."""
    return a * b

tools = [add, multiply]

# Define the Cohere LLM
llm = ChatCohere(cohere_api_key="COHERE_API_KEY",
                 model="command-r-plus-08-2024",
                 temperature=0)

llm_with_tools = llm.bind_tools(tools)

query = "What is 3 * 12? Also, what is 11 + 49?"

for chunk in llm_with_tools.stream(query):
    if chunk.tool_call_chunks:
        print(chunk.tool_call_chunks)
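
Message chunks support concatenation with +, so you can accumulate the stream into a single message and read fully formed tool calls off its .tool_calls attribute. A minimal sketch:

PYTHON
# Accumulate streamed chunks into one message.
gathered = None
for chunk in llm_with_tools.stream(query):
    gathered = chunk if gathered is None else gathered + chunk

# The accumulated message carries the completed tool calls.
print(gathered.tool_calls)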

LangGraph Agents

LangGraph is a stateful orchestration framework that brings added control to agent workflows.

To use LangGraph with Cohere, you need to install the LangGraph package by running pip install langgraph.

Basic Chatbot

This simple chatbot example will illustrate the core concepts of building with LangGraph.

PYTHON
from typing import Annotated
from typing_extensions import TypedDict
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langchain_cohere import ChatCohere

# Create a state graph
class State(TypedDict):
    messages: Annotated[list, add_messages]

graph_builder = StateGraph(State)

# Define the Cohere LLM
llm = ChatCohere(cohere_api_key="COHERE_API_KEY",
                 model="command-r-plus-08-2024")

# Add nodes
def chatbot(state: State):
    return {"messages": [llm.invoke(state["messages"])]}

graph_builder.add_node("chatbot", chatbot)
graph_builder.add_edge(START, "chatbot")
graph_builder.add_edge("chatbot", END)

# Compile the graph
graph = graph_builder.compile()

# Run the chatbot
while True:
    user_input = input("User: ")
    print("User: " + user_input)
    if user_input.lower() in ["quit", "exit", "q"]:
        print("Goodbye!")
        break
    for event in graph.stream({"messages": [("user", user_input)]}):
        for value in event.values():
            print("Assistant:", value["messages"][-1].content)

Enhancing the Chatbot with Tools

To handle queries our chatbot can’t answer “from memory”, we’ll integrate a web search tool. Our bot can use this tool to find relevant information and provide better responses.

PYTHON
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_cohere import ChatCohere
from langgraph.graph import StateGraph, START
from langgraph.graph.message import add_messages
from langchain_core.messages import ToolMessage
from langchain_core.messages import BaseMessage
from typing import Annotated, Literal
from typing_extensions import TypedDict
import json

# Create a tool
tool = TavilySearchResults(max_results=2)
tools = [tool]

# Create a state graph
class State(TypedDict):
    messages: Annotated[list, add_messages]

graph_builder = StateGraph(State)

# Define the LLM
llm = ChatCohere(cohere_api_key="COHERE_API_KEY",
                 model="command-r-plus-08-2024")

# Bind the tools to the LLM
llm_with_tools = llm.bind_tools(tools)

# Add nodes
def chatbot(state: State):
    return {"messages": [llm_with_tools.invoke(state["messages"])]}

graph_builder.add_node("chatbot", chatbot)

class BasicToolNode:
    """A node that runs the tools requested in the last AIMessage."""

    def __init__(self, tools: list) -> None:
        self.tools_by_name = {tool.name: tool for tool in tools}

    def __call__(self, inputs: dict):
        if messages := inputs.get("messages", []):
            message = messages[-1]
        else:
            raise ValueError("No message found in input")
        outputs = []
        for tool_call in message.tool_calls:
            tool_result = self.tools_by_name[tool_call["name"]].invoke(
                tool_call["args"]
            )
            outputs.append(
                ToolMessage(
                    content=json.dumps(tool_result),
                    name=tool_call["name"],
                    tool_call_id=tool_call["id"],
                )
            )
        return {"messages": outputs}

tool_node = BasicToolNode(tools=[tool])
graph_builder.add_node("tools", tool_node)

def route_tools(
    state: State,
) -> Literal["tools", "__end__"]:
    """
    Use in the conditional_edge to route to the ToolNode if the last message
    has tool calls. Otherwise, route to the end.
    """
    if isinstance(state, list):
        ai_message = state[-1]
    elif messages := state.get("messages", []):
        ai_message = messages[-1]
    else:
        raise ValueError(f"No messages found in input state to tool_edge: {state}")
    if hasattr(ai_message, "tool_calls") and len(ai_message.tool_calls) > 0:
        return "tools"
    return "__end__"

graph_builder.add_conditional_edges(
    "chatbot",
    route_tools,
    {"tools": "tools", "__end__": "__end__"},
)
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge(START, "chatbot")

# Compile the graph
graph = graph_builder.compile()

# Run the chatbot
while True:
    user_input = input("User: ")
    if user_input.lower() in ["quit", "exit", "q"]:
        print("Goodbye!")
        break
    for event in graph.stream({"messages": [("user", user_input)]}):
        for value in event.values():
            if isinstance(value["messages"][-1], BaseMessage):
                print("Assistant:", value["messages"][-1].content)