A single-agent system involves one autonomous entity that perceives its environment and takes actions to achieve specific goals. It makes decisions independently, without coordinating or interacting with other agents.

Installation

npm i @ag-kit/agents @ag-kit/adapter-langgraph @langchain/core @langchain/langgraph @langchain/openai zod
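
The examples below read their configuration from environment variables: OPENAI_API_KEY (required), OPENAI_MODEL (falls back to gpt-4o-mini), and OPENAI_BASE_URL (only needed for OpenAI-compatible endpoints). A minimal setup might look like:

export OPENAI_API_KEY="sk-..."
export OPENAI_MODEL="gpt-4o-mini"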

Quick Start

Create your first agent:
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage, SystemMessage } from "@langchain/core/messages";
import {
  StateGraph,
  START,
  END,
  MessagesAnnotation,
} from "@langchain/langgraph";

// Single LLM node: prepend the system prompt and call the model.
async function chatNode(state: typeof MessagesAnnotation.State) {
  const model = new ChatOpenAI({
    model: process.env.OPENAI_MODEL || "gpt-4o-mini",
    apiKey: process.env.OPENAI_API_KEY,
    configuration: {
      baseURL: process.env.OPENAI_BASE_URL,
    },
  });

  const systemMessage = new SystemMessage({
    content: "You are a helpful assistant.",
  });

  const response = await model.invoke([systemMessage, ...state.messages]);

  return { messages: [response] };
}

// Wire a minimal graph: START -> chat_node -> END.
const workflow = new StateGraph(MessagesAnnotation)
  .addNode("chat_node", chatNode)
  .addEdge(START, "chat_node")
  .addEdge("chat_node", END);

const compiledGraph = workflow.compile();

const { messages } = await compiledGraph.invoke({
  messages: [new HumanMessage({ content: "Hello, how are you?" })],
});

messages.forEach((message) =>
  console.log(`[${message.type}]: ${message.content}`)
);
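
Instead of waiting for the final result, you can also stream state updates as the graph runs. A small sketch using LangGraph's stream method with streamMode: "values", which emits the full message list after each step (variable names here are illustrative):

const stream = await compiledGraph.stream(
  { messages: [new HumanMessage({ content: "Hello, how are you?" })] },
  { streamMode: "values" }
);

for await (const chunk of stream) {
  // Each chunk is the current graph state; print the newest message.
  const last = chunk.messages.at(-1);
  console.log(`[${last?.type}]: ${last?.content}`);
}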

Adding Tools

Tools extend an agent's capabilities beyond text generation: the model can request a function call, the graph executes it, and the result is fed back to the model.
import { ChatOpenAI } from "@langchain/openai";
import {
  HumanMessage,
  SystemMessage,
  isAIMessage,
} from "@langchain/core/messages";
import {
  StateGraph,
  START,
  END,
  MessagesAnnotation,
} from "@langchain/langgraph";
import { ToolNode } from "@langchain/langgraph/prebuilt";
import { tool } from "@langchain/core/tools";
import { z } from "zod";

// A mock tool the model can call; swap in a real weather lookup as needed.
const getWeather = tool(
  async ({ location }) => ({
    location,
    condition: "Sunny",
    temperatureC: 25,
  }),
  {
    name: "getWeather",
    description: "Get mock weather for a location",
    schema: z.object({
      location: z.string().describe("City or place to look up"),
    }),
  }
);

const tools = [getWeather];

// LLM node: binding the tools lets the model emit tool calls.
async function chatNode(state: typeof MessagesAnnotation.State) {
  const model = new ChatOpenAI({
    model: process.env.OPENAI_MODEL || "gpt-4o-mini",
    apiKey: process.env.OPENAI_API_KEY,
    configuration: {
      baseURL: process.env.OPENAI_BASE_URL,
    },
  }).bindTools(tools);

  const systemMessage = new SystemMessage({
    content: "You are a helpful assistant.",
  });

  const response = await model.invoke([systemMessage, ...state.messages]);

  return { messages: [response] };
}

function shouldContinue(state: typeof MessagesAnnotation.State) {
  const lastMessage = state.messages.at(-1);

  // Route to the tool node only when the model asked for one of our tools.
  if (lastMessage && isAIMessage(lastMessage)) {
    const shouldCallTool = lastMessage.tool_calls?.some((call) =>
      tools.some((t) => t.name === call.name)
    );

    return shouldCallTool ? "tools" : END;
  }

  return END;
}

// After the tool node runs, control loops back to the model so it can use the results.
const workflow = new StateGraph(MessagesAnnotation)
  .addNode("chat_node", chatNode)
  .addNode("tools", new ToolNode(tools))
  .addEdge(START, "chat_node")
  .addConditionalEdges("chat_node", shouldContinue)
  .addEdge("tools", "chat_node");

const compiledGraph = workflow.compile();

const { messages } = await compiledGraph.invoke({
  messages: [
    new HumanMessage({
      content: "Help me get weather of London please",
    }),
  ],
});

messages.forEach((message) =>
  console.log(`[${message.type}]: ${message.content}`)
);
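
By default the compiled graph is stateless: each invoke starts from only the messages you pass in. To keep conversation history across calls, LangGraph supports compiling with a checkpointer and addressing each conversation by a thread_id. A minimal sketch using the in-memory MemorySaver (the thread id is arbitrary, and in-memory state is lost on restart):

import { MemorySaver } from "@langchain/langgraph";

const graphWithMemory = workflow.compile({ checkpointer: new MemorySaver() });
const config = { configurable: { thread_id: "demo-thread" } };

// Earlier turns on the same thread are restored automatically.
await graphWithMemory.invoke(
  { messages: [new HumanMessage({ content: "What's the weather in London?" })] },
  config
);

const followUp = await graphWithMemory.invoke(
  { messages: [new HumanMessage({ content: "And in Paris?" })] },
  config
);

console.log(followUp.messages.at(-1)?.content);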

Next Steps