A Node.js wrapper for logging LLM traces directly to Owlmetric, bypassing the proxy. Track usage metrics and monitor your AI applications with support for OpenAI, Anthropic, Google Gemini, and more.
npm install @owlmetric/tracker
// Wrap the stock OpenAI SDK so every request/response is reported to Owlmetric.
import OpenAI from "openai";
import { createTrackedClient } from "@owlmetric/tracker";

const client = createTrackedClient(OpenAI, {
  apiKey: process.env.OPENAI_API_KEY,
  owlmetricToken: process.env.OWLMETRIC_OPENAI_TOKEN!,
});

// Regular (non-streaming) completion — the call shape is identical to the vanilla SDK.
const completion = await client.chat.completions.create({
  model: "gpt-4",
  messages: [{ role: "user", content: "Hello!" }],
});

// Streaming completion — iterate the async chunk stream exactly as you normally would.
const stream = await client.chat.completions.create({
  model: "gpt-4",
  stream: true,
  messages: [{ role: "user", content: "Count from 1 to 5" }],
});
for await (const chunk of stream) {
  const text = chunk.choices?.[0]?.delta?.content;
  if (text) process.stdout.write(text);
}
// Wrap the Anthropic SDK so usage is tracked by Owlmetric.
import Anthropic from "@anthropic-ai/sdk";
import { createTrackedClient } from "@owlmetric/tracker";

const client = createTrackedClient(Anthropic, {
  apiKey: process.env.ANTHROPIC_API_KEY,
  owlmetricToken: process.env.OWLMETRIC_ANTHROPIC_TOKEN!,
});

// Regular (non-streaming) completion.
const completion = await client.messages.create({
  model: "claude-sonnet-4-20250514",
  max_tokens: 50,
  messages: [{ role: "user", content: "Hello!" }],
});

// Streaming completion — consume server-sent events from the async iterator.
const streamResponse = await client.messages.create({
  model: "claude-sonnet-4-20250514",
  max_tokens: 100,
  messages: [{ role: "user", content: "Count from 1 to 5" }],
  stream: true,
});
for await (const event of streamResponse) {
  // Only text deltas carry printable content; other event types are skipped.
  const isText =
    event.type === "content_block_delta" && event.delta?.type === "text_delta";
  if (isText) {
    process.stdout.write(event.delta.text);
  }
}
// Wrap the Google Gen AI SDK so usage is tracked by Owlmetric.
import { GoogleGenAI } from "@google/genai";
import { createTrackedClient } from "@owlmetric/tracker";

const client = createTrackedClient(GoogleGenAI, {
  apiKey: process.env.GEMINI_API_KEY,
  owlmetricToken: process.env.OWLMETRIC_GEMINI_TOKEN!,
});

// Regular completion
const completion = await client.models.generateContent({
  model: "gemini-2.5-flash",
  contents: "Hello!",
});

// Streaming completion
const stream = await client.models.generateContentStream({
  model: "gemini-2.5-flash",
  contents: "Count from 1 to 5",
});
for await (const chunk of stream) {
  // Some SDK versions expose `text` as a method, newer ones as a getter
  // property — invoke it when it is a function so we always end up with
  // the string (the original ternary returned the same value on both
  // branches and would hand a function to stdout when `text` is a method).
  const text = typeof chunk.text === "function" ? chunk.text() : chunk.text;
  if (text) process.stdout.write(text);
}
npm install @owlmetric/tracker @vercel/otel @opentelemetry/sdk-logs @opentelemetry/api-logs @opentelemetry/instrumentation
Create instrumentation.ts in your Next.js root directory:
// Next.js instrumentation hook: route OpenTelemetry traces to Owlmetric.
import { registerOTel } from "@vercel/otel";
import { OwlMetricTraceExporter } from "@owlmetric/tracker/owlmetric_trace_exporter";

export function register() {
  const traceExporter = new OwlMetricTraceExporter();
  registerOTel({ serviceName: "next-app", traceExporter });
}
// app/api/chat/route.ts
import { openai } from "@ai-sdk/openai";
import { anthropic } from "@ai-sdk/anthropic";
import { google } from "@ai-sdk/google";
import { streamText } from "ai";

export const maxDuration = 30;

// Chat endpoint: streams model output back to the client while the
// telemetry metadata carries the Owlmetric token so traces are attributed.
export async function POST(req: Request) {
  const { messages } = await req.json();

  const telemetry = {
    isEnabled: true,
    metadata: {
      xOwlToken: process.env.OWLMETRIC_TOKEN, // Your Owlmetric token
    },
  };

  const result = streamText({
    model: openai("gpt-4o"), // or anthropic("claude-3-haiku-20240307"), google("gemini-2.0-flash")
    messages,
    experimental_telemetry: telemetry,
  });

  return result.toDataStreamResponse();
}
// Standalone (non-Next.js) usage: initialize the Owlmetric trace logger
// once, then enable telemetry on each AI SDK call.
import { OwlMetricTraceLogger } from "@owlmetric/tracker/owlmetric_trace_logger";
import { createOpenAI } from "@ai-sdk/openai";
import { generateText, streamText } from "ai";

// Initialize the logger before making any model calls.
const logger = new OwlMetricTraceLogger();
logger.init();

const ai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
const model = ai("gpt-4");

// Telemetry metadata is attached to the emitted trace; xOwlToken routes
// it to your Owlmetric project.
const { textStream } = streamText({
  model,
  prompt: "Hello, world!",
  experimental_telemetry: {
    isEnabled: true,
    metadata: {
      userId: "user-123",
      sessionId: "session-456",
      xOwlToken: process.env.OWLMETRIC_TOKEN,
    },
  },
});

for await (const textPart of textStream) {
  console.log(textPart);
}
Set up your environment variables:
# Provider API Keys
OPENAI_API_KEY=your_openai_key
ANTHROPIC_API_KEY=your_anthropic_key
GEMINI_API_KEY=your_gemini_key
# Owlmetric Tokens (one per provider)
OWLMETRIC_OPENAI_TOKEN=pt_your_openai_token
OWLMETRIC_ANTHROPIC_TOKEN=pt_your_anthropic_token
OWLMETRIC_GEMINI_TOKEN=pt_your_gemini_token
createTrackedClient(ClientClass, options)

Creates a tracked version of your AI client.
Parameters:
- ClientClass: The original client class (OpenAI, Anthropic, GoogleGenAI)
- options: Configuration object including the provider API key and your Owlmetric token

Returns: A tracked client instance with the same API as the original
OwlMetricTraceExporter

An OpenTelemetry trace exporter for Owlmetric.
// Plug into any OpenTelemetry pipeline as a span exporter.
// NOTE(review): the Next.js example above imports this from
// "@owlmetric/tracker/owlmetric_trace_exporter" — confirm which import path is public.
import { OwlMetricTraceExporter } from "@owlmetric/tracker";
const exporter = new OwlMetricTraceExporter();
OwlMetricTraceLogger

A logger for initializing OpenTelemetry tracing.
// Registers OpenTelemetry tracing so AI SDK telemetry reaches Owlmetric.
// NOTE(review): the example above imports this from
// "@owlmetric/tracker/owlmetric_trace_logger" — confirm which import path is public.
import { OwlMetricTraceLogger } from "@owlmetric/tracker";
const logger = new OwlMetricTraceLogger();
logger.init(); // call once at application startup, before any model calls
SEE LICENSE IN LICENSE.md
For issues and support, please visit our GitHub repository.