Set up observability for Groq integrations: latency histograms, token throughput, rate limit gauges, cost tracking, and Prometheus alerts. Trigger with phrases like "groq monitoring", "groq metrics", "groq observability", "monitor groq", "groq alerts", "groq dashboard".
Monitor Groq LPU inference for latency, token throughput, rate limit utilization, and cost. Groq's defining advantage is speed (280-560 tok/s), so latency degradation is the highest-priority signal. The API returns rich timing metadata (queue_time, prompt_time, completion_time) and rate limit headers on every response.
| Metric | Type | Source | Why |
|---|---|---|---|
| TTFT (time to first token) | Histogram | Client-side timing (requires streaming; non-streaming code can only approximate it from `prompt_time`) | Groq's main value prop |
| Tokens/second | Gauge | usage.completion_time | Throughput degradation |
| Total latency | Histogram | Client-side timing | End-to-end performance |
| Rate limit remaining | Gauge | x-ratelimit-remaining-* headers | Prevent 429s |
| Token usage | Counter | usage.total_tokens | Cost attribution |
| Error rate by code | Counter | Error handler | Availability |
| Estimated cost | Counter | Tokens * model price | Budget tracking |
import Groq from "groq-sdk";
// Reads GROQ_API_KEY from the environment by default.
const groq = new Groq();
/** Per-request measurements captured from one Groq chat completion. */
interface GroqMetrics {
model: string; // model id sent with the request; used as the metric label
latencyMs: number; // end-to-end client-side wall time, rounded to whole ms
ttftMs: number; // NOTE(review): derived from server-side prompt_time below, not a true client-measured TTFT — true TTFT requires streaming; confirm acceptable
tokensPerSec: number; // completion tokens / server completion_time (falls back to total latency)
promptTokens: number; // input tokens billed for this request
completionTokens: number; // output tokens billed for this request
totalTokens: number; // prompt + completion, as reported by the API
queueTimeMs: number; // server-side queue wait (usage.queue_time), in ms
estimatedCostUsd: number; // client-side estimate from PRICE_PER_1M; not an invoice figure
}
// USD per 1M tokens, split by direction. Used only for the estimatedCostUsd
// metric — keep in sync with Groq's published pricing page; unknown models
// fall back to $0.10/$0.10 in trackedCompletion.
const PRICE_PER_1M: Record<string, { input: number; output: number }> = {
"llama-3.1-8b-instant": { input: 0.05, output: 0.08 },
"llama-3.3-70b-versatile": { input: 0.59, output: 0.79 },
"llama-3.3-70b-specdec": { input: 0.59, output: 0.99 },
"meta-llama/llama-4-scout-17b-16e-instruct": { input: 0.11, output: 0.34 },
};
async function trackedCompletion(
model: string,
messages: any[],
options?: { maxTokens?: number; temperature?: number }
): Promise<{ result: any; metrics: GroqMetrics }> {
const start = performance.now();
const result = await groq.chat.completions.create({
model,
messages,
max_tokens: options?.maxTokens ?? 1024,
temperature: options?.temperature ?? 0.7,
});
const latencyMs = performance.now() - start;
const usage = result.usage!;
const pricing = PRICE_PER_1M[model] || { input: 0.10, output: 0.10 };
const metrics: GroqMetrics = {
model,
latencyMs: Math.round(latencyMs),
ttftMs: Math.round(((usage as any).prompt_time ?? 0) * 1000),
tokensPerSec: Math.round(
usage.completion_tokens / ((usage as any).completion_time || latencyMs / 1000)
),
promptTokens: usage.prompt_tokens,
completionTokens: usage.completion_tokens,
totalTokens: usage.total_tokens,
queueTimeMs: Math.round(((usage as any).queue_time ?? 0) * 1000),
estimatedCostUsd:
(usage.prompt_tokens / 1_000_000) * pricing.input +
(usage.completion_tokens / 1_000_000) * pricing.output,
};
emitMetrics(metrics);
return { result, metrics };
}
import { Histogram, Counter, Gauge } from "prom-client";
// Prometheus collectors. Metric names are part of the external contract
// (dashboards/alert rules reference them) — do not rename.

// End-to-end request latency; buckets chosen for Groq's sub-second norm.
const groqLatency = new Histogram({
name: "groq_latency_ms",
help: "Groq API latency in milliseconds",
labelNames: ["model"],
buckets: [50, 100, 200, 500, 1000, 2000, 5000],
});
// Monotonic token totals, split by direction ("input"/"output") for cost attribution.
const groqTokens = new Counter({
name: "groq_tokens_total",
help: "Total tokens processed",
labelNames: ["model", "direction"],
});
// Last observed generation speed per model; a gauge, so it reflects the most recent request only.
const groqThroughput = new Gauge({
name: "groq_tokens_per_second",
help: "Current tokens per second",
labelNames: ["model"],
});
// Remaining quota from x-ratelimit-remaining-* headers; type label is "requests" or "tokens".
const groqRateLimitRemaining = new Gauge({
name: "groq_ratelimit_remaining",
help: "Remaining rate limit quota",
labelNames: ["type"],
});
// Running USD estimate derived from PRICE_PER_1M — budget tracking, not billing truth.
const groqCost = new Counter({
name: "groq_cost_usd",
help: "Estimated cost in USD",
labelNames: ["model"],
});
// Failures by HTTP status code; "0" means no status (network error/timeout).
const groqErrors = new Counter({
name: "groq_errors_total",
help: "API errors by status code",
labelNames: ["model", "status_code"],
});
/**
 * Push one request's measurements into the Prometheus collectors.
 * Errors are tracked separately (groqErrors) at the call site, so this
 * only handles the success-path metrics.
 */
function emitMetrics(m: GroqMetrics) {
  const { model } = m;
  groqCost.labels(model).inc(m.estimatedCostUsd);
  groqThroughput.labels(model).set(m.tokensPerSec);
  groqLatency.labels(model).observe(m.latencyMs);
  groqTokens.labels(model, "input").inc(m.promptTokens);
  groqTokens.labels(model, "output").inc(m.completionTokens);
}
/**
 * Parse rate limit headers from any Groq response and update the gauges.
 *
 * Fix: the previous version defaulted a missing header to "0", which set
 * the gauge to zero remaining and fired "quota exhausted" alerts on
 * responses that simply lacked the header. Gauges are now only touched
 * when the header is present and numeric; the return value keeps the
 * original shape (missing/garbled headers still report 0).
 *
 * @param headers Response headers, keyed lowercase as the HTTP layer delivers them.
 * @returns Remaining request and token quota (0 when unknown).
 */
function trackRateLimitHeaders(headers: Record<string, string>) {
  const read = (name: string): number | undefined => {
    const raw = headers[name];
    if (raw === undefined) return undefined;
    const value = Number.parseInt(raw, 10); // explicit radix
    return Number.isNaN(value) ? undefined : value;
  };
  const requests = read("x-ratelimit-remaining-requests");
  const tokens = read("x-ratelimit-remaining-tokens");
  if (requests !== undefined) {
    groqRateLimitRemaining.labels("requests").set(requests);
  }
  if (tokens !== undefined) {
    groqRateLimitRemaining.labels("tokens").set(tokens);
  }
  return { requests: requests ?? 0, tokens: tokens ?? 0 };
}
# prometheus/groq-alerts.yml