Use when working with Sumo Logic — cloud-native log analytics, metrics queries, monitors, dashboards, and content management. Covers log search, metrics exploration, alerting rules, folder/content management, and data ingestion health. Use when searching logs, querying metrics, managing monitors, or analyzing Sumo Logic resources via API.
Search, analyze, and manage Sumo Logic resources using the Sumo Logic API.
Uses HTTP Basic auth with accessId:accessKey — injected automatically. Never hardcode credentials.
Region-specific deployment URLs:
- US1: https://api.sumologic.com/api/v1/
- US2: https://api.us2.sumologic.com/api/v1/
- EU: https://api.eu.sumologic.com/api/v1/
Set SUMOLOGIC_BASE_URL to the deployment that matches your account. Pipe responses through jq to extract only the needed fields.
#!/bin/bash
#######################################
# Call the Sumo Logic REST API.
# Arguments: $1 - HTTP method (GET/POST/DELETE), $2 - endpoint path (leading /),
#            $3 - optional JSON request body.
# Globals:   SUMOLOGIC_ACCESS_ID, SUMOLOGIC_ACCESS_KEY, SUMOLOGIC_BASE_URL (read)
# Outputs:   raw JSON response on stdout.
#######################################
sumo_api() {
  local method="$1"
  local endpoint="$2"
  local data="${3:-}"
  # Build the curl invocation once instead of duplicating it per branch.
  local args=(
    -s -X "$method"
    -u "${SUMOLOGIC_ACCESS_ID}:${SUMOLOGIC_ACCESS_KEY}"
    -H "Content-Type: application/json"
  )
  # Only attach a request body when one was supplied.
  if [ -n "$data" ]; then
    args+=(-d "$data")
  fi
  curl "${args[@]}" "${SUMOLOGIC_BASE_URL}${endpoint}"
}
#######################################
# Run a Sumo Logic search job and print the first page of messages.
# Arguments: $1 - query string, $2 - from time (default -1h), $3 - to time (default now).
# Outputs:   messages JSON on stdout; diagnostics on stderr.
# Returns:   0 on success; 1 if the job cannot be created, fails, or times out.
#######################################
sumo_search() {
  local query="$1"
  local from="${2:--1h}"
  local to="${3:-now}"
  local job_id state rc tries=0
  # Split declaration from assignment so a failed pipeline is not masked.
  job_id=$(sumo_api POST "/search/jobs" \
    "{\"query\":\"${query}\",\"from\":\"${from}\",\"to\":\"${to}\",\"timeZone\":\"UTC\"}" \
    | jq -r '.id')
  if [ -z "$job_id" ] || [ "$job_id" = "null" ]; then
    echo "sumo_search: failed to create search job" >&2
    return 1
  fi
  # Poll until the job finishes; bail out on terminal failure states instead
  # of looping forever (the original only recognized the success state).
  while true; do
    state=$(sumo_api GET "/search/jobs/${job_id}" | jq -r '.state')
    case "$state" in
      "DONE GATHERING RESULTS") break ;;
      CANCELLED|"FORCE PAUSED")
        echo "sumo_search: job ${job_id} ended in state ${state}" >&2
        return 1 ;;
    esac
    tries=$((tries + 1))
    if [ "$tries" -ge 150 ]; then  # ~5 minutes at 2s per poll
      echo "sumo_search: timed out waiting for job ${job_id}" >&2
      sumo_api DELETE "/search/jobs/${job_id}" >/dev/null 2>&1
      return 1
    fi
    sleep 2
  done
  sumo_api GET "/search/jobs/${job_id}/messages?offset=0&limit=100"
  rc=$?
  # Clean up the server-side search job regardless of the fetch outcome.
  sumo_api DELETE "/search/jobs/${job_id}" >/dev/null 2>&1
  return "$rc"
}
# Prefetch core inventories concurrently, but buffer each response in a temp
# file: backgrounded jobs writing straight to stdout interleave their JSON
# payloads, producing unparseable output.
tmpdir=$(mktemp -d)
sumo_api GET "/monitors?limit=50" > "${tmpdir}/monitors" &
sumo_api GET "/collectors?limit=50" > "${tmpdir}/collectors" &
sumo_api GET "/content/folders/personal" > "${tmpdir}/personal" &
wait
cat "${tmpdir}/monitors" "${tmpdir}/collectors" "${tmpdir}/personal"
rm -rf -- "${tmpdir}"
NEVER assume source categories, collector names, or field names exist. ALWAYS discover first.
#!/bin/bash
# Phase 1 discovery: enumerate collectors, source categories, and monitors
# before querying anything by name. Relies on sumo_api/sumo_search helpers
# defined earlier in this skill.
echo "=== Installed Collectors ==="
# /collectors wraps results in a "collectors" array.
sumo_api GET "/collectors?limit=50" \
| jq -r '.collectors[] | "\(.name)\t\(.collectorType)\t\(.alive)"' | head -20
echo "=== Source Categories ==="
# Aggregated search results expose fields via each message's "map" object;
# note field names are lowercased there (_sourcecategory, _count).
sumo_search "_sourceCategory=* | count by _sourceCategory | sort by _count desc | limit 20" "-1h" \
| jq -r '.messages[].map | "\(._sourcecategory)\t\(._count)"'
echo "=== Active Monitors ==="
# NOTE(review): this jq assumes the monitors endpoint returns a top-level
# array — confirm against the deployment's API version.
sumo_api GET "/monitors?limit=50" \
| jq -r '.[] | "\(.name)\t\(.monitorType)\t\(.status)"' | head -20
#!/bin/bash
# Error triage: where errors originate, and what the dominant error types are.
echo "=== Error Logs (last 1h) ==="
# Aggregated search fields come back lowercased in each message's "map".
sumo_search "_sourceCategory=* error | count by _sourceCategory, _sourceHost | sort by _count desc | limit 20" "-1h" \
| jq -r '.messages[].map | "\(._sourcecategory)\t\(._sourcehost)\t\(._count)"'
echo ""
echo "=== Top Error Patterns ==="
# parse extracts errorType/errorMsg from messages shaped like "...Error: ...".
sumo_search "error OR exception | parse \"*Error: *\" as errorType, errorMsg | count by errorType | sort by _count desc | limit 10" "-1h" \
| jq -r '.messages[].map | "\(.errortype)\t\(._count)"'
#!/bin/bash
# Metrics exploration: discover available dimensions first, then query a
# specific metric per host.
echo "=== Available Metric Dimensions ==="
# Normalized the original's confusing quote-splice ("'-1h'") into a plain
# literal, matching the second query below.
sumo_api POST "/metrics/results" \
  '{"query":[{"query":"metric=CPU_*","rowId":"A"}],"startTime":"-1h","endTime":"now"}' \
  | jq -r '.response[] | .results[].metric.dimensions | to_entries[] | "\(.key)=\(.value)"' \
  | sort -u | head -20
echo ""
echo "=== CPU Metrics by Host ==="
# datapoints.value[-1] takes the most recent sample for each host series.
sumo_api POST "/metrics/results" \
  '{"query":[{"query":"metric=CPU_Total | avg by host","rowId":"A"}],"startTime":"-1h","endTime":"now"}' \
  | jq -r '.response[].results[] | "\(.metric.dimensions.host)\t\(.datapoints.value[-1])"' | head -15
#!/bin/bash
# Monitor overview: counts by status, triggered monitors, and notification
# channel coverage.
echo "=== Monitor Status Summary ==="
# NOTE(review): group_by assumes the endpoint returns a top-level array —
# confirm against the deployment's API version.
sumo_api GET "/monitors?limit=100" \
| jq -r 'group_by(.status) | .[] | "\(.[0].status): \(length)"'
echo ""
echo "=== Triggered Monitors ==="
# Only Critical/Warning monitors are shown here.
sumo_api GET "/monitors?limit=100" \
| jq -r '.[] | select(.status == "Critical" or .status == "Warning") | "\(.status)\t\(.name)\t\(.monitorType)"' | head -15
echo ""
echo "=== Monitor Notification Channels ==="
# A monitor with 0 notifications fires silently — worth flagging in reports.
sumo_api GET "/monitors?limit=50" \
| jq -r '.[] | "\(.name)\t\(.notifications | length) notifications"' | head -15
#!/bin/bash
# Content overview: list the personal folder's children and recent dashboards.
echo "=== Personal Folder Content ==="
FOLDER_ID=$(sumo_api GET "/content/folders/personal" | jq -r '.id')
# Guard against a failed lookup: jq emits "null" (or nothing) and the
# original would then request /content/folders/null.
if [ -z "$FOLDER_ID" ] || [ "$FOLDER_ID" = "null" ]; then
  echo "Could not resolve personal folder id" >&2
else
  # createdAt[0:10] keeps just the YYYY-MM-DD portion of the timestamp.
  sumo_api GET "/content/folders/${FOLDER_ID}" \
    | jq -r '.children[] | "\(.itemType)\t\(.name)\t\(.createdAt[0:10])"' | head -20
fi
echo ""
echo "=== Recent Dashboards ==="
sumo_api GET "/dashboards?limit=20" \
  | jq -r '.dashboards[] | "\(.id)\t\(.title)\t\(.folderId)"' | head -15
#!/bin/bash
# Collector health and ingestion volume report.
# Runs sequentially on purpose: the original backgrounded the two collector
# pipelines, so the "Dead Collectors" header printed before (and interleaved
# with) the health table still being written by the first job.
echo "=== Collector Health ==="
# NOTE(review): [0:16] assumes lastSeenAlive is a string timestamp — confirm;
# some deployments return epoch millis, which jq cannot slice.
sumo_api GET "/collectors?limit=50" \
  | jq -r '.collectors[] | "\(.name)\t\(.collectorType)\talive:\(.alive)\t\(.lastSeenAlive[0:16] // "never")"' | head -15
echo "=== Dead Collectors ==="
sumo_api GET "/collectors?limit=100" \
  | jq -r '.collectors[] | select(.alive == false) | "\(.name)\t\(.collectorType)\tlast_seen:\(.lastSeenAlive[0:16] // "never")"' | head -10
echo ""
echo "=== Ingestion Volume ==="
# The sumologic_volume index tracks ingest bytes per source category.
sumo_search "_index=sumologic_volume | sum(_size) by _sourceCategory | sort by _sum desc | limit 15" "-24h" \
  | jq -r '.messages[].map | "\(._sourcecategory)\t\(._sum)"'
Present results as a structured report:
Monitoring Sumologic Report
═══════════════════════════
Resources discovered: [count]
Resource Status Key Metric Issues
──────────────────────────────────────────────
[name] [ok/warn] [value] [findings]
Summary: [total] resources | [ok] healthy | [warn] warnings | [crit] critical
Action Items: [list of prioritized findings]
Target ≤50 lines of output. Use tables for multi-resource comparisons.
| Shortcut | Counter | Why |
|---|---|---|
| "I'll skip discovery and check known resources" | Always run Phase 1 discovery first | Resource names change, new resources appear — assumed names cause errors |
| "The user only asked for a quick check" | Follow the full discovery → analysis flow | Quick checks miss critical issues; structured analysis catches silent failures |
| "Default configuration is probably fine" | Audit configuration explicitly | Defaults often leave logging, security, and optimization features disabled |
| "Metrics aren't needed for this" | Always check relevant metrics when available | API/CLI responses show current state; metrics reveal trends and intermittent issues |
| "I don't have access to that" | Try the command and report the actual error | Assumed permission failures prevent useful investigation; actual errors are informative |
Quick reference: search jobs are asynchronous — poll job state until DONE GATHERING RESULTS before fetching messages. Time ranges may be absolute ISO 8601 (2024-01-01T00:00:00Z) or relative (-1h, -24h). Use /metrics/results for metrics queries and /search/jobs for log searches. Paginate with offset and limit — check the total field for remaining items. Aggregate with | count by field | sort by _count.