Generate Azure Cache for Redis configurations with clustering, geo-replication, caching patterns, and Redis modules. Use when the user wants to set up in-memory caching, session management, or real-time data processing.
You are an Azure Cache for Redis expert. Generate production-ready in-memory caching and data store configurations.
Determine from user input or $ARGUMENTS:
| Tier | Replication | Clustering | VNET | Persistence | Modules | SLA |
|---|---|---|---|---|---|---|
| Basic | No | No | No | No | No | None |
| Standard | Yes (1 replica) | No | No | No | No | 99.9% |
| Premium | Yes (up to 3) | Yes (up to 10 shards) | Yes | Yes (RDB/AOF) | No | 99.9% |
| Enterprise | Yes | Yes (up to 500 shards) | Yes | Yes | Yes | 99.99% |
| Enterprise Flash | Yes | Yes | Yes | Yes (RDB) | Yes | 99.99% |
Enterprise tier modules include RediSearch and RedisJSON (configured on the Enterprise database resource shown below):
Bicep (Standard tier):
// Parameters shared by the cache resources in this template.
param location string = resourceGroup().location
param cacheName string

// Standard-tier cache (C family, capacity 2).
// - Non-SSL port (6379) disabled; TLS 1.2 minimum.
// - allkeys-lru evicts least-recently-used keys across the whole keyspace,
//   appropriate for a pure cache with no must-keep data.
// - maxmemory-reserved / maxfragmentationmemory-reserved are in MB.
// - Public network access disabled: pair with a private endpoint.
resource redisCache 'Microsoft.Cache/redis@2023-08-01' = {
  name: cacheName
  location: location
  properties: {
    sku: {
      name: 'Standard'
      family: 'C'
      capacity: 2
    }
    enableNonSslPort: false
    minimumTlsVersion: '1.2'
    redisConfiguration: {
      'maxmemory-policy': 'allkeys-lru'
      'maxmemory-reserved': '50'
      'maxfragmentationmemory-reserved': '50'
    }
    publicNetworkAccess: 'Disabled'
  }
}
Bicep (Premium tier with clustering):
// Premium-tier cache with clustering, zone redundancy, and RDB persistence.
// NOTE(review): assumes storageConnectionString and subnetId are declared as
// params elsewhere in the template — confirm before deploying.
resource redisPremium 'Microsoft.Cache/redis@2023-08-01' = {
  name: cacheName
  location: location
  zones: ['1', '2', '3'] // spread nodes across three availability zones
  properties: {
    sku: {
      name: 'Premium'
      family: 'P'
      capacity: 1
    }
    enableNonSslPort: false
    minimumTlsVersion: '1.2'
    shardCount: 3        // cluster with 3 shards
    replicasPerMaster: 1 // one replica per shard primary
    redisConfiguration: {
      // volatile-lru: only keys carrying a TTL are evicted — suited to
      // mixed persistent + cache data.
      'maxmemory-policy': 'volatile-lru'
      'maxmemory-reserved': '200'
      // RDB snapshot every 60 minutes into the given storage account.
      'rdb-backup-enabled': 'true'
      'rdb-backup-frequency': '60'
      'rdb-storage-connection-string': storageConnectionString
    }
    publicNetworkAccess: 'Disabled'
    subnetId: subnetId // VNET injection (Premium-only capability)
  }
}
Terraform:
# Standard-tier cache (C family, capacity 2): TLS 1.2+ only, public network
# access disabled. allkeys-lru suits a pure cache; reserved values are MB.
resource "azurerm_redis_cache" "main" {
  name                = var.cache_name
  location            = azurerm_resource_group.main.location
  resource_group_name = azurerm_resource_group.main.name
  capacity            = 2
  family              = "C"
  sku_name            = "Standard"

  enable_non_ssl_port           = false
  minimum_tls_version           = "1.2"
  public_network_access_enabled = false

  redis_configuration {
    maxmemory_reserved              = 50
    maxfragmentationmemory_reserved = 50
    maxmemory_policy                = "allkeys-lru"
  }

  tags = var.tags
}
# Premium tier with clustering: 3 shards, 1 replica per primary, zone
# redundant, VNET-injected, with hourly RDB snapshots to a storage account.
resource "azurerm_redis_cache" "premium" {
  name                = var.cache_name
  location            = azurerm_resource_group.main.location
  resource_group_name = azurerm_resource_group.main.name
  capacity            = 1
  family              = "P"
  sku_name            = "Premium"

  enable_non_ssl_port           = false
  minimum_tls_version           = "1.2"
  shard_count                   = 3
  replicas_per_master           = 1
  public_network_access_enabled = false
  subnet_id                     = azurerm_subnet.redis.id
  zones                         = ["1", "2", "3"]

  redis_configuration {
    maxmemory_reserved              = 200
    maxfragmentationmemory_reserved = 200
    # volatile-lru: evict only keys that carry a TTL.
    maxmemory_policy              = "volatile-lru"
    rdb_backup_enabled            = true
    rdb_backup_frequency          = 60
    rdb_storage_connection_string = azurerm_storage_account.main.primary_connection_string
  }

  tags = var.tags
}
Bicep (Enterprise tier with modules):
// Enterprise-tier (Redis Enterprise) cluster, zone redundant.
resource redisEnterprise 'Microsoft.Cache/redisEnterprise@2023-11-01' = {
  name: cacheName
  location: location
  sku: {
    name: 'Enterprise_E10'
    capacity: 2
  }
  zones: ['1', '2', '3']
}

// Database on the Enterprise cluster: TLS-only clients, LRU eviction,
// RediSearch + RedisJSON modules, RDB snapshots every 6 hours.
resource redisDatabase 'Microsoft.Cache/redisEnterprise/databases@2023-11-01' = {
  parent: redisEnterprise
  name: 'default'
  properties: {
    clientProtocol: 'Encrypted'           // require TLS connections
    evictionPolicy: 'AllKeysLRU'
    clusteringPolicy: 'EnterpriseCluster' // single-endpoint clustering
    modules: [
      { name: 'RediSearch' }
      { name: 'RedisJSON' }
    ]
    persistence: {
      rdbEnabled: true
      rdbFrequency: '6h'
    }
  }
}
Active geo-replication (Enterprise tier):
// Active geo-replication group linking Enterprise databases across regions.
// NOTE(review): this re-declares the 'default' database child resource; in a
// single template, merge the geoReplication block into the database
// definition above rather than declaring the same child twice — verify.
resource geoReplication 'Microsoft.Cache/redisEnterprise/databases@2023-11-01' = {
  parent: redisEnterprise
  name: 'default'
  properties: {
    geoReplication: {
      groupNickname: 'myapp-geo'
      linkedDatabases: [
        { id: primaryDatabaseId }
        { id: secondaryDatabaseId }
      ]
    }
  }
}
Passive geo-replication (Premium tier):
# Link an existing cache as a read-only secondary of the primary
# (Premium-tier passive geo-replication). Run against the primary cache.
az redis server-link create \
  --name primary-cache \
  --resource-group rg-primary \
  --server-to-link /subscriptions/.../secondary-cache \
  --replication-role Secondary
// Private endpoint so clients reach the cache over the VNET — required
// because the cache sets publicNetworkAccess: 'Disabled'.
resource privateEndpoint 'Microsoft.Network/privateEndpoints@2023-04-01' = {
  name: '${cacheName}-pe'
  location: location
  properties: {
    subnet: {
      id: subnetId
    }
    privateLinkServiceConnections: [
      {
        name: '${cacheName}-plsc'
        properties: {
          privateLinkServiceId: redisCache.id
          groupIds: ['redisCache'] // private-link sub-resource for Azure Redis
        }
      }
    ]
  }
}
Cache-Aside (Lazy Loading):
// ioredis client configured for Azure Cache for Redis.
const redis = require('ioredis');
const client = new redis({
  host: process.env.REDIS_HOST,
  port: 6380, // Azure's TLS port (the non-SSL port 6379 is disabled)
  password: process.env.REDIS_KEY, // presumably the cache access key — verify
  tls: { servername: process.env.REDIS_HOST } // SNI for the TLS handshake
});
/**
 * Cache-aside (lazy-loading) read: serve from Redis when present,
 * otherwise load from the database and populate the cache with a 1h TTL.
 * @param {string|number} userId - primary key of the user row
 * @returns {Promise<object|null>} the user record, or null if not found
 */
async function getUser(userId) {
  const cacheKey = `user:${userId}`;
  // 1. Check cache
  const cached = await client.get(cacheKey);
  if (cached) {
    return JSON.parse(cached);
  }
  // 2. Cache miss - query database
  const user = await db.query('SELECT * FROM users WHERE id = ?', [userId]);
  // 3. Guard against cache penetration: the original cached misses too —
  //    JSON.stringify(undefined) is undefined and null becomes the string
  //    "null", poisoning the cache with unparseable/bogus entries.
  if (user == null) {
    return null;
  }
  // 4. Populate cache with TTL (seconds)
  await client.setex(cacheKey, 3600, JSON.stringify(user));
  return user;
}
/**
 * Cache-aside write path: persist the change, then drop the cached copy so
 * the next read repopulates from the database.
 * @param {string|number} userId - primary key of the user row
 * @param {object} data - column/value pairs to update
 */
async function updateUser(userId, data) {
  const cacheKey = `user:${userId}`;
  // Database is the source of truth — update it first.
  await db.query('UPDATE users SET ? WHERE id = ?', [data, userId]);
  // Invalidate rather than update the cache to avoid writing stale data.
  await client.del(cacheKey);
}
Write-Through:
/**
 * Write-through save: persist the order, then mirror it into the cache
 * with a 2h TTL so subsequent reads are warm.
 * @param {object} order - order record; must have an `id` property
 * @returns {Promise<object>} the saved order
 */
async function saveOrder(order) {
  const cacheKey = `order:${order.id}`;
  // Write the database first. The original ran both writes in parallel via
  // Promise.all, which could leave a cache entry for an order whose DB
  // insert failed — readers would then see a phantom order.
  await db.query('INSERT INTO orders SET ?', order);
  await client.setex(cacheKey, 7200, JSON.stringify(order));
  return order;
}
Write-Behind (with queue):
/**
 * Write-behind save: the caller is acknowledged after the cache write;
 * a background worker drains 'db:write-queue' and persists asynchronously.
 * @param {object} metric - metric record; must have an `id` property
 */
async function saveMetric(metric) {
  const cacheKey = `metric:${metric.id}`;
  // 1. Write to cache immediately (1h TTL)
  await client.setex(cacheKey, 3600, JSON.stringify(metric));
  // 2. Queue for async database write
  const job = JSON.stringify({
    table: 'metrics',
    data: metric,
    timestamp: Date.now()
  });
  await client.rpush('db:write-queue', job);
}
Session management:
// Server-side sessions backed by Redis; the cookie carries only the
// session id, while session data lives in the cache.
const session = require('express-session');
const RedisStore = require('connect-redis').default;
app.use(session({
  store: new RedisStore({ client: redisClient }),
  secret: process.env.SESSION_SECRET,
  resave: false,            // don't rewrite unchanged sessions
  saveUninitialized: false, // don't persist empty sessions
  cookie: {
    secure: true,   // send over HTTPS only
    httpOnly: true, // not readable from client-side JS
    maxAge: 1800000 // 30 minutes
  }
}));
Rate limiting:
/**
 * Fixed-window rate limiter keyed per client.
 * @param {string} clientId - identifier of the caller being limited
 * @param {number} limit - max requests allowed per window
 * @param {number} windowSeconds - window length in seconds
 * @returns {Promise<{remaining: number, limit: number}>}
 * @throws {Error} when the caller has exceeded the limit
 */
async function rateLimit(clientId, limit, windowSeconds) {
  const key = `ratelimit:${clientId}`;
  const current = await client.incr(key);
  if (current === 1) {
    await client.expire(key, windowSeconds);
  } else {
    // Heal keys left without a TTL (e.g. a crash between INCR and EXPIRE):
    // TTL of -1 means "exists but never expires", which would rate-limit
    // this client forever under the original code.
    const existingTtl = await client.ttl(key);
    if (existingTtl === -1) {
      await client.expire(key, windowSeconds);
    }
  }
  if (current > limit) {
    const ttl = await client.ttl(key);
    throw new Error(`Rate limit exceeded. Retry after ${ttl} seconds.`);
  }
  return { remaining: limit - current, limit };
}
Leaderboard with sorted sets:
// Add/update score — ZADD upserts the member's score in the sorted set.
await client.zadd('leaderboard:weekly', score, `player:${playerId}`);
// Get top 10 — highest scores first, scores interleaved with members.
const top10 = await client.zrevrange('leaderboard:weekly', 0, 9, 'WITHSCORES');
// Get player rank — 0-based, in descending score order.
const rank = await client.zrevrank('leaderboard:weekly', `player:${playerId}`);
Distributed locking:
const Redlock = require('redlock');
// Redlock instance over the single client; retries acquisition up to
// 3 times, 200ms apart, before rejecting.
const redlock = new Redlock([client], {
  retryCount: 3,
  retryDelay: 200
});

// Run fn() while holding `lock:<resourceId>` (30s lock duration);
// the lock is always released, even when fn() throws.
async function processExclusively(resourceId, fn) {
  const lock = await redlock.acquire([`lock:${resourceId}`], 30000);
  try {
    return await fn();
  } finally {
    await lock.release();
  }
}
| Policy | Behavior | Use Case |
|---|---|---|
| volatile-lru | Evict LRU keys with TTL set | Mixed persistent + cache data |
| allkeys-lru | Evict LRU across all keys | Pure cache |
| volatile-ttl | Evict keys with shortest TTL | Time-sensitive data |
| volatile-random | Evict random key with TTL | When expiring keys are equally important |
| allkeys-random | Evict random key | When all data is equally important |
| noeviction | Return error when full | When data loss is unacceptable |
# Update the cache configuration (example: switch the eviction policy).
az redis update --name $CACHE_NAME \
  --resource-group $RG \
  --set "redisConfiguration.maxmemory-policy=allkeys-lru"
# Query cache metrics at 1-minute resolution.
az monitor metrics list \
  --resource $CACHE_RESOURCE_ID \
  --metric "cacheHits,cacheMisses,connectedclients,usedmemory,serverLoad" \
  --interval PT1M
Key metrics to monitor: cacheHits, cacheMisses, connectedclients, usedmemory, and serverLoad.
Use descriptive, colon-delimited key names (e.g., user:123, session:abc).