Enforce cloud security best practices across AWS, Vercel, Railway, and Cloudflare deployments. Always activate this skill when the user is deploying to a cloud platform, writing or reviewing IaC (Terraform, CloudFormation, Pulumi), configuring IAM roles or policies, managing secrets, setting up CI/CD pipelines, configuring databases or storage, implementing logging/monitoring, or asking about cloud security — even if they don't explicitly mention security. When in doubt, activate. A missed security check is harder to fix than an unrequested one.
This skill ensures cloud infrastructure, CI/CD pipelines, and deployment configurations follow security best practices and comply with industry standards.
When this skill activates, apply the practices below — least-privilege IAM, managed secrets, encryption at rest and in transit, minimal network exposure, and comprehensive audit logging — to every configuration the user writes or reviews.
The principle of least privilege is the single most impactful security control in cloud environments. Overly broad permissions are the root cause of most privilege escalation attacks.
# ✅ CORRECT: Minimal, resource-scoped permissions
iam_role:
permissions:
- s3:GetObject
- s3:ListBucket
resources:
- arn:aws:s3:::my-bucket/* # Specific bucket only
# ❌ WRONG: Wildcard permissions are a breach waiting to happen
iam_role:
permissions:
- s3:*
resources:
- "*"
Prefer short-lived federated credentials over long-lived access keys. OIDC lets services like GitHub Actions assume roles without storing any secret.
# Create an OIDC identity provider for GitHub Actions
aws iam create-open-id-connect-provider \
--url https://token.actions.githubusercontent.com \
--client-id-list sts.amazonaws.com \
--thumbprint-list 6938fd4d98bab03faadb97b34396831e3780aea1
# Enable MFA for privileged accounts — root and admin especially
aws iam enable-mfa-device \
--user-name admin \
--serial-number arn:aws:iam::123456789:mfa/admin \
--authentication-code1 123456 \
--authentication-code2 789012
Never grant wildcard permissions (`*` on resources or actions). Secrets in code or environment variables are accidents waiting to happen — they end up in logs, error traces, and git history. Store them in a managed service with auditing and rotation.
// ✅ CORRECT: Fetch from secrets manager at runtime
import { SecretsManager } from '@aws-sdk/client-secrets-manager';
const client = new SecretsManager({ region: 'us-east-1' });
const secret = await client.getSecretValue({ SecretId: 'prod/api-key' });
const apiKey = JSON.parse(secret.SecretString!).key;
// ❌ WRONG: env vars aren't rotated, audited, or access-controlled
const apiKey = process.env.API_KEY;
aws secretsmanager rotate-secret \
--secret-id prod/db-password \
--rotation-lambda-arn arn:aws:lambda:region:account:function:rotate \
--rotation-rules AutomaticallyAfterDays=30
Encryption should be non-negotiable for any data touching production — at rest to protect against storage breaches, in transit to prevent interception.
# ✅ CORRECT: Encrypted RDS instance
resource "aws_db_instance" "main" {
storage_encrypted = true
kms_key_id = aws_kms_key.rds.arn
# ... other config
}
# ✅ CORRECT: Encrypted S3 bucket (enforce via bucket policy)
resource "aws_s3_bucket_server_side_encryption_configuration" "main" {
bucket = aws_s3_bucket.main.id
rule {
apply_server_side_encryption_by_default {
sse_algorithm = "aws:kms"
kms_master_key_id = aws_kms_key.s3.arn
}
bucket_key_enabled = true
}
}
# ✅ CORRECT: Enforce HTTPS-only on S3 via bucket policy
resource "aws_s3_bucket_policy" "enforce_tls" {
bucket = aws_s3_bucket.main.id
policy = jsonencode({
Statement = [{
Effect = "Deny"
Principal = "*"
Action = "s3:*"
Resource = ["${aws_s3_bucket.main.arn}/*", aws_s3_bucket.main.arn]
Condition = { Bool = { "aws:SecureTransport" = "false" } }
}]
})
}
# Enforce TLS 1.2+ on RDS (parameter group)
resource "aws_db_parameter_group" "tls" {
family = "postgres15"
parameter {
name = "rds.force_ssl"
value = "1"
}
}
Your network perimeter is the outermost layer of defense. Keep it tight — anything publicly exposed is an attack surface.
# ✅ CORRECT: Minimal-ingress security group
resource "aws_security_group" "app" {
name = "app-sg"
vpc_id = aws_vpc.main.id
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["10.0.0.0/16"] # Internal VPC only
}
egress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"] # HTTPS outbound only
}
}
# ❌ WRONG: Never open all ports to the internet
resource "aws_security_group" "bad" {
ingress {
from_port = 0
to_port = 65535
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
}
# ❌ Public S3 bucket
aws s3api put-bucket-acl --bucket my-bucket --acl public-read
# ✅ Private bucket with explicit policy
aws s3api put-bucket-acl --bucket my-bucket --acl private
aws s3api put-bucket-policy --bucket my-bucket --policy file://policy.json
# ❌ Publicly accessible RDS — extremely dangerous
resource "aws_db_instance" "bad" {
publicly_accessible = true
}
# ✅ Private RDS in VPC
resource "aws_db_instance" "good" {
publicly_accessible = false
vpc_security_group_ids = [aws_security_group.db.id]
db_subnet_group_name = aws_db_subnet_group.private.name
}
Never expose services to `0.0.0.0/0` unless they are deliberately public. You can't defend what you can't see. Comprehensive logging is the foundation of incident detection, forensics, and compliance.
// ✅ CORRECT: Structured security event logging
import { CloudWatchLogs } from '@aws-sdk/client-cloudwatch-logs';
const cloudwatch = new CloudWatchLogs({ region: 'us-east-1' });
const logSecurityEvent = async (event: SecurityEvent) => {
await cloudwatch.putLogEvents({
logGroupName: '/aws/security/events',
logStreamName: 'authentication',
logEvents: [{
timestamp: Date.now(),
message: JSON.stringify({
type: event.type,
userId: event.userId,
ip: event.ip,
result: event.result,
// Never log tokens, passwords, or PII
})
}]
});
};
Set up alarms for high-signal security events. Don't wait to be breached before noticing.
# Alert on root account usage
aws cloudwatch put-metric-alarm \
--alarm-name "RootAccountUsage" \
--metric-name "RootAccountUsageCount" \
--namespace "CloudTrailMetrics" \
--statistic Sum \
--period 300 \
--threshold 1 \
--comparison-operator GreaterThanOrEqualToThreshold \
--alarm-actions arn:aws:sns:us-east-1:123456789:SecurityAlerts
Your pipeline has broad production access — it's a high-value target. Treat it with the same rigor as your production environment.