Generate Spaces configs for S3-compatible object storage with CDN. Use when the user wants to set up DigitalOcean Spaces for file storage, static assets, backups, or media delivery.
You are a DigitalOcean Spaces and CDN expert. Generate production-ready object storage configurations with CDN delivery, access control, and lifecycle management.
Determine from user input or $ARGUMENTS:
doctl CLI:
# Spaces buckets themselves cannot be created with doctl -- use the API or
# Terraform for bucket creation (see below). doctl manages only the CDN
# endpoint placed in front of an existing Space:
doctl compute cdn create \
--origin <space-name>.nyc3.digitaloceanspaces.com \
--ttl 3600
# Note: Spaces are created via API or Terraform, not directly via doctl
# Use the API or Terraform for bucket creation
Terraform:
# Primary asset bucket: private by default, CORS-restricted to the app's
# origins, versioned, with lifecycle cleanup for temp uploads and old logs.
resource "digitalocean_spaces_bucket" "assets" {
  name   = "my-app-assets"
  region = "nyc3"
  acl    = "private" # objects stay private unless individually made public

  # Allow the web app's origins to read and upload directly from the browser.
  cors_rule {
    allowed_headers = ["*"]
    allowed_methods = ["GET", "PUT", "POST"]
    allowed_origins = ["https://example.com", "https://www.example.com"]
    max_age_seconds = 3600
  }

  # Temp uploads are disposable after a week.
  lifecycle_rule {
    id      = "expire-temp-uploads"
    enabled = true
    prefix  = "tmp/"

    expiration {
      days = 7
    }

    # With versioning enabled, expiration alone only adds a delete marker;
    # also expire noncurrent versions so storage is actually reclaimed.
    noncurrent_version_expiration {
      days = 7
    }
  }

  lifecycle_rule {
    id      = "archive-old-logs"
    enabled = true
    prefix  = "logs/"

    expiration {
      days = 90
    }

    noncurrent_version_expiration {
      days = 90
    }
  }

  # Keep object history so accidental overwrites/deletes are recoverable.
  versioning {
    enabled = true
  }

  # Refuse to destroy the bucket while it still contains objects.
  force_destroy = false
}
# CDN endpoint
# Fronts the assets bucket with DigitalOcean's CDN on a custom domain;
# edge caches hold objects for up to `ttl` seconds.
resource "digitalocean_cdn" "assets" {
origin = digitalocean_spaces_bucket.assets.bucket_domain_name
ttl = 3600
custom_domain = "cdn.example.com"
certificate_name = digitalocean_certificate.cdn.name
}
# Managed Let's Encrypt certificate for the CDN custom domain;
# referenced by name from the CDN endpoint above.
resource "digitalocean_certificate" "cdn" {
name = "cdn-cert"
type = "lets_encrypt"
domains = ["cdn.example.com"]
}
# Create Spaces access keys (via API)
# Requires a DigitalOcean API token in $DO_TOKEN.
curl -X POST "https://api.digitalocean.com/v2/spaces/keys" \
-H "Authorization: Bearer $DO_TOKEN" \
-H "Content-Type: application/json" \
-d '{"name": "my-app-key"}'
# Response provides:
# - access_key (like AWS Access Key ID)
# - secret_key (like AWS Secret Access Key)
# NOTE: store the secret_key securely (e.g. a secrets manager), not in code.
Terraform:
# Upload a single object under Terraform management; changes to the local
# file are re-uploaded on apply.
resource "digitalocean_spaces_bucket_object" "index" {
region = digitalocean_spaces_bucket.assets.region
bucket = digitalocean_spaces_bucket.assets.name
key = "index.html"
content = file("public/index.html")
content_type = "text/html"
# public-read applies to this object only; the bucket ACL stays private.
acl = "public-read"
}
Environment variables for your application:
# Credentials and location used by the application SDK snippets below.
export SPACES_KEY="your-access-key"
export SPACES_SECRET="your-secret-key"
export SPACES_ENDPOINT="https://nyc3.digitaloceanspaces.com"
export SPACES_BUCKET="my-app-assets"
export SPACES_REGION="nyc3"
DigitalOcean Spaces is S3-compatible. Use any AWS S3 SDK by changing the endpoint.
Node.js (AWS SDK v3):
const { S3Client, PutObjectCommand, GetObjectCommand, DeleteObjectCommand } = require('@aws-sdk/client-s3');
const { getSignedUrl } = require('@aws-sdk/s3-request-presigner');
// Spaces is S3-compatible: point the AWS SDK at the regional Spaces endpoint.
const s3Client = new S3Client({
endpoint: 'https://nyc3.digitaloceanspaces.com',
forcePathStyle: false, // virtual-hosted style: <bucket>.nyc3.digitaloceanspaces.com
region: 'nyc3', // the SDK requires a region; use the Spaces region name
credentials: {
accessKeyId: process.env.SPACES_KEY,
secretAccessKey: process.env.SPACES_SECRET,
},
});
/**
 * Upload an object to the Space under `key`.
 * @param {string} key - Destination object key.
 * @param {Buffer|string|stream.Readable} body - Object contents.
 * @param {string} contentType - MIME type stored with the object.
 * @returns {Promise<object>} Resolves with the PutObject response.
 */
async function uploadFile(key, body, contentType) {
  const putParams = {
    Bucket: process.env.SPACES_BUCKET,
    Key: key,
    Body: body,
    ContentType: contentType,
    ACL: 'private',
    CacheControl: 'max-age=31536000', // 1 year for immutable assets
  };
  return s3Client.send(new PutObjectCommand(putParams));
}
/**
 * Create a time-limited URL granting read access to a private object.
 * @param {string} key - Object key in the Space.
 * @param {number} [expiresIn=3600] - URL lifetime in seconds.
 * @returns {Promise<string>} Signed GET URL.
 */
async function getPresignedUrl(key, expiresIn = 3600) {
  const getCommand = new GetObjectCommand({
    Bucket: process.env.SPACES_BUCKET,
    Key: key,
  });
  return getSignedUrl(s3Client, getCommand, { expiresIn });
}
/**
 * Remove an object from the Space.
 * @param {string} key - Object key to delete.
 * @returns {Promise<object>} Resolves with the DeleteObject response.
 */
async function deleteFile(key) {
  const deleteParams = { Bucket: process.env.SPACES_BUCKET, Key: key };
  return s3Client.send(new DeleteObjectCommand(deleteParams));
}
Python (boto3):
import os  # required: credentials and bucket name are read from the environment

import boto3
from botocore.client import Config

# Spaces is S3-compatible: point boto3 at the regional Spaces endpoint.
session = boto3.session.Session()
client = session.client(
    's3',
    region_name='nyc3',
    endpoint_url='https://nyc3.digitaloceanspaces.com',
    aws_access_key_id=os.environ['SPACES_KEY'],
    aws_secret_access_key=os.environ['SPACES_SECRET'],
)

# Upload
client.upload_file(
    'local-file.jpg',
    os.environ['SPACES_BUCKET'],
    'uploads/image.jpg',
    ExtraArgs={
        'ContentType': 'image/jpeg',
        'ACL': 'private',
        'CacheControl': 'max-age=31536000',
    }
)

# Presigned URL: time-limited read access to the private object
url = client.generate_presigned_url(
    'get_object',
    Params={
        'Bucket': os.environ['SPACES_BUCKET'],
        'Key': 'uploads/image.jpg',
    },
    ExpiresIn=3600,
)

# List objects; for more than MaxKeys results, paginate with the
# NextContinuationToken from the response.
response = client.list_objects_v2(
    Bucket=os.environ['SPACES_BUCKET'],
    Prefix='uploads/',
    MaxKeys=100,
)
for obj in response.get('Contents', []):
    print(obj['Key'], obj['Size'])
Go:
package main
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
func main() {
sess := session.Must(session.NewSession(&aws.Config{
Region: aws.String("nyc3"),
Endpoint: aws.String("https://nyc3.digitaloceanspaces.com"),
Credentials: credentials.NewStaticCredentials(key, secret, ""),
}))
svc := s3.New(sess)
// Use svc like standard AWS S3 client
}
const { Upload } = require('@aws-sdk/lib-storage');

/**
 * Stream a large object to the Space using managed multipart upload.
 * @param {string} key - Destination object key.
 * @param {stream.Readable|Buffer} stream - Data source.
 * @param {string} contentType - MIME type stored with the object.
 * @returns {Promise<object>} Resolves when the multipart upload completes.
 */
async function uploadLargeFile(key, stream, contentType) {
  const upload = new Upload({
    client: s3Client,
    params: {
      Bucket: process.env.SPACES_BUCKET,
      Key: key,
      Body: stream,
      ContentType: contentType,
      ACL: 'private',
    },
    queueSize: 4, // Concurrent upload parts
    partSize: 1024 * 1024 * 10, // 10MB per part
    leavePartsOnError: false, // abort and clean up uploaded parts on failure
  });

  upload.on('httpUploadProgress', (progress) => {
    // progress.total is undefined when the stream length is not known
    // up front, so guard it instead of logging "of undefined".
    console.log(`Uploaded ${progress.loaded} of ${progress.total ?? 'unknown'}`);
  });

  return upload.done();
}
# Upload bucket with two CORS profiles: a restricted rule for browser-based
# direct uploads from the app, and a permissive read-only rule for CDN reads.
resource "digitalocean_spaces_bucket" "uploads" {
name = "my-app-uploads"
region = "nyc3"
acl = "private"
# Browser-based direct uploads
cors_rule {
allowed_headers = ["*"]
allowed_methods = ["GET", "PUT", "POST", "DELETE", "HEAD"]
allowed_origins = ["https://example.com"]
max_age_seconds = 3600
}
# Public CDN reads from any origin
cors_rule {
allowed_headers = ["*"]
allowed_methods = ["GET", "HEAD"]
allowed_origins = ["*"]
max_age_seconds = 86400
}
}
# Enable static site hosting on a Space
# Set ACL to public-read and configure index/error documents
# Upload static files. Let s3cmd detect each file's MIME type; forcing
# --mime-type="text/html" on a recursive upload would mislabel CSS/JS/images
# as HTML and break rendering in browsers.
s3cmd put --recursive --acl-public \
--guess-mime-type \
./dist/ s3://my-static-site/
Terraform for static site:
# Public bucket for static site assets; world-readable with permissive
# read-only CORS so any origin can fetch the files.
resource "digitalocean_spaces_bucket" "static_site" {
name = "my-static-site"
region = "nyc3"
acl = "public-read"
cors_rule {
allowed_headers = ["*"]
allowed_methods = ["GET", "HEAD"]
allowed_origins = ["*"]
max_age_seconds = 86400
}
}
# CDN endpoint for the static site bucket on a custom domain.
resource "digitalocean_cdn" "static_site" {
origin = digitalocean_spaces_bucket.static_site.bucket_domain_name
ttl = 3600
custom_domain = "static.example.com"
certificate_name = digitalocean_certificate.static.name
}
Access via:
- Direct: https://my-static-site.nyc3.digitaloceanspaces.com/index.html
- CDN: https://my-static-site.nyc3.cdn.digitaloceanspaces.com/index.html
- Custom domain: https://static.example.com/index.html

s3cmd configuration (~/.s3cfg):
[default]
# Spaces access key pair (from the key-creation step above)
access_key = YOUR_SPACES_KEY
secret_key = YOUR_SPACES_SECRET
# Regional Spaces endpoint and virtual-hosted bucket addressing
host_base = nyc3.digitaloceanspaces.com
host_bucket = %(bucket)s.nyc3.digitaloceanspaces.com
use_https = True
# s3cmd usage
s3cmd ls s3://my-bucket/                      # list objects
s3cmd put file.txt s3://my-bucket/file.txt    # upload
s3cmd get s3://my-bucket/file.txt ./file.txt  # download
s3cmd del s3://my-bucket/file.txt             # delete
s3cmd sync ./local-dir/ s3://my-bucket/prefix/  # mirror a local directory
rclone configuration (~/.config/rclone/rclone.conf):
[spaces]
type = s3
provider = DigitalOcean
access_key_id = YOUR_SPACES_KEY
secret_access_key = YOUR_SPACES_SECRET
endpoint = nyc3.digitaloceanspaces.com
acl = private
# rclone usage
rclone ls spaces:my-bucket                # list objects
rclone copy ./local-dir spaces:my-bucket/prefix  # copy (never deletes)
rclone sync ./local-dir spaces:my-bucket/prefix  # mirror (deletes extras on remote)
rclone delete spaces:my-bucket/old-data   # remove objects under a path
# List CDN endpoints
doctl compute cdn list
# Create CDN endpoint (origin is the Space's regional endpoint)
doctl compute cdn create \
--origin my-bucket.nyc3.digitaloceanspaces.com \
--ttl 3600 \
--domain cdn.example.com \
--certificate-id <cert-id>
# Flush CDN cache (purge cached copies so updated objects are re-fetched)
doctl compute cdn flush <cdn-id> --files "images/*" "css/*"
# Delete CDN endpoint
doctl compute cdn delete <cdn-id>
Cache control headers:
// Set cache headers during upload for CDN optimization
// Hashed filenames never change content, so browsers and the CDN may
// cache them indefinitely.
const command = new PutObjectCommand({
Bucket: bucket,
Key: `assets/${hash}.js`,
Body: content,
ContentType: 'application/javascript',
CacheControl: 'public, max-age=31536000, immutable', // Hashed filenames
// Declares the stored object is gzip-compressed -- `content` must
// already be gzipped before upload to match this header.
ContentEncoding: 'gzip',
});
// Short cache for HTML
// HTML references the hashed assets, so it must refresh quickly after deploys.
const htmlCommand = new PutObjectCommand({
Bucket: bucket,
Key: 'index.html',
Body: htmlContent,
ContentType: 'text/html',
CacheControl: 'public, max-age=300, s-maxage=60', // 5 min browser, 1 min CDN
});
| Region | Endpoint | CDN |
|---|---|---|
| nyc3 | nyc3.digitaloceanspaces.com | nyc3.cdn.digitaloceanspaces.com |
| sfo3 | sfo3.digitaloceanspaces.com | sfo3.cdn.digitaloceanspaces.com |
| ams3 | ams3.digitaloceanspaces.com | ams3.cdn.digitaloceanspaces.com |
| sgp1 | sgp1.digitaloceanspaces.com | sgp1.cdn.digitaloceanspaces.com |
| fra1 | fra1.digitaloceanspaces.com | fra1.cdn.digitaloceanspaces.com |
| syd1 | syd1.digitaloceanspaces.com | syd1.cdn.digitaloceanspaces.com |
Best practices:
- Set Cache-Control headers: long TTL for hashed assets, short for HTML
- Organize objects with key prefixes (e.g. uploads/2024/06/, backups/daily/)
- Keep a private ACL by default; only make individual objects or the entire Space public when necessary
- Paginate large listings with MaxKeys and continuation tokens
- Set the correct Content-Type during upload; incorrect types cause display issues
- Use Cache-Control: immutable for hashed assets to maximize CDN hit rate