Comprehensive Supabase and Postgres best practices for project setup, schema design, RLS policies, Auth integration, real-time subscriptions, Edge Functions, migrations, indexing, query optimization, client library usage, storage, environment variables, and type generation. Use this skill when writing, reviewing, or optimizing Supabase applications, Postgres queries, schema designs, database configurations, RLS policies, Edge Functions, or client-side data access code.
Comprehensive guide for building production-grade applications with Supabase and PostgreSQL. Contains rules across 13 domains, prioritized by impact, with detailed explanations, correct vs. incorrect examples, and Supabase-specific notes.
Reference these guidelines when:
# Install the Supabase CLI
npm install -g supabase
# Initialize a new Supabase project locally
supabase init
# Link to a remote Supabase project
supabase link --project-ref <project-id>
# Start local development environment
supabase start
your-project/
├── supabase/
│ ├── config.toml # Local dev configuration
│ ├── schemas/ # Declarative schema files (preferred)
│ │ ├── 001_auth.sql
│ │ ├── 002_tables.sql
│ │ └── 003_policies.sql
│ ├── migrations/ # Generated migration files
│ │ └── 20240906123045_create_profiles.sql
│ ├── functions/ # Edge Functions
│ │ ├── _shared/ # Shared utilities across functions
│ │ └── my-function/
│ │ └── index.ts
│ └── seed.sql # Seed data for local dev
├── src/
│ └── lib/
│ ├── supabase.ts # Client initialization
│ └── database.types.ts # Generated types
└── .env.local # Local environment variables
These are automatically available in both local and hosted Supabase environments. Users do not need to set them manually:
SUPABASE_URL -- Project API URL
SUPABASE_ANON_KEY -- Public anonymous key
SUPABASE_SERVICE_ROLE_KEY -- Service role key (server-side only, never expose to client)
SUPABASE_DB_URL -- Direct database connection string

Use snake_case for all identifiers (tables, columns, functions, indexes).
Use plural nouns for table names (users, orders, products).
Use singular, descriptive column names (email, created_at, user_id).
Do not prefix table names with tbl_ or similar.

For single-database systems (most Supabase projects):
-- CORRECT: Use bigint identity (SQL-standard, optimal index performance)
create table public.orders (
id bigint generated always as identity primary key,
total numeric(10,2) not null
);
For distributed systems:
-- CORRECT: Use UUIDv7 for time-ordered, non-fragmented identifiers
create extension if not exists pg_uuidv7;
create table public.events (
id uuid default uuid_generate_v7() primary key,
payload jsonb not null
);
Avoid:
-- INCORRECT: serial is outdated
create table public.bad_example (
id serial primary key -- use bigint identity instead
);
-- INCORRECT: Random UUIDv4 causes index fragmentation on large tables
create table public.bad_example_2 (
id uuid default gen_random_uuid() primary key -- use UUIDv7 instead for large tables
);
| Use Case | Recommended Type | Avoid |
|---|---|---|
| Identifiers | bigint | int (overflow at 2.1B) |
| Strings | text | varchar(255) (arbitrary limit) |
| Timestamps | timestamptz | timestamp (no timezone) |
| Booleans | boolean | varchar(5) |
| Currency | numeric(10,2) | float / double precision |
| JSON data | jsonb | json (no indexing) |
create table public.profiles (
id bigint generated always as identity primary key,
user_id uuid references auth.users (id) on delete cascade not null,
display_name text not null,
avatar_url text,
bio text,
created_at timestamptz default now() not null,
updated_at timestamptz default now() not null
);
comment on table public.profiles is 'Public user profiles linked to auth.users.';
-- Always enable RLS on every public table
alter table public.profiles enable row level security;
Name foreign key columns with an _id suffix: user_id, order_id.
Use on delete cascade for references to auth.users.

-- CORRECT: Foreign key with index
create table public.posts (
id bigint generated always as identity primary key,
author_id uuid references auth.users (id) on delete cascade not null,
title text not null,
content text,
created_at timestamptz default now() not null
);
create index posts_author_id_idx on public.posts (author_id);
-- Use check constraints for data validation
alter table public.products
add constraint products_price_positive check (price > 0);
-- Use unique constraints where appropriate
alter table public.profiles
add constraint profiles_user_id_unique unique (user_id);
Enable RLS on every table in the public schema.
Always specify the TO clause (authenticated, anon).
Avoid FOR ALL -- create separate policies for SELECT, INSERT, UPDATE, DELETE.
Wrap auth functions in (select ...) to enable query plan caching.
Prefer PERMISSIVE policies over RESTRICTIVE (permissive policies combine with OR, restrictive with AND, making restrictive harder to reason about).

| Operation | USING | WITH CHECK |
|---|---|---|
| SELECT | Required | Never |
| INSERT | Never | Required |
| UPDATE | Usually required | Required |
| DELETE | Required | Never |
-- Enable RLS
alter table public.posts enable row level security;
-- SELECT: Users can read their own posts
create policy "Users can view own posts"
on public.posts
for select
to authenticated
using ( (select auth.uid()) = author_id );
-- INSERT: Users can create posts as themselves
create policy "Users can create own posts"
on public.posts
for insert
to authenticated
with check ( (select auth.uid()) = author_id );
-- UPDATE: Users can update their own posts
create policy "Users can update own posts"
on public.posts
for update
to authenticated
using ( (select auth.uid()) = author_id )
with check ( (select auth.uid()) = author_id );
-- DELETE: Users can delete their own posts
create policy "Users can delete own posts"
on public.posts
for delete
to authenticated
using ( (select auth.uid()) = author_id );
-- For tables that should be publicly readable
create policy "Anyone can view published posts"
on public.posts
for select
to authenticated, anon
using ( published = true );
-- Access based on app_metadata (cannot be modified by users)
create policy "Users can access team data"
on public.team_data
for select
to authenticated
using (
team_id in (
select jsonb_array_elements_text(
(select auth.jwt()) -> 'app_metadata' -> 'teams'
)::uuid
)
);
create policy "Require MFA for sensitive updates"
on public.sensitive_data
as restrictive
for update
to authenticated
using (
((select auth.jwt()) ->> 'aal') = 'aal2'
);
-- INCORRECT: Function called per row (slow)
create policy "bad_policy" on public.posts
for select to authenticated
using ( auth.uid() = author_id );
-- CORRECT: Function result cached per statement (fast)
create policy "good_policy" on public.posts
for select to authenticated
using ( (select auth.uid()) = author_id );
-- CORRECT: Add index on policy-checked columns
create index posts_author_id_idx on public.posts using btree (author_id);
-- CORRECT: Avoid joins in policies; use subqueries with IN instead
create policy "Team members can view records"
on public.team_records
for select to authenticated
using (
team_id in (
select team_id from public.team_members
where user_id = (select auth.uid())
)
);
import { createClient } from '@supabase/supabase-js';
const supabase = createClient(
process.env.NEXT_PUBLIC_SUPABASE_URL!,
process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!
);
// Sign up
const { data, error } = await supabase.auth.signUp({
email: 'user@example.com',
password: 'secure-password',
});
// Sign in
const { data, error } = await supabase.auth.signInWithPassword({
email: 'user@example.com',
password: 'secure-password',
});
// Sign in with OAuth
const { data, error } = await supabase.auth.signInWithOAuth({
provider: 'google',
options: { redirectTo: 'https://yourapp.com/auth/callback' },
});
// Sign out
await supabase.auth.signOut();
// Get current user (server-side, validates with Supabase)
const { data: { user } } = await supabase.auth.getUser();
// Get session (client-side, reads local JWT, faster but not verified)
const { data: { session } } = await supabase.auth.getSession();
// Listen to auth state changes
supabase.auth.onAuthStateChange((event, session) => {
console.log(event, session);
});
| Table | Key Columns |
|---|---|
| auth.users | id, email, phone, created_at, last_sign_in_at, raw_user_meta_data, raw_app_meta_data |
| auth.sessions | id, user_id, created_at, updated_at |
| auth.identities | id, user_id, provider, identity_data |
-- Create a public profiles table that mirrors auth.users
create table public.profiles (
id uuid references auth.users (id) on delete cascade primary key,
email text,
full_name text,
avatar_url text,
updated_at timestamptz default now()
);
alter table public.profiles enable row level security;
-- Auto-create profile on user signup
create or replace function public.handle_new_user()
returns trigger
language plpgsql
security definer
set search_path = ''
as $$
begin
insert into public.profiles (id, email, full_name, avatar_url)
values (
new.id,
new.email,
new.raw_user_meta_data ->> 'full_name',
new.raw_user_meta_data ->> 'avatar_url'
);
return new;
end;
$$;
create trigger on_auth_user_created
after insert on auth.users
for each row execute function public.handle_new_user();
auth.uid() -- Returns the UUID of the currently authenticated user.
auth.jwt() -- Returns the full JWT payload. Use -> 'app_metadata' for authorization data (not user-editable) and -> 'user_metadata' for user-editable data.
Wrap both in (select ...) for performance in RLS policies.
Never expose auth.users directly via the API. Use a public.profiles table instead.
Store authorization data in raw_app_meta_data (not editable by users).
Store user preferences in raw_user_meta_data (editable via supabase.auth.update()).
Use SUPABASE_SERVICE_ROLE_KEY only on the server side; never expose it to the client.
Use supabase.auth.getUser() on the server (validates JWT with Supabase).
Use supabase.auth.getSession() on the client only (reads local JWT, not verified).

-- Enable real-time on a table (via Supabase Dashboard or SQL)
alter publication supabase_realtime add table public.messages;
// Subscribe to all changes on a table
const channel = supabase
.channel('messages-channel')
.on(
'postgres_changes',
{
event: '*', // 'INSERT' | 'UPDATE' | 'DELETE' | '*'
schema: 'public',
table: 'messages',
},
(payload) => {
console.log('Change received:', payload);
}
)
.subscribe();
// Subscribe with filters
const filteredChannel = supabase
.channel('user-messages')
.on(
'postgres_changes',
{
event: 'INSERT',
schema: 'public',
table: 'messages',
filter: `room_id=eq.${roomId}`,
},
(payload) => {
console.log('New message:', payload.new);
}
)
.subscribe();
// Unsubscribe when done
supabase.removeChannel(channel);
// Broadcast: Send ephemeral messages (not persisted)
const channel = supabase.channel('room-1');
channel.on('broadcast', { event: 'cursor-pos' }, (payload) => {
console.log('Cursor:', payload);
});
await channel.subscribe();
channel.send({ type: 'broadcast', event: 'cursor-pos', payload: { x: 100, y: 200 } });
// Presence: Track online users
const channel = supabase.channel('online-users');
channel.on('presence', { event: 'sync' }, () => {
const state = channel.presenceState();
console.log('Online users:', state);
});
await channel.subscribe(async (status) => {
if (status === 'SUBSCRIBED') {
await channel.track({ user_id: currentUser.id, online_at: new Date().toISOString() });
}
});
Prefer Web APIs and Deno core APIs over external dependencies (e.g., fetch instead of Axios).
Place shared utilities in supabase/functions/_shared/ and import via relative paths. Do NOT create cross-dependencies between Edge Functions.
Import external dependencies with npm:, jsr:, or node: specifiers.
Pin dependency versions: npm:express@4.18.2, not npm:express.
Use the built-in Deno.serve() instead of the deprecated serve from deno.land/std.
File writes are limited to the /tmp directory.
Use EdgeRuntime.waitUntil(promise) for background tasks that should not block the response.
Avoid the deno.land/x, esm.sh, and unpkg.com CDNs. Prefer npm: specifiers.

import { createClient } from 'npm:@supabase/supabase-js@2';
interface RequestPayload {
name: string;
}
Deno.serve(async (req: Request) => {
try {
// Create Supabase client with auth context from the request
const authHeader = req.headers.get('Authorization')!;
const supabase = createClient(
Deno.env.get('SUPABASE_URL')!,
Deno.env.get('SUPABASE_ANON_KEY')!,
{ global: { headers: { Authorization: authHeader } } }
);
// Verify the user
const { data: { user }, error: authError } = await supabase.auth.getUser();
if (authError || !user) {
return new Response(JSON.stringify({ error: 'Unauthorized' }), {
status: 401,
headers: { 'Content-Type': 'application/json' },
});
}
const { name }: RequestPayload = await req.json();
return new Response(
JSON.stringify({ message: `Hello ${name}!`, user_id: user.id }),
{ headers: { 'Content-Type': 'application/json' } }
);
} catch (err) {
return new Response(
JSON.stringify({ error: err.message }),
{ status: 500, headers: { 'Content-Type': 'application/json' } }
);
}
});
import { Hono } from 'npm:hono@3';
const app = new Hono().basePath('/my-function');
app.get('/health', (c) => c.json({ status: 'ok' }));
app.post('/process', async (c) => {
const body = await c.req.json();
return c.json({ received: body });
});
Deno.serve(app.fetch);
# Set secrets from an env file
supabase secrets set --env-file .env.production
# Set individual secrets
supabase secrets set MY_API_KEY=sk-xxxxx
Files MUST follow the format YYYYMMDDHHmmss_short_description.sql in UTC:
20240906123045_create_profiles.sql
20240906130000_add_rls_policies.sql
# 1. Define or modify schema files in supabase/schemas/
# 2. Stop local environment before diffing
supabase stop
# 3. Generate migration by diffing
supabase db diff -f create_profiles
# 4. Review the generated migration file carefully
# 5. Restart and apply
supabase start
supabase db reset # applies all migrations from scratch locally
-- Migration: Create user profiles table
-- Affected tables: public.profiles
-- Purpose: Store public user profile data linked to auth.users
-- Create the profiles table
create table public.profiles (
id uuid references auth.users (id) on delete cascade primary key,
display_name text not null,
bio text,
created_at timestamptz default now() not null,
updated_at timestamptz default now() not null
);
comment on table public.profiles is 'Public user profiles linked to auth.users.';
-- Enable RLS (mandatory for all public tables)
alter table public.profiles enable row level security;
-- RLS policies (one per operation per role)
create policy "Profiles are viewable by everyone"
on public.profiles for select
to authenticated, anon
using (true);
create policy "Users can update own profile"
on public.profiles for update
to authenticated
using ((select auth.uid()) = id)
with check ((select auth.uid()) = id);
create policy "Users can insert own profile"
on public.profiles for insert
to authenticated
with check ((select auth.uid()) = id);
-- Index for commonly filtered columns
create index profiles_display_name_idx on public.profiles (display_name);
Never ship permissive policies that simply use (true) unless the table is intentionally public.
Create one policy per operation (select, insert, update, delete) per role (anon, authenticated).
Test every migration locally with supabase db reset.

The following are NOT captured by supabase db diff and must be added as manual migration files:
Data manipulation statements (INSERT, UPDATE, DELETE)
ALTER POLICY statements and column privileges
ALTER PUBLICATION ... ADD TABLE
CREATE DOMAIN statements

Index columns used in WHERE clauses (especially with high cardinality).
Index columns used in JOIN conditions.
Index columns used in ORDER BY with LIMIT.

| Type | Use Case | Example |
|---|---|---|
| B-tree (default) | Equality and range queries | create index idx on orders (customer_id) |
| Hash | Equality-only queries | create index idx on users using hash (email) |
| GIN | JSONB, arrays, full-text search | create index idx on posts using gin (tags) |
| GiST | Geometric, full-text, range types | create index idx on locations using gist (geom) |
| BRIN | Large, naturally ordered tables | create index idx on logs using brin (created_at) |
Place equality columns before range columns. Respect the leftmost prefix rule.
-- CORRECT: equality column first, then range column
create index orders_status_created_idx
on public.orders (status, created_at);
-- This index supports:
-- WHERE status = 'pending'
-- WHERE status = 'pending' AND created_at > '2024-01-01'
-- But NOT:
-- WHERE created_at > '2024-01-01' (skips leading column)
Index only the rows you actually query. Produces 5-20x smaller indexes and faster writes.
-- Only index active users (most queries filter on active users)
create index users_active_email_idx
on public.users (email)
where deleted_at is null;
-- Only index pending orders
create index orders_pending_idx
on public.orders (created_at)
where status = 'pending';
-- Include columns needed by SELECT to enable index-only scans
create index orders_customer_covering_idx
on public.orders (customer_id)
include (order_date, total);
-- GIN index for containment queries (@>, ?, ?|, ?&)
create index products_metadata_idx
on public.products using gin (metadata);
-- Expression index for a specific JSONB key
create index products_category_idx
on public.products ((metadata ->> 'category'));
-- Add a tsvector column and GIN index
alter table public.articles add column fts tsvector
generated always as (to_tsvector('english', coalesce(title, '') || ' ' || coalesce(body, ''))) stored;
create index articles_fts_idx on public.articles using gin (fts);
-- Query using the index
select * from public.articles where fts @@ to_tsquery('english', 'postgres & optimization');
-- Always analyze actual execution, not just estimates
explain (analyze, buffers, format text)
select * from public.orders where customer_id = 123;
-- Look for:
-- Seq Scan -> add an index
-- Nested Loop with high row counts -> consider JOIN restructuring
-- High actual vs. estimated rows -> run ANALYZE to update statistics
-- INCORRECT: Fetching orders one user at a time in a loop
-- Application runs: SELECT * FROM orders WHERE user_id = $1 (100 times)
-- CORRECT: Batch with array parameter
select * from public.orders
where user_id = any($1::uuid[]);
-- CORRECT: Use a JOIN
select u.id, u.email, o.id as order_id, o.total
from public.users u
left join public.orders o on o.user_id = u.id
where u.id = any($1::uuid[]);
-- INCORRECT: OFFSET scans and discards rows (O(n) on page depth)
select * from public.posts
order by created_at desc
limit 20 offset 10000; -- scans 10,020 rows
-- CORRECT: Cursor-based pagination (O(1) regardless of depth)
select * from public.posts
where created_at < $1 -- cursor: last row's created_at from previous page
order by created_at desc
limit 20;
-- INCORRECT: Individual inserts in a loop
insert into public.events (name, data) values ('a', '{}');
insert into public.events (name, data) values ('b', '{}');
-- CORRECT: Batch insert
insert into public.events (name, data) values
('a', '{}'),
('b', '{}'),
('c', '{}');
-- Insert or update on conflict
insert into public.user_settings (user_id, theme, language)
values ($1, $2, $3)
on conflict (user_id)
do update set
theme = excluded.theme,
language = excluded.language;
-- Find the slowest queries
select
query,
calls,
mean_exec_time,
total_exec_time
from pg_stat_statements
order by mean_exec_time desc
limit 20;
-- Update table statistics for the query planner
analyze public.orders;
-- Check vacuum status
select
relname,
last_vacuum,
last_autovacuum,
n_dead_tup
from pg_stat_user_tables
where schemaname = 'public'
order by n_dead_tup desc;
Use snake_case for identifiers.
Alias columns explicitly with the as keyword.

import { createClient } from '@supabase/supabase-js';
import type { Database } from './database.types'; // Generated types
// Browser / client-side (uses anon key)
export const supabase = createClient<Database>(
process.env.NEXT_PUBLIC_SUPABASE_URL!,
process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!
);
// Server-side (uses service role key -- never expose to client)
export const supabaseAdmin = createClient<Database>(
process.env.SUPABASE_URL!,
process.env.SUPABASE_SERVICE_ROLE_KEY!,
{ auth: { autoRefreshToken: false, persistSession: false } }
);
// SELECT with type safety
const { data: posts, error } = await supabase
.from('posts')
.select('id, title, author:profiles(display_name)')
.eq('published', true)
.order('created_at', { ascending: false })
.limit(20);
// INSERT
const { data, error } = await supabase
.from('posts')
.insert({ title: 'New Post', content: 'Hello world', author_id: user.id })
.select()
.single();
// UPDATE
const { data, error } = await supabase
.from('posts')
.update({ title: 'Updated Title' })
.eq('id', postId)
.select()
.single();
// DELETE
const { error } = await supabase
.from('posts')
.delete()
.eq('id', postId);
// UPSERT
const { data, error } = await supabase
.from('user_settings')
.upsert({ user_id: user.id, theme: 'dark' })
.select()
.single();
| Filter | Usage |
|---|---|
| Equals | .eq('col', value) |
| Not equals | .neq('col', value) |
| Greater than | .gt('col', value) |
| Greater or equal | .gte('col', value) |
| Less than | .lt('col', value) |
| Less or equal | .lte('col', value) |
| Pattern match | .ilike('col', '%search%') |
| In list | .in('col', [a, b, c]) |
| Is null | .is('col', null) |
| Contains (JSONB) | .contains('metadata', { key: 'value' }) |
| Contained by | .containedBy('tags', ['a', 'b']) |
| OR conditions | .or('status.eq.active,status.eq.pending') |
// INCORRECT: Fetching all columns
const { data } = await supabase.from('users').select('*');
// CORRECT: Fetch only what you need
const { data } = await supabase.from('users').select('id, email, display_name');
// Fetch posts with their author profiles and comment counts
const { data } = await supabase
.from('posts')
.select(`
id,
title,
created_at,
author:profiles!author_id (
display_name,
avatar_url
),
comments (
count
)
`)
.eq('published', true)
.order('created_at', { ascending: false });
const { data, error } = await supabase.rpc('search_posts', {
search_term: 'postgres',
result_limit: 10,
});
-- Via SQL (in a migration)
insert into storage.buckets (id, name, public)
values ('avatars', 'avatars', true);
insert into storage.buckets (id, name, public, file_size_limit, allowed_mime_types)
values (
'documents',
'documents',
false,
5242880, -- 5MB
array['application/pdf', 'image/png', 'image/jpeg']
);
-- Allow authenticated users to upload to their own folder
create policy "Users can upload own avatar"
on storage.objects for insert
to authenticated
with check (
bucket_id = 'avatars'
and (select auth.uid())::text = (storage.foldername(name))[1]
);
-- Allow public read access
create policy "Public avatar access"
on storage.objects for select
to authenticated, anon
using ( bucket_id = 'avatars' );
-- Allow users to update their own files
create policy "Users can update own avatar"
on storage.objects for update
to authenticated
using (
bucket_id = 'avatars'
and (select auth.uid())::text = (storage.foldername(name))[1]
)
with check (
bucket_id = 'avatars'
and (select auth.uid())::text = (storage.foldername(name))[1]
);
-- Allow users to delete their own files
create policy "Users can delete own avatar"
on storage.objects for delete
to authenticated
using (
bucket_id = 'avatars'
and (select auth.uid())::text = (storage.foldername(name))[1]
);
// Upload a file
const { data, error } = await supabase.storage
.from('avatars')
.upload(`${user.id}/avatar.png`, file, {
cacheControl: '3600',
upsert: true,
});
// Get a public URL
const { data: { publicUrl } } = supabase.storage
.from('avatars')
.getPublicUrl(`${user.id}/avatar.png`);
// Get a signed (temporary) URL for private buckets
const { data: { signedUrl }, error } = await supabase.storage
.from('documents')
.createSignedUrl(`${user.id}/report.pdf`, 3600); // 1 hour
// Delete a file
const { error } = await supabase.storage
.from('avatars')
.remove([`${user.id}/avatar.png`]);
// List files in a folder
const { data: files, error } = await supabase.storage
.from('avatars')
.list(user.id, { limit: 100, offset: 0 });
| Variable | Scope | Exposure |
|---|---|---|
| NEXT_PUBLIC_SUPABASE_URL | Client + Server | Safe to expose |
| NEXT_PUBLIC_SUPABASE_ANON_KEY | Client + Server | Safe to expose (RLS enforced) |
| SUPABASE_SERVICE_ROLE_KEY | Server only | NEVER expose to client |
| SUPABASE_DB_URL | Server only | NEVER expose to client |
| SUPABASE_JWT_SECRET | Server only | NEVER expose to client |
# .env.local (for local development -- add to .gitignore)
NEXT_PUBLIC_SUPABASE_URL=http://127.0.0.1:54321
NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJ...
SUPABASE_SERVICE_ROLE_KEY=eyJ...
SUPABASE_DB_URL=postgresql://postgres:postgres@127.0.0.1:54322/postgres
# .env.production (set via hosting provider, not committed)
NEXT_PUBLIC_SUPABASE_URL=https://xxxxx.supabase.co
NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJ...
SUPABASE_SERVICE_ROLE_KEY=eyJ...
# Pre-populated (no action needed):
# SUPABASE_URL, SUPABASE_ANON_KEY, SUPABASE_SERVICE_ROLE_KEY, SUPABASE_DB_URL
# Custom secrets
supabase secrets set --env-file .env.production
supabase secrets set STRIPE_SECRET_KEY=sk_live_xxxxx
# Access in Edge Functions
const stripeKey = Deno.env.get('STRIPE_SECRET_KEY')!;
Never commit .env files containing real keys to version control.
Add .env, .env.local, and .env.production to .gitignore.
Use the NEXT_PUBLIC_ prefix only for variables safe to expose to the browser.
The anon key is safe to expose because RLS enforces access control.
The service_role key bypasses RLS entirely -- use it only in trusted server contexts.

# Generate TypeScript types from your remote Supabase project
supabase gen types typescript --project-id <project-ref> > src/lib/database.types.ts
# Generate from local database
supabase gen types typescript --local > src/lib/database.types.ts
import type { Database } from './database.types';
// Typed client
const supabase = createClient<Database>(url, key);
// Extract specific table types
type Post = Database['public']['Tables']['posts']['Row'];
type PostInsert = Database['public']['Tables']['posts']['Insert'];
type PostUpdate = Database['public']['Tables']['posts']['Update'];
// Use in function signatures
async function createPost(post: PostInsert): Promise<Post> {
const { data, error } = await supabase
.from('posts')
.insert(post)
.select()
.single();
if (error) throw error;
return data;
}
// Type-safe RPC calls
type SearchResult = Database['public']['Functions']['search_posts']['Returns'];
// package.json script
{
"scripts": {
"db:types": "supabase gen types typescript --local > src/lib/database.types.ts",
"db:types:remote": "supabase gen types typescript --project-id $PROJECT_REF > src/lib/database.types.ts"
}
}
Use the Row, Insert, and Update variants appropriately (Insert/Update have optional fields for columns with defaults).

Postgres connections are expensive (1-3MB RAM each). Without pooling, applications exhaust connections under load.
Use the Supavisor pooler URL (provided in Supabase Dashboard) for all application connections:
# Direct connection (for migrations and CLI only)