Guide for using typed-ffmpeg, a modern Python FFmpeg wrapper with extensive typing support and comprehensive filter support. Use this when working with FFmpeg operations, video/audio processing, or filter graphs in Python.
This skill explains how to use the typed-ffmpeg package, a modern Python FFmpeg wrapper built around type safety, IDE integration, and a complete set of typed filters.
typed-ffmpeg is a zero-dependency Python library (pure standard library) that provides:
# Basic installation
pip install typed-ffmpeg
# With graph visualization support (the [graph] extra)
pip install 'typed-ffmpeg[graph]'
# For compatibility with the ffmpeg-python package
pip install typed-ffmpeg-compatible
Note: typed-ffmpeg requires the FFmpeg executable to be installed on your system.
import ffmpeg

# Simple transcoding: read input.mp4 and write output.mp4
stream = ffmpeg.input("input.mp4")
stream = stream.output("output.mp4")
stream.run()

# Chain operations in a single fluent expression
ffmpeg.input("input.mp4").output("output.mp4").run()

# Input with options
stream = ffmpeg.input(
    "input.mp4",
    ss="00:00:10",  # Start at 10 seconds
    t="00:00:30",   # Duration of 30 seconds
    r=30,           # Frame rate
    s="1920x1080"   # Resolution
)

# Format-specific options
stream = ffmpeg.input(
    "input.mp4",
    f="mp4",         # Force format
    codec="h264",    # Codec selection
    hwaccel="cuda"   # Hardware acceleration
)

# Output with encoding options
stream = (
    ffmpeg
    .input("input.mp4")
    .output(
        "output.mp4",
        vcodec="libx264",      # Video codec
        acodec="aac",          # Audio codec
        video_bitrate="2M",    # Video bitrate
        audio_bitrate="192k",  # Audio bitrate
        preset="fast",         # Encoding preset
        crf=23                 # Quality (lower = better)
    )
)

# Select specific streams
video = ffmpeg.input("input.mp4").video  # Video stream
audio = ffmpeg.input("input.mp4").audio  # Audio stream

# Select by index
stream = ffmpeg.input("input.mp4")[0]        # First stream
video = ffmpeg.input("input.mp4").video(0)   # First video stream
audio = ffmpeg.input("input.mp4").audio(0)   # First audio stream
import ffmpeg

# Apply video filters by chaining filter methods
stream = (
    ffmpeg
    .input("input.mp4")
    .hflip()                        # Horizontal flip
    .vflip()                        # Vertical flip
    .scale(width=1280, height=720)  # Scale video
    .output("output.mp4")
)

# Audio filters (select the audio stream first)
stream = (
    ffmpeg
    .input("input.mp3")
    .audio
    .volume(volume=2.0)  # Increase volume (2.0 = double)
    .output("output.mp3")
)
import ffmpeg
import ffmpeg.filters

# Multiple inputs
in1 = ffmpeg.input("video1.mp4")
in2 = ffmpeg.input("video2.mp4")

# Concatenate videos: n=2 segments, 1 video output stream, 0 audio streams
output = (
    ffmpeg.filters
    .concat(in1, in2, n=2, v=1, a=0)
    .video(0)  # Take the first (only) video output of concat
    .output("output.mp4")
)

# Overlay example
main = ffmpeg.input("main.mp4")
overlay = ffmpeg.input("overlay.png")
output = (
    main
    .video
    .overlay(
        overlay.hflip(),  # Flip the overlay image before compositing
        x=10,             # Offset from the left edge
        y=10              # Offset from the top edge
    )
    .output("output.mp4")
)
import ffmpeg

# Use expressions for dynamic values
stream = (
    ffmpeg
    .input("input.mp4")
    .drawtext(
        # %{pts\:hms} — the colon is escaped (\\ in Python -> \ in the
        # filter string) so FFmpeg's filter parser does not split on it
        text="Time: %{pts\\:hms}",
        x="(w-text_w)/2",  # Center horizontally
        y="h-th-10",       # Bottom with padding
        fontsize=24,
        fontcolor="white"
    )
    .output("output.mp4")
)
import ffmpeg
from fractions import Fraction

# Get media information as a plain dict (parsed ffprobe JSON)
info = ffmpeg.probe("video.mp4")

# Access format information
duration = float(info['format']['duration'])
bitrate = int(info['format']['bit_rate'])
format_name = info['format']['format_name']

# Access stream information
for stream in info['streams']:
    codec_type = stream['codec_type']  # 'video', 'audio', etc.
    codec_name = stream['codec_name']
    if codec_type == 'video':
        width = stream['width']
        height = stream['height']
        # r_frame_rate is a ratio string such as "30/1". Parse it with
        # Fraction instead of eval(): eval() would execute arbitrary code
        # embedded in untrusted media metadata.
        fps = float(Fraction(stream['r_frame_rate']))
import ffmpeg

# Get a structured probe object instead of a raw dict
probe_result = ffmpeg.probe_obj("video.mp4")

# Access with typed attributes
format_info = probe_result.format
print(f"Duration: {format_info.duration}s")
print(f"Size: {format_info.size} bytes")

# Iterate streams
for stream in probe_result.streams:
    if stream.codec_type == 'video':
        print(f"Video: {stream.width}x{stream.height} @ {stream.r_frame_rate}")
    elif stream.codec_type == 'audio':
        print(f"Audio: {stream.sample_rate}Hz, {stream.channels} channels")
import ffmpeg

# Build filter graph
stream = (
    ffmpeg
    .input("input.mp4")
    .hflip()
    .scale(width=1280, height=720)
    .output("output.mp4")
)

# Compile to an FFmpeg argument list (without executing)
cmd = ffmpeg.compile(stream)
print(" ".join(cmd))
# Prints the joined command line, e.g.:
#   ffmpeg -i input.mp4 -filter_complex ... output.mp4

# Execute the command
stream.run()

# Execute with custom options
stream.run(overwrite_output=True, capture_stdout=True)
import ffmpeg

# Create filter graph
stream = (
    ffmpeg
    .input("input.mp4")
    .hflip()
    .scale(width=1280, height=720)
    .output("output.mp4")
)

# Visualize in Jupyter/IPython (returns IPython display object)
stream  # Just return the stream object

# Get Graphviz source
graph = stream.view()  # Returns Graphviz object

# Save visualization (requires the [graph] extra)
graph.render("filter_graph", format="png")
import ffmpeg

# Multiple inputs
video = ffmpeg.input("video.mp4")
audio = ffmpeg.input("audio.mp3")

# Combine video and audio without re-encoding (stream copy)
output = (
    ffmpeg
    .output(
        video.video,
        audio.audio,
        filename="output.mp4",
        vcodec="copy",  # Copy the video stream as-is
        acodec="copy"   # Copy the audio stream as-is
    )
)

# Multiple outputs from one input
input_stream = ffmpeg.input("input.mp4")
output1 = input_stream.output("output1.mp4", vcodec="libx264")
output2 = input_stream.output("output2.webm", vcodec="libvpx")

# Merge the two outputs so they can be run together
merged = ffmpeg.merge_outputs(output1, output2)
merged.run()
import ffmpeg

def watermark_filter(stream, watermark_path):
    """Overlay the image at *watermark_path* onto *stream* at offset (10, 10)."""
    logo = ffmpeg.input(watermark_path)
    return stream.overlay(logo, x=10, y=10)

# The same reusable chain applied to different videos
first = ffmpeg.input("video1.mp4")
second = ffmpeg.input("video2.mp4")
output1 = watermark_filter(first, "logo.png").output("out1.mp4")
output2 = watermark_filter(second, "logo.png").output("out2.mp4")
import ffmpeg
from ffmpeg import VideoStream, AudioStream, AVStream

# Functions with type hints work seamlessly
def process_video(input_path: str) -> VideoStream:
    """Flip the input horizontally and scale it to 1280x720, with type safety."""
    source: AVStream = ffmpeg.input(input_path)
    video_only: VideoStream = source.video
    flipped = video_only.hflip()
    return flipped.scale(width=1280, height=720)

# IDE will provide auto-completion for filter methods
result = process_video("input.mp4")
# Most filter parameters have type hints and documentation
stream = (
    ffmpeg
    .input("input.mp4")
    .scale(
        width=1920,   # int | str | Expression
        height=1080,  # int | str | Expression
        flags="bilinear",  # str (scaling algorithm)
        force_original_aspect_ratio="disable"  # Literal type
    )
)
# IDE will show:
# - Parameter names and types
# - Documentation from FFmpeg
# - Valid values for enums/literals
import ffmpeg
from ffmpeg import FFMpegExecuteError, FFMpegTypeError, FFMpegValueError