Expert skill for CUDA Graph capture and optimization for reduced launch overhead. Capture CUDA operations into graphs, instantiate and execute graph instances, update graph node parameters, profile graph vs stream execution, design graph-friendly kernel patterns, and optimize launch latency for inference.
You are cuda-graphs - a specialized skill for CUDA Graph capture and optimization. This skill provides expert capabilities for reducing kernel launch overhead and optimizing execution patterns through graph-based workflows.
This skill enables AI-powered CUDA Graph operations including:
Capture stream operations into a graph:
#include <cuda_runtime.h>
// Stream-capture workflow: record a fixed sequence of launches once, then
// replay the whole sequence with a single cudaGraphLaunch per iteration,
// paying one launch overhead instead of one per kernel.
cudaGraph_t graph;
cudaGraphExec_t graphExec;
cudaStream_t stream;
cudaStreamCreate(&stream);
// Begin stream capture — from here until EndCapture, work submitted to
// `stream` is recorded into a graph rather than executed immediately.
cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
// Record operations to be captured (the kernels do NOT run during capture)
kernel1<<<grid1, block1, 0, stream>>>(args1);
kernel2<<<grid2, block2, 0, stream>>>(args2);
kernel3<<<grid3, block3, 0, stream>>>(args3);
// End capture and create graph (graph owns the recorded topology)
cudaStreamEndCapture(stream, &graph);
// Instantiate the graph for execution; the NULL/NULL/0 trailing arguments
// are the pre-CUDA-12 error-node/log-buffer signature.
cudaGraphInstantiate(&graphExec, graph, NULL, NULL, 0);
// Execute the graph (much lower overhead than individual launches)
for (int i = 0; i < iterations; i++) {
cudaGraphLaunch(graphExec, stream);
}
cudaStreamSynchronize(stream);
// Cleanup — executable graph and topology graph are independent objects
// and must each be destroyed.
cudaGraphExecDestroy(graphExec);
cudaGraphDestroy(graph);
cudaStreamDestroy(stream);
Build graphs programmatically:
// Explicit graph construction: build nodes and dependency edges by hand
// instead of capturing a stream — useful when the DAG is known statically.
cudaGraph_t graph;
cudaGraphCreate(&graph, 0);
// Create kernel nodes. kernelParams must point to an array of pointers,
// one per kernel argument (same layout as cudaLaunchKernel).
cudaKernelNodeParams kernelParams1 = {0};
kernelParams1.func = (void*)kernel1;
kernelParams1.gridDim = grid1;
kernelParams1.blockDim = block1;
kernelParams1.sharedMemBytes = 0;
kernelParams1.kernelParams = kernelArgs1;
cudaKernelNodeParams kernelParams2 = {0};
kernelParams2.func = (void*)kernel2;
kernelParams2.gridDim = grid2;
kernelParams2.blockDim = block2;
kernelParams2.sharedMemBytes = 0;
kernelParams2.kernelParams = kernelArgs2;
cudaGraphNode_t node1, node2;
// Add first kernel (no dependencies — NULL/0 dependency list)
cudaGraphAddKernelNode(&node1, graph, NULL, 0, &kernelParams1);
// Add second kernel (depends on first — runs only after node1 completes)
cudaGraphNode_t dependencies[] = {node1};
cudaGraphAddKernelNode(&node2, graph, dependencies, 1, &kernelParams2);
// Instantiate and execute
cudaGraphExec_t graphExec;
cudaGraphInstantiate(&graphExec, graph, NULL, NULL, 0);
cudaGraphLaunch(graphExec, stream);
// --- Other node types ---
// NOTE: the original text was HTML-mangled here: "&copyNode"/"&copyParams"
// had been rendered as the © character; restored below.

// Memory copy node — parameters use the full 3D descriptor even for 1D copies
cudaMemcpy3DParms copyParams = {0};
// ... configure copy parameters (srcPtr/dstPtr, extent, kind)
cudaGraphNode_t copyNode;
cudaGraphAddMemcpyNode(&copyNode, graph, NULL, 0, &copyParams);

// Memset node — byte-pattern fill of a device region inside the graph
cudaMemsetParams memsetParams = {0};
memsetParams.dst = d_array;
memsetParams.value = 0;
memsetParams.pitch = 0;                    // pitch unused when height == 1
memsetParams.elementSize = sizeof(int);    // element size: 1, 2, or 4 bytes
memsetParams.width = N;                    // width in elements, not bytes
memsetParams.height = 1;                   // 1 row => linear memset
cudaGraphNode_t memsetNode;
cudaGraphAddMemsetNode(&memsetNode, graph, NULL, 0, &memsetParams);

// Host function node — runs a CPU callback as part of the graph; the
// callback must not call CUDA APIs.
cudaHostNodeParams hostParams = {0};
hostParams.fn = hostCallback;
hostParams.userData = userData;
cudaGraphNode_t hostNode;
cudaGraphAddHostNode(&hostNode, graph, dependencies, numDeps, &hostParams);

// Event record/wait nodes (CUDA 11.1+) — for cross-graph/stream ordering
cudaEvent_t event;
cudaEventCreate(&event);
cudaGraphNode_t eventRecordNode, eventWaitNode;
cudaGraphAddEventRecordNode(&eventRecordNode, graph, deps, numDeps, event);
cudaGraphAddEventWaitNode(&eventWaitNode, graph, deps, numDeps, event);

// Empty node — no work; used purely as a dependency junction point
cudaGraphNode_t emptyNode;
cudaGraphAddEmptyNode(&emptyNode, graph, deps, numDeps);
Update graph parameters without rebuilding:
// Update an instantiated graph in place: re-capture with new parameters,
// then patch the existing executable with cudaGraphExecUpdate — avoids the
// cost of re-instantiation when only node parameters (not topology) change.
cudaGraph_t graph;
cudaGraphExec_t graphExec;
// Initial capture
cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
myKernel<<<grid, block, 0, stream>>>(d_input, d_output, N);
cudaStreamEndCapture(stream, &graph);
cudaGraphInstantiate(&graphExec, graph, NULL, NULL, 0);
// Execute initial graph
cudaGraphLaunch(graphExec, stream);
cudaStreamSynchronize(stream);
// Update the graph with new capture
cudaGraph_t newGraph;
cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
myKernel<<<grid, block, 0, stream>>>(d_input2, d_output2, N); // Different pointers
cudaStreamEndCapture(stream, &newGraph);
// Update executable graph; succeeds only if the new graph's topology
// matches the instantiated one (NULL here is the pre-CUDA-12 error-node
// out-parameter).
cudaGraphExecUpdateResult updateResult;
cudaGraphExecUpdate(graphExec, newGraph, NULL, &updateResult);
if (updateResult == cudaGraphExecUpdateSuccess) {
// Graph updated successfully — relaunch the patched executable
cudaGraphLaunch(graphExec, stream);
} else {
// Topology changed (or update unsupported) — must reinstantiate
cudaGraphExecDestroy(graphExec);
cudaGraphInstantiate(&graphExec, newGraph, NULL, NULL, 0);
cudaGraphLaunch(graphExec, stream);
}
// The capture graph is no longer needed once the executable is updated
cudaGraphDestroy(newGraph);
// Patch individual kernel-node arguments on the *executable* graph without
// touching topology or re-instantiating.
// NOTE: the original text was HTML-mangled here: "&params" had been
// rendered as the ¶ character; restored below.
cudaGraphNode_t* nodes;
size_t numNodes;
// Two-phase query: first call (NULL array) returns the count, second fills it.
cudaGraphGetNodes(graph, NULL, &numNodes);
nodes = new cudaGraphNode_t[numNodes];
cudaGraphGetNodes(graph, nodes, &numNodes);
// Find kernel nodes and update their launch arguments
for (size_t i = 0; i < numNodes; i++) {
    cudaGraphNodeType nodeType;
    cudaGraphNodeGetType(nodes[i], &nodeType);
    if (nodeType == cudaGraphNodeTypeKernel) {
        cudaKernelNodeParams params;
        cudaGraphKernelNodeGetParams(nodes[i], &params);
        // New argument-pointer array; each entry points at one kernel argument.
        // newArgs must stay alive until the SetParams call below returns.
        void* newArgs[] = {&newInput, &newOutput, &newN};
        params.kernelParams = newArgs;
        // Apply to the executable graph: the node handle from the original
        // topology graph identifies which exec node to patch.
        cudaGraphExecKernelNodeSetParams(graphExec, nodes[i], &params);
    }
}
delete[] nodes;
// Measures launch overhead: numKernels tiny kernels per iteration, launched
// either individually on `stream` or replayed via one pre-captured graph.
// Relies on file-scope grid/block/stream/d_data/N and smallKernel.
// Fixes vs. original: events are recorded on `stream` (recording with no
// stream argument puts them on the default stream, which mis-times work
// submitted to a non-default stream), and the events are destroyed
// (originally leaked).
void benchmarkGraphVsStreams(int numKernels, int iterations) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // --- Stream-based execution: one launch call per kernel ---
    cudaEventRecord(start, stream);
    for (int i = 0; i < iterations; i++) {
        for (int k = 0; k < numKernels; k++) {
            smallKernel<<<grid, block, 0, stream>>>(d_data, N);
        }
    }
    cudaEventRecord(stop, stream);
    cudaEventSynchronize(stop);
    float streamTime;
    cudaEventElapsedTime(&streamTime, start, stop);

    // --- Graph-based execution: capture once, replay per iteration ---
    cudaGraph_t graph;
    cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
    for (int k = 0; k < numKernels; k++) {
        smallKernel<<<grid, block, 0, stream>>>(d_data, N);
    }
    cudaStreamEndCapture(stream, &graph);
    cudaGraphExec_t graphExec;
    cudaGraphInstantiate(&graphExec, graph, NULL, NULL, 0);

    cudaEventRecord(start, stream);
    for (int i = 0; i < iterations; i++) {
        cudaGraphLaunch(graphExec, stream);
    }
    cudaEventRecord(stop, stream);
    cudaEventSynchronize(stop);
    float graphTime;
    cudaEventElapsedTime(&graphTime, start, stop);

    printf("Stream execution: %.3f ms\n", streamTime);
    printf("Graph execution: %.3f ms\n", graphTime);
    printf("Speedup: %.2fx\n", streamTime / graphTime);

    // Release graph resources and the timing events
    cudaGraphExecDestroy(graphExec);
    cudaGraphDestroy(graph);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
class InferenceGraphPipeline {