Use this skill when writing BoxLang asynchronous code: BoxFuture, futureNew, asyncRun, asyncAll, asyncAny, asyncAllApply, executors, schedulers, thread components, parallel pipelines, file watchers, or distributed locking with bx:lock.
BoxLang provides a comprehensive async framework built on Java's CompletableFuture
and Project Loom virtual threads. The AsyncService manages executors, schedulers,
and futures. All async primitives integrate seamlessly with the BoxLang runtime.
BoxFuture extends CompletableFuture with BoxLang-friendly chaining.
futureNew() — Primary BIF (v1.4.0+)
// Create a future from a function (runs asynchronously)
var future = futureNew( () => fetchDataFromAPI() )
// Create a completed future with a value
var future = futureNew( "Hello World" )
// Create an empty future (complete later)
var future = futureNew()
// Create with a specific executor
var future = futureNew( () => heavyCalculation(), "cpu-tasks" )
asyncRun()
// Run a function asynchronously on the default io-tasks executor
var future = asyncRun( () => fetchDataFromAPI() )
// Run with specific executor
var future = asyncRun( () => processCPUWork(), "cpu-tasks" )
// Get the result (blocks until complete)
var result = future.get()
// With timeout
var result = future.get( 5, "seconds" )
then, thenAsync, and onError
then() — runs the transformation on the same thread (fast, lightweight transforms)
thenAsync() — runs the transformation on an executor thread (for heavy/I/O work)
asyncRun( () => fetchUser( userId ) )
.then( (user) => enrichWithProfile( user ) ) // same thread
.thenAsync( (user) => sendWelcomeEmail( user ) ) // executor thread
.then( (user) => user )
.onError( (error) => {
logError( error.message )
return getDefaultUser()
})
.get()
// Blocking retrieval
var result = future.get()
var result = future.get( 5000 ) // timeout in ms
var result = future.get( 5, "seconds" ) // timeout with unit
// Safe retrieval with defaults
var result = future.getOrDefault( "fallback" )
var result = future.joinOrDefault( 0 ) // join() variant with default
// Get as Attempt object (functional error handling)
var attempt = future.getAsAttempt()
if ( attempt.isPresent() ) {
doSomething( attempt.get() )
}
asyncAll
asyncAll() runs multiple operations concurrently and returns a BoxFuture<Array> —
call .get() once to receive all results in order:
// Returns BoxFuture<Array> — .get() resolves to [result1, result2, result3]
var results = asyncAll([
() => fetchOrders( userId ),
() => fetchProfile( userId ),
() => fetchPreferences( userId )
]).get()
var [orders, profile, prefs] = results
// Mix lambdas, closures, and pre-created futures
var results = asyncAll([
() => fetchOrders( userId ), // function
futureNew( () => fetchProfile( id ) ), // pre-created future
() => fetchPreferences( userId ) // function
]).get()
asyncAny
asyncAny() returns the result of whichever future completes first (v1.4.0+):
var fastestResult = asyncAny([
() => fetchFromPrimaryDB(),
() => fetchFromReplicaDB(),
() => fetchFromCache()
]).get()
asyncAllApply
Apply a function to every element of an array or struct in parallel (v1.4.0+):
// Array processing in parallel
var userIds = [ 1, 2, 3, 4, 5 ]
var profiles = asyncAllApply(
userIds,
( id ) => fetchUserProfile( id ) // each item processed in parallel
)
// profiles = [ profile1, profile2, profile3, profile4, profile5 ]
// Struct processing in parallel
var config = { db: "prod-db", cache: "redis", queue: "rabbit" }
var validated = asyncAllApply(
config,
( item ) => validateConfig( item.key, item.value ) // item = { key, value }
)
// Three pre-configured runtime executors:
// "io-tasks" — virtual threads (default, best for I/O-bound work)
// "cpu-tasks" — scheduled pool, 20 threads (best for CPU-bound work)
// "scheduled-tasks" — scheduled pool, 20 threads (for cron/periodic tasks)
// Pass executor name as second arg to asyncRun / futureNew
asyncRun( () => cpuIntensiveWork(), "cpu-tasks" )
futureNew( () => fetchData(), "io-tasks" )
// Access an executor by name
var executor = executorGet( "io-tasks" )
// Sequential pipeline
var result = futureNew( () => loadRawData() )
.then( (data) => parseData( data ) )
.then( (parsed) => validate( parsed ) )
.then( (valid) => persist( valid ) )
.onError( (e) => rollback() )
.get()
// Mix sequential and parallel stages
var pipeline = asyncRun( () => fetchConfig() )
.then( (config) => {
// Fan out: run two tasks in parallel with the config
var results = asyncAll([
() => buildReport( config ),
() => sendNotifications( config )
]).get()
return results
})
.get()
Create a BoxLang Scheduler class (Scheduler.bx) and register it in boxlang.json:
// schedulers/MyScheduler.bx
class {
property name="scheduler"
property name="logger"
function configure() {
scheduler.setSchedulerName( "MyApp-Scheduler" )
scheduler.setTimezone( "UTC" )
// Register tasks with fluent DSL
scheduler.task( "cleanupExpiredSessions" )
.call( () => sessionService.cleanup() )
.every( 15, "minutes" )
.onFailure( (task, error) => logError( error.message ) )
scheduler.task( "dailyReport" )
.call( () => reportService.generate() )
.every( 1, "day" )
.startOn( "00:00" )
}
void function onStartup() {
logger.info( "Scheduler started: #scheduler.getSchedulerName()#" )
}
void function onShutdown() {
logger.info( "Scheduler shutting down" )
}
void function onAnyTaskError( required task, required exception ) {
logger.error( "Task '#task.getName()#' failed: #exception.message#" )
}
}
Register in boxlang.json:
{
"scheduler": {
"schedulers": [ "/path/to/schedulers/MyScheduler.bx" ]
}
}
Or run from CLI:
boxlang schedule /path/to/schedulers/MyScheduler.bx
thread Component
For explicit thread management:
// Start a named thread
thread name="backgroundWorker" action="run" {
// Code runs in a new thread
processLargeDataset( datasetId )
}
// Start multiple threads
thread name="worker1" action="run" {
processChunk( chunk1 )
}
thread name="worker2" action="run" {
processChunk( chunk2 )
}
// Wait for threads to finish
thread action="join" name="worker1,worker2" timeout=30000
// Access thread results
var result1 = cfthread.worker1.result
var result2 = cfthread.worker2.result
bx:lock
bx:lock prevents race conditions and supports distributed locking:
// Exclusive lock (one thread at a time)