"""Density-Driven Optimal Control (D2OC) for stochastic LTI multi-agent systems.

Addresses decentralized non-uniform area coverage using density functions and
convergence guarantees. Activation: density-driven control, multi-agent
coverage, decentralized control, area coverage.

This module accompanies work on the decentralized non-uniform area coverage
problem for multi-agent systems, a critical task in missions with high spatial
priority and resource constraints. While existing density-based methods often
rely on computationally heavy Eulerian PDE solvers or heuristic planning, this
approach provides convergence guarantees for stochastic LTI multi-agent
systems.
"""
import numpy as np
from typing import Callable, Tuple, List
from scipy.integrate import odeint
class DensityDrivenController:
    """Density-driven coverage controller for LTI multi-agent systems.

    Each agent performs gradient ascent on a scalar density function
    (evaluated on its position components) so the team concentrates in
    high-priority regions, while a short-range inverse-square repulsion
    term keeps agents from colliding.

    NOTE(review): the density is assumed to be defined over the first
    ``input_dim`` state components (the positions, for integrator-type
    dynamics such as the double integrator in the example) — confirm for
    other (A, B) pairs.
    """

    def __init__(self,
                 A: np.ndarray,
                 B: np.ndarray,
                 density_fn: Callable[[np.ndarray], float],
                 num_agents: int,
                 gamma: float = 0.1):
        """
        Args:
            A: State matrix, shape (state_dim, state_dim).
            B: Input matrix, shape (state_dim, input_dim).
            density_fn: Scalar density over the position subspace
                (the first ``input_dim`` state components).
            num_agents: Number of agents in the team.
            gamma: Gradient-ascent gain on the density term.
        """
        self.A = A
        self.B = B
        self.density_fn = density_fn
        self.n = num_agents
        self.gamma = gamma
        self.state_dim = A.shape[0]
        self.input_dim = B.shape[1]

    def compute_control(self, states: np.ndarray, time: float) -> np.ndarray:
        """Return the stacked control inputs for all agents.

        Args:
            states: Flat array of stacked agent states, length
                ``num_agents * state_dim``.
            time: Current time (unused; kept for ODE-solver compatibility).

        Returns:
            Flat array of stacked controls, length ``num_agents * input_dim``.
        """
        controls = []
        for i in range(self.n):
            xi = states[i * self.state_dim:(i + 1) * self.state_dim]
            # Differentiate the density over the position components only:
            # the gradient must live in the input space so it can be summed
            # with the repulsion term (the original code differentiated over
            # the full state, causing a dimension mismatch).
            grad_rho = self._compute_density_gradient(xi[:self.input_dim])
            # Gradient ASCENT: move toward high-density regions.
            ui = self.gamma * grad_rho + self._agent_interaction(i, states)
            controls.append(ui)
        return np.concatenate(controls)

    def _compute_density_gradient(self, x: np.ndarray) -> np.ndarray:
        """Central-difference approximation of the density gradient at ``x``."""
        eps = 1e-6
        # Work in float so the eps perturbation is not truncated when the
        # caller passes an integer array.
        base = np.asarray(x, dtype=float)
        grad = np.zeros_like(base)
        for i in range(len(base)):
            x_plus = base.copy()
            x_plus[i] += eps
            x_minus = base.copy()
            x_minus[i] -= eps
            grad[i] = (self.density_fn(x_plus) - self.density_fn(x_minus)) / (2 * eps)
        return grad

    def _agent_interaction(self, agent_id: int, states: np.ndarray) -> np.ndarray:
        """Inverse-square repulsion from neighbors within unit distance."""
        repulsion = np.zeros(self.input_dim)
        # Use position components only so the accumulator dimensions match
        # (the original accumulated full state_dim differences into an
        # input_dim vector).
        start_i = agent_id * self.state_dim
        pi = states[start_i:start_i + self.input_dim]
        for j in range(self.n):
            if j == agent_id:
                continue
            start_j = j * self.state_dim
            pj = states[start_j:start_j + self.input_dim]
            diff = pi - pj
            dist = np.linalg.norm(diff)
            # Repel only within unit range; skip coincident agents to avoid
            # division by zero.
            if 0 < dist < 1.0:
                repulsion += diff / (dist ** 3)
        return repulsion

    def simulate(self, x0: np.ndarray, t_span: np.ndarray) -> np.ndarray:
        """Integrate the closed-loop dynamics dx = A x + B u(x, t).

        Args:
            x0: Flat initial state, length ``num_agents * state_dim``.
            t_span: Time points at which to report the solution.

        Returns:
            Trajectory array of shape (len(t_span), len(x0)).
        """
        def dynamics(x, t):
            u = self.compute_control(x, t)
            dxdt = []
            for i in range(self.n):
                xi = x[i * self.state_dim:(i + 1) * self.state_dim]
                ui = u[i * self.input_dim:(i + 1) * self.input_dim]
                dxdt.append(self.A @ xi + self.B @ ui)
            return np.concatenate(dxdt)
        return odeint(dynamics, x0, t_span)
def gaussian_density(center: np.ndarray, sigma: float):
    """Build an isotropic (unnormalized) Gaussian density peaked at ``center``.

    Args:
        center: Peak location of the bump.
        sigma: Standard deviation controlling the spread.

    Returns:
        A callable mapping a point to the density value; equals 1.0 at
        ``center`` and decays with squared distance from it.
    """
    # Hoist the denominator out of the returned closure.
    two_sigma_sq = 2.0 * sigma ** 2

    def density(x: np.ndarray) -> float:
        offset = x - center
        return np.exp(-np.sum(offset * offset) / two_sigma_sq)

    return density
def _demo() -> None:
    """Run a small 5-agent coverage demo on a planar double integrator."""
    # Planar double-integrator dynamics: state = [px, py, vx, vy],
    # inputs are the two accelerations.
    A = np.array([[0, 0, 1, 0],
                  [0, 0, 0, 1],
                  [0, 0, 0, 0],
                  [0, 0, 0, 0]])
    B = np.array([[0, 0],
                  [0, 0],
                  [1, 0],
                  [0, 1]])

    # Coverage density: a single Gaussian bump centered at the origin.
    rho = gaussian_density(np.array([0, 0]), sigma=2.0)

    controller = DensityDrivenController(A, B, rho, num_agents=5)

    # Random initial conditions: 5 agents x 4 states, spread by a factor of 5.
    x0 = np.random.randn(20) * 5

    # Integrate the closed-loop system over 10 seconds.
    t = np.linspace(0, 10, 100)
    trajectory = controller.simulate(x0, t)


# Example usage
if __name__ == "__main__":
    _demo()