Design LLM guardrails and constraint systems for production AI. Use when implementing input validation, output constraints, fallback strategies, or safety boundaries for LLM-powered features.
Validate inputs before they reach the model:

def validate_input(prompt: str) -> str:
    # Raise explicitly; assert statements vanish under `python -O`.
    if len(prompt) > MAX_TOKENS:
        raise ValueError("prompt exceeds the input budget")
    prompt = strip_sensitive_data(prompt)  # redact PII and secrets
    prompt = enforce_template(prompt)      # pin user text into a fixed template
    return prompt
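
A minimal sketch of those helpers, assuming regex-based redaction and a delimiter template. MAX_TOKENS, the patterns, and the delimiters are illustrative, not a fixed API:

import re

MAX_TOKENS = 4096  # character-budget stand-in; use a real tokenizer count in production

_SENSITIVE = [
    re.compile(r"\b\d{3}-\d{2}-\d{4}\b"),        # SSN-shaped strings
    re.compile(r"\b[\w.+-]+@[\w-]+\.[\w.]+\b"),  # email addresses
]

def strip_sensitive_data(prompt: str) -> str:
    # Redact anything matching a known sensitive pattern.
    for pattern in _SENSITIVE:
        prompt = pattern.sub("[REDACTED]", prompt)
    return prompt

def enforce_template(prompt: str) -> str:
    # Pin untrusted text inside fixed delimiters so it cannot pose as instructions.
    return f"<user_input>\n{prompt}\n</user_input>"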

Validate outputs before they reach callers:

def validate_output(response: str, schema: Schema) -> Result:
    try:
        parsed = parse_response(response)
    except ParseError:  # whatever parse_response raises on malformed output
        return fallback_result()  # unparseable output never escapes
    if not schema.validate(parsed):
        return fallback_result()  # schema violations take the fallback path
    return parsed
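
A concrete instantiation for JSON output; the Schema dataclass and FALLBACK value here are assumptions for illustration:

import json
from dataclasses import dataclass

@dataclass(frozen=True)
class Schema:
    required_keys: frozenset

    def validate(self, parsed) -> bool:
        # Structural check only; add type and range checks as needed.
        return isinstance(parsed, dict) and self.required_keys <= parsed.keys()

FALLBACK = {"answer": None, "source": "fallback"}

def validate_json_output(response: str, schema: Schema) -> dict:
    try:
        parsed = json.loads(response)
    except json.JSONDecodeError:
        return FALLBACK
    return parsed if schema.validate(parsed) else FALLBACK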

Degrade gracefully across retries instead of failing hard (a sketch follows):

Attempt 1: Full prompt → validate response
Attempt 2: Simplified prompt → validate response
Attempt 3: Fall back to a cached or default response
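
One way to express that ladder, reusing validate_json_output and FALLBACK from above. llm_call is a hypothetical client callable (prompt in, raw text out), and the prompt wording is illustrative:

def call_with_retries(user_text: str, llm_call, schema: Schema) -> dict:
    prompts = [
        # Attempt 1: full prompt.
        f"Answer in JSON with keys {sorted(schema.required_keys)}.\n{user_text}",
        # Attempt 2: simplified prompt.
        f"Answer in JSON.\n{user_text}",
    ]
    for prompt in prompts:
        result = validate_json_output(llm_call(prompt), schema)
        if result is not FALLBACK:
            return result
    # Attempt 3: cached/default response; a static default shown here.
    return FALLBACK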

Gate calls behind a circuit breaker so a degraded model does not take every request down with it:

if error_rate > THRESHOLD:
    return cached_fallback()
# Otherwise proceed with the LLM call.
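
A rolling-window breaker is one way to maintain error_rate; the window size and threshold below are illustrative defaults:

from collections import deque

class CircuitBreaker:
    def __init__(self, threshold: float = 0.5, window: int = 20):
        self.threshold = threshold
        self._outcomes = deque(maxlen=window)  # True marks a failed call

    def record(self, failed: bool) -> None:
        self._outcomes.append(failed)

    @property
    def error_rate(self) -> float:
        return sum(self._outcomes) / len(self._outcomes) if self._outcomes else 0.0

    def should_skip_llm(self) -> bool:
        # The circuit opens once failures dominate the recent window.
        return self.error_rate > self.threshold

Record each call's outcome with record(...) and check should_skip_llm() before dispatching the next request.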