Skip to content

EU AI Act Compliance

Implement EU AI Act requirements for AI system transparency, oversight, and risk management.

Overview

The EU AI Act establishes a comprehensive regulatory framework for AI systems based on risk levels. GateFlow provides tools to help organizations comply with these requirements.

Risk Categories

Risk Levels

| Level | Description | Examples | Requirements |
|---|---|---|---|
| Unacceptable | Prohibited AI practices | Social scoring, real-time biometric ID | Banned |
| High Risk | Significant impact on rights | Employment, credit, healthcare | Full compliance |
| Limited Risk | Transparency concerns | Chatbots, emotion recognition | Disclosure required |
| Minimal Risk | Low impact | Spam filters, recommendations | Voluntary codes |

Classify Your AI System

python
from openai import OpenAI

# Point the OpenAI SDK at the GateFlow gateway base URL.
client = OpenAI(
    base_url="https://api.gateflow.ai/v1",
    api_key="gw_prod_..."  # NOTE(review): load from an env var / secret store in real code
)

# Assess AI system risk level against the EU AI Act categories.
assessment = client.post(
    "/compliance/eu-ai-act/risk-assessment",
    json={
        "system_name": "HR Screening Assistant",
        "description": "AI system that helps screen job applications",
        "use_cases": [
            "Resume parsing and extraction",
            "Candidate ranking based on qualifications",
            "Interview scheduling recommendations"
        ],
        "data_processed": [
            "resumes",
            "cover_letters",
            "interview_notes"
        ],
        "decision_type": "employment_decisions",
        "human_oversight": "human_in_the_loop",
        "affected_persons": "job_applicants"
    }
)

print(f"Risk Level: {assessment['risk_level']}")
print(f"Classification: {assessment['classification']}")
print("\nRequired Measures:")  # no placeholders, so a plain string (was a needless f-string)
for measure in assessment["required_measures"]:
    print(f"  - {measure}")

High-Risk AI Requirements

Article 9: Risk Management System

python
# Article 9: register the risk management plan for a high-risk AI system.
identified_risks = [
    {
        "risk": "discrimination_bias",
        "likelihood": "medium",
        "impact": "high",
        "mitigation": "Regular bias audits and diverse training data",
    },
    {
        "risk": "privacy_violation",
        "likelihood": "low",
        "impact": "high",
        "mitigation": "PII detection and minimization",
    },
]

risk_config = client.post(
    "/compliance/eu-ai-act/risk-management",
    json={
        "system_id": "hr_screening_v1",
        "risk_assessment": {
            "identified_risks": identified_risks,
            "residual_risks": ["Potential for indirect discrimination"],
            "monitoring_frequency": "monthly",
        },
        "testing_requirements": {
            "bias_testing": True,
            "accuracy_benchmarks": True,
            "adversarial_testing": True,
        },
    },
)

Article 10: Data Governance

python
# Article 10: document data governance for the training and validation sets.
training_sources = [
    {
        "name": "historical_applications",
        "size": "50000 records",
        "date_range": "2020-2025",
        "verified_quality": True,
    },
]

governance = client.post(
    "/compliance/eu-ai-act/data-governance",
    json={
        "system_id": "hr_screening_v1",
        "training_data": {
            "sources": training_sources,
            "quality_measures": [
                "duplicate_removal",
                "bias_analysis",
                "representativeness_check",
            ],
            "gaps_identified": [
                "Underrepresentation of candidates over 55",
            ],
            "remediation": "Synthetic data augmentation and weighting",
        },
        "validation_data": {
            "separate_from_training": True,
            "size": "10000 records",
        },
    },
)

Article 13: Transparency and Information

python
# Article 13: generate transparency documentation for deployers/users.
system_documentation = {
    "intended_purpose": "Assist HR in screening job applications",
    "capabilities": [
        "Extract information from resumes",
        "Score candidates against job requirements",
        "Identify potential matches",
    ],
    "limitations": [
        "Cannot assess soft skills",
        "May not recognize non-traditional career paths",
        "Requires human review of all decisions",
    ],
    "accuracy_metrics": {
        "precision": 0.89,
        "recall": 0.92,
        "bias_score": 0.03,
    },
    "human_oversight_measures": [
        "All rankings reviewed by HR",
        "Override capability for all decisions",
        "Audit trail for accountability",
    ],
}

transparency = client.post(
    "/compliance/eu-ai-act/transparency",
    json={
        "system_id": "hr_screening_v1",
        "documentation": system_documentation,
        "user_instructions": {
            "format": "pdf",
            "language": ["en", "de", "fr"],
            "include_examples": True,
        },
    },
)

print(f"Documentation ID: {transparency['doc_id']}")
print(f"Download: {transparency['download_url']}")

Article 14: Human Oversight

python
# Article 14: configure the human-oversight controls for the system.
oversight_controls = {
    "stop_button": {
        "enabled": True,
        "authorized_users": ["hr_admins"],
    },
    "intervention_threshold": {
        "confidence_below": 0.7,
        "action": "require_human_review",
    },
    "audit_sampling": {
        "rate": 0.1,  # 10% random audit
        "include_overrides": True,
    },
}

oversight = client.post(
    "/compliance/eu-ai-act/human-oversight",
    json={
        "system_id": "hr_screening_v1",
        "oversight_type": "human_in_the_loop",
        "requirements": {
            "review_all_decisions": True,
            "override_capability": True,
            "explanation_required": True,
            "escalation_path": "hr_manager",
        },
        "controls": oversight_controls,
    },
)

Implementing Human Oversight

Decision Review Interface

python
# Fetch decisions that are queued for human review and print their details.
pending = client.get(
    "/compliance/eu-ai-act/pending-reviews",
    params={
        "system_id": "hr_screening_v1",
        "status": "awaiting_review"
    }
)

for decision in pending["decisions"]:
    print(f"\nDecision ID: {decision['id']}")
    print(f"Candidate: {decision['subject_id']}")
    print(f"AI Recommendation: {decision['recommendation']}")
    print(f"Confidence: {decision['confidence']}")
    print("Factors:")  # no placeholders, so a plain string (was a needless f-string)
    for factor in decision["factors"]:
        print(f"  - {factor['name']}: {factor['contribution']}")

Record Human Decision

python
# Persist the outcome of a human review, including feedback on the AI's call.
reviewer_feedback = {
    "issue_type": "missed_qualification",
    "details": "Non-traditional background in relevant field",
}

review = client.post(
    "/compliance/eu-ai-act/record-review",
    json={
        "decision_id": "dec_abc123",
        "reviewer": "hr_manager@company.com",
        "action": "override",  # alternatives: "approve", "escalate"
        "human_decision": "proceed_to_interview",
        "reasoning": "Candidate has relevant experience not captured in parsing",
        "ai_recommendation_accurate": False,
        "feedback": reviewer_feedback,
    },
)

Override Tracking

python
# Pull 90 days of override statistics to look for systematic AI errors.
analysis = client.get(
    "/compliance/eu-ai-act/override-analysis",
    params={
        "system_id": "hr_screening_v1",
        "period": "90d"
    }
)

print("Override Analysis:")
print(f"Total decisions: {analysis['total_decisions']}")
print(f"Overrides: {analysis['override_count']} ({analysis['override_rate']}%)")
print("\nOverride reasons:")  # no placeholders, so a plain string (was a needless f-string)
for reason, count in analysis["override_reasons"].items():
    print(f"  {reason}: {count}")

Transparency Disclosures

AI Interaction Disclosure

python
# Configure the automatic "you are talking to an AI" disclosure for a chatbot.
handoff_settings = {
    "enabled": True,
    "trigger_phrases": ["speak to human", "real person"],
    "button_visible": True,
}

disclosure = client.post(
    "/compliance/eu-ai-act/disclosure-config",
    json={
        "system_id": "customer_chatbot",
        "disclosure_type": "ai_interaction",
        "message": "You are interacting with an AI assistant. A human agent is available upon request.",
        "display": {
            "position": "conversation_start",
            "repeat_every_n_messages": 10,
        },
        "human_handoff": handoff_settings,
    },
)

Emotion Recognition Disclosure

python
# Configure the disclosure shown when emotion recognition is in use.
emotion_disclosure = {
    "system_id": "sentiment_analyzer",
    "disclosure_type": "emotion_recognition",
    "message": "This system analyzes emotional content in communications for quality assurance.",
    "consent_required": True,
    "opt_out_available": True,
}

disclosure = client.post(
    "/compliance/eu-ai-act/disclosure-config",
    json=emotion_disclosure,
)

Documentation Requirements

Technical Documentation

python
# Assemble Article 11 technical documentation, section by section.
def _section(name, content):
    """Build one documentation section entry for the request payload."""
    return {"section": name, "content": content}

doc_sections = [
    _section("general_description", {
        "intended_purpose": "...",
        "system_architecture": "...",
        "hardware_requirements": "...",
    }),
    _section("development_process", {
        "design_specifications": "...",
        "development_choices": "...",
        "training_methodologies": "...",
    }),
    _section("monitoring_and_functioning", {
        "capabilities": "...",
        "limitations": "...",
        "accuracy_levels": "...",
    }),
    _section("risk_management", {
        "identified_risks": "...",
        "mitigation_measures": "...",
        "residual_risks": "...",
    }),
]

tech_docs = client.post(
    "/compliance/eu-ai-act/technical-documentation",
    json={
        "system_id": "hr_screening_v1",
        "sections": doc_sections,
        "format": "pdf",
        "languages": ["en"],
    },
)

Logging Requirements

python
# Article 12: configure event logging with traceability and integrity checks.
events_to_record = [
    "system_start",
    "system_stop",
    "input_data",
    "output_decision",
    "human_override",
    "error_condition",
    "performance_anomaly",
]

logging_config = client.post(
    "/compliance/eu-ai-act/logging-config",
    json={
        "system_id": "hr_screening_v1",
        "log_retention_years": 10,  # Or lifetime of system + 10 years
        "events_to_log": events_to_record,
        "traceability": {
            "link_input_to_output": True,
            "include_model_version": True,
            "include_confidence_scores": True,
        },
        "integrity": {
            "hash_chain": True,
            "tamper_detection": True,
        },
    },
)

Conformity Assessment

Self-Assessment

python
# Run a conformity self-assessment across all required compliance areas.
checklist_areas = [
    "risk_management",
    "data_governance",
    "technical_documentation",
    "record_keeping",
    "transparency",
    "human_oversight",
    "accuracy_robustness",
    "cybersecurity",
]

assessment = client.post(
    "/compliance/eu-ai-act/conformity-assessment",
    json={
        "system_id": "hr_screening_v1",
        "assessment_type": "self_assessment",
        # Mark every checklist area as addressed.
        "checklist": {area: True for area in checklist_areas},
    }
)

print(f"Conformity Status: {assessment['status']}")
print(f"\nCompliant areas: {assessment['compliant_count']}/{assessment['total_count']}")
print("\nGaps identified:")  # no placeholders, so a plain string (was a needless f-string)
for gap in assessment["gaps"]:
    print(f"  - {gap['area']}: {gap['issue']}")
    print(f"    Remediation: {gap['remediation']}")

CE Marking Preparation

python
# Prepare CE marking documentation
ce_prep = client.post(
    "/compliance/eu-ai-act/ce-marking",
    json={
        "system_id": "hr_screening_v1",
        "manufacturer": {
            "name": "Acme Corp",
            "address": "123 Tech Street, Berlin, Germany",
            "contact": "compliance@acme.com"
        },
        "declaration_of_conformity": {
            "standards_applied": [
                "ISO/IEC 42001:2023",
                "ISO/IEC 27001:2022"
            ],
            "notified_body": None,  # For self-assessed systems
            "signed_by": "CTO",
            "date": "2026-02-17"
        }
    }
)

Monitoring and Reporting

Post-Market Monitoring

python
# Configure post-market monitoring
monitoring = client.post(
    "/compliance/eu-ai-act/post-market-monitoring",
    json={
        "system_id": "hr_screening_v1",
        "monitoring_plan": {
            "performance_metrics": [
                "accuracy",
                "bias_score",
                "override_rate"
            ],
            "data_collection": {
                "feedback_from_users": True,
                "incident_reports": True,
                "performance_drift": True
            },
            "review_frequency": "quarterly",
            "threshold_alerts": {
                "accuracy_drop": 0.05,
                "bias_increase": 0.02,
                "override_rate_increase": 0.1
            }
        }
    }
)

Incident Reporting

python
# Report serious incident
incident = client.post(
    "/compliance/eu-ai-act/incident-report",
    json={
        "system_id": "hr_screening_v1",
        "incident_type": "discrimination_detected",
        "severity": "serious",
        "description": "Pattern of lower scores for candidates from certain regions",
        "discovery_date": "2026-02-15",
        "affected_persons": "approximately 200 candidates",
        "immediate_actions": [
            "System paused for review",
            "Manual review of affected decisions",
            "Bias audit initiated"
        ],
        "report_to_authority": True
    }
)

Compliance Dashboard

Status Overview

python
# Get EU AI Act compliance status
status = client.get(
    "/compliance/eu-ai-act/status",
    params={"system_id": "hr_screening_v1"}
)

print("EU AI Act Compliance Status")
print("=" * 40)
print(f"Risk Level: {status['risk_level']}")
print(f"Overall Compliance: {status['compliance_score']}%")
print(f"\nRequirements:")
for req, data in status["requirements"].items():
    icon = "✓" if data["compliant"] else "✗"
    print(f"  {icon} {req}: {data['status']}")

Best Practices

  1. Classify early - Determine risk level during design phase
  2. Document everything - Maintain comprehensive records
  3. Enable oversight - Build human controls into the system
  4. Monitor continuously - Track performance and bias metrics
  5. Plan for updates - AI Act requirements may evolve
  6. Train staff - Ensure operators understand their role
  7. Engage legal counsel - Complex cases need expert guidance

Timeline

| Date | Milestone |
|---|---|
| Aug 2024 | AI Act entered into force |
| Feb 2025 | Prohibited practices apply |
| Aug 2025 | GPAI rules apply |
| Aug 2026 | High-risk rules apply |
| Aug 2027 | Full enforcement |

Next Steps

Built with reliability in mind.