{
  "_meta": {
    "version": "2.1.0",
    "title": "EU AI Act Scope Assessment Tool",
    "description": "Decision tree for determining AI Act applicability, risk classification, role, and obligations",
    "legal_basis": "Regulation (EU) 2024/1689 (AI Act)",
    "generated": "2026-03-08",
    "stages": [
      "S1: Territorial Scope (Art. 2)",
      "S2: Exclusions (Art. 2(3)-(12))",
      "S3: AI System Definition (Art. 3(1))",
      "S4: GPAI Model Check (Art. 51-56)",
      "S5: Prohibited Practices (Art. 5)",
      "S6: High-Risk Classification (Art. 6, Annex I, Annex III)",
      "S7: Art. 6(3) Filter",
      "S8: Sector Selection",
      "S9: Transparency Obligations (Art. 50)",
      "S10: Open-Source Check (Art. 2(12))",
      "S11: Role Determination (Art. 3, Art. 25)",
      "S12: Result"
    ],
    "set_array_semantics": "When an option has a 'set_array' property like {\"transparency_obligations\": \"value\"}, the implementation should APPEND the value to the named array attribute (do not overwrite — accumulate across questions).",
    "attribute_vs_set": "'attribute' on a question node declares the primary field being set by the selected option's 'value'. 'set' on an individual option sets additional/derived fields beyond the primary attribute. Both can coexist on the same question.",
    "changelog": [
      {
        "version": "2.1.0",
        "date": "2026-03-08",
        "changes": [
          "Fixed attribute/set collision on q_is_gpai_model: renamed attribute to gpai_role to avoid overwriting is_gpai_model boolean with string value",
          "Fixed router: GPAI providers with transparency obligations now correctly get result_gpai (previously fell through to result_transparency, missing GPAI obligations)",
          "Added obligations_by_type (transparency) to result_gpai so GPAI providers also see their Art. 50 obligations",
          "Router now uses gpai_role == 'provider' to distinguish GPAI providers from deployers"
        ]
      },
      {
        "version": "2.0.0",
        "date": "2026-03-07",
        "changes": [
          "Relabeled all stages to match actual flow order (S1-S12)",
          "Added set_array semantics documentation to _meta",
          "Documented attribute vs set semantics in _meta",
          "Added 'previous' back-navigation pointers to all nodes",
          "Removed Section B items from q_annex_i_legislation (fix routing bug)",
          "Added q_annex_i_also_annex_iii node; re-routed Annex I path to also check Annex III",
          "Added q_sector node; re-routed all paths through sector selection before transparency",
          "Added sensitive-area warning to Art. 6(3) filter node",
          "Made precautionary high-risk classification more transparent for 'unsure' on CA",
          "Clarified provider/deployer distinction in Art. 50(3) transparency question",
          "Added Art. 25(1) check for importers/distributors; added Art. 25(3) note for product manufacturers",
          "Added result_high_risk_gpai combined result; updated router logic",
          "Added AI literacy (Art. 4) note to all result nodes",
          "Added Art. 2(5,7,9,11) other-legislation notes to key result nodes",
          "Added sector-specific guidance references (healthcare, financial, insurance, energy) to result nodes"
        ]
      },
      {
        "version": "1.1.0",
        "date": "2026-03-07",
        "changes": [
          "Initial decision tree"
        ]
      }
    ]
  },
  "attributes": {
    "territorial_nexus": {
      "type": "enum",
      "values": [
        "eu_provider",
        "eu_deployer",
        "third_country_output_eu",
        "eu_importer_distributor",
        "eu_product_manufacturer",
        "eu_authorised_rep",
        "eu_affected_person",
        "no_nexus"
      ],
      "description": "How the operator connects to the EU market (Art. 2(1))"
    },
    "exclusion_type": {
      "type": "enum",
      "values": [
        "none",
        "military_defence",
        "national_security",
        "scientific_research",
        "pre_market_rd",
        "personal_use",
        "open_source_exempt",
        "third_country_law_enforcement_cooperation"
      ],
      "description": "Applicable exclusion from scope (Art. 2(3)-(12))"
    },
    "is_ai_system": {
      "type": "boolean",
      "description": "Meets Art. 3(1) AI system definition",
      "note": "Can be true, false, or \"unsure\" (string) — implementation must handle non-boolean values"
    },
    "is_gpai_model": {
      "type": "boolean",
      "description": "Is a general-purpose AI model (Art. 3(63))"
    },
    "gpai_role": {
      "type": "enum",
      "values": [
        "provider",
        "deployer",
        "none"
      ],
      "description": "Whether the user provides or deploys the GPAI model (used for routing to correct obligation set)"
    },
    "gpai_systemic_risk": {
      "type": "boolean",
      "description": "GPAI model with systemic risk (Art. 51, >=10^25 FLOPs)",
      "note": "Can be true, false, or \"unknown\" (string) — implementation must handle non-boolean values"
    },
    "gpai_open_source": {
      "type": "boolean",
      "description": "GPAI model released under qualifying open-source licence (Art. 53(2))"
    },
    "risk_category": {
      "type": "enum",
      "values": [
        "prohibited",
        "high_risk_annex_i",
        "high_risk_annex_iii",
        "transparency",
        "minimal",
        "not_in_scope"
      ],
      "description": "Primary risk classification"
    },
    "prohibited_practice": {
      "type": "enum",
      "values": [
        "none",
        "subliminal_manipulation",
        "vulnerability_exploitation",
        "social_scoring",
        "predictive_policing",
        "facial_db_scraping",
        "emotion_workplace_education",
        "biometric_categorisation_sensitive",
        "realtime_rbi_law_enforcement"
      ],
      "description": "Which prohibited practice applies (Art. 5(1)(a)-(h))"
    },
    "high_risk_pathway": {
      "type": "enum",
      "values": [
        "none",
        "annex_i_safety_component",
        "annex_iii_use_case",
        "unsure"
      ],
      "description": "How the system qualifies as high-risk (Art. 6(1) vs Art. 6(2))"
    },
    "annex_i_legislation": {
      "type": "enum",
      "values": [
        "none",
        "machinery",
        "toys",
        "recreational_craft",
        "lifts",
        "atex",
        "radio_equipment",
        "pressure_equipment",
        "cableways",
        "ppe",
        "gas_appliances",
        "medical_devices",
        "ivd_medical_devices",
        "civil_aviation",
        "two_three_wheel_vehicles",
        "agricultural_vehicles",
        "marine_equipment",
        "rail_interoperability",
        "motor_vehicles",
        "motor_vehicle_safety",
        "unmanned_aircraft",
        "other"
      ],
      "description": "Applicable Union harmonisation legislation (Annex I)"
    },
    "annex_i_section": {
      "type": "enum",
      "values": [
        "none",
        "section_a",
        "section_b"
      ],
      "description": "Annex I section (A = full AI Act requirements, B = limited)"
    },
    "annex_iii_area": {
      "type": "enum",
      "values": [
        "none",
        "biometrics",
        "critical_infrastructure",
        "education",
        "employment",
        "essential_services",
        "law_enforcement",
        "migration_asylum_border",
        "justice_democracy"
      ],
      "description": "Annex III high-risk area (1-8)"
    },
    "annex_iii_use_case": {
      "type": "string",
      "description": "Specific Annex III use case identifier (e.g. '3a', '4b', '5c')"
    },
    "art6_3_filter_applied": {
      "type": "boolean",
      "description": "Whether Art. 6(3) filter removes high-risk classification",
      "note": "Can be true, false, or \"unsure\" (string) — implementation must handle non-boolean values"
    },
    "performs_profiling": {
      "type": "boolean",
      "description": "System performs profiling of natural persons (blocks Art. 6(3) filter)"
    },
    "role": {
      "type": "enum",
      "values": [
        "provider",
        "deployer",
        "importer",
        "distributor",
        "product_manufacturer",
        "authorised_representative"
      ],
      "description": "Operator role under the AI Act (Art. 3)"
    },
    "deployer_becomes_provider": {
      "type": "boolean",
      "description": "Deployer assumes provider obligations (Art. 25)"
    },
    "deployer_becomes_provider_reason": {
      "type": "enum",
      "values": [
        "none",
        "own_name_trademark",
        "substantial_modification",
        "repurpose_to_high_risk"
      ],
      "description": "Reason deployer becomes provider (Art. 25(1)(a)-(c))"
    },
    "is_public_authority": {
      "type": "boolean",
      "description": "Deployer is a public authority or acts on behalf of one"
    },
    "transparency_obligations": {
      "type": "array",
      "values": [
        "chatbot_disclosure",
        "synthetic_content_marking",
        "emotion_biometric_disclosure",
        "deepfake_disclosure",
        "ai_generated_text_disclosure"
      ],
      "description": "Applicable Art. 50 transparency obligations"
    },
    "sector": {
      "type": "enum",
      "values": [
        "none",
        "healthcare_medical_devices",
        "financial_services",
        "insurance",
        "energy",
        "transport",
        "education",
        "employment_hr",
        "law_enforcement",
        "migration_border",
        "justice",
        "public_services",
        "other"
      ],
      "description": "User's sector for sector-specific guidance"
    }
  },
  "nodes": {
    "start": {
      "id": "start",
      "type": "info",
      "stage": "S1",
      "title": "EU AI Act Scope Assessment",
      "body": "This tool helps you determine whether your AI system or general-purpose AI model falls within the scope of the EU AI Act (Regulation 2024/1689), what your risk classification is, and what obligations apply to you.\n\nYou will need to know:\n- What your system does and how it works\n- Where it is developed, deployed, or used\n- Your role in the AI value chain\n- The sector/domain where the system operates",
      "legal_ref": "Regulation (EU) 2024/1689",
      "next": "q_territorial_nexus",
      "translations": {
        "fr": {
          "title": "Évaluation du champ d'application du règlement européen sur l'IA",
          "body": "Cet outil vous aide à déterminer si votre système d'IA ou votre modèle d'IA à usage général relève du champ d'application du règlement européen sur l'IA (Règlement (UE) 2024/1689), quelle est votre catégorie de risque et quelles obligations s'appliquent à vous.\n\nVous devrez connaître :\n- Ce que fait votre système et comment il fonctionne\n- Où il est développé, déployé ou utilisé\n- Votre rôle dans la chaîne de valeur de l'IA\n- Le secteur/domaine dans lequel le système opère"
        },
        "nl": {
          "title": "Beoordeling toepassingsgebied EU AI-verordening",
          "body": "Dit hulpmiddel helpt u te bepalen of uw AI-systeem of AI-model voor algemene doeleinden binnen het toepassingsgebied van de EU AI-verordening (Verordening (EU) 2024/1689) valt, wat uw risicoclassificatie is en welke verplichtingen op u van toepassing zijn.\n\nU moet weten:\n- Wat uw systeem doet en hoe het werkt\n- Waar het wordt ontwikkeld, ingezet of gebruikt\n- Uw rol in de AI-waardeketen\n- De sector/het domein waarin het systeem opereert"
        },
        "de": {
          "title": "Bewertung des Anwendungsbereichs der EU-KI-Verordnung",
          "body": "Dieses Tool hilft Ihnen festzustellen, ob Ihr KI-System oder Ihr KI-Modell mit allgemeinem Verwendungszweck in den Anwendungsbereich der EU-KI-Verordnung (Verordnung (EU) 2024/1689) fällt, welche Risikokategorie gilt und welche Pflichten für Sie gelten.\n\nSie müssen wissen:\n- Was Ihr System tut und wie es funktioniert\n- Wo es entwickelt, eingesetzt oder genutzt wird\n- Ihre Rolle in der KI-Wertschöpfungskette\n- Den Sektor/Bereich, in dem das System eingesetzt wird"
        },
        "es": {
          "title": "Evaluación del ámbito de aplicación del Reglamento europeo de IA",
          "body": "Esta herramienta le ayuda a determinar si su sistema de IA o su modelo de IA de uso general está comprendido en el ámbito de aplicación del Reglamento europeo de IA (Reglamento (UE) 2024/1689), cuál es su clasificación de riesgo y qué obligaciones le son aplicables.\n\nNecesitará saber:\n- Qué hace su sistema y cómo funciona\n- Dónde se desarrolla, despliega o utiliza\n- Su papel en la cadena de valor de la IA\n- El sector/ámbito en el que opera el sistema"
        }
      }
    },
    "q_territorial_nexus": {
      "id": "q_territorial_nexus",
      "type": "question",
      "stage": "S1",
      "title": "Connection to the EU",
      "body": "What is your connection to the EU market? Select the option that best describes your situation.",
      "legal_ref": "Art. 2(1)",
      "attribute": "territorial_nexus",
      "options": [
        {
          "label": "I develop/provide AI systems or GPAI models and place them on the EU market (regardless of where I am established)",
          "value": "eu_provider",
          "next": "q_exclusion_military",
          "translations": {
            "fr": {
              "label": "Je développe/fournis des systèmes d'IA ou des modèles d'IA à usage général et les mets sur le marché de l'UE (quel que soit mon lieu d'établissement)"
            },
            "nl": {
              "label": "Ik ontwikkel/bied AI-systemen of AI-modellen voor algemene doeleinden aan en breng deze op de EU-markt (ongeacht waar ik gevestigd ben)"
            },
            "de": {
              "label": "Ich entwickle/biete KI-Systeme oder KI-Modelle mit allgemeinem Verwendungszweck an und bringe sie auf den EU-Markt (unabhängig von meinem Niederlassungsort)"
            },
            "es": {
              "label": "Desarrollo/proporciono sistemas de IA o modelos de IA de uso general y los comercializo en el mercado de la UE (independientemente de dónde esté establecido)"
            }
          }
        },
        {
          "label": "I use (deploy) AI systems and I am established or located in the EU",
          "value": "eu_deployer",
          "next": "q_exclusion_military",
          "translations": {
            "fr": {
              "label": "J'utilise (déploie) des systèmes d'IA et je suis établi ou situé dans l'UE"
            },
            "nl": {
              "label": "Ik gebruik (zet in) AI-systemen en ben gevestigd of bevind me in de EU"
            },
            "de": {
              "label": "Ich nutze (betreibe) KI-Systeme und bin in der EU niedergelassen oder ansässig"
            },
            "es": {
              "label": "Utilizo (despliego) sistemas de IA y estoy establecido o ubicado en la UE"
            }
          }
        },
        {
          "label": "I am established outside the EU, but the output of my AI system is used in the EU",
          "value": "third_country_output_eu",
          "next": "q_exclusion_military",
          "translations": {
            "fr": {
              "label": "Je suis établi en dehors de l'UE, mais les résultats de mon système d'IA sont utilisés dans l'UE"
            },
            "nl": {
              "label": "Ik ben buiten de EU gevestigd, maar de output van mijn AI-systeem wordt in de EU gebruikt"
            },
            "de": {
              "label": "Ich bin außerhalb der EU niedergelassen, aber die Ausgabe meines KI-Systems wird in der EU verwendet"
            },
            "es": {
              "label": "Estoy establecido fuera de la UE, pero los resultados de mi sistema de IA se utilizan en la UE"
            }
          }
        },
        {
          "label": "I import or distribute AI systems in the EU",
          "value": "eu_importer_distributor",
          "next": "q_exclusion_military",
          "translations": {
            "fr": {
              "label": "J'importe ou distribue des systèmes d'IA dans l'UE"
            },
            "nl": {
              "label": "Ik importeer of distribueer AI-systemen in de EU"
            },
            "de": {
              "label": "Ich importiere oder vertreibe KI-Systeme in der EU"
            },
            "es": {
              "label": "Importo o distribuyo sistemas de IA en la UE"
            }
          }
        },
        {
          "label": "I am a product manufacturer placing a product with an AI system on the EU market",
          "value": "eu_product_manufacturer",
          "next": "q_exclusion_military",
          "translations": {
            "fr": {
              "label": "Je suis un fabricant de produits qui met un produit intégrant un système d'IA sur le marché de l'UE"
            },
            "nl": {
              "label": "Ik ben een productfabrikant die een product met een AI-systeem op de EU-markt brengt"
            },
            "de": {
              "label": "Ich bin ein Produkthersteller, der ein Produkt mit einem KI-System auf den EU-Markt bringt"
            },
            "es": {
              "label": "Soy un fabricante de productos que comercializa un producto con un sistema de IA en el mercado de la UE"
            }
          }
        },
        {
          "label": "I am an authorised representative of a non-EU AI provider",
          "value": "eu_authorised_rep",
          "next": "q_exclusion_military",
          "translations": {
            "fr": {
              "label": "Je suis un mandataire d'un fournisseur d'IA non européen"
            },
            "nl": {
              "label": "Ik ben een gemachtigde van een niet-EU AI-aanbieder"
            },
            "de": {
              "label": "Ich bin ein Bevollmächtigter eines nicht in der EU ansässigen KI-Anbieters"
            },
            "es": {
              "label": "Soy un representante autorizado de un proveedor de IA no perteneciente a la UE"
            }
          }
        },
        {
          "label": "I am an affected person located in the EU",
          "value": "eu_affected_person",
          "next": "result_affected_person",
          "translations": {
            "fr": {
              "label": "Je suis une personne concernée située dans l'UE"
            },
            "nl": {
              "label": "Ik ben een betrokkene die zich in de EU bevindt"
            },
            "de": {
              "label": "Ich bin eine betroffene Person, die sich in der EU befindet"
            },
            "es": {
              "label": "Soy una persona afectada ubicada en la UE"
            }
          }
        },
        {
          "label": "None of the above — I have no connection to the EU market",
          "value": "no_nexus",
          "next": "result_out_of_scope_no_nexus",
          "translations": {
            "fr": {
              "label": "Aucune des réponses ci-dessus — je n'ai aucun lien avec le marché de l'UE"
            },
            "nl": {
              "label": "Geen van bovenstaande — ik heb geen band met de EU-markt"
            },
            "de": {
              "label": "Keine der oben genannten Optionen — ich habe keine Verbindung zum EU-Markt"
            },
            "es": {
              "label": "Ninguna de las anteriores — no tengo conexión con el mercado de la UE"
            }
          }
        }
      ],
      "previous": "start",
      "translations": {
        "fr": {
          "title": "Lien avec l'Union européenne",
          "body": "Quel est votre lien avec le marché de l'UE ? Sélectionnez l'option qui décrit le mieux votre situation."
        },
        "nl": {
          "title": "Band met de Europese Unie",
          "body": "Wat is uw band met de EU-markt? Selecteer de optie die uw situatie het beste beschrijft."
        },
        "de": {
          "title": "Verbindung zur Europäischen Union",
          "body": "Welche Verbindung haben Sie zum EU-Markt? Wählen Sie die Option, die Ihre Situation am besten beschreibt."
        },
        "es": {
          "title": "Conexión con la Unión Europea",
          "body": "¿Cuál es su conexión con el mercado de la UE? Seleccione la opción que mejor describa su situación."
        }
      }
    },
    "q_exclusion_military": {
      "id": "q_exclusion_military",
      "type": "question",
      "stage": "S2",
      "title": "Military, Defence, or National Security",
      "body": "Is your AI system placed on the market, put into service, or used exclusively for military, defence, or national security purposes?",
      "legal_ref": "Art. 2(3)",
      "attribute": "exclusion_type",
      "options": [
        {
          "label": "Yes",
          "value": "military_defence",
          "next": "result_excluded_military",
          "translations": {
            "fr": {
              "label": "Oui"
            },
            "nl": {
              "label": "Ja"
            },
            "de": {
              "label": "Ja"
            },
            "es": {
              "label": "Sí"
            }
          }
        },
        {
          "label": "No",
          "value": "none",
          "next": "q_exclusion_intl_cooperation",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_territorial_nexus",
      "translations": {
        "fr": {
          "title": "Militaire, défense ou sécurité nationale",
          "body": "Votre système d'IA est-il mis sur le marché, mis en service ou utilisé exclusivement à des fins militaires, de défense ou de sécurité nationale ?"
        },
        "nl": {
          "title": "Militair, defensie of nationale veiligheid",
          "body": "Wordt uw AI-systeem uitsluitend voor militaire doeleinden, defensie of nationale veiligheid in de handel gebracht, in gebruik gesteld of gebruikt?"
        },
        "de": {
          "title": "Militär, Verteidigung oder nationale Sicherheit",
          "body": "Wird Ihr KI-System ausschließlich für militärische Zwecke, die Verteidigung oder die nationale Sicherheit in Verkehr gebracht, in Betrieb genommen oder verwendet?"
        },
        "es": {
          "title": "Militar, defensa o seguridad nacional",
          "body": "¿Su sistema de IA se comercializa, se pone en servicio o se utiliza exclusivamente con fines militares, de defensa o de seguridad nacional?"
        }
      }
    },
    "q_exclusion_intl_cooperation": {
      "id": "q_exclusion_intl_cooperation",
      "type": "question",
      "stage": "S2",
      "title": "International Law Enforcement Cooperation",
      "body": "Are you a **public authority in a third country** or an **international organisation** using AI systems in the framework of international cooperation or agreements for **law enforcement and judicial cooperation** with the EU or its Member States?",
      "legal_ref": "Art. 2(4)",
      "note": "This exclusion requires that the third country or international organisation provides adequate safeguards for the protection of fundamental rights and freedoms.",
      "options": [
        {
          "label": "Yes — with adequate fundamental rights safeguards",
          "value": "yes",
          "set": {
            "exclusion_type": "third_country_law_enforcement_cooperation"
          },
          "next": "result_excluded_intl_cooperation",
          "translations": {
            "fr": {
              "label": "Oui — avec des garanties adéquates en matière de droits fondamentaux"
            },
            "nl": {
              "label": "Ja — met passende waarborgen voor de grondrechten"
            },
            "de": {
              "label": "Ja — mit angemessenen Garantien für die Grundrechte"
            },
            "es": {
              "label": "Sí — con garantías adecuadas en materia de derechos fundamentales"
            }
          }
        },
        {
          "label": "No",
          "value": "no",
          "next": "q_exclusion_research",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_exclusion_military",
      "translations": {
        "fr": {
          "title": "Coopération internationale en matière répressive",
          "body": "Êtes-vous une **autorité publique d'un pays tiers** ou une **organisation internationale** utilisant des systèmes d'IA dans le cadre de la coopération internationale ou d'accords de **coopération en matière répressive et judiciaire** avec l'UE ou ses États membres ?",
          "note": "Cette exclusion exige que le pays tiers ou l'organisation internationale fournisse des garanties adéquates pour la protection des droits fondamentaux et des libertés."
        },
        "nl": {
          "title": "Internationale samenwerking op het gebied van rechtshandhaving",
          "body": "Bent u een **overheidsinstantie van een derde land** of een **internationale organisatie** die AI-systemen gebruikt in het kader van internationale samenwerking of overeenkomsten voor **samenwerking op het gebied van rechtshandhaving en justitie** met de EU of haar lidstaten?",
          "note": "Deze uitzondering vereist dat het derde land of de internationale organisatie passende waarborgen biedt voor de bescherming van de grondrechten en fundamentele vrijheden."
        },
        "de": {
          "title": "Internationale Zusammenarbeit bei der Strafverfolgung",
          "body": "Sind Sie eine **Behörde eines Drittlandes** oder eine **internationale Organisation**, die KI-Systeme im Rahmen der internationalen Zusammenarbeit oder von Abkommen über die **Zusammenarbeit bei der Strafverfolgung und der justiziellen Zusammenarbeit** mit der EU oder ihren Mitgliedstaaten einsetzt?",
          "note": "Diese Ausnahme setzt voraus, dass das Drittland oder die internationale Organisation angemessene Garantien für den Schutz der Grundrechte und Grundfreiheiten bietet."
        },
        "es": {
          "title": "Cooperación internacional en materia de aplicación de la ley",
          "body": "¿Es usted una **autoridad pública de un tercer país** o una **organización internacional** que utiliza sistemas de IA en el marco de la cooperación internacional o de acuerdos de **cooperación en materia de aplicación de la ley y cooperación judicial** con la UE o sus Estados miembros?",
          "note": "Esta exclusión exige que el tercer país o la organización internacional ofrezca garantías adecuadas para la protección de los derechos fundamentales y las libertades."
        }
      }
    },
    "q_exclusion_research": {
      "id": "q_exclusion_research",
      "type": "question",
      "stage": "S2",
      "title": "Scientific Research & Development",
      "body": "Is your AI system or AI model developed and used for the sole purpose of scientific research and development?",
      "legal_ref": "Art. 2(6)",
      "note": "This exclusion covers pure R&D. It does NOT cover testing in real-world conditions, nor systems that are placed on the market or put into service.",
      "options": [
        {
          "label": "Yes — sole purpose is scientific R&D",
          "value": "scientific_research",
          "set": {
            "exclusion_type": "scientific_research"
          },
          "next": "result_excluded_research",
          "translations": {
            "fr": {
              "label": "Oui — le seul objectif est la R&D scientifique"
            },
            "nl": {
              "label": "Ja — het enige doel is wetenschappelijk O&O"
            },
            "de": {
              "label": "Ja — der einzige Zweck ist wissenschaftliche F&E"
            },
            "es": {
              "label": "Sí — el único fin es la I+D científica"
            }
          }
        },
        {
          "label": "No",
          "value": "no",
          "next": "q_exclusion_pre_market",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_exclusion_intl_cooperation",
      "translations": {
        "fr": {
          "title": "Recherche scientifique et développement",
          "body": "Votre système d'IA ou votre modèle d'IA est-il développé et utilisé dans le seul but de la recherche scientifique et du développement ?",
          "note": "Cette exclusion couvre la R&D pure. Elle ne couvre PAS les essais en conditions réelles, ni les systèmes mis sur le marché ou mis en service."
        },
        "nl": {
          "title": "Wetenschappelijk onderzoek en ontwikkeling",
          "body": "Wordt uw AI-systeem of AI-model uitsluitend ontwikkeld en gebruikt voor wetenschappelijk onderzoek en ontwikkeling?",
          "note": "Deze uitzondering heeft betrekking op zuiver onderzoek en ontwikkeling. Ze heeft GEEN betrekking op tests onder reële omstandigheden, noch op systemen die in de handel worden gebracht of in gebruik worden gesteld."
        },
        "de": {
          "title": "Wissenschaftliche Forschung und Entwicklung",
          "body": "Wird Ihr KI-System oder KI-Modell ausschließlich zum Zweck der wissenschaftlichen Forschung und Entwicklung entwickelt und verwendet?",
          "note": "Diese Ausnahme betrifft reine Forschung und Entwicklung. Sie umfasst NICHT die Erprobung unter Realbedingungen und auch keine Systeme, die in Verkehr gebracht oder in Betrieb genommen werden."
        },
        "es": {
          "title": "Investigación científica y desarrollo",
          "body": "¿Su sistema de IA o modelo de IA se desarrolla y utiliza con el único fin de la investigación científica y el desarrollo?",
          "note": "Esta exclusión cubre la I+D pura. NO cubre las pruebas en condiciones reales, ni los sistemas comercializados o puestos en servicio."
        }
      }
    },
    "q_exclusion_pre_market": {
      "id": "q_exclusion_pre_market",
      "type": "question",
      "stage": "S2",
      "title": "Pre-Market Research, Testing, or Development",
      "body": "Is your AI system or model still in the research, testing, or development phase — i.e., it has NOT yet been placed on the market or put into service?",
      "legal_ref": "Art. 2(8)",
      "note": "Testing in real-world conditions is NOT covered by this exclusion and is subject to specific rules (Art. 57-63).",
      "options": [
        {
          "label": "Yes — still in pre-market R&D (not yet on market or in service)",
          "value": "pre_market",
          "set": {
            "exclusion_type": "pre_market_rd"
          },
          "next": "result_excluded_pre_market",
          "translations": {
            "fr": {
              "label": "Oui — encore en phase de R&D avant mise sur le marché (pas encore sur le marché ni en service)"
            },
            "nl": {
              "label": "Ja — nog in O&O-fase vóór het in de handel brengen (nog niet op de markt of in gebruik)"
            },
            "de": {
              "label": "Ja — noch in der F&E-Phase vor dem Inverkehrbringen (noch nicht auf dem Markt oder in Betrieb)"
            },
            "es": {
              "label": "Sí — aún en fase de I+D previa a la comercialización (aún no en el mercado ni en servicio)"
            }
          }
        },
        {
          "label": "No — it is placed on the market or put into service (or will be)",
          "value": "no",
          "next": "q_exclusion_personal",
          "translations": {
            "fr": {
              "label": "Non — il est mis sur le marché ou mis en service (ou le sera)"
            },
            "nl": {
              "label": "Nee — het is in de handel gebracht of in gebruik gesteld (of zal dat worden)"
            },
            "de": {
              "label": "Nein — es wird in Verkehr gebracht oder in Betrieb genommen (oder wird es)"
            },
            "es": {
              "label": "No — está comercializado o puesto en servicio (o lo estará)"
            }
          }
        }
      ],
      "previous": "q_exclusion_research",
      "translations": {
        "fr": {
          "title": "Recherche, essais ou développement avant mise sur le marché",
          "body": "Votre système ou modèle d'IA est-il encore en phase de recherche, d'essais ou de développement — c'est-à-dire qu'il n'a PAS encore été mis sur le marché ou mis en service ?",
          "note": "Les essais en conditions réelles ne sont PAS couverts par cette exclusion et sont soumis à des règles spécifiques (art. 57-63)."
        },
        "nl": {
          "title": "Onderzoek, tests of ontwikkeling vóór het in de handel brengen",
          "body": "Bevindt uw AI-systeem of -model zich nog in de fase van onderzoek, tests of ontwikkeling — d.w.z. het is nog NIET in de handel gebracht of in gebruik gesteld?",
          "note": "Tests onder reële omstandigheden vallen NIET onder deze uitzondering en zijn onderworpen aan specifieke regels (art. 57-63)."
        },
        "de": {
          "title": "Forschung, Erprobung oder Entwicklung vor dem Inverkehrbringen",
          "body": "Befindet sich Ihr KI-System oder -Modell noch in der Forschungs-, Erprobungs- oder Entwicklungsphase — d. h. es wurde NOCH NICHT in Verkehr gebracht oder in Betrieb genommen?",
          "note": "Die Erprobung unter Realbedingungen fällt NICHT unter diese Ausnahme und unterliegt spezifischen Vorschriften (Art. 57-63)."
        },
        "es": {
          "title": "Investigación, pruebas o desarrollo previos a la comercialización",
          "body": "¿Su sistema o modelo de IA se encuentra todavía en fase de investigación, pruebas o desarrollo, es decir, NO ha sido comercializado ni puesto en servicio?",
          "note": "Las pruebas en condiciones reales NO están cubiertas por esta exclusión y están sujetas a normas específicas (art. 57-63)."
        }
      }
    },
    "q_exclusion_personal": {
      "id": "q_exclusion_personal",
      "type": "question",
      "stage": "S2",
      "title": "Personal Non-Professional Use",
      "body": "Are you a natural person using this AI system in the course of a purely personal, non-professional activity?",
      "legal_ref": "Art. 2(10)",
      "options": [
        {
          "label": "Yes — purely personal, non-professional use",
          "value": "personal",
          "set": {
            "exclusion_type": "personal_use"
          },
          "next": "result_excluded_personal",
          "translations": {
            "fr": {
              "label": "Oui — utilisation purement personnelle et non professionnelle"
            },
            "nl": {
              "label": "Ja — zuiver persoonlijk, niet-professioneel gebruik"
            },
            "de": {
              "label": "Ja — rein persönliche, nicht berufliche Nutzung"
            },
            "es": {
              "label": "Sí — uso puramente personal y no profesional"
            }
          }
        },
        {
          "label": "No — professional or commercial use",
          "value": "no",
          "next": "q_is_ai_system",
          "translations": {
            "fr": {
              "label": "Non — utilisation professionnelle ou commerciale"
            },
            "nl": {
              "label": "Nee — professioneel of commercieel gebruik"
            },
            "de": {
              "label": "Nein — berufliche oder kommerzielle Nutzung"
            },
            "es": {
              "label": "No — uso profesional o comercial"
            }
          }
        }
      ],
      "previous": "q_exclusion_pre_market",
      "translations": {
        "fr": {
          "title": "Utilisation personnelle non professionnelle",
          "body": "Êtes-vous une personne physique utilisant ce système d'IA dans le cadre d'une activité purement personnelle et non professionnelle ?"
        },
        "nl": {
          "title": "Persoonlijk niet-professioneel gebruik",
          "body": "Bent u een natuurlijk persoon die dit AI-systeem gebruikt in het kader van een zuiver persoonlijke, niet-professionele activiteit?"
        },
        "de": {
          "title": "Persönliche nicht-berufliche Nutzung",
          "body": "Sind Sie eine natürliche Person, die dieses KI-System im Rahmen einer rein persönlichen, nicht beruflichen Tätigkeit nutzt?"
        },
        "es": {
          "title": "Uso personal no profesional",
          "body": "¿Es usted una persona física que utiliza este sistema de IA en el marco de una actividad puramente personal y no profesional?"
        }
      }
    },
    "q_is_ai_system": {
      "id": "q_is_ai_system",
      "type": "question",
      "stage": "S3",
      "title": "AI System Definition",
      "body": "Does your system meet ALL of the following criteria?\n\n1. It is a **machine-based system** (software/hardware)\n2. It is designed to operate with **some degree of autonomy** (not purely manual human operation)\n3. It **infers** from the input it receives how to generate outputs (using ML, logic/knowledge-based approaches, or similar techniques — not just rule-based automation or basic statistics)\n4. It generates outputs such as **predictions, content, recommendations, or decisions**\n5. These outputs can **influence** physical or virtual environments",
      "legal_ref": "Art. 3(1), EC Guidelines B1",
      "note": "Systems that do NOT qualify as AI include: purely rule-based automation, basic data processing (sort/filter), simple statistical analysis, classical heuristics without learning, simple prediction using averages, and systems with zero autonomy.\n\nSystems that DO qualify include: machine learning (supervised, unsupervised, reinforcement, deep learning), knowledge/logic-based approaches (expert systems, knowledge bases), NLP models, generative AI, recommendation systems.",
      "attribute": "is_ai_system",
      "options": [
        {
          "label": "Yes — my system meets all these criteria",
          "value": true,
          "next": "q_is_gpai_model",
          "translations": {
            "fr": {
              "label": "Oui — mon système remplit tous ces critères"
            },
            "nl": {
              "label": "Ja — mijn systeem voldoet aan al deze criteria"
            },
            "de": {
              "label": "Ja — mein System erfüllt alle diese Kriterien"
            },
            "es": {
              "label": "Sí — mi sistema cumple todos estos criterios"
            }
          }
        },
        {
          "label": "No — my system does not meet one or more criteria",
          "value": false,
          "next": "result_not_ai_system",
          "translations": {
            "fr": {
              "label": "Non — mon système ne remplit pas un ou plusieurs critères"
            },
            "nl": {
              "label": "Nee — mijn systeem voldoet niet aan een of meer criteria"
            },
            "de": {
              "label": "Nein — mein System erfüllt eines oder mehrere Kriterien nicht"
            },
            "es": {
              "label": "No — mi sistema no cumple uno o más criterios"
            }
          }
        },
        {
          "label": "I'm not sure",
          "value": "unsure",
          "next": "q_ai_definition_detail",
          "flag": "consult_expert",
          "translations": {
            "fr": {
              "label": "Je ne suis pas sûr"
            },
            "nl": {
              "label": "Ik weet het niet zeker"
            },
            "de": {
              "label": "Ich bin mir nicht sicher"
            },
            "es": {
              "label": "No estoy seguro"
            }
          }
        }
      ],
      "previous": "q_exclusion_personal",
      "translations": {
        "fr": {
          "title": "Définition d'un système d'IA",
          "body": "Votre système remplit-il TOUS les critères suivants ?\n\n1. Il s'agit d'un **système fondé sur une machine** (logiciel/matériel)\n2. Il est conçu pour fonctionner avec **un certain degré d'autonomie** (pas une opération purement manuelle)\n3. Il **infère** à partir des données qu'il reçoit la manière de générer des résultats (utilisant l'apprentissage automatique, des approches logiques/fondées sur les connaissances ou des techniques similaires — pas de la simple automatisation fondée sur des règles ou des statistiques de base)\n4. Il génère des résultats tels que des **prédictions, contenus, recommandations ou décisions**\n5. Ces résultats peuvent **influencer** des environnements physiques ou virtuels",
          "note": "Les systèmes qui NE sont PAS considérés comme des systèmes d'IA : automatisation purement fondée sur des règles, traitement de données de base (tri/filtre), analyse statistique simple, heuristiques classiques sans apprentissage, prédiction simple fondée sur des moyennes et systèmes sans aucune autonomie.\n\nLes systèmes qui SONT considérés comme des systèmes d'IA : apprentissage automatique (supervisé, non supervisé, par renforcement, apprentissage profond), approches fondées sur les connaissances/la logique (systèmes experts, bases de connaissances), modèles de traitement du langage naturel, IA générative, systèmes de recommandation."
        },
        "nl": {
          "title": "Definitie van een AI-systeem",
          "body": "Voldoet uw systeem aan ALLE volgende criteria?\n\n1. Het is een **op machines gebaseerd systeem** (software/hardware)\n2. Het is ontworpen om te functioneren met **een zekere mate van autonomie** (niet zuiver handmatige menselijke bediening)\n3. Het **leidt af** uit de ontvangen input hoe het outputs moet genereren (met behulp van machinaal leren, op logica/kennis gebaseerde benaderingen of vergelijkbare technieken — niet louter op regels gebaseerde automatisering of basisstatistieken)\n4. Het genereert outputs zoals **voorspellingen, content, aanbevelingen of beslissingen**\n5. Deze outputs kunnen **invloed uitoefenen** op fysieke of virtuele omgevingen",
          "note": "Systemen die NIET als AI-systeem kwalificeren: zuiver op regels gebaseerde automatisering, basisgegevensverwerking (sorteren/filteren), eenvoudige statistische analyse, klassieke heuristieken zonder leren, eenvoudige voorspelling op basis van gemiddelden en systemen met nul autonomie.\n\nSystemen die WEL als AI-systeem kwalificeren: machinaal leren (supervised, unsupervised, reinforcement, deep learning), op kennis/logica gebaseerde benaderingen (expertsystemen, kennisbanken), NLP-modellen, generatieve AI, aanbevelingssystemen."
        },
        "de": {
          "title": "Definition eines KI-Systems",
          "body": "Erfüllt Ihr System ALLE folgenden Kriterien?\n\n1. Es handelt sich um ein **maschinengestütztes System** (Software/Hardware)\n2. Es ist so konzipiert, dass es mit **einem gewissen Grad an Autonomie** arbeitet (nicht rein manuelle menschliche Bedienung)\n3. Es **leitet** aus den empfangenen Eingaben ab, wie Ausgaben erzeugt werden sollen (unter Verwendung von maschinellem Lernen, logik-/wissensbasierten Ansätzen oder ähnlichen Techniken — nicht bloße regelbasierte Automatisierung oder einfache Statistik)\n4. Es erzeugt Ausgaben wie **Vorhersagen, Inhalte, Empfehlungen oder Entscheidungen**\n5. Diese Ausgaben können **physische oder virtuelle Umgebungen beeinflussen**",
          "note": "Systeme, die NICHT als KI-System gelten: rein regelbasierte Automatisierung, einfache Datenverarbeitung (Sortieren/Filtern), einfache statistische Analyse, klassische Heuristiken ohne Lernen, einfache Vorhersage anhand von Durchschnittswerten und Systeme ohne jegliche Autonomie.\n\nSysteme, die als KI-System gelten: maschinelles Lernen (überwacht, unüberwacht, Reinforcement, Deep Learning), wissens-/logikbasierte Ansätze (Expertensysteme, Wissensbasen), NLP-Modelle, generative KI, Empfehlungssysteme."
        },
        "es": {
          "title": "Definición de sistema de IA",
          "body": "¿Su sistema cumple TODOS los criterios siguientes?\n\n1. Es un **sistema basado en máquinas** (software/hardware)\n2. Está diseñado para funcionar con **cierto grado de autonomía** (no es una operación puramente manual)\n3. **Infiere** a partir de los datos que recibe cómo generar resultados (utilizando aprendizaje automático, enfoques lógicos/basados en el conocimiento o técnicas similares — no se trata de simple automatización basada en reglas o estadísticas básicas)\n4. Genera resultados tales como **predicciones, contenidos, recomendaciones o decisiones**\n5. Estos resultados pueden **influir** en entornos físicos o virtuales",
          "note": "Sistemas que NO son considerados como sistemas de IA: automatización puramente basada en reglas, procesamiento de datos básico (ordenar/filtrar), análisis estadístico simple, heurísticas clásicas sin aprendizaje, predicción simple basada en promedios y sistemas sin autonomía alguna.\n\nSistemas que SÍ son considerados como sistemas de IA: aprendizaje automático (supervisado, no supervisado, por refuerzo, aprendizaje profundo), enfoques basados en el conocimiento/la lógica (sistemas expertos, bases de conocimiento), modelos de procesamiento del lenguaje natural, IA generativa, sistemas de recomendación."
        }
      }
    },
    "q_ai_definition_detail": {
      "id": "q_ai_definition_detail",
      "type": "info",
      "stage": "S3",
      "title": "Understanding the AI System Definition",
      "body": "The AI Act defines an AI system through 7 elements. Here are some examples to help you decide:\n\n**IS an AI system:**\n- Email spam detection (supervised learning)\n- Medical diagnostic imaging systems\n- Fraud detection systems\n- Language models (GPT, etc.)\n- Autonomous vehicles\n- Content recommendation engines\n- Expert systems using encoded knowledge\n\n**Is NOT an AI system:**\n- Database sort/filter systems\n- Standard spreadsheet software\n- Sales dashboards showing averages/trends\n- Rule-based automation (if-then rules defined by humans)\n- Simple prediction using historical averages\n- Chess programs using only minimax (no learning)\n- Physics simulations without ML",
      "legal_ref": "Art. 3(1), EC Guidelines on AI System Definition (2025)",
      "next": "q_is_ai_system_retry",
      "previous": "q_is_ai_system",
      "translations": {
        "fr": {
          "title": "Comprendre la définition d'un système d'IA",
          "body": "Le règlement sur l'IA définit un système d'IA à travers 7 éléments. Voici quelques exemples pour vous aider à décider :\n\n**EST un système d'IA :**\n- Détection de spam par e-mail (apprentissage supervisé)\n- Systèmes d'imagerie diagnostique médicale\n- Systèmes de détection de fraude\n- Modèles de langage (GPT, etc.)\n- Véhicules autonomes\n- Moteurs de recommandation de contenu\n- Systèmes experts utilisant des connaissances encodées\n\n**N'est PAS un système d'IA :**\n- Systèmes de tri/filtre de bases de données\n- Logiciels de tableur standard\n- Tableaux de bord de vente affichant des moyennes/tendances\n- Automatisation fondée sur des règles (règles si-alors définies par des humains)\n- Prédiction simple fondée sur des moyennes historiques\n- Programmes d'échecs utilisant uniquement minimax (sans apprentissage)\n- Simulations physiques sans apprentissage automatique"
        },
        "nl": {
          "title": "De definitie van een AI-systeem begrijpen",
          "body": "De AI-verordening definieert een AI-systeem aan de hand van 7 elementen. Hier zijn enkele voorbeelden om u te helpen beslissen:\n\n**IS een AI-systeem:**\n- E-mailspamdetectie (supervised learning)\n- Medische diagnostische beeldvormingssystemen\n- Fraudedetectiesystemen\n- Taalmodellen (GPT, enz.)\n- Autonome voertuigen\n- Contentaanbevelingssystemen\n- Expertsystemen met gecodeerde kennis\n\n**Is GEEN AI-systeem:**\n- Database sorteer-/filtersystemen\n- Standaard spreadsheetsoftware\n- Verkoopdashboards met gemiddelden/trends\n- Op regels gebaseerde automatisering (als-dan-regels gedefinieerd door mensen)\n- Eenvoudige voorspelling op basis van historische gemiddelden\n- Schaakprogramma's die alleen minimax gebruiken (zonder leren)\n- Fysische simulaties zonder machinaal leren"
        },
        "de": {
          "title": "Die Definition eines KI-Systems verstehen",
          "body": "Die KI-Verordnung definiert ein KI-System anhand von 7 Elementen. Hier sind einige Beispiele, die Ihnen bei der Entscheidung helfen:\n\n**IST ein KI-System:**\n- E-Mail-Spam-Erkennung (überwachtes Lernen)\n- Medizinische diagnostische Bildgebungssysteme\n- Betrugserkennungssysteme\n- Sprachmodelle (GPT usw.)\n- Autonome Fahrzeuge\n- Inhaltsempfehlungssysteme\n- Expertensysteme mit kodiertem Wissen\n\n**Ist KEIN KI-System:**\n- Datenbank-Sortier-/Filtersysteme\n- Standard-Tabellenkalkulationssoftware\n- Verkaufs-Dashboards mit Durchschnitts-/Trendanzeigen\n- Regelbasierte Automatisierung (von Menschen definierte Wenn-Dann-Regeln)\n- Einfache Vorhersage anhand historischer Durchschnittswerte\n- Schachprogramme, die nur Minimax verwenden (ohne Lernen)\n- Physiksimulationen ohne maschinelles Lernen"
        },
        "es": {
          "title": "Comprender la definición de sistema de IA",
          "body": "El Reglamento de IA define un sistema de IA a través de 7 elementos. Aquí hay algunos ejemplos para ayudarle a decidir:\n\n**ES un sistema de IA:**\n- Detección de spam en correo electrónico (aprendizaje supervisado)\n- Sistemas de diagnóstico por imagen médica\n- Sistemas de detección de fraude\n- Modelos de lenguaje (GPT, etc.)\n- Vehículos autónomos\n- Motores de recomendación de contenido\n- Sistemas expertos que utilizan conocimiento codificado\n\n**NO es un sistema de IA:**\n- Sistemas de ordenación/filtrado de bases de datos\n- Software de hoja de cálculo estándar\n- Paneles de ventas que muestran promedios/tendencias\n- Automatización basada en reglas (reglas si-entonces definidas por humanos)\n- Predicción simple basada en promedios históricos\n- Programas de ajedrez que solo usan minimax (sin aprendizaje)\n- Simulaciones físicas sin aprendizaje automático"
        }
      }
    },
    "q_is_ai_system_retry": {
      "id": "q_is_ai_system_retry",
      "type": "question",
      "stage": "S3",
      "title": "AI System Definition — Your Assessment",
      "body": "Based on the examples above, does your system qualify as an AI system under the AI Act?",
      "legal_ref": "Art. 3(1)",
      "attribute": "is_ai_system",
      "options": [
        {
          "label": "Yes — it qualifies as an AI system",
          "value": true,
          "next": "q_is_gpai_model",
          "translations": {
            "fr": {
              "label": "Oui — il est considéré comme un système d'IA"
            },
            "nl": {
              "label": "Ja — het kwalificeert als een AI-systeem"
            },
            "de": {
              "label": "Ja — es qualifiziert sich als KI-System"
            },
            "es": {
              "label": "Sí — se considera un sistema de IA"
            }
          }
        },
        {
          "label": "No — it does not qualify",
          "value": false,
          "next": "result_not_ai_system",
          "translations": {
            "fr": {
              "label": "Non — il n'est pas considéré comme un système d'IA"
            },
            "nl": {
              "label": "Nee — het kwalificeert niet als een AI-systeem"
            },
            "de": {
              "label": "Nein — es qualifiziert sich nicht als KI-System"
            },
            "es": {
              "label": "No — no se considera un sistema de IA"
            }
          }
        },
        {
          "label": "Still unsure — I need expert advice",
          "value": "unsure",
          "flag": "consult_expert",
          "next": "result_consult_expert_ai_definition",
          "translations": {
            "fr": {
              "label": "Toujours incertain — j'ai besoin d'un avis d'expert"
            },
            "nl": {
              "label": "Nog steeds onzeker — ik heb deskundig advies nodig"
            },
            "de": {
              "label": "Immer noch unsicher — ich benötige Expertenrat"
            },
            "es": {
              "label": "Sigo sin estar seguro — necesito asesoramiento experto"
            }
          }
        }
      ],
      "previous": "q_ai_definition_detail",
      "translations": {
        "fr": {
          "title": "Définition d'un système d'IA — Votre évaluation",
          "body": "Sur la base des exemples ci-dessus, votre système est-il considéré comme un système d'IA au sens du règlement sur l'IA ?"
        },
        "nl": {
          "title": "Definitie van een AI-systeem — Uw beoordeling",
          "body": "Kwalificeert uw systeem, op basis van de bovenstaande voorbeelden, als een AI-systeem onder de AI-verordening?"
        },
        "de": {
          "title": "Definition eines KI-Systems — Ihre Einschätzung",
          "body": "Qualifiziert sich Ihr System auf Grundlage der obigen Beispiele als KI-System im Sinne der KI-Verordnung?"
        },
        "es": {
          "title": "Definición de sistema de IA — Su evaluación",
          "body": "Basándose en los ejemplos anteriores, ¿su sistema se considera un sistema de IA en virtud del Reglamento de IA?"
        }
      }
    },
    "q_is_gpai_model": {
      "id": "q_is_gpai_model",
      "type": "question",
      "stage": "S4",
      "title": "General-Purpose AI Model",
      "body": "Is your system (or does it integrate) a **general-purpose AI model** — i.e., a model that:\n- Displays significant generality\n- Is capable of competently performing a wide range of distinct tasks\n- Can be integrated into a variety of downstream systems or applications\n- Was typically trained with a large amount of data using self-supervision at scale?",
      "legal_ref": "Art. 3(63)",
      "note": "Examples: Large language models (GPT, Claude, Llama, Mistral), large multimodal models, foundation models. This does NOT include narrow/task-specific AI models.",
      "attribute": "gpai_role",
      "options": [
        {
          "label": "Yes — I provide (develop/distribute) a GPAI model",
          "value": "provider",
          "set": {
            "is_gpai_model": true
          },
          "next": "q_gpai_systemic_risk",
          "translations": {
            "fr": {
              "label": "Oui — je fournis (développe/distribue) un modèle d'IA à usage général"
            },
            "nl": {
              "label": "Ja — ik bied (ontwikkel/distribueer) een AI-model voor algemene doeleinden aan"
            },
            "de": {
              "label": "Ja — ich biete (entwickle/vertreibe) ein KI-Modell mit allgemeinem Verwendungszweck an"
            },
            "es": {
              "label": "Sí — proporciono (desarrollo/distribuyo) un modelo de IA de uso general"
            }
          }
        },
        {
          "label": "Yes — I use/deploy a system that integrates a GPAI model",
          "value": "deployer",
          "set": {
            "is_gpai_model": true
          },
          "next": "q_prohibited_subliminal",
          "translations": {
            "fr": {
              "label": "Oui — j'utilise/déploie un système qui intègre un modèle d'IA à usage général"
            },
            "nl": {
              "label": "Ja — ik gebruik/zet een systeem in dat een AI-model voor algemene doeleinden integreert"
            },
            "de": {
              "label": "Ja — ich nutze/betreibe ein System, das ein KI-Modell mit allgemeinem Verwendungszweck integriert"
            },
            "es": {
              "label": "Sí — utilizo/despliego un sistema que integra un modelo de IA de uso general"
            }
          }
        },
        {
          "label": "No — it is not a GPAI model",
          "value": "none",
          "set": {
            "is_gpai_model": false
          },
          "next": "q_prohibited_subliminal",
          "translations": {
            "fr": {
              "label": "Non — ce n'est pas un modèle d'IA à usage général"
            },
            "nl": {
              "label": "Nee — het is geen AI-model voor algemene doeleinden"
            },
            "de": {
              "label": "Nein — es ist kein KI-Modell mit allgemeinem Verwendungszweck"
            },
            "es": {
              "label": "No — no es un modelo de IA de uso general"
            }
          }
        }
      ],
      "previous": [
        "q_is_ai_system",
        "q_is_ai_system_retry"
      ],
      "translations": {
        "fr": {
          "title": "Modèle d'IA à usage général",
          "body": "Votre système est-il (ou intègre-t-il) un **modèle d'IA à usage général** — c'est-à-dire un modèle qui :\n- Fait preuve d'une généralité significative\n- Est capable d'exécuter de manière compétente un large éventail de tâches distinctes\n- Peut être intégré dans divers systèmes ou applications en aval\n- A généralement été entraîné avec une grande quantité de données en utilisant l'auto-supervision à grande échelle ?",
          "note": "Exemples : grands modèles de langage (GPT, Claude, Llama, Mistral), grands modèles multimodaux, modèles de fondation. Cela n'inclut PAS les modèles d'IA étroits/spécifiques à une tâche."
        },
        "nl": {
          "title": "AI-model voor algemene doeleinden",
          "body": "Is uw systeem (of integreert het) een **AI-model voor algemene doeleinden** — d.w.z. een model dat:\n- Aanzienlijke algemeenheid vertoont\n- In staat is om een breed scala aan verschillende taken vakkundig uit te voeren\n- Kan worden geïntegreerd in diverse downstreamsystemen of -toepassingen\n- Doorgaans is getraind met een grote hoeveelheid gegevens via zelfsupervisie op grote schaal?",
          "note": "Voorbeelden: grote taalmodellen (GPT, Claude, Llama, Mistral), grote multimodale modellen, foundation-modellen. Dit omvat NIET smalle/taakspecifieke AI-modellen."
        },
        "de": {
          "title": "KI-Modell mit allgemeinem Verwendungszweck",
          "body": "Ist Ihr System (oder integriert es) ein **KI-Modell mit allgemeinem Verwendungszweck** — d. h. ein Modell, das:\n- Erhebliche Allgemeinheit aufweist\n- In der Lage ist, ein breites Spektrum unterschiedlicher Aufgaben kompetent auszuführen\n- In verschiedene nachgelagerte Systeme oder Anwendungen integriert werden kann\n- Typischerweise mit einer großen Datenmenge mittels Selbstüberwachung in großem Maßstab trainiert wurde?",
          "note": "Beispiele: große Sprachmodelle (GPT, Claude, Llama, Mistral), große multimodale Modelle, Grundlagenmodelle (Foundation Models). Dies umfasst NICHT enge/aufgabenspezifische KI-Modelle."
        },
        "es": {
          "title": "Modelo de IA de uso general",
          "body": "¿Su sistema es (o integra) un **modelo de IA de uso general**, es decir, un modelo que:\n- Muestra una generalidad significativa\n- Es capaz de realizar de manera competente una amplia gama de tareas distintas\n- Puede integrarse en diversos sistemas o aplicaciones posteriores\n- Fue típicamente entrenado con una gran cantidad de datos utilizando autosupervisión a gran escala?",
          "note": "Ejemplos: grandes modelos de lenguaje (GPT, Claude, Llama, Mistral), grandes modelos multimodales, modelos fundacionales. Esto NO incluye modelos de IA estrechos/específicos para una tarea."
        }
      }
    },
    "q_gpai_systemic_risk": {
      "id": "q_gpai_systemic_risk",
      "type": "question",
      "stage": "S4",
      "title": "GPAI with Systemic Risk",
      "body": "Does your general-purpose AI model have **systemic risk**?\n\nA GPAI model is presumed to have systemic risk if the cumulative amount of computation used for training is **greater than 10²⁵ floating point operations (FLOPs)**.\n\nThe Commission may also designate a model as having systemic risk based on other criteria (Annex XIII): number of registered users, high-impact capabilities, etc.",
      "legal_ref": "Art. 51(1)-(2), Annex XIII",
      "attribute": "gpai_systemic_risk",
      "options": [
        {
          "label": "Yes — training compute > 10²⁵ FLOPs or designated by Commission",
          "value": true,
          "next": "q_gpai_open_source",
          "translations": {
            "fr": {
              "label": "Oui — calcul d'entraînement > 10²⁵ FLOP ou désigné par la Commission"
            },
            "nl": {
              "label": "Ja — trainingsberekening > 10²⁵ FLOP's of aangewezen door de Commissie"
            },
            "de": {
              "label": "Ja — Trainingsberechnung > 10²⁵ FLOPs oder von der Kommission bestimmt"
            },
            "es": {
              "label": "Sí — cálculo de entrenamiento > 10²⁵ FLOP o designado por la Comisión"
            }
          }
        },
        {
          "label": "No — below threshold and not designated",
          "value": false,
          "next": "q_gpai_open_source",
          "translations": {
            "fr": {
              "label": "Non — en dessous du seuil et non désigné"
            },
            "nl": {
              "label": "Nee — onder de drempel en niet aangewezen"
            },
            "de": {
              "label": "Nein — unter dem Schwellenwert und nicht bestimmt"
            },
            "es": {
              "label": "No — por debajo del umbral y no designado"
            }
          }
        },
        {
          "label": "I don't know my training compute",
          "value": "unknown",
          "flag": "consult_expert",
          "next": "q_gpai_open_source",
          "translations": {
            "fr": {
              "label": "Je ne connais pas mon calcul d'entraînement"
            },
            "nl": {
              "label": "Ik ken mijn trainingsberekening niet"
            },
            "de": {
              "label": "Ich kenne meine Trainingsberechnung nicht"
            },
            "es": {
              "label": "No conozco mi cálculo de entrenamiento"
            }
          }
        }
      ],
      "previous": "q_is_gpai_model",
      "translations": {
        "fr": {
          "title": "IA à usage général présentant un risque systémique",
          "body": "Votre modèle d'IA à usage général présente-t-il un **risque systémique** ?\n\nUn modèle d'IA à usage général est présumé présenter un risque systémique si la quantité cumulée de calcul utilisée pour l'entraînement est **supérieure à 10²⁵ opérations en virgule flottante (FLOP)**.\n\nLa Commission peut également désigner un modèle comme présentant un risque systémique sur la base d'autres critères (annexe XIII) : nombre d'utilisateurs enregistrés, capacités à fort impact, etc."
        },
        "nl": {
          "title": "AI voor algemene doeleinden met systeemrisico",
          "body": "Heeft uw AI-model voor algemene doeleinden een **systeemrisico**?\n\nEen AI-model voor algemene doeleinden wordt geacht een systeemrisico te hebben als de cumulatieve hoeveelheid berekeningen die voor de training is gebruikt **groter is dan 10²⁵ drijvende-kommabewerkingen (FLOP's)**.\n\nDe Commissie kan een model ook als systeemrisico aanmerken op basis van andere criteria (bijlage XIII): aantal geregistreerde gebruikers, capaciteiten met grote impact, enz."
        },
        "de": {
          "title": "KI mit allgemeinem Verwendungszweck mit systemischem Risiko",
          "body": "Weist Ihr KI-Modell mit allgemeinem Verwendungszweck ein **systemisches Risiko** auf?\n\nEin KI-Modell mit allgemeinem Verwendungszweck gilt als systemisches Risiko aufweisend, wenn die kumulative Menge an Berechnungen, die für das Training verwendet wurde, **mehr als 10²⁵ Gleitkommaoperationen (FLOPs) beträgt**.\n\nDie Kommission kann ein Modell auch auf der Grundlage anderer Kriterien (Anhang XIII) als systemisches Risiko einstufen: Anzahl registrierter Nutzer, Fähigkeiten mit großer Tragweite usw."
        },
        "es": {
          "title": "IA de uso general con riesgo sistémico",
          "body": "¿Su modelo de IA de uso general presenta un **riesgo sistémico**?\n\nSe presume que un modelo de IA de uso general presenta un riesgo sistémico si la cantidad acumulada de cálculo utilizada para el entrenamiento es **superior a 10²⁵ operaciones de coma flotante (FLOP)**.\n\nLa Comisión también puede designar un modelo como de riesgo sistémico sobre la base de otros criterios (anexo XIII): número de usuarios registrados, capacidades de alto impacto, etc."
        }
      }
    },
    "q_gpai_open_source": {
      "id": "q_gpai_open_source",
      "type": "question",
      "stage": "S4",
      "title": "Open-Source GPAI Model",
      "body": "Is your GPAI model released under a **free and open-source licence** where the parameters (weights), model architecture information, and model usage information are all made **publicly available**?",
      "legal_ref": "Art. 53(2)",
      "note": "Even if open-source, the exemption does NOT apply to GPAI models with systemic risk. Open-source GPAI models must still comply with copyright policy (Art. 53(1)(c)) and training data summary (Art. 53(1)(d)) obligations.",
      "attribute": "gpai_open_source",
      "options": [
        {
          "label": "Yes — fully open-source (weights, architecture, usage info public)",
          "value": true,
          "next": "q_prohibited_subliminal",
          "translations": {
            "fr": {
              "label": "Oui — entièrement en source ouverte (poids, architecture, informations d'utilisation publics)"
            },
            "nl": {
              "label": "Ja — volledig open source (gewichten, architectuur, gebruiksinformatie openbaar)"
            },
            "de": {
              "label": "Ja — vollständig quelloffen (Gewichte, Architektur, Nutzungsinformationen öffentlich)"
            },
            "es": {
              "label": "Sí — totalmente de código abierto (pesos, arquitectura, información de uso públicos)"
            }
          }
        },
        {
          "label": "No — proprietary or partially open",
          "value": false,
          "next": "q_prohibited_subliminal",
          "translations": {
            "fr": {
              "label": "Non — propriétaire ou partiellement ouvert"
            },
            "nl": {
              "label": "Nee — propriëtair of gedeeltelijk open"
            },
            "de": {
              "label": "Nein — proprietär oder teilweise offen"
            },
            "es": {
              "label": "No — propietario o parcialmente abierto"
            }
          }
        }
      ],
      "previous": "q_gpai_systemic_risk",
      "translations": {
        "fr": {
          "title": "Modèle d'IA à usage général en source ouverte",
          "body": "Votre modèle d'IA à usage général est-il publié sous une **licence libre et ouverte** où les paramètres (poids), les informations sur l'architecture du modèle et les informations sur l'utilisation du modèle sont tous rendus **publiquement disponibles** ?",
          "note": "Même en source ouverte, l'exemption ne s'applique PAS aux modèles d'IA à usage général présentant un risque systémique. Les modèles d'IA à usage général en source ouverte doivent toujours respecter les obligations relatives à la politique en matière de droit d'auteur (art. 53(1)(c)) et au résumé des données d'entraînement (art. 53(1)(d))."
        },
        "nl": {
          "title": "Open-source AI-model voor algemene doeleinden",
          "body": "Is uw AI-model voor algemene doeleinden gepubliceerd onder een **vrije en open-sourcelicentie** waarbij de parameters (gewichten), informatie over de modelarchitectuur en informatie over het modelgebruik allemaal **openbaar beschikbaar** zijn gesteld?",
          "note": "Zelfs bij open source geldt de vrijstelling NIET voor AI-modellen voor algemene doeleinden met systeemrisico. Open-source AI-modellen voor algemene doeleinden moeten nog steeds voldoen aan de verplichtingen inzake auteursrechtbeleid (art. 53(1)(c)) en samenvatting van trainingsgegevens (art. 53(1)(d))."
        },
        "de": {
          "title": "Open-Source-KI-Modell mit allgemeinem Verwendungszweck",
          "body": "Wird Ihr KI-Modell mit allgemeinem Verwendungszweck unter einer **freien und quelloffenen Lizenz** veröffentlicht, bei der die Parameter (Gewichte), Informationen zur Modellarchitektur und Informationen zur Modellnutzung alle **öffentlich zugänglich** gemacht werden?",
          "note": "Auch bei Open Source gilt die Ausnahme NICHT für KI-Modelle mit allgemeinem Verwendungszweck mit systemischem Risiko. Open-Source-KI-Modelle mit allgemeinem Verwendungszweck müssen weiterhin die Verpflichtungen in Bezug auf die Urheberrechtspolitik (Art. 53(1)(c)) und die Zusammenfassung der Trainingsdaten (Art. 53(1)(d)) einhalten."
        },
        "es": {
          "title": "Modelo de IA de uso general de código abierto",
          "body": "¿Su modelo de IA de uso general se publica bajo una **licencia libre y de código abierto** en la que los parámetros (pesos), la información sobre la arquitectura del modelo y la información sobre el uso del modelo se ponen **a disposición del público**?",
          "note": "Incluso en código abierto, la exención NO se aplica a los modelos de IA de uso general con riesgo sistémico. Los modelos de IA de uso general de código abierto deben cumplir igualmente las obligaciones relativas a la política de derechos de autor (art. 53(1)(c)) y al resumen de datos de entrenamiento (art. 53(1)(d))."
        }
      }
    },
    "q_prohibited_subliminal": {
      "id": "q_prohibited_subliminal",
      "type": "question",
      "stage": "S5",
      "title": "Prohibited Practice Check: Manipulation",
      "body": "Does your AI system deploy **subliminal, manipulative, or deceptive techniques** with the objective or effect of materially distorting a person's behaviour, impairing their ability to make an informed decision, and causing or likely causing **significant harm**?",
      "legal_ref": "Art. 5(1)(a)",
      "attribute": "prohibited_practice",
      "options": [
        {
          "label": "Yes or possibly",
          "value": "subliminal_manipulation",
          "next": "result_prohibited_manipulation",
          "translations": {
            "fr": {
              "label": "Oui ou possiblement"
            },
            "nl": {
              "label": "Ja of mogelijk"
            },
            "de": {
              "label": "Ja oder möglicherweise"
            },
            "es": {
              "label": "Sí o posiblemente"
            }
          }
        },
        {
          "label": "No",
          "value": "none",
          "next": "q_prohibited_vulnerability",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": [
        "q_is_gpai_model",
        "q_gpai_open_source"
      ],
      "translations": {
        "fr": {
          "title": "Vérification des pratiques interdites : manipulation",
          "body": "Votre système d'IA utilise-t-il des **techniques subliminales, manipulatrices ou trompeuses** ayant pour objectif ou pour effet d'altérer substantiellement le comportement d'une personne, de compromettre sa capacité à prendre une décision éclairée et de causer ou d'être susceptible de causer un **préjudice important** ?"
        },
        "nl": {
          "title": "Controle op verboden praktijken: manipulatie",
          "body": "Maakt uw AI-systeem gebruik van **subliminale, manipulatieve of misleidende technieken** met het doel of het gevolg het gedrag van een persoon wezenlijk te verstoren, het vermogen om een geïnformeerd besluit te nemen te ondermijnen en **aanzienlijke schade** te veroorzaken of waarschijnlijk te veroorzaken?"
        },
        "de": {
          "title": "Überprüfung verbotener Praktiken: Manipulation",
          "body": "Setzt Ihr KI-System **unterschwellige, manipulative oder täuschende Techniken** ein, die darauf abzielen oder bewirken, das Verhalten einer Person wesentlich zu verzerren, ihre Fähigkeit zu einer fundierten Entscheidung zu beeinträchtigen und einen **erheblichen Schaden** zu verursachen oder wahrscheinlich zu verursachen?"
        },
        "es": {
          "title": "Verificación de prácticas prohibidas: manipulación",
          "body": "¿Su sistema de IA emplea **técnicas subliminales, manipuladoras o engañosas** con el objetivo o el efecto de distorsionar materialmente el comportamiento de una persona, menoscabar su capacidad para tomar una decisión informada y causar o ser susceptible de causar un **perjuicio significativo**?"
        }
      }
    },
    "q_prohibited_vulnerability": {
      "id": "q_prohibited_vulnerability",
      "type": "question",
      "stage": "S5",
      "title": "Prohibited Practice Check: Exploiting Vulnerabilities",
      "body": "Does your AI system **exploit vulnerabilities** of persons due to their age, disability, or specific social or economic situation, with the objective or effect of materially distorting their behaviour and causing or likely causing **significant harm**?",
      "legal_ref": "Art. 5(1)(b)",
      "options": [
        {
          "label": "Yes or possibly",
          "value": "vulnerability_exploitation",
          "set": {
            "prohibited_practice": "vulnerability_exploitation"
          },
          "next": "result_prohibited_vulnerability",
          "translations": {
            "fr": {
              "label": "Oui ou possiblement"
            },
            "nl": {
              "label": "Ja of mogelijk"
            },
            "de": {
              "label": "Ja oder möglicherweise"
            },
            "es": {
              "label": "Sí o posiblemente"
            }
          }
        },
        {
          "label": "No",
          "value": "no",
          "next": "q_prohibited_social_scoring",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_prohibited_subliminal",
      "translations": {
        "fr": {
          "title": "Vérification des pratiques interdites : exploitation des vulnérabilités",
          "body": "Votre système d'IA **exploite-t-il les vulnérabilités** de personnes en raison de leur âge, d'un handicap ou d'une situation sociale ou économique spécifique, ayant pour objectif ou pour effet d'altérer substantiellement leur comportement et de causer ou d'être susceptible de causer un **préjudice important** ?"
        },
        "nl": {
          "title": "Controle op verboden praktijken: misbruik van kwetsbaarheden",
          "body": "**Misbruikt** uw AI-systeem **kwetsbaarheden** van personen vanwege hun leeftijd, een handicap of een specifieke sociale of economische situatie, met het doel of het gevolg hun gedrag wezenlijk te verstoren en **aanzienlijke schade** te veroorzaken of waarschijnlijk te veroorzaken?"
        },
        "de": {
          "title": "Überprüfung verbotener Praktiken: Ausnutzung von Schwachstellen",
          "body": "**Nutzt** Ihr KI-System **Schwachstellen** von Personen aufgrund ihres Alters, einer Behinderung oder einer bestimmten sozialen oder wirtschaftlichen Situation aus, mit dem Ziel oder der Wirkung, ihr Verhalten wesentlich zu verzerren und einen **erheblichen Schaden** zu verursachen oder wahrscheinlich zu verursachen?"
        },
        "es": {
          "title": "Verificación de prácticas prohibidas: explotación de vulnerabilidades",
          "body": "¿**Explota** su sistema de IA **las vulnerabilidades** de personas debido a su edad, discapacidad o situación social o económica específica, con el objetivo o el efecto de distorsionar materialmente su comportamiento y causar o ser susceptible de causar un **perjuicio significativo**?"
        }
      }
    },
    "q_prohibited_social_scoring": {
      "id": "q_prohibited_social_scoring",
      "type": "question",
      "stage": "S5",
      "title": "Prohibited Practice Check: Social Scoring",
      "body": "Does your AI system evaluate or classify persons over time based on their **social behaviour** or personal/personality characteristics, where the resulting score leads to **detrimental treatment** that is either:\n- In contexts unrelated to where the data was collected, OR\n- Unjustified or disproportionate to the behaviour?",
      "legal_ref": "Art. 5(1)(c)",
      "options": [
        {
          "label": "Yes or possibly",
          "value": "social_scoring",
          "set": {
            "prohibited_practice": "social_scoring"
          },
          "next": "result_prohibited_social_scoring",
          "translations": {
            "fr": {
              "label": "Oui ou possiblement"
            },
            "nl": {
              "label": "Ja of mogelijk"
            },
            "de": {
              "label": "Ja oder möglicherweise"
            },
            "es": {
              "label": "Sí o posiblemente"
            }
          }
        },
        {
          "label": "No",
          "value": "no",
          "next": "q_prohibited_predictive_policing",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_prohibited_vulnerability",
      "translations": {
        "fr": {
          "title": "Vérification des pratiques interdites : notation sociale",
          "body": "Votre système d'IA évalue-t-il ou classe-t-il des personnes dans le temps sur la base de leur **comportement social** ou de leurs caractéristiques personnelles/de personnalité, lorsque le score qui en résulte conduit à un **traitement préjudiciable** qui est soit :\n- Dans des contextes sans rapport avec celui où les données ont été collectées, SOIT\n- Injustifié ou disproportionné par rapport au comportement ?"
        },
        "nl": {
          "title": "Controle op verboden praktijken: sociale scoring",
          "body": "Beoordeelt of classificeert uw AI-systeem personen in de loop van de tijd op basis van hun **sociaal gedrag** of persoonlijke/persoonlijkheidskenmerken, waarbij de resulterende score leidt tot **nadelige behandeling** die ofwel:\n- Plaatsvindt in contexten die geen verband houden met de context waarin de gegevens zijn verzameld, OF\n- Ongerechtvaardigd of onevenredig is in verhouding tot het gedrag?"
        },
        "de": {
          "title": "Überprüfung verbotener Praktiken: Sozialbewertung",
          "body": "Bewertet oder klassifiziert Ihr KI-System Personen im Laufe der Zeit auf der Grundlage ihres **Sozialverhaltens** oder ihrer persönlichen Eigenschaften/Persönlichkeitsmerkmale, wobei die resultierende Bewertung zu einer **nachteiligen Behandlung** führt, die entweder:\n- In Kontexten erfolgt, die keinen Bezug zu dem Kontext haben, in dem die Daten erhoben wurden, ODER\n- Ungerechtfertigt oder unverhältnismäßig im Verhältnis zum Verhalten ist?"
        },
        "es": {
          "title": "Verificación de prácticas prohibidas: puntuación social",
          "body": "¿Su sistema de IA evalúa o clasifica a personas a lo largo del tiempo en función de su **comportamiento social** o de sus características personales/de personalidad, cuando la puntuación resultante conduce a un **trato perjudicial** que es:\n- En contextos no relacionados con el contexto en el que se recogieron los datos, O\n- Injustificado o desproporcionado en relación con el comportamiento?"
        }
      }
    },
    "q_prohibited_predictive_policing": {
      "id": "q_prohibited_predictive_policing",
      "type": "question",
      "stage": "S5",
      "title": "Prohibited Practice Check: Predictive Policing",
      "body": "Does your AI system assess or predict the risk of a person committing a criminal offence, based **solely** on profiling or personality traits/characteristics?\n\n(This does NOT cover systems supporting human assessment based on objective, verifiable facts directly linked to criminal activity.)",
      "legal_ref": "Art. 5(1)(d)",
      "options": [
        {
          "label": "Yes or possibly",
          "value": "predictive_policing",
          "set": {
            "prohibited_practice": "predictive_policing"
          },
          "next": "result_prohibited_predictive_policing",
          "translations": {
            "fr": {
              "label": "Oui ou possiblement"
            },
            "nl": {
              "label": "Ja of mogelijk"
            },
            "de": {
              "label": "Ja oder möglicherweise"
            },
            "es": {
              "label": "Sí o posiblemente"
            }
          }
        },
        {
          "label": "No",
          "value": "no",
          "next": "q_prohibited_facial_scraping",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_prohibited_social_scoring",
      "translations": {
        "fr": {
          "title": "Vérification des pratiques interdites : police prédictive",
          "body": "Votre système d'IA évalue-t-il ou prédit-il le risque qu'une personne commette une infraction pénale, en se fondant **uniquement** sur le profilage ou les traits/caractéristiques de personnalité ?\n\n(Ceci ne couvre PAS les systèmes appuyant l'évaluation humaine fondée sur des faits objectifs et vérifiables directement liés à une activité criminelle.)"
        },
        "nl": {
          "title": "Controle op verboden praktijken: voorspellend politiewerk",
          "body": "Beoordeelt of voorspelt uw AI-systeem het risico dat een persoon een strafbaar feit pleegt, **uitsluitend** op basis van profilering of persoonlijkheidskenmerken/-eigenschappen?\n\n(Dit heeft GEEN betrekking op systemen die menselijke beoordeling ondersteunen op basis van objectieve, verifieerbare feiten die rechtstreeks verband houden met criminele activiteiten.)"
        },
        "de": {
          "title": "Überprüfung verbotener Praktiken: vorausschauende Polizeiarbeit",
          "body": "Bewertet oder prognostiziert Ihr KI-System das Risiko, dass eine Person eine Straftat begeht, **ausschließlich** auf der Grundlage von Profiling oder Persönlichkeitsmerkmalen/-eigenschaften?\n\n(Dies umfasst NICHT Systeme, die die menschliche Bewertung auf der Grundlage objektiver, überprüfbarer Tatsachen unterstützen, die in direktem Zusammenhang mit kriminellen Aktivitäten stehen.)"
        },
        "es": {
          "title": "Verificación de prácticas prohibidas: vigilancia policial predictiva",
          "body": "¿Su sistema de IA evalúa o predice el riesgo de que una persona cometa un delito penal, basándose **únicamente** en la elaboración de perfiles o en rasgos/características de personalidad?\n\n(Esto NO cubre los sistemas que apoyan la evaluación humana basada en hechos objetivos y verificables directamente vinculados a actividades delictivas.)"
        }
      }
    },
    "q_prohibited_facial_scraping": {
      "id": "q_prohibited_facial_scraping",
      "type": "question",
      "stage": "S5",
      "title": "Prohibited Practice Check: Facial Recognition Database Scraping",
      "body": "Does your AI system create or expand **facial recognition databases** through untargeted scraping of facial images from the internet or CCTV footage?",
      "legal_ref": "Art. 5(1)(e)",
      "options": [
        {
          "label": "Yes or possibly",
          "value": "facial_scraping",
          "set": {
            "prohibited_practice": "facial_db_scraping"
          },
          "next": "result_prohibited_facial_scraping",
          "translations": {
            "fr": {
              "label": "Oui ou possiblement"
            },
            "nl": {
              "label": "Ja of mogelijk"
            },
            "de": {
              "label": "Ja oder möglicherweise"
            },
            "es": {
              "label": "Sí o posiblemente"
            }
          }
        },
        {
          "label": "No",
          "value": "no",
          "next": "q_prohibited_emotion_work_edu",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_prohibited_predictive_policing",
      "translations": {
        "fr": {
          "title": "Vérification des pratiques interdites : extraction de données de reconnaissance faciale",
          "body": "Votre système d'IA crée-t-il ou élargit-il des **bases de données de reconnaissance faciale** par l'extraction non ciblée d'images faciales à partir d'internet ou de la vidéosurveillance ?"
        },
        "nl": {
          "title": "Controle op verboden praktijken: scraping van gezichtsherkenningsdatabases",
          "body": "Creëert of breidt uw AI-systeem **gezichtsherkenningsdatabases** uit door ongerichte scraping van gezichtsafbeeldingen van het internet of CCTV-beelden?"
        },
        "de": {
          "title": "Überprüfung verbotener Praktiken: Sammeln von Gesichtserkennungsdaten",
          "body": "Erstellt oder erweitert Ihr KI-System **Gesichtserkennungsdatenbanken** durch ungezieltes Sammeln von Gesichtsbildern aus dem Internet oder von Videoüberwachungsaufnahmen?"
        },
        "es": {
          "title": "Verificación de prácticas prohibidas: extracción de datos de reconocimiento facial",
          "body": "¿Su sistema de IA crea o amplía **bases de datos de reconocimiento facial** mediante la extracción no selectiva de imágenes faciales de internet o de circuitos cerrados de televisión?"
        }
      }
    },
    "q_prohibited_emotion_work_edu": {
      "id": "q_prohibited_emotion_work_edu",
      "type": "question",
      "stage": "S5",
      "title": "Prohibited Practice Check: Emotion Recognition in Workplace/Education",
      "body": "Does your AI system infer the emotions of persons in the **workplace** or in **education institutions**?\n\n(Exception: systems intended for medical or safety reasons are NOT prohibited.)",
      "legal_ref": "Art. 5(1)(f)",
      "options": [
        {
          "label": "Yes — and it is NOT for medical or safety purposes",
          "value": "prohibited",
          "set": {
            "prohibited_practice": "emotion_workplace_education"
          },
          "next": "result_prohibited_emotion",
          "translations": {
            "fr": {
              "label": "Oui — et ce n'est PAS à des fins médicales ou de sécurité"
            },
            "nl": {
              "label": "Ja — en het is NIET voor medische of veiligheidsdoeleinden"
            },
            "de": {
              "label": "Ja — und es dient NICHT medizinischen oder Sicherheitszwecken"
            },
            "es": {
              "label": "Sí — y NO es con fines médicos o de seguridad"
            }
          }
        },
        {
          "label": "Yes — but it IS for medical or safety purposes",
          "value": "exempt",
          "next": "q_prohibited_biometric_sensitive",
          "translations": {
            "fr": {
              "label": "Oui — mais c'est à des fins médicales ou de sécurité"
            },
            "nl": {
              "label": "Ja — maar het IS voor medische of veiligheidsdoeleinden"
            },
            "de": {
              "label": "Ja — aber es dient medizinischen oder Sicherheitszwecken"
            },
            "es": {
              "label": "Sí — pero ES con fines médicos o de seguridad"
            }
          }
        },
        {
          "label": "No",
          "value": "no",
          "next": "q_prohibited_biometric_sensitive",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_prohibited_facial_scraping",
      "translations": {
        "fr": {
          "title": "Vérification des pratiques interdites : reconnaissance des émotions sur le lieu de travail/dans l'éducation",
          "body": "Votre système d'IA infère-t-il les émotions de personnes sur le **lieu de travail** ou dans les **établissements d'enseignement** ?\n\n(Exception : les systèmes destinés à des fins médicales ou de sécurité ne sont PAS interdits.)"
        },
        "nl": {
          "title": "Controle op verboden praktijken: emotieherkenning op de werkplek/in het onderwijs",
          "body": "Leidt uw AI-systeem emoties af van personen op de **werkplek** of in **onderwijsinstellingen**?\n\n(Uitzondering: systemen bedoeld voor medische of veiligheidsdoeleinden zijn NIET verboden.)"
        },
        "de": {
          "title": "Überprüfung verbotener Praktiken: Emotionserkennung am Arbeitsplatz/im Bildungswesen",
          "body": "Leitet Ihr KI-System die Emotionen von Personen am **Arbeitsplatz** oder in **Bildungseinrichtungen** ab?\n\n(Ausnahme: Systeme, die für medizinische oder Sicherheitszwecke bestimmt sind, sind NICHT verboten.)"
        },
        "es": {
          "title": "Verificación de prácticas prohibidas: reconocimiento de emociones en el lugar de trabajo/educación",
          "body": "¿Su sistema de IA infiere las emociones de personas en el **lugar de trabajo** o en **centros educativos**?\n\n(Excepción: los sistemas destinados a fines médicos o de seguridad NO están prohibidos.)"
        }
      }
    },
    "q_prohibited_biometric_sensitive": {
      "id": "q_prohibited_biometric_sensitive",
      "type": "question",
      "stage": "S5",
      "title": "Prohibited Practice Check: Biometric Categorisation by Sensitive Attributes",
      "body": "Does your AI system categorise persons based on their **biometric data** to deduce or infer their race, political opinions, trade union membership, religious/philosophical beliefs, sex life, or sexual orientation?\n\n(Exception: labelling/filtering of lawfully acquired biometric datasets and categorisation for law enforcement are NOT covered by this prohibition.)",
      "legal_ref": "Art. 5(1)(g)",
      "options": [
        {
          "label": "Yes or possibly",
          "value": "biometric_sensitive",
          "set": {
            "prohibited_practice": "biometric_categorisation_sensitive"
          },
          "next": "result_prohibited_biometric_sensitive",
          "translations": {
            "fr": {
              "label": "Oui ou possiblement"
            },
            "nl": {
              "label": "Ja of mogelijk"
            },
            "de": {
              "label": "Ja oder möglicherweise"
            },
            "es": {
              "label": "Sí o posiblemente"
            }
          }
        },
        {
          "label": "No",
          "value": "no",
          "next": "q_prohibited_realtime_rbi",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_prohibited_emotion_work_edu",
      "translations": {
        "fr": {
          "title": "Vérification des pratiques interdites : catégorisation biométrique par attributs sensibles",
          "body": "Votre système d'IA catégorise-t-il des personnes sur la base de leurs **données biométriques** pour déduire ou inférer leur race, opinions politiques, appartenance syndicale, convictions religieuses/philosophiques, vie sexuelle ou orientation sexuelle ?\n\n(Exception : l'étiquetage/filtrage d'ensembles de données biométriques légalement acquis et la catégorisation à des fins répressives ne sont PAS couverts par cette interdiction.)"
        },
        "nl": {
          "title": "Controle op verboden praktijken: biometrische categorisering op basis van gevoelige kenmerken",
          "body": "Categoriseert uw AI-systeem personen op basis van hun **biometrische gegevens** om hun ras, politieke opvattingen, vakbondslidmaatschap, religieuze/levensbeschouwelijke overtuigingen, seksleven of seksuele geaardheid af te leiden?\n\n(Uitzondering: labeling/filtering van rechtmatig verkregen biometrische datasets en categorisering ten behoeve van rechtshandhaving vallen NIET onder dit verbod.)"
        },
        "de": {
          "title": "Überprüfung verbotener Praktiken: biometrische Kategorisierung nach sensiblen Merkmalen",
          "body": "Kategorisiert Ihr KI-System Personen auf der Grundlage ihrer **biometrischen Daten**, um ihre Rasse, politische Meinungen, Gewerkschaftszugehörigkeit, religiöse/weltanschauliche Überzeugungen, Sexualleben oder sexuelle Orientierung abzuleiten?\n\n(Ausnahme: Die Kennzeichnung/Filterung rechtmäßig erworbener biometrischer Datensätze und die Kategorisierung für Strafverfolgungszwecke fallen NICHT unter dieses Verbot.)"
        },
        "es": {
          "title": "Verificación de prácticas prohibidas: categorización biométrica por atributos sensibles",
          "body": "¿Su sistema de IA categoriza a personas basándose en sus **datos biométricos** para deducir o inferir su raza, opiniones políticas, afiliación sindical, convicciones religiosas/filosóficas, vida sexual u orientación sexual?\n\n(Excepción: el etiquetado/filtrado de conjuntos de datos biométricos legalmente adquiridos y la categorización con fines de aplicación de la ley NO están cubiertos por esta prohibición.)"
        }
      }
    },
    "q_prohibited_realtime_rbi": {
      "id": "q_prohibited_realtime_rbi",
      "type": "question",
      "stage": "S5",
      "title": "Prohibited Practice Check: Real-Time Remote Biometric Identification",
      "body": "Does your AI system perform **real-time remote biometric identification** in publicly accessible spaces for the purposes of **law enforcement**?\n\n(Narrow exceptions exist for: the targeted search for victims of abduction, trafficking or sexual exploitation and for missing persons; preventing a specific and imminent threat to life or a genuine and foreseeable threat of a terrorist attack; and locating or identifying suspects of serious crimes punishable by a custodial sentence for a maximum period of at least four years — all requiring prior authorisation by a judicial or independent administrative authority per Art. 5(2)-(3).)",
      "legal_ref": "Art. 5(1)(h)",
      "options": [
        {
          "label": "Yes — and none of the narrow law enforcement exceptions apply",
          "value": "prohibited",
          "set": {
            "prohibited_practice": "realtime_rbi_law_enforcement"
          },
          "next": "result_prohibited_realtime_rbi",
          "translations": {
            "fr": {
              "label": "Oui — et aucune des exceptions limitées en matière répressive ne s'applique"
            },
            "nl": {
              "label": "Ja — en geen van de beperkte uitzonderingen voor rechtshandhaving is van toepassing"
            },
            "de": {
              "label": "Ja — und keine der eingeschränkten Ausnahmen für die Strafverfolgung gilt"
            },
            "es": {
              "label": "Sí — y ninguna de las excepciones limitadas en materia de aplicación de la ley es aplicable"
            }
          }
        },
        {
          "label": "Yes — but a narrow exception applies (with judicial authorisation)",
          "value": "exception",
          "next": "q_high_risk_annex_i",
          "translations": {
            "fr": {
              "label": "Oui — mais une exception limitée s'applique (avec autorisation judiciaire)"
            },
            "nl": {
              "label": "Ja — maar een beperkte uitzondering is van toepassing (met rechterlijke toestemming)"
            },
            "de": {
              "label": "Ja — aber eine eingeschränkte Ausnahme gilt (mit richterlicher Genehmigung)"
            },
            "es": {
              "label": "Sí — pero se aplica una excepción limitada (con autorización judicial)"
            }
          }
        },
        {
          "label": "No",
          "value": "no",
          "next": "q_high_risk_annex_i",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_prohibited_biometric_sensitive",
      "translations": {
        "fr": {
          "title": "Vérification des pratiques interdites : identification biométrique à distance en temps réel",
          "body": "Votre système d'IA effectue-t-il une **identification biométrique à distance en temps réel** dans des espaces accessibles au public à des fins **répressives** ?\n\n(Des exceptions limitées existent pour : la recherche de victimes d'enlèvement/de traite, la prévention de menaces imminentes pour la vie, et la localisation de suspects de crimes graves passibles d'une peine ou d'une mesure de sûreté privatives de liberté d'une durée maximale d'au moins quatre ans — toutes nécessitant une autorisation judiciaire préalable conformément à l'art. 5(2)-(3).)"
        },
        "nl": {
          "title": "Controle op verboden praktijken: biometrische identificatie op afstand in real time",
          "body": "Voert uw AI-systeem **biometrische identificatie op afstand in real time** uit in openbaar toegankelijke ruimten ten behoeve van **rechtshandhaving**?\n\n(Er bestaan beperkte uitzonderingen voor: het zoeken naar slachtoffers van ontvoering/mensenhandel, het voorkomen van onmiddellijke bedreigingen voor het leven, en het lokaliseren van verdachten van ernstige misdrijven waarop een vrijheidsstraf of een tot vrijheidsbeneming strekkende maatregel met een maximumduur van ten minste vier jaar staat — alle vereisen voorafgaande rechterlijke toestemming conform art. 5(2)-(3).)"
        },
        "de": {
          "title": "Überprüfung verbotener Praktiken: biometrische Echtzeit-Fernidentifizierung",
          "body": "Führt Ihr KI-System eine **biometrische Echtzeit-Fernidentifizierung** in öffentlich zugänglichen Räumen zu Zwecken der **Strafverfolgung** durch?\n\n(Eingeschränkte Ausnahmen bestehen für: die Suche nach Opfern von Entführung/Menschenhandel, die Abwehr unmittelbarer Bedrohungen für das Leben und die Lokalisierung von Verdächtigen schwerer Straftaten, die mit einer Freiheitsstrafe oder einer freiheitsentziehenden Maßregel im Höchstmaß von mindestens vier Jahren bedroht sind — alle erfordern eine vorherige richterliche Genehmigung gemäß Art. 5(2)-(3).)"
        },
        "es": {
          "title": "Verificación de prácticas prohibidas: identificación biométrica remota en tiempo real",
          "body": "¿Su sistema de IA realiza **identificación biométrica remota en tiempo real** en espacios de acceso público con fines de **aplicación de la ley**?\n\n(Existen excepciones limitadas para: la búsqueda de víctimas de secuestro/trata, la prevención de amenazas inminentes para la vida y la localización de sospechosos de delitos graves castigados con una pena o medida de seguridad privativas de libertad cuya duración máxima sea de al menos cuatro años — todas requieren autorización judicial previa conforme al art. 5(2)-(3).)"
        }
      }
    },
    "q_high_risk_annex_i": {
      "id": "q_high_risk_annex_i",
      "type": "question",
      "stage": "S6",
      "title": "High-Risk: Safety Component of Regulated Product (Annex I)",
      "body": "Is your AI system:\n- A **safety component** of a product covered by EU harmonisation legislation (listed below), OR\n- **Itself a product** covered by that legislation?\n\nAND is the product required to undergo **third-party conformity assessment**?\n\n**Annex I, Section A** (full AI Act requirements apply):\nMachinery, Toys, Recreational craft, Lifts, ATEX equipment, Radio equipment, Pressure equipment, Cableway installations, PPE, Gas appliances, Medical devices (MDR), In-vitro diagnostics (IVDR)\n\n**Annex I, Section B** (limited requirements):\nCivil aviation (Regulation (EU) 2018/1139), Two/three-wheelers, Agricultural vehicles, Marine equipment, Rail systems, Motor vehicles, Unmanned aircraft",
      "legal_ref": "Art. 6(1), Annex I",
      "attribute": "high_risk_pathway",
      "options": [
        {
          "label": "Yes — Section A product (e.g., medical device, machinery, toy with AI)",
          "value": "annex_i_safety_component",
          "set": {
            "annex_i_section": "section_a"
          },
          "next": "q_annex_i_legislation",
          "translations": {
            "fr": {
              "label": "Oui — Produit de la Section A (ex. : dispositif médical, machine, jouet avec IA)"
            },
            "nl": {
              "label": "Ja — Sectie A-product (bv. medisch hulpmiddel, machine, speelgoed met AI)"
            },
            "de": {
              "label": "Ja — Produkt aus Abschnitt A (z. B. Medizinprodukt, Maschine, Spielzeug mit KI)"
            },
            "es": {
              "label": "Sí — Producto de la Sección A (ej.: producto sanitario, maquinaria, juguete con IA)"
            }
          }
        },
        {
          "label": "Yes — Section B product (e.g., motor vehicle, aircraft, rail)",
          "value": "annex_i_safety_component",
          "set": {
            "annex_i_section": "section_b",
            "risk_category": "high_risk_annex_i"
          },
          "next": "result_high_risk_section_b",
          "translations": {
            "fr": {
              "label": "Oui — Produit de la Section B (ex. : véhicule à moteur, aéronef, transport ferroviaire)"
            },
            "nl": {
              "label": "Ja — Sectie B-product (bv. motorvoertuig, luchtvaartuig, spoorvervoer)"
            },
            "de": {
              "label": "Ja — Produkt aus Abschnitt B (z. B. Kraftfahrzeug, Luftfahrzeug, Schienenverkehr)"
            },
            "es": {
              "label": "Sí — Producto de la Sección B (ej.: vehículo de motor, aeronave, transporte ferroviario)"
            }
          }
        },
        {
          "label": "No — not a safety component of a regulated product",
          "value": "none",
          "next": "q_high_risk_annex_iii_area",
          "translations": {
            "fr": {
              "label": "Non — pas un composant de sécurité d'un produit réglementé"
            },
            "nl": {
              "label": "Nee — geen veiligheidscomponent van een gereguleerd product"
            },
            "de": {
              "label": "Nein — keine Sicherheitskomponente eines regulierten Produkts"
            },
            "es": {
              "label": "No — no es un componente de seguridad de un producto regulado"
            }
          }
        },
        {
          "label": "I'm not sure",
          "value": "unsure",
          "flag": "consult_expert",
          "next": "q_high_risk_annex_iii_area",
          "translations": {
            "fr": {
              "label": "Je ne suis pas sûr"
            },
            "nl": {
              "label": "Ik weet het niet zeker"
            },
            "de": {
              "label": "Ich bin nicht sicher"
            },
            "es": {
              "label": "No estoy seguro/a"
            }
          }
        }
      ],
      "previous": "q_prohibited_realtime_rbi",
      "translations": {
        "fr": {
          "title": "Haut risque : composant de sécurité d'un produit réglementé (Annexe I)",
          "body": "Votre système d'IA est-il :\n- Un **composant de sécurité** d'un produit couvert par la législation d'harmonisation de l'Union (listée ci-dessous), OU\n- **Le produit lui-même** couvert par cette législation ?\n\nET le produit est-il soumis à une **évaluation de conformité par un tiers** ?\n\n**Annexe I, Section A** (l'ensemble des exigences du Règlement IA s'applique) :\nMachines, Jouets, Bateaux de plaisance, Ascenseurs, Équipements ATEX, Équipements radioélectriques, Équipements sous pression, Installations à câbles, EPI, Appareils à gaz, Dispositifs médicaux (MDR), Dispositifs médicaux de diagnostic in vitro (IVDR)\n\n**Annexe I, Section B** (exigences limitées) :\nAviation civile (Règlement (UE) 2018/1139), Véhicules à deux/trois roues, Véhicules agricoles, Équipements marins, Systèmes ferroviaires, Véhicules à moteur, Aéronefs sans équipage"
        },
        "nl": {
          "title": "Hoog risico: veiligheidscomponent van een gereguleerd product (Bijlage I)",
          "body": "Is uw AI-systeem:\n- Een **veiligheidscomponent** van een product dat onder EU-harmonisatiewetgeving valt (hieronder vermeld), OF\n- **Zelf een product** dat onder die wetgeving valt?\n\nEN is het product onderworpen aan een **conformiteitsbeoordeling door een derde partij**?\n\n**Bijlage I, Sectie A** (volledige AI-verordening van toepassing):\nMachines, Speelgoed, Pleziervaartuigen, Liften, ATEX-apparatuur, Radioapparatuur, Drukapparatuur, Kabelbaaninstallaties, PBM, Gastoestellen, Medische hulpmiddelen (MDR), In-vitrodiagnostiek (IVDR)\n\n**Bijlage I, Sectie B** (beperkte vereisten):\nBurgerluchtvaart (Verordening (EU) 2018/1139), Twee-/driewielige voertuigen, Landbouwvoertuigen, Scheepsuitrusting, Spoorwegsystemen, Motorvoertuigen, Onbemande luchtvaartuigen"
        },
        "de": {
          "title": "Hochrisiko: Sicherheitskomponente eines regulierten Produkts (Anhang I)",
          "body": "Ist Ihr KI-System:\n- Eine **Sicherheitskomponente** eines Produkts, das unter EU-Harmonisierungsrechtsvorschriften fällt (unten aufgeführt), ODER\n- **Selbst ein Produkt**, das unter diese Rechtsvorschriften fällt?\n\nUND ist das Produkt einer **Konformitätsbewertung durch Dritte** zu unterziehen?\n\n**Anhang I, Abschnitt A** (vollständige KI-Verordnung anwendbar):\nMaschinen, Spielzeug, Sportboote, Aufzüge, ATEX-Geräte, Funkanlagen, Druckgeräte, Seilbahnen, PSA, Gasverbrauchseinrichtungen, Medizinprodukte (MDR), In-vitro-Diagnostika (IVDR)\n\n**Anhang I, Abschnitt B** (eingeschränkte Anforderungen):\nZivilluftfahrt (Verordnung (EU) 2018/1139), Zwei-/dreirädrige Fahrzeuge, Land- und forstwirtschaftliche Fahrzeuge, Schiffsausrüstung, Eisenbahnsysteme, Kraftfahrzeuge, Unbemannte Luftfahrzeuge"
        },
        "es": {
          "title": "Alto riesgo: componente de seguridad de un producto regulado (Anexo I)",
          "body": "¿Su sistema de IA es:\n- Un **componente de seguridad** de un producto cubierto por la legislación de armonización de la Unión (enumerada a continuación), O\n- **El propio producto** cubierto por dicha legislación?\n\n¿Y el producto debe someterse a una **evaluación de conformidad por un tercero**?\n\n**Anexo I, Sección A** (se aplican todos los requisitos del Reglamento de IA):\nMaquinaria, Juguetes, Embarcaciones de recreo, Ascensores, Equipos ATEX, Equipos radioeléctricos, Equipos a presión, Instalaciones de teleféricos, EPI, Aparatos de gas, Productos sanitarios (MDR), Productos sanitarios para diagnóstico in vitro (IVDR)\n\n**Anexo I, Sección B** (requisitos limitados):\nAviación civil (Reglamento (UE) 2018/1139), Vehículos de dos/tres ruedas, Vehículos agrícolas, Equipos marinos, Sistemas ferroviarios, Vehículos de motor, Aeronaves no tripuladas"
        }
      }
    },
    "q_annex_i_legislation": {
      "id": "q_annex_i_legislation",
      "type": "question",
      "stage": "S6",
      "title": "Applicable Product Legislation",
      "body": "Which EU harmonisation legislation (Annex I, Section A) applies to your product?",
      "legal_ref": "Annex I",
      "attribute": "annex_i_legislation",
      "options": [
        {
          "label": "Machinery Regulation (EU) 2023/1230",
          "value": "machinery",
          "next": "q_annex_i_third_party_ca",
          "translations": {
            "fr": {
              "label": "Règlement Machines (UE) 2023/1230"
            },
            "nl": {
              "label": "Machineverordening (EU) 2023/1230"
            },
            "de": {
              "label": "Maschinenverordnung (EU) 2023/1230"
            },
            "es": {
              "label": "Reglamento de Máquinas (UE) 2023/1230"
            }
          }
        },
        {
          "label": "Toy Safety Directive (2009/48/EC)",
          "value": "toys",
          "next": "q_annex_i_third_party_ca",
          "translations": {
            "fr": {
              "label": "Directive Sécurité des jouets (2009/48/CE)"
            },
            "nl": {
              "label": "Richtlijn Speelgoedveiligheid (2009/48/EG)"
            },
            "de": {
              "label": "Spielzeugrichtlinie (2009/48/EG)"
            },
            "es": {
              "label": "Directiva de Seguridad de los Juguetes (2009/48/CE)"
            }
          }
        },
        {
          "label": "Recreational Craft Directive (2013/53/EU)",
          "value": "recreational_craft",
          "next": "q_annex_i_third_party_ca",
          "translations": {
            "fr": {
              "label": "Directive Bateaux de plaisance (2013/53/UE)"
            },
            "nl": {
              "label": "Richtlijn Pleziervaartuigen (2013/53/EU)"
            },
            "de": {
              "label": "Sportbootrichtlinie (2013/53/EU)"
            },
            "es": {
              "label": "Directiva de Embarcaciones de Recreo (2013/53/UE)"
            }
          }
        },
        {
          "label": "Lifts Directive (2014/33/EU)",
          "value": "lifts",
          "next": "q_annex_i_third_party_ca",
          "translations": {
            "fr": {
              "label": "Directive Ascenseurs (2014/33/UE)"
            },
            "nl": {
              "label": "Liftenrichtlijn (2014/33/EU)"
            },
            "de": {
              "label": "Aufzugsrichtlinie (2014/33/EU)"
            },
            "es": {
              "label": "Directiva de Ascensores (2014/33/UE)"
            }
          }
        },
        {
          "label": "ATEX Directive (2014/34/EU)",
          "value": "atex",
          "next": "q_annex_i_third_party_ca",
          "translations": {
            "fr": {
              "label": "Directive ATEX (2014/34/UE)"
            },
            "nl": {
              "label": "ATEX-richtlijn (2014/34/EU)"
            },
            "de": {
              "label": "ATEX-Richtlinie (2014/34/EU)"
            },
            "es": {
              "label": "Directiva ATEX (2014/34/UE)"
            }
          }
        },
        {
          "label": "Radio Equipment Directive (2014/53/EU)",
          "value": "radio_equipment",
          "next": "q_annex_i_third_party_ca",
          "translations": {
            "fr": {
              "label": "Directive Équipements radioélectriques (2014/53/UE)"
            },
            "nl": {
              "label": "Richtlijn Radioapparatuur (2014/53/EU)"
            },
            "de": {
              "label": "Funkanlagenrichtlinie (2014/53/EU)"
            },
            "es": {
              "label": "Directiva de Equipos Radioeléctricos (2014/53/UE)"
            }
          }
        },
        {
          "label": "Pressure Equipment Directive (2014/68/EU)",
          "value": "pressure_equipment",
          "next": "q_annex_i_third_party_ca",
          "translations": {
            "fr": {
              "label": "Directive Équipements sous pression (2014/68/UE)"
            },
            "nl": {
              "label": "Richtlijn Drukapparatuur (2014/68/EU)"
            },
            "de": {
              "label": "Druckgeräterichtlinie (2014/68/EU)"
            },
            "es": {
              "label": "Directiva de Equipos a Presión (2014/68/UE)"
            }
          }
        },
        {
          "label": "Cableway Installations (2016/424)",
          "value": "cableways",
          "next": "q_annex_i_third_party_ca",
          "translations": {
            "fr": {
              "label": "Installations à câbles (2016/424)"
            },
            "nl": {
              "label": "Kabelbaaninstallaties (2016/424)"
            },
            "de": {
              "label": "Seilbahnen (2016/424)"
            },
            "es": {
              "label": "Instalaciones de Teleféricos (2016/424)"
            }
          }
        },
        {
          "label": "PPE Regulation (2016/425)",
          "value": "ppe",
          "next": "q_annex_i_third_party_ca",
          "translations": {
            "fr": {
              "label": "Règlement EPI (2016/425)"
            },
            "nl": {
              "label": "PBM-verordening (2016/425)"
            },
            "de": {
              "label": "PSA-Verordnung (2016/425)"
            },
            "es": {
              "label": "Reglamento de EPI (2016/425)"
            }
          }
        },
        {
          "label": "Gas Appliances Regulation (2016/426)",
          "value": "gas_appliances",
          "next": "q_annex_i_third_party_ca",
          "translations": {
            "fr": {
              "label": "Règlement Appareils à gaz (2016/426)"
            },
            "nl": {
              "label": "Verordening Gastoestellen (2016/426)"
            },
            "de": {
              "label": "Gasgeräteverordnung (2016/426)"
            },
            "es": {
              "label": "Reglamento de Aparatos de Gas (2016/426)"
            }
          }
        },
        {
          "label": "Medical Devices Regulation — MDR (2017/745)",
          "value": "medical_devices",
          "next": "q_annex_i_third_party_ca",
          "translations": {
            "fr": {
              "label": "Règlement Dispositifs médicaux — MDR (2017/745)"
            },
            "nl": {
              "label": "Verordening Medische Hulpmiddelen — MDR (2017/745)"
            },
            "de": {
              "label": "Medizinprodukteverordnung — MDR (2017/745)"
            },
            "es": {
              "label": "Reglamento de Productos Sanitarios — MDR (2017/745)"
            }
          }
        },
        {
          "label": "In-Vitro Diagnostic Medical Devices — IVDR (2017/746)",
          "value": "ivd_medical_devices",
          "next": "q_annex_i_third_party_ca",
          "translations": {
            "fr": {
              "label": "Règlement Dispositifs médicaux de diagnostic in vitro — IVDR (2017/746)"
            },
            "nl": {
              "label": "Verordening In-vitrodiagnostiek — IVDR (2017/746)"
            },
            "de": {
              "label": "Verordnung über In-vitro-Diagnostika — IVDR (2017/746)"
            },
            "es": {
              "label": "Reglamento de Productos Sanitarios para Diagnóstico In Vitro — IVDR (2017/746)"
            }
          }
        },
        {
          "label": "Other Annex I, Section A legislation",
          "value": "other",
          "next": "q_annex_i_third_party_ca",
          "translations": {
            "fr": {
              "label": "Autre législation de l'Annexe I, Section A"
            },
            "nl": {
              "label": "Andere Bijlage I, Sectie A-wetgeving"
            },
            "de": {
              "label": "Sonstige Rechtsvorschriften aus Anhang I, Abschnitt A"
            },
            "es": {
              "label": "Otra legislación del Anexo I, Sección A"
            }
          }
        }
      ],
      "previous": "q_high_risk_annex_i",
      "note": "This list covers Annex I, Section A products only. Section B products (motor vehicles, civil aviation under Regulation (EU) 2018/1139, rail, marine, etc.) are handled separately with limited AI Act applicability.",
      "translations": {
        "fr": {
          "title": "Législation produit applicable",
          "body": "Quelle législation d'harmonisation de l'Union (Annexe I, Section A) s'applique à votre produit ?",
          "note": "Cette liste couvre uniquement les produits de l'Annexe I, Section A. Les produits de la Section B (véhicules à moteur, aviation civile au titre du Règlement (UE) 2018/1139, ferroviaire, maritime, etc.) sont traités séparément avec une applicabilité limitée du Règlement IA."
        },
        "nl": {
          "title": "Toepasselijke productwetgeving",
          "body": "Welke EU-harmonisatiewetgeving (Bijlage I, Sectie A) is van toepassing op uw product?",
          "note": "Deze lijst betreft alleen Bijlage I, Sectie A-producten. Sectie B-producten (motorvoertuigen, burgerluchtvaart onder Verordening (EU) 2018/1139, spoorwegen, scheepvaart, enz.) worden apart behandeld met beperkte toepasselijkheid van de AI-verordening."
        },
        "de": {
          "title": "Anwendbare Produktgesetzgebung",
          "body": "Welche EU-Harmonisierungsrechtsvorschriften (Anhang I, Abschnitt A) gelten für Ihr Produkt?",
          "note": "Diese Liste umfasst nur Produkte aus Anhang I, Abschnitt A. Produkte aus Abschnitt B (Kraftfahrzeuge, Zivilluftfahrt gemäß Verordnung (EU) 2018/1139, Schienenverkehr, Schifffahrt usw.) werden separat behandelt und unterliegen nur eingeschränkten Anforderungen der KI-Verordnung."
        },
        "es": {
          "title": "Legislación de productos aplicable",
          "body": "¿Qué legislación de armonización de la Unión (Anexo I, Sección A) se aplica a su producto?",
          "note": "Esta lista cubre únicamente los productos del Anexo I, Sección A. Los productos de la Sección B (vehículos de motor, aviación civil bajo el Reglamento (UE) 2018/1139, ferrocarril, marítimo, etc.) se tratan por separado con una aplicabilidad limitada del Reglamento de IA."
        }
      }
    },
    "q_annex_i_third_party_ca": {
      "id": "q_annex_i_third_party_ca",
      "type": "question",
      "stage": "S6",
      "title": "Third-Party Conformity Assessment Required?",
      "body": "Under the applicable product legislation you selected, is your product required to undergo a **third-party conformity assessment** (i.e., assessment by a notified body, NOT self-assessment)?",
      "legal_ref": "Art. 6(1)(b)",
      "note": "Art. 6(1) requires BOTH conditions: (a) AI system is a safety component/product covered by Annex I AND (b) product requires third-party conformity assessment. If only self-assessment is required, your system is NOT high-risk under Art. 6(1).",
      "options": [
        {
          "label": "Yes — third-party conformity assessment is required",
          "value": "yes",
          "set": {
            "risk_category": "high_risk_annex_i"
          },
          "next": "q_annex_i_also_annex_iii",
          "translations": {
            "fr": {
              "label": "Oui — une évaluation de conformité par un tiers est requise"
            },
            "nl": {
              "label": "Ja — conformiteitsbeoordeling door derden is vereist"
            },
            "de": {
              "label": "Ja — Konformitätsbewertung durch Dritte ist erforderlich"
            },
            "es": {
              "label": "Sí — se requiere evaluación de conformidad por terceros"
            }
          }
        },
        {
          "label": "No — only self-assessment / internal conformity assessment required",
          "value": "no",
          "next": "q_high_risk_annex_iii_area",
          "translations": {
            "fr": {
              "label": "Non — seule une auto-évaluation / évaluation interne est requise"
            },
            "nl": {
              "label": "Nee — alleen zelfbeoordeling / interne conformiteitsbeoordeling vereist"
            },
            "de": {
              "label": "Nein — nur Selbstbewertung / interne Konformitätsbewertung erforderlich"
            },
            "es": {
              "label": "No — solo se requiere autoevaluación / evaluación interna de conformidad"
            }
          }
        },
        {
          "label": "I'm not sure — apply precautionary classification as high-risk (recommended: consult product legislation expert)",
          "value": "unsure",
          "flag": "precautionary_high_risk",
          "set": {
            "risk_category": "high_risk_annex_i"
          },
          "next": "q_annex_i_also_annex_iii",
          "translations": {
            "fr": {
              "label": "Je ne suis pas sûr — appliquer la classification de précaution comme haut risque (recommandé : consulter un expert en législation produit)"
            },
            "nl": {
              "label": "Ik weet het niet zeker — voorzorgsclassificatie als hoog risico toepassen (aanbevolen: raadpleeg een expert in productwetgeving)"
            },
            "de": {
              "label": "Ich bin nicht sicher — vorsorgliche Hochrisiko-Einstufung anwenden (empfohlen: Experten für Produktvorschriften konsultieren)"
            },
            "es": {
              "label": "No estoy seguro/a — aplicar la clasificación de precaución como alto riesgo (recomendado: consultar a un experto en legislación de productos)"
            }
          }
        }
      ],
      "previous": "q_annex_i_legislation",
      "translations": {
        "fr": {
          "title": "Évaluation de conformité par un tiers requise ?",
          "body": "En vertu de la législation produit applicable que vous avez sélectionnée, votre produit doit-il faire l'objet d'une **évaluation de conformité par un tiers** (c'est-à-dire par un organisme notifié, et NON une auto-évaluation) ?",
          "note": "L'art. 6(1) exige les DEUX conditions : (a) le système d'IA est un composant de sécurité/produit couvert par l'Annexe I ET (b) le produit nécessite une évaluation de conformité par un tiers. Si seule l'auto-évaluation est requise, votre système n'est PAS à haut risque au titre de l'art. 6(1)."
        },
        "nl": {
          "title": "Conformiteitsbeoordeling door derden vereist?",
          "body": "Is uw product op grond van de toepasselijke productwetgeving die u heeft geselecteerd onderworpen aan een **conformiteitsbeoordeling door een derde partij** (d.w.z. door een aangemelde instantie, NIET zelfbeoordeling)?",
          "note": "Art. 6(1) vereist BEIDE voorwaarden: (a) het AI-systeem is een veiligheidscomponent/product dat onder Bijlage I valt EN (b) het product vereist een conformiteitsbeoordeling door derden. Als alleen zelfbeoordeling vereist is, is uw systeem NIET hoog risico onder art. 6(1)."
        },
        "de": {
          "title": "Konformitätsbewertung durch Dritte erforderlich?",
          "body": "Muss Ihr Produkt gemäß den von Ihnen ausgewählten Produktvorschriften einer **Konformitätsbewertung durch Dritte** unterzogen werden (d. h. Bewertung durch eine benannte Stelle, NICHT Selbstbewertung)?",
          "note": "Art. 6(1) erfordert BEIDE Bedingungen: (a) das KI-System ist eine Sicherheitskomponente/ein Produkt gemäß Anhang I UND (b) das Produkt erfordert eine Konformitätsbewertung durch Dritte. Wenn nur eine Selbstbewertung erforderlich ist, ist Ihr System NICHT hochriskant gemäß Art. 6(1)."
        },
        "es": {
          "title": "¿Se requiere evaluación de conformidad por terceros?",
          "body": "Según la legislación de productos aplicable que ha seleccionado, ¿su producto debe someterse a una **evaluación de conformidad por un tercero** (es decir, evaluación por un organismo notificado, NO autoevaluación)?",
          "note": "El art. 6(1) exige AMBAS condiciones: (a) el sistema de IA es un componente de seguridad/producto cubierto por el Anexo I Y (b) el producto requiere una evaluación de conformidad por terceros. Si solo se requiere autoevaluación, su sistema NO es de alto riesgo según el art. 6(1)."
        }
      }
    },
    "q_high_risk_annex_iii_area": {
      "id": "q_high_risk_annex_iii_area",
      "type": "question",
      "stage": "S6",
      "title": "High-Risk: Application Area (Annex III)",
      "body": "Is your AI system intended to be used in any of the following areas?",
      "legal_ref": "Art. 6(2), Annex III",
      "attribute": "annex_iii_area",
      "options": [
        {
          "label": "1. Biometrics (remote identification, categorisation by sensitive attributes, emotion recognition)",
          "value": "biometrics",
          "next": "q_annex_iii_biometrics",
          "translations": {
            "fr": {
              "label": "1. Biométrie (identification à distance, catégorisation par attributs sensibles, reconnaissance des émotions)"
            },
            "nl": {
              "label": "1. Biometrie (identificatie op afstand, categorisering op basis van gevoelige kenmerken, emotieherkenning)"
            },
            "de": {
              "label": "1. Biometrie (biometrische Fernidentifizierung, Kategorisierung nach sensiblen Merkmalen, Emotionserkennung)"
            },
            "es": {
              "label": "1. Biometría (identificación remota, categorización por atributos sensibles, reconocimiento de emociones)"
            }
          }
        },
        {
          "label": "2. Critical infrastructure (safety components for digital infrastructure, road traffic, water/gas/heating/electricity, transport)",
          "value": "critical_infrastructure",
          "next": "q_annex_iii_critical_infra",
          "translations": {
            "fr": {
              "label": "2. Infrastructures critiques (composants de sécurité pour les infrastructures numériques, la circulation routière, l'eau/gaz/chauffage/électricité, les transports)"
            },
            "nl": {
              "label": "2. Kritieke infrastructuur (veiligheidscomponenten voor digitale infrastructuur, wegverkeer, water/gas/verwarming/elektriciteit, vervoer)"
            },
            "de": {
              "label": "2. Kritische Infrastruktur (Sicherheitskomponenten für digitale Infrastruktur, Straßenverkehr, Wasser-/Gas-/Heizungs-/Stromversorgung, Verkehr)"
            },
            "es": {
              "label": "2. Infraestructuras críticas (componentes de seguridad para infraestructuras digitales, tráfico vial, agua/gas/calefacción/electricidad, transporte)"
            }
          }
        },
        {
          "label": "3. Education & vocational training (admission, evaluation, level assessment, exam monitoring)",
          "value": "education",
          "next": "q_annex_iii_education",
          "translations": {
            "fr": {
              "label": "3. Éducation et formation professionnelle (admission, évaluation, détermination du niveau, surveillance d'examens)"
            },
            "nl": {
              "label": "3. Onderwijs en beroepsopleiding (toelating, beoordeling, niveaubepaling, examentoezicht)"
            },
            "de": {
              "label": "3. Bildung und Berufsausbildung (Zulassung, Bewertung, Einstufung, Prüfungsüberwachung)"
            },
            "es": {
              "label": "3. Educación y formación profesional (admisión, evaluación, determinación del nivel, supervisión de exámenes)"
            }
          }
        },
        {
          "label": "4. Employment & workers' management (recruitment, selection, HR decisions, task allocation, monitoring)",
          "value": "employment",
          "next": "q_annex_iii_employment",
          "translations": {
            "fr": {
              "label": "4. Emploi et gestion des travailleurs (recrutement, sélection, décisions RH, affectation de tâches, surveillance)"
            },
            "nl": {
              "label": "4. Werkgelegenheid en personeelsbeheer (werving, selectie, HR-beslissingen, taakverdeling, monitoring)"
            },
            "de": {
              "label": "4. Beschäftigung und Personalmanagement (Einstellung, Auswahl, HR-Entscheidungen, Aufgabenzuweisung, Überwachung)"
            },
            "es": {
              "label": "4. Empleo y gestión de trabajadores (contratación, selección, decisiones de RRHH, asignación de tareas, supervisión)"
            }
          }
        },
        {
          "label": "5. Essential services (public benefits eligibility, credit scoring, insurance pricing, emergency dispatch)",
          "value": "essential_services",
          "next": "q_annex_iii_essential_services",
          "translations": {
            "fr": {
              "label": "5. Services essentiels (éligibilité aux prestations publiques, notation de crédit, tarification d'assurance, répartition des secours d'urgence)"
            },
            "nl": {
              "label": "5. Essentiële diensten (aanspraak op overheidsuitkeringen, kredietscore, verzekeringsprijsstelling, dispatching van hulpdiensten)"
            },
            "de": {
              "label": "5. Grundlegende Dienste (Anspruch auf öffentliche Leistungen, Kreditwürdigkeitsprüfung, Versicherungspreisgestaltung, Notfalldisposition)"
            },
            "es": {
              "label": "5. Servicios esenciales (elegibilidad para prestaciones públicas, puntuación crediticia, tarificación de seguros, despacho de emergencias)"
            }
          }
        },
        {
          "label": "6. Law enforcement (victim risk, polygraphs, evidence reliability, recidivism, profiling)",
          "value": "law_enforcement",
          "next": "q_annex_iii_law_enforcement",
          "translations": {
            "fr": {
              "label": "6. Répression (risque pour les victimes, polygraphes, fiabilité des preuves, récidive, profilage)"
            },
            "nl": {
              "label": "6. Rechtshandhaving (risico voor slachtoffers, leugendetectors, bewijsbetrouwbaarheid, recidive, profilering)"
            },
            "de": {
              "label": "6. Strafverfolgung (Opferrisiko, Polygraphen, Beweiszuverlässigkeit, Rückfallprognose, Profiling)"
            },
            "es": {
              "label": "6. Aplicación de la ley (riesgo para víctimas, polígrafos, fiabilidad de pruebas, reincidencia, elaboración de perfiles)"
            }
          }
        },
        {
          "label": "7. Migration, asylum & border control (polygraphs, risk assessment, applications, identification)",
          "value": "migration_asylum_border",
          "next": "q_annex_iii_migration",
          "translations": {
            "fr": {
              "label": "7. Migration, asile et contrôle aux frontières (polygraphes, évaluation des risques, demandes, identification)"
            },
            "nl": {
              "label": "7. Migratie, asiel en grenscontrole (leugendetectors, risicobeoordeling, aanvragen, identificatie)"
            },
            "de": {
              "label": "7. Migration, Asyl und Grenzkontrolle (Polygraphen, Risikobewertung, Anträge, Identifizierung)"
            },
            "es": {
              "label": "7. Migración, asilo y control fronterizo (polígrafos, evaluación de riesgos, solicitudes, identificación)"
            }
          }
        },
        {
          "label": "8. Administration of justice & democratic processes (judicial assistance, election influence)",
          "value": "justice_democracy",
          "next": "q_annex_iii_justice",
          "translations": {
            "fr": {
              "label": "8. Administration de la justice et processus démocratiques (assistance judiciaire, influence sur les élections)"
            },
            "nl": {
              "label": "8. Rechtsbedeling en democratische processen (rechterlijke bijstand, beïnvloeding van verkiezingen)"
            },
            "de": {
              "label": "8. Justizverwaltung und demokratische Prozesse (richterliche Unterstützung, Wahlbeeinflussung)"
            },
            "es": {
              "label": "8. Administración de justicia y procesos democráticos (asistencia judicial, influencia en elecciones)"
            }
          }
        },
        {
          "label": "None of the above",
          "value": "none",
          "next": "q_sector",
          "translations": {
            "fr": {
              "label": "Aucun des domaines ci-dessus"
            },
            "nl": {
              "label": "Geen van bovenstaande"
            },
            "de": {
              "label": "Keiner der oben genannten Bereiche"
            },
            "es": {
              "label": "Ninguno de los anteriores"
            }
          }
        }
      ],
      "previous": [
        "q_high_risk_annex_i",
        "q_annex_i_third_party_ca",
        "q_annex_i_also_annex_iii"
      ],
      "translations": {
        "fr": {
          "title": "Haut risque : domaine d'application (Annexe III)",
          "body": "Votre système d'IA est-il destiné à être utilisé dans l'un des domaines suivants ?"
        },
        "nl": {
          "title": "Hoog risico: toepassingsgebied (Bijlage III)",
          "body": "Is uw AI-systeem bedoeld om te worden gebruikt in een van de volgende gebieden?"
        },
        "de": {
          "title": "Hochrisiko: Anwendungsbereich (Anhang III)",
          "body": "Soll Ihr KI-System in einem der folgenden Bereiche eingesetzt werden?"
        },
        "es": {
          "title": "Alto riesgo: ámbito de aplicación (Anexo III)",
          "body": "¿Su sistema de IA está destinado a ser utilizado en alguno de los siguientes ámbitos?"
        }
      }
    },
    "q_annex_iii_biometrics": {
      "id": "q_annex_iii_biometrics",
      "type": "question",
      "stage": "S6",
      "title": "Biometrics — Specific Use Case",
      "body": "Which biometric use case applies?",
      "legal_ref": "Annex III, point 1",
      "attribute": "annex_iii_use_case",
      "options": [
        {
          "label": "(a) Remote biometric identification (identifying persons at a distance without their active involvement — NOT mere identity verification)",
          "value": "1a",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(a) Identification biométrique à distance (identification de personnes à distance sans leur participation active — et NON simple vérification d'identité)"
            },
            "nl": {
              "label": "(a) Biometrische identificatie op afstand (identificatie van personen op afstand zonder hun actieve medewerking — NIET louter identiteitsverificatie)"
            },
            "de": {
              "label": "(a) Biometrische Fernidentifizierung (Identifizierung von Personen aus der Ferne ohne deren aktive Mitwirkung — NICHT bloße Identitätsverifizierung)"
            },
            "es": {
              "label": "(a) Identificación biométrica remota (identificación de personas a distancia sin su participación activa — NO simple verificación de identidad)"
            }
          }
        },
        {
          "label": "(b) Biometric categorisation by sensitive/protected attributes (race, beliefs, sexual orientation, etc.)",
          "value": "1b",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(b) Catégorisation biométrique par attributs sensibles/protégés (race, convictions, orientation sexuelle, etc.)"
            },
            "nl": {
              "label": "(b) Biometrische categorisering op basis van gevoelige/beschermde kenmerken (ras, overtuigingen, seksuele geaardheid, enz.)"
            },
            "de": {
              "label": "(b) Biometrische Kategorisierung nach sensiblen/geschützten Merkmalen (Rasse, Überzeugungen, sexuelle Orientierung usw.)"
            },
            "es": {
              "label": "(b) Categorización biométrica por atributos sensibles/protegidos (raza, creencias, orientación sexual, etc.)"
            }
          }
        },
        {
          "label": "(c) Emotion recognition",
          "value": "1c",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(c) Reconnaissance des émotions"
            },
            "nl": {
              "label": "(c) Emotieherkenning"
            },
            "de": {
              "label": "(c) Emotionserkennung"
            },
            "es": {
              "label": "(c) Reconocimiento de emociones"
            }
          }
        },
        {
          "label": "None of these specific use cases",
          "value": "none",
          "next": "q_sector",
          "translations": {
            "fr": {
              "label": "Aucun de ces cas d'utilisation spécifiques"
            },
            "nl": {
              "label": "Geen van deze specifieke gebruiken"
            },
            "de": {
              "label": "Keiner dieser spezifischen Anwendungsfälle"
            },
            "es": {
              "label": "Ninguno de estos casos de uso específicos"
            }
          }
        }
      ],
      "previous": "q_high_risk_annex_iii_area",
      "translations": {
        "fr": {
          "title": "Biométrie — Cas d'utilisation spécifique",
          "body": "Quel cas d'utilisation biométrique s'applique ?"
        },
        "nl": {
          "title": "Biometrie — Specifiek gebruik",
          "body": "Welk biometrisch gebruik is van toepassing?"
        },
        "de": {
          "title": "Biometrie — Spezifischer Anwendungsfall",
          "body": "Welcher biometrische Anwendungsfall trifft zu?"
        },
        "es": {
          "title": "Biometría — Caso de uso específico",
          "body": "¿Qué caso de uso biométrico se aplica?"
        }
      }
    },
    "q_annex_iii_critical_infra": {
      "id": "q_annex_iii_critical_infra",
      "type": "question",
      "stage": "S6",
      "title": "Critical Infrastructure — Specific Use Case",
      "body": "Is your AI system intended to be used as a **safety component** in the management and operation of:\n- Critical digital infrastructure\n- Road traffic\n- Supply of water, gas, heating, or electricity\n- Transport (rail, air, road, waterborne)?\n\nNote: Annex III point 2 covers safety components of critical infrastructure including digital infrastructure, road traffic, water, gas, heating, electricity supply, and transport systems.",
      "legal_ref": "Annex III, point 2",
      "attribute": "annex_iii_use_case",
      "options": [
        {
          "label": "Yes — safety component in one of these areas",
          "value": "2",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "Oui — composant de sécurité dans l'un de ces domaines"
            },
            "nl": {
              "label": "Ja — veiligheidscomponent in een van deze gebieden"
            },
            "de": {
              "label": "Ja — Sicherheitskomponente in einem dieser Bereiche"
            },
            "es": {
              "label": "Sí — componente de seguridad en uno de estos ámbitos"
            }
          }
        },
        {
          "label": "No — not a safety component in critical infrastructure",
          "value": "none",
          "next": "q_sector",
          "translations": {
            "fr": {
              "label": "Non — pas un composant de sécurité d'une infrastructure critique"
            },
            "nl": {
              "label": "Nee — geen veiligheidscomponent van kritieke infrastructuur"
            },
            "de": {
              "label": "Nein — keine Sicherheitskomponente einer kritischen Infrastruktur"
            },
            "es": {
              "label": "No — no es un componente de seguridad de una infraestructura crítica"
            }
          }
        }
      ],
      "previous": "q_high_risk_annex_iii_area",
      "translations": {
        "fr": {
          "title": "Infrastructures critiques — Cas d'utilisation spécifique",
          "body": "Votre système d'IA est-il destiné à être utilisé comme **composant de sécurité** dans la gestion et l'exploitation de :\n- Infrastructures numériques critiques\n- Circulation routière\n- Fourniture d'eau, de gaz, de chauffage ou d'électricité\n- Transports (ferroviaire, aérien, routier, par voie d'eau) ?\n\nNote : Le point 2 de l'Annexe III couvre les composants de sécurité des infrastructures critiques, y compris l'infrastructure numérique, la circulation routière, l'approvisionnement en eau, gaz, chauffage, électricité et les systèmes de transport."
        },
        "nl": {
          "title": "Kritieke infrastructuur — Specifiek gebruik",
          "body": "Is uw AI-systeem bedoeld om te worden gebruikt als **veiligheidscomponent** bij het beheer en de exploitatie van:\n- Kritieke digitale infrastructuur\n- Wegverkeer\n- Levering van water, gas, verwarming of elektriciteit\n- Vervoer (spoor, lucht, weg, water)?\n\nOpmerking: Punt 2 van Bijlage III betreft veiligheidscomponenten van kritieke infrastructuur, waaronder digitale infrastructuur, wegverkeer, water-, gas-, verwarmings- en elektriciteitsvoorziening en vervoerssystemen."
        },
        "de": {
          "title": "Kritische Infrastruktur — Spezifischer Anwendungsfall",
          "body": "Soll Ihr KI-System als **Sicherheitskomponente** beim Management und Betrieb folgender Bereiche eingesetzt werden:\n- Kritische digitale Infrastruktur\n- Straßenverkehr\n- Versorgung mit Wasser, Gas, Heizung oder Strom\n- Verkehr (Schiene, Luft, Straße, Wasserstraßen)?\n\nHinweis: Anhang III Nummer 2 umfasst Sicherheitskomponenten kritischer Infrastrukturen einschließlich digitaler Infrastruktur, Straßenverkehr, Wasser-, Gas-, Heizungs- und Stromversorgung sowie Verkehrssysteme."
        },
        "es": {
          "title": "Infraestructuras críticas — Caso de uso específico",
          "body": "¿Su sistema de IA está destinado a ser utilizado como **componente de seguridad** en la gestión y operación de:\n- Infraestructuras digitales críticas\n- Tráfico vial\n- Suministro de agua, gas, calefacción o electricidad\n- Transporte (ferroviario, aéreo, por carretera, marítimo)?\n\nNota: El punto 2 del Anexo III cubre los componentes de seguridad de infraestructuras críticas, incluida la infraestructura digital, el tráfico vial, el suministro de agua, gas, calefacción, electricidad y los sistemas de transporte."
        }
      }
    },
    "q_annex_iii_education": {
      "id": "q_annex_iii_education",
      "type": "question",
      "stage": "S6",
      "title": "Education — Specific Use Case",
      "body": "Which education use case applies?",
      "legal_ref": "Annex III, point 3",
      "attribute": "annex_iii_use_case",
      "options": [
        {
          "label": "(a) Determining access/admission or assigning persons to educational institutions",
          "value": "3a",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(a) Déterminer l'accès/l'admission ou l'affectation de personnes à des établissements d'enseignement"
            },
            "nl": {
              "label": "(a) Bepalen van toegang/toelating of toewijzing van personen aan onderwijsinstellingen"
            },
            "de": {
              "label": "(a) Bestimmung des Zugangs/der Zulassung oder Zuweisung von Personen an Bildungseinrichtungen"
            },
            "es": {
              "label": "(a) Determinar el acceso/admisión o la asignación de personas a instituciones educativas"
            }
          }
        },
        {
          "label": "(b) Evaluating learning outcomes (including steering the learning process)",
          "value": "3b",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(b) Évaluer les résultats d'apprentissage (y compris orienter le processus d'apprentissage)"
            },
            "nl": {
              "label": "(b) Beoordelen van leerresultaten (inclusief het sturen van het leerproces)"
            },
            "de": {
              "label": "(b) Bewertung von Lernergebnissen (einschließlich Steuerung des Lernprozesses)"
            },
            "es": {
              "label": "(b) Evaluar los resultados de aprendizaje (incluida la orientación del proceso de aprendizaje)"
            }
          }
        },
        {
          "label": "(c) Assessing the appropriate level of education a person will receive/access",
          "value": "3c",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(c) Évaluer le niveau d'éducation approprié qu'une personne recevra ou auquel elle aura accès"
            },
            "nl": {
              "label": "(c) Beoordelen van het passende onderwijsniveau dat een persoon zal ontvangen of waartoe hij/zij toegang krijgt"
            },
            "de": {
              "label": "(c) Bewertung des angemessenen Bildungsniveaus, das eine Person erhalten wird/zu dem sie Zugang erhält"
            },
            "es": {
              "label": "(c) Evaluar el nivel educativo apropiado que una persona recibirá o al que tendrá acceso"
            }
          }
        },
        {
          "label": "(d) Monitoring and detecting prohibited behaviour during tests",
          "value": "3d",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(d) Surveillance et détection de comportements interdits lors d'examens"
            },
            "nl": {
              "label": "(d) Toezicht op en detectie van verboden gedrag tijdens examens"
            },
            "de": {
              "label": "(d) Überwachung und Erkennung verbotenen Verhaltens bei Prüfungen"
            },
            "es": {
              "label": "(d) Supervisión y detección de comportamientos prohibidos durante exámenes"
            }
          }
        },
        {
          "label": "None of these specific use cases",
          "value": "none",
          "next": "q_sector",
          "translations": {
            "fr": {
              "label": "Aucun de ces cas d'utilisation spécifiques"
            },
            "nl": {
              "label": "Geen van deze specifieke gebruiken"
            },
            "de": {
              "label": "Keiner dieser spezifischen Anwendungsfälle"
            },
            "es": {
              "label": "Ninguno de estos casos de uso específicos"
            }
          }
        }
      ],
      "previous": "q_high_risk_annex_iii_area",
      "translations": {
        "fr": {
          "title": "Éducation — Cas d'utilisation spécifique",
          "body": "Quel cas d'utilisation dans le domaine de l'éducation s'applique ?"
        },
        "nl": {
          "title": "Onderwijs — Specifiek gebruik",
          "body": "Welk onderwijsgerelateerd gebruik is van toepassing?"
        },
        "de": {
          "title": "Bildung — Spezifischer Anwendungsfall",
          "body": "Welcher Anwendungsfall im Bildungsbereich trifft zu?"
        },
        "es": {
          "title": "Educación — Caso de uso específico",
          "body": "¿Qué caso de uso en el ámbito educativo se aplica?"
        }
      }
    },
    "q_annex_iii_employment": {
      "id": "q_annex_iii_employment",
      "type": "question",
      "stage": "S6",
      "title": "Employment — Specific Use Case",
      "body": "Which employment use case applies?",
      "legal_ref": "Annex III, point 4",
      "attribute": "annex_iii_use_case",
      "options": [
        {
          "label": "(a) Recruitment or selection (targeted job ads, filtering applications, evaluating candidates)",
          "value": "4a",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(a) Recrutement ou sélection (annonces d'emploi ciblées, filtrage de candidatures, évaluation de candidats)"
            },
            "nl": {
              "label": "(a) Werving of selectie (gerichte vacatureadvertenties, filtering van sollicitaties, beoordeling van kandidaten)"
            },
            "de": {
              "label": "(a) Einstellung oder Auswahl (gezielte Stellenanzeigen, Filterung von Bewerbungen, Bewertung von Bewerbern)"
            },
            "es": {
              "label": "(a) Contratación o selección (anuncios de empleo dirigidos, filtrado de candidaturas, evaluación de candidatos)"
            }
          }
        },
        {
          "label": "(b) Decisions on work relationships (promotion, termination, task allocation, performance monitoring)",
          "value": "4b",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(b) Décisions relatives aux relations de travail (promotion, licenciement, affectation de tâches, suivi des performances)"
            },
            "nl": {
              "label": "(b) Beslissingen over arbeidsrelaties (promotie, ontslag, taakverdeling, prestatiemonitoring)"
            },
            "de": {
              "label": "(b) Entscheidungen über Arbeitsverhältnisse (Beförderung, Kündigung, Aufgabenzuweisung, Leistungsüberwachung)"
            },
            "es": {
              "label": "(b) Decisiones sobre relaciones laborales (promoción, despido, asignación de tareas, supervisión del rendimiento)"
            }
          }
        },
        {
          "label": "None of these specific use cases",
          "value": "none",
          "next": "q_sector",
          "translations": {
            "fr": {
              "label": "Aucun de ces cas d'utilisation spécifiques"
            },
            "nl": {
              "label": "Geen van deze specifieke gebruiken"
            },
            "de": {
              "label": "Keiner dieser spezifischen Anwendungsfälle"
            },
            "es": {
              "label": "Ninguno de estos casos de uso específicos"
            }
          }
        }
      ],
      "previous": "q_high_risk_annex_iii_area",
      "translations": {
        "fr": {
          "title": "Emploi — Cas d'utilisation spécifique",
          "body": "Quel cas d'utilisation dans le domaine de l'emploi s'applique ?"
        },
        "nl": {
          "title": "Werkgelegenheid — Specifiek gebruik",
          "body": "Welk werkgelegenheidsgerelateerd gebruik is van toepassing?"
        },
        "de": {
          "title": "Beschäftigung — Spezifischer Anwendungsfall",
          "body": "Welcher Anwendungsfall im Beschäftigungsbereich trifft zu?"
        },
        "es": {
          "title": "Empleo — Caso de uso específico",
          "body": "¿Qué caso de uso en el ámbito del empleo se aplica?"
        }
      }
    },
    "q_annex_iii_essential_services": {
      "id": "q_annex_iii_essential_services",
      "type": "question",
      "stage": "S6",
      "title": "Essential Services — Specific Use Case",
      "body": "Which essential services use case applies?",
      "legal_ref": "Annex III, point 5",
      "attribute": "annex_iii_use_case",
      "options": [
        {
          "label": "(a) Evaluating eligibility for public assistance benefits/services (including healthcare)",
          "value": "5a",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(a) Évaluation de l'éligibilité aux prestations et services d'aide publique (y compris les soins de santé)"
            },
            "nl": {
              "label": "(a) Beoordeling van de aanspraak op overheidsuitkeringen/-diensten (inclusief gezondheidszorg)"
            },
            "de": {
              "label": "(a) Bewertung der Anspruchsberechtigung für öffentliche Unterstützungsleistungen/-dienste (einschließlich Gesundheitsversorgung)"
            },
            "es": {
              "label": "(a) Evaluación de la elegibilidad para prestaciones y servicios de asistencia pública (incluida la atención sanitaria)"
            }
          }
        },
        {
          "label": "(b) Credit scoring / creditworthiness assessment (except fraud detection)",
          "value": "5b",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(b) Notation de crédit / évaluation de la solvabilité (sauf détection de fraude)"
            },
            "nl": {
              "label": "(b) Kredietscore / beoordeling van kredietwaardigheid (behalve fraudedetectie)"
            },
            "de": {
              "label": "(b) Kreditwürdigkeitsprüfung / Bonitätsbewertung (ausgenommen Betrugserkennung)"
            },
            "es": {
              "label": "(b) Puntuación crediticia / evaluación de solvencia (excepto detección de fraude)"
            }
          }
        },
        {
          "label": "(c) Risk assessment and pricing for life/health insurance",
          "value": "5c",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(c) Évaluation des risques et tarification pour l'assurance vie et santé"
            },
            "nl": {
              "label": "(c) Risicobeoordeling en prijsstelling voor levens- en ziektekostenverzekeringen"
            },
            "de": {
              "label": "(c) Risikobewertung und Preisgestaltung für Lebens- und Krankenversicherungen"
            },
            "es": {
              "label": "(c) Evaluación de riesgos y tarificación de seguros de vida y salud"
            }
          }
        },
        {
          "label": "(d) Emergency call evaluation/classification or dispatch prioritisation (including healthcare triage)",
          "value": "5d",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(d) Évaluation/classification des appels d'urgence ou hiérarchisation de l'envoi de secours (y compris le triage médical)"
            },
            "nl": {
              "label": "(d) Evaluatie/classificatie van noodoproepen of prioritering van hulpverlening (inclusief medische triage)"
            },
            "de": {
              "label": "(d) Bewertung/Klassifizierung von Notrufen oder Priorisierung der Notfalldisposition (einschließlich medizinischer Triage)"
            },
            "es": {
              "label": "(d) Evaluación/clasificación de llamadas de emergencia o priorización del despacho de emergencias (incluido el triaje médico)"
            }
          }
        },
        {
          "label": "None of these specific use cases",
          "value": "none",
          "next": "q_sector",
          "translations": {
            "fr": {
              "label": "Aucun de ces cas d'utilisation spécifiques"
            },
            "nl": {
              "label": "Geen van deze specifieke gebruiken"
            },
            "de": {
              "label": "Keiner dieser spezifischen Anwendungsfälle"
            },
            "es": {
              "label": "Ninguno de estos casos de uso específicos"
            }
          }
        }
      ],
      "previous": "q_high_risk_annex_iii_area",
      "translations": {
        "fr": {
          "title": "Services essentiels — Cas d'utilisation spécifique",
          "body": "Quel cas d'utilisation dans le domaine des services essentiels s'applique ?"
        },
        "nl": {
          "title": "Essentiële diensten — Specifiek gebruik",
          "body": "Welk gebruik in het kader van essentiële diensten is van toepassing?"
        },
        "de": {
          "title": "Grundlegende Dienste — Spezifischer Anwendungsfall",
          "body": "Welcher Anwendungsfall im Bereich grundlegender Dienste trifft zu?"
        },
        "es": {
          "title": "Servicios esenciales — Caso de uso específico",
          "body": "¿Qué caso de uso en el ámbito de los servicios esenciales se aplica?"
        }
      }
    },
    "q_annex_iii_law_enforcement": {
      "id": "q_annex_iii_law_enforcement",
      "type": "question",
      "stage": "S6",
      "title": "Law Enforcement — Specific Use Case",
      "body": "Which law enforcement use case applies?",
      "legal_ref": "Annex III, point 6",
      "attribute": "annex_iii_use_case",
      "options": [
        {
          "label": "(a) Assessing risk of a person becoming a victim of criminal offences",
          "value": "6a",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(a) Évaluation du risque qu'une personne devienne victime d'infractions pénales"
            },
            "nl": {
              "label": "(a) Beoordeling van het risico dat een persoon slachtoffer wordt van strafbare feiten"
            },
            "de": {
              "label": "(a) Bewertung des Risikos, dass eine Person Opfer von Straftaten wird"
            },
            "es": {
              "label": "(a) Evaluación del riesgo de que una persona sea víctima de delitos"
            }
          }
        },
        {
          "label": "(b) Polygraphs or similar tools",
          "value": "6b",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(b) Polygraphes ou outils similaires"
            },
            "nl": {
              "label": "(b) Leugendetectors of soortgelijke instrumenten"
            },
            "de": {
              "label": "(b) Polygraphen oder ähnliche Instrumente"
            },
            "es": {
              "label": "(b) Polígrafos o instrumentos similares"
            }
          }
        },
        {
          "label": "(c) Evaluating reliability of evidence",
          "value": "6c",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(c) Évaluation de la fiabilité des preuves"
            },
            "nl": {
              "label": "(c) Beoordeling van de betrouwbaarheid van bewijs"
            },
            "de": {
              "label": "(c) Bewertung der Zuverlässigkeit von Beweismitteln"
            },
            "es": {
              "label": "(c) Evaluación de la fiabilidad de las pruebas"
            }
          }
        },
        {
          "label": "(d) Assessing offending/re-offending risk (not solely based on profiling)",
          "value": "6d",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(d) Évaluation du risque de récidive (ne reposant pas uniquement sur le profilage)"
            },
            "nl": {
              "label": "(d) Beoordeling van het recidiverisico (niet uitsluitend gebaseerd op profilering)"
            },
            "de": {
              "label": "(d) Bewertung des Rückfallrisikos (nicht ausschließlich auf Profiling basierend)"
            },
            "es": {
              "label": "(d) Evaluación del riesgo de reincidencia (no basada únicamente en la elaboración de perfiles)"
            }
          }
        },
        {
          "label": "(e) Profiling in detection/investigation/prosecution of criminal offences",
          "value": "6e",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(e) Profilage dans le cadre de la détection, de l'enquête ou de la poursuite d'infractions pénales"
            },
            "nl": {
              "label": "(e) Profilering bij de opsporing, het onderzoek of de vervolging van strafbare feiten"
            },
            "de": {
              "label": "(e) Profiling bei der Aufdeckung, Ermittlung oder Verfolgung von Straftaten"
            },
            "es": {
              "label": "(e) Elaboración de perfiles en la detección, investigación o enjuiciamiento de delitos"
            }
          }
        },
        {
          "label": "None of these specific use cases",
          "value": "none",
          "next": "q_sector",
          "translations": {
            "fr": {
              "label": "Aucun de ces cas d'utilisation spécifiques"
            },
            "nl": {
              "label": "Geen van deze specifieke gebruiken"
            },
            "de": {
              "label": "Keiner dieser spezifischen Anwendungsfälle"
            },
            "es": {
              "label": "Ninguno de estos casos de uso específicos"
            }
          }
        }
      ],
      "previous": "q_high_risk_annex_iii_area",
      "translations": {
        "fr": {
          "title": "Répression — Cas d'utilisation spécifique",
          "body": "Quel cas d'utilisation dans le domaine de la répression s'applique ?"
        },
        "nl": {
          "title": "Rechtshandhaving — Specifiek gebruik",
          "body": "Welk rechtshandhavingsgerelateerd gebruik is van toepassing?"
        },
        "de": {
          "title": "Strafverfolgung — Spezifischer Anwendungsfall",
          "body": "Welcher Anwendungsfall im Bereich der Strafverfolgung trifft zu?"
        },
        "es": {
          "title": "Aplicación de la ley — Caso de uso específico",
          "body": "¿Qué caso de uso en el ámbito de la aplicación de la ley se aplica?"
        }
      }
    },
    "q_annex_iii_migration": {
      "id": "q_annex_iii_migration",
      "type": "question",
      "stage": "S6",
      "title": "Migration, Asylum & Border Control — Specific Use Case",
      "body": "Which migration/asylum/border use case applies?",
      "legal_ref": "Annex III, point 7",
      "attribute": "annex_iii_use_case",
      "options": [
        {
          "label": "(a) Polygraphs or similar tools",
          "value": "7a",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(a) Polygraphes ou outils similaires"
            },
            "nl": {
              "label": "(a) Leugendetectors of soortgelijke instrumenten"
            },
            "de": {
              "label": "(a) Polygraphen oder ähnliche Instrumente"
            },
            "es": {
              "label": "(a) Polígrafos o instrumentos similares"
            }
          }
        },
        {
          "label": "(b) Risk assessment (security, irregular migration, health) at entry",
          "value": "7b",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(b) Évaluation des risques (sécurité, migration irrégulière, santé) à l'entrée"
            },
            "nl": {
              "label": "(b) Risicobeoordeling (veiligheid, irreguliere migratie, gezondheid) bij binnenkomst"
            },
            "de": {
              "label": "(b) Risikobewertung (Sicherheit, irreguläre Migration, Gesundheit) bei der Einreise"
            },
            "es": {
              "label": "(b) Evaluación de riesgos (seguridad, migración irregular, salud) en la entrada"
            }
          }
        },
        {
          "label": "(c) Examining asylum/visa/residence applications and eligibility",
          "value": "7c",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(c) Examen des demandes d'asile, de visa et de permis de séjour et éligibilité"
            },
            "nl": {
              "label": "(c) Behandeling van asiel-, visum- en verblijfsvergunningaanvragen en beoordeling van aanspraken"
            },
            "de": {
              "label": "(c) Prüfung von Asyl-/Visum-/Aufenthaltsanträgen und Anspruchsberechtigung"
            },
            "es": {
              "label": "(c) Examen de solicitudes de asilo, visado y permisos de residencia y elegibilidad"
            }
          }
        },
        {
          "label": "(d) Detecting/recognising/identifying persons (except travel document verification)",
          "value": "7d",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(d) Détection/reconnaissance/identification de personnes (sauf vérification de documents de voyage)"
            },
            "nl": {
              "label": "(d) Detectie/herkenning/identificatie van personen (behalve verificatie van reisdocumenten)"
            },
            "de": {
              "label": "(d) Erkennung/Identifizierung von Personen (ausgenommen Reisedokumentenüberprüfung)"
            },
            "es": {
              "label": "(d) Detección/reconocimiento/identificación de personas (excepto verificación de documentos de viaje)"
            }
          }
        },
        {
          "label": "None of these specific use cases",
          "value": "none",
          "next": "q_sector",
          "translations": {
            "fr": {
              "label": "Aucun de ces cas d'utilisation spécifiques"
            },
            "nl": {
              "label": "Geen van deze specifieke gebruiken"
            },
            "de": {
              "label": "Keiner dieser spezifischen Anwendungsfälle"
            },
            "es": {
              "label": "Ninguno de estos casos de uso específicos"
            }
          }
        }
      ],
      "previous": "q_high_risk_annex_iii_area",
      "translations": {
        "fr": {
          "title": "Migration, asile et contrôle aux frontières — Cas d'utilisation spécifique",
          "body": "Quel cas d'utilisation dans le domaine de la migration/l'asile/les frontières s'applique ?"
        },
        "nl": {
          "title": "Migratie, asiel en grenscontrole — Specifiek gebruik",
          "body": "Welk migratie-/asiel-/grensgerelateerd gebruik is van toepassing?"
        },
        "de": {
          "title": "Migration, Asyl und Grenzkontrolle — Spezifischer Anwendungsfall",
          "body": "Welcher Anwendungsfall im Bereich Migration/Asyl/Grenzkontrolle trifft zu?"
        },
        "es": {
          "title": "Migración, asilo y control fronterizo — Caso de uso específico",
          "body": "¿Qué caso de uso en el ámbito de la migración/asilo/fronteras se aplica?"
        }
      }
    },
    "q_annex_iii_justice": {
      "id": "q_annex_iii_justice",
      "type": "question",
      "stage": "S6",
      "title": "Justice & Democracy — Specific Use Case",
      "body": "Which justice/democracy use case applies?",
      "legal_ref": "Annex III, point 8",
      "attribute": "annex_iii_use_case",
      "options": [
        {
          "label": "(a) Assisting judicial authorities in researching/interpreting/applying the law or alternative dispute resolution",
          "value": "8a",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(a) Assistance aux autorités judiciaires dans la recherche, l'interprétation et l'application du droit ou le règlement extrajudiciaire des litiges"
            },
            "nl": {
              "label": "(a) Bijstand aan rechterlijke instanties bij het onderzoeken, interpreteren en toepassen van het recht of alternatieve geschillenbeslechting"
            },
            "de": {
              "label": "(a) Unterstützung von Justizbehörden bei der Erforschung, Auslegung und Anwendung des Rechts oder der alternativen Streitbeilegung"
            },
            "es": {
              "label": "(a) Asistencia a las autoridades judiciales en la investigación, interpretación y aplicación del derecho o la resolución alternativa de litigios"
            }
          }
        },
        {
          "label": "(b) Influencing election/referendum outcomes or voting behaviour (NOT admin/logistic campaign tools)",
          "value": "8b",
          "next": "q_art6_3_profiling",
          "translations": {
            "fr": {
              "label": "(b) Influence sur les résultats des élections/référendums ou le comportement de vote (et NON les outils administratifs/logistiques de campagne)"
            },
            "nl": {
              "label": "(b) Beïnvloeding van verkiezings-/referendumresultaten of stemgedrag (NIET administratieve/logistieke campagnemiddelen)"
            },
            "de": {
              "label": "(b) Beeinflussung von Wahl-/Abstimmungsergebnissen oder des Wahlverhaltens (NICHT administrative/logistische Kampagnenwerkzeuge)"
            },
            "es": {
              "label": "(b) Influencia en los resultados electorales/referendarios o en el comportamiento de voto (NO herramientas administrativas/logísticas de campaña)"
            }
          }
        },
        {
          "label": "None of these specific use cases",
          "value": "none",
          "next": "q_sector",
          "translations": {
            "fr": {
              "label": "Aucun de ces cas d'utilisation spécifiques"
            },
            "nl": {
              "label": "Geen van deze specifieke gebruiken"
            },
            "de": {
              "label": "Keiner dieser spezifischen Anwendungsfälle"
            },
            "es": {
              "label": "Ninguno de estos casos de uso específicos"
            }
          }
        }
      ],
      "previous": "q_high_risk_annex_iii_area",
      "translations": {
        "fr": {
          "title": "Justice et démocratie — Cas d'utilisation spécifique",
          "body": "Quel cas d'utilisation dans le domaine de la justice/la démocratie s'applique ?"
        },
        "nl": {
          "title": "Justitie en democratie — Specifiek gebruik",
          "body": "Welk justitie-/democratiegerelateerd gebruik is van toepassing?"
        },
        "de": {
          "title": "Justiz und Demokratie — Spezifischer Anwendungsfall",
          "body": "Welcher Anwendungsfall im Bereich Justiz/Demokratie trifft zu?"
        },
        "es": {
          "title": "Justicia y democracia — Caso de uso específico",
          "body": "¿Qué caso de uso en el ámbito de la justicia/democracia se aplica?"
        }
      }
    },
    "q_art6_3_profiling": {
      "id": "q_art6_3_profiling",
      "type": "question",
      "stage": "S7",
      "title": "Art. 6(3) Filter: Profiling Check",
      "body": "Does your AI system perform **profiling of natural persons**?\n\nProfiling means any form of automated processing of personal data to evaluate personal aspects relating to a natural person (e.g., work performance, economic situation, health, personal preferences, interests, reliability, behaviour, location, movements).",
      "legal_ref": "Art. 6(3), third subparagraph",
      "note": "If your system performs profiling, it is ALWAYS considered high-risk — the Art. 6(3) filter cannot be applied.",
      "attribute": "performs_profiling",
      "options": [
        {
          "label": "Yes — the system performs profiling of natural persons",
          "value": true,
          "next": "q_sector",
          "set": {
            "risk_category": "high_risk_annex_iii",
            "art6_3_filter_applied": false
          },
          "translations": {
            "fr": {
              "label": "Oui — le système effectue du profilage de personnes physiques"
            },
            "nl": {
              "label": "Ja — het systeem voert profilering van natuurlijke personen uit"
            },
            "de": {
              "label": "Ja — das System führt Profiling natürlicher Personen durch"
            },
            "es": {
              "label": "Sí — el sistema realiza elaboración de perfiles de personas físicas"
            }
          }
        },
        {
          "label": "No — it does not perform profiling",
          "value": false,
          "next": "q_art6_3_filter",
          "translations": {
            "fr": {
              "label": "Non — il n'effectue pas de profilage"
            },
            "nl": {
              "label": "Nee — het voert geen profilering uit"
            },
            "de": {
              "label": "Nein — es führt kein Profiling durch"
            },
            "es": {
              "label": "No — no realiza elaboración de perfiles"
            }
          }
        }
      ],
      "previous": [
        "q_annex_iii_biometrics",
        "q_annex_iii_critical_infra",
        "q_annex_iii_education",
        "q_annex_iii_employment",
        "q_annex_iii_essential_services",
        "q_annex_iii_law_enforcement",
        "q_annex_iii_migration",
        "q_annex_iii_justice"
      ],
      "translations": {
        "fr": {
          "title": "Filtre de l'art. 6(3) : vérification du profilage",
          "body": "Votre système d'IA effectue-t-il du **profilage de personnes physiques** ?\n\nLe profilage désigne toute forme de traitement automatisé de données à caractère personnel visant à évaluer des aspects personnels relatifs à une personne physique (par ex. : performances professionnelles, situation économique, santé, préférences personnelles, intérêts, fiabilité, comportement, localisation, déplacements).",
          "note": "Si votre système effectue du profilage, il est TOUJOURS considéré comme à haut risque — le filtre de l'art. 6(3) ne peut pas être appliqué."
        },
        "nl": {
          "title": "Art. 6(3)-filter: profilering",
          "body": "Voert uw AI-systeem **profilering van natuurlijke personen** uit?\n\nProfilering betekent elke vorm van geautomatiseerde verwerking van persoonsgegevens om persoonlijke aspecten van een natuurlijk persoon te beoordelen (bv. werkprestaties, economische situatie, gezondheid, persoonlijke voorkeuren, interesses, betrouwbaarheid, gedrag, locatie, verplaatsingen).",
          "note": "Als uw systeem profilering uitvoert, wordt het ALTIJD als hoog risico beschouwd — het art. 6(3)-filter kan niet worden toegepast."
        },
        "de": {
          "title": "Art. 6(3)-Filter: Profiling-Prüfung",
          "body": "Führt Ihr KI-System **Profiling natürlicher Personen** durch?\n\nProfiling bezeichnet jede Form der automatisierten Verarbeitung personenbezogener Daten zur Bewertung persönlicher Aspekte einer natürlichen Person (z. B. Arbeitsleistung, wirtschaftliche Lage, Gesundheit, persönliche Vorlieben, Interessen, Zuverlässigkeit, Verhalten, Aufenthaltsort, Bewegungen).",
          "note": "Wenn Ihr System Profiling durchführt, gilt es IMMER als Hochrisiko — der Art. 6(3)-Filter kann nicht angewendet werden."
        },
        "es": {
          "title": "Filtro del art. 6(3): verificación de elaboración de perfiles",
          "body": "¿Su sistema de IA realiza **elaboración de perfiles de personas físicas**?\n\nLa elaboración de perfiles significa cualquier forma de tratamiento automatizado de datos personales destinada a evaluar aspectos personales relativos a una persona física (por ej.: rendimiento profesional, situación económica, salud, preferencias personales, intereses, fiabilidad, comportamiento, ubicación, desplazamientos).",
          "note": "Si su sistema realiza elaboración de perfiles, SIEMPRE se considera de alto riesgo — el filtro del art. 6(3) no puede aplicarse."
        }
      }
    },
    "q_art6_3_filter": {
      "id": "q_art6_3_filter",
      "type": "question",
      "stage": "S7",
      "title": "Art. 6(3) Filter: Significant Risk Assessment",
      "body": "Even though your system falls under Annex III, it may NOT be considered high-risk if it does not pose a significant risk of harm. Does your AI system meet **any** of the following conditions?\n\n(a) It performs a **narrow procedural task**\n(b) It **improves the result** of a previously completed human activity\n(c) It **detects decision-making patterns** or deviations, without replacing or influencing human assessment (with proper human review)\n(d) It performs a **preparatory task** to a relevant assessment",
      "legal_ref": "Art. 6(3), second subparagraph",
      "note": "If you apply this filter, you must document the assessment before placing the system on the market and register in the EU database (Art. 6(4), Art. 49(2)).\n\nCAUTION: For systems in areas 1 (biometrics), 6 (law enforcement), 7 (migration), and 8 (justice/democracy), applying this filter requires particular care. The FRA (2025) has noted that the 'narrow procedural task' exception can still pose fundamental rights risks in these sensitive areas. Consider consulting an expert.",
      "attribute": "art6_3_filter_applied",
      "options": [
        {
          "label": "Yes — at least one condition (a)-(d) is met, and the system does NOT pose a significant risk",
          "value": true,
          "set": {
            "risk_category": "minimal",
            "art6_3_filter_applied": true
          },
          "next": "q_sector",
          "flag": "art6_3_documentation_required",
          "translations": {
            "fr": {
              "label": "Oui — au moins une condition (a)-(d) est remplie et le système ne présente PAS de risque significatif"
            },
            "nl": {
              "label": "Ja — minstens één voorwaarde (a)-(d) is vervuld en het systeem levert GEEN significant risico op"
            },
            "de": {
              "label": "Ja — mindestens eine Bedingung (a)-(d) ist erfüllt und das System stellt KEIN erhebliches Risiko dar"
            },
            "es": {
              "label": "Sí — se cumple al menos una condición (a)-(d) y el sistema NO plantea un riesgo significativo"
            }
          }
        },
        {
          "label": "No — none of these conditions apply, or the system still poses a significant risk",
          "value": false,
          "set": {
            "risk_category": "high_risk_annex_iii",
            "art6_3_filter_applied": false
          },
          "next": "q_sector",
          "translations": {
            "fr": {
              "label": "Non — aucune de ces conditions ne s'applique, ou le système présente toujours un risque significatif"
            },
            "nl": {
              "label": "Nee — geen van deze voorwaarden is van toepassing, of het systeem levert nog steeds een significant risico op"
            },
            "de": {
              "label": "Nein — keine dieser Bedingungen trifft zu, oder das System stellt weiterhin ein erhebliches Risiko dar"
            },
            "es": {
              "label": "No — ninguna de estas condiciones se aplica, o el sistema aún plantea un riesgo significativo"
            }
          }
        },
        {
          "label": "I'm not sure",
          "value": "unsure",
          "flag": "consult_expert",
          "set": {
            "risk_category": "high_risk_annex_iii",
            "art6_3_filter_applied": false
          },
          "next": "q_sector",
          "translations": {
            "fr": {
              "label": "Je ne suis pas sûr"
            },
            "nl": {
              "label": "Ik weet het niet zeker"
            },
            "de": {
              "label": "Ich bin nicht sicher"
            },
            "es": {
              "label": "No estoy seguro/a"
            }
          }
        }
      ],
      "previous": "q_art6_3_profiling",
      "translations": {
        "fr": {
          "title": "Filtre de l'art. 6(3) : évaluation du risque significatif",
          "body": "Bien que votre système relève de l'Annexe III, il peut NE PAS être considéré comme à haut risque s'il ne présente pas de risque significatif de préjudice. Votre système d'IA remplit-il **l'une** des conditions suivantes ?\n\n(a) Il effectue une **tâche procédurale étroite**\n(b) Il **améliore le résultat** d'une activité humaine préalablement réalisée\n(c) Il **détecte des schémas décisionnels** ou des écarts, sans remplacer ni influencer l'appréciation humaine (avec un examen humain approprié)\n(d) Il effectue une **tâche préparatoire** à une évaluation pertinente",
          "note": "Si vous appliquez ce filtre, vous devez documenter l'évaluation avant la mise sur le marché du système et l'enregistrer dans la base de données de l'UE (art. 6(4), art. 49(2)).\n\nATTENTION : Pour les systèmes dans les domaines 1 (biométrie), 6 (répression), 7 (migration) et 8 (justice/démocratie), l'application de ce filtre requiert une prudence particulière. L'Agence des droits fondamentaux (2025) a noté que l'exception de « tâche procédurale étroite » peut encore poser des risques pour les droits fondamentaux dans ces domaines sensibles. Envisagez de consulter un expert."
        },
        "nl": {
          "title": "Art. 6(3)-filter: beoordeling van significant risico",
          "body": "Hoewel uw systeem onder Bijlage III valt, kan het NIET als hoog risico worden beschouwd als het geen significant risico op schade oplevert. Voldoet uw AI-systeem aan **een** van de volgende voorwaarden?\n\n(a) Het voert een **beperkte procedurele taak** uit\n(b) Het **verbetert het resultaat** van een eerder voltooide menselijke activiteit\n(c) Het **detecteert besluitvormingspatronen** of afwijkingen, zonder menselijke beoordeling te vervangen of te beïnvloeden (met passend menselijk toezicht)\n(d) Het voert een **voorbereidende taak** uit voor een relevante beoordeling",
          "note": "Als u dit filter toepast, moet u de beoordeling documenteren voordat het systeem op de markt wordt gebracht en het registreren in de EU-database (art. 6(4), art. 49(2)).\n\nLET OP: Voor systemen in de gebieden 1 (biometrie), 6 (rechtshandhaving), 7 (migratie) en 8 (justitie/democratie) vereist het toepassen van dit filter bijzondere zorgvuldigheid. Het Bureau voor de grondrechten (2025) heeft opgemerkt dat de uitzondering voor ‘beperkte procedurele taken’ nog steeds risico’s voor de grondrechten kan opleveren in deze gevoelige gebieden. Overweeg een expert te raadplegen."
        },
        "de": {
          "title": "Art. 6(3)-Filter: Bewertung des erheblichen Risikos",
          "body": "Obwohl Ihr System unter Anhang III fällt, kann es NICHT als Hochrisiko eingestuft werden, wenn es kein erhebliches Schadensrisiko darstellt. Erfüllt Ihr KI-System **eine** der folgenden Bedingungen?\n\n(a) Es führt eine **eng begrenzte Verfahrensaufgabe** durch\n(b) Es **verbessert das Ergebnis** einer zuvor abgeschlossenen menschlichen Tätigkeit\n(c) Es **erkennt Entscheidungsmuster** oder Abweichungen, ohne die menschliche Bewertung zu ersetzen oder zu beeinflussen (mit angemessener menschlicher Überprüfung)\n(d) Es führt eine **vorbereitende Aufgabe** für eine relevante Bewertung durch",
          "note": "Wenn Sie diesen Filter anwenden, müssen Sie die Bewertung vor dem Inverkehrbringen des Systems dokumentieren und es in der EU-Datenbank registrieren (Art. 6(4), Art. 49(2)).\n\nACHTUNG: Für Systeme in den Bereichen 1 (Biometrie), 6 (Strafverfolgung), 7 (Migration) und 8 (Justiz/Demokratie) erfordert die Anwendung dieses Filters besondere Sorgfalt. Die Grundrechteagentur (2025) hat darauf hingewiesen, dass die Ausnahme für „eng begrenzte Verfahrensaufgaben“ in diesen sensiblen Bereichen weiterhin Grundrechtsrisiken bergen kann. Ziehen Sie in Betracht, einen Experten zu konsultieren."
        },
        "es": {
          "title": "Filtro del art. 6(3): evaluación del riesgo significativo",
          "body": "Aunque su sistema está incluido en el Anexo III, puede NO considerarse de alto riesgo si no plantea un riesgo significativo de daño. ¿Su sistema de IA cumple **alguna** de las siguientes condiciones?\n\n(a) Realiza una **tarea procedimental limitada**\n(b) **Mejora el resultado** de una actividad humana previamente completada\n(c) **Detecta patrones de toma de decisiones** o desviaciones, sin sustituir ni influir en la evaluación humana (con revisión humana adecuada)\n(d) Realiza una **tarea preparatoria** para una evaluación pertinente",
          "note": "Si aplica este filtro, debe documentar la evaluación antes de comercializar el sistema y registrarlo en la base de datos de la UE (art. 6(4), art. 49(2)).\n\nPRECAUCIÓN: Para sistemas en las áreas 1 (biometría), 6 (aplicación de la ley), 7 (migración) y 8 (justicia/democracia), la aplicación de este filtro requiere especial cautela. La Agencia de los Derechos Fundamentales (2025) ha señalado que la excepción de «tarea procedimental limitada» puede seguir planteando riesgos para los derechos fundamentales en estos ámbitos sensibles. Considere consultar a un experto."
        }
      }
    },
    "q_transparency_chatbot": {
      "id": "q_transparency_chatbot",
      "type": "question",
      "stage": "S9",
      "title": "Transparency: AI Interaction",
      "body": "Is your AI system intended to **interact directly with natural persons** (e.g., chatbot, virtual assistant, AI customer service)?",
      "legal_ref": "Art. 50(1)",
      "note": "Exception: systems authorised by law for detecting/preventing/investigating/prosecuting criminal offences.",
      "options": [
        {
          "label": "Yes",
          "value": "yes",
          "set_array": {
            "transparency_obligations": "chatbot_disclosure"
          },
          "next": "q_transparency_synthetic",
          "translations": {
            "fr": {
              "label": "Oui"
            },
            "nl": {
              "label": "Ja"
            },
            "de": {
              "label": "Ja"
            },
            "es": {
              "label": "Sí"
            }
          }
        },
        {
          "label": "No",
          "value": "no",
          "next": "q_transparency_synthetic",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_sector",
      "translations": {
        "fr": {
          "title": "Transparence : interaction avec l'IA",
          "body": "Votre système d'IA est-il destiné à **interagir directement avec des personnes physiques** (par ex. : chatbot, assistant virtuel, service client IA) ?",
          "note": "Exception : systèmes autorisés par la loi pour la détection, la prévention, les enquêtes ou la poursuite d'infractions pénales."
        },
        "nl": {
          "title": "Transparantie: AI-interactie",
          "body": "Is uw AI-systeem bedoeld om **rechtstreeks met natuurlijke personen te interageren** (bv. chatbot, virtuele assistent, AI-klantenservice)?",
          "note": "Uitzondering: systemen die wettelijk zijn toegestaan voor het opsporen, voorkomen, onderzoeken of vervolgen van strafbare feiten."
        },
        "de": {
          "title": "Transparenz: KI-Interaktion",
          "body": "Ist Ihr KI-System dazu bestimmt, **direkt mit natürlichen Personen zu interagieren** (z. B. Chatbot, virtueller Assistent, KI-Kundenservice)?",
          "note": "Ausnahme: Systeme, die gesetzlich für die Aufdeckung, Verhütung, Ermittlung oder Verfolgung von Straftaten zugelassen sind."
        },
        "es": {
          "title": "Transparencia: interacción con la IA",
          "body": "¿Su sistema de IA está destinado a **interactuar directamente con personas físicas** (por ej.: chatbot, asistente virtual, servicio de atención al cliente con IA)?",
          "note": "Excepción: sistemas autorizados por ley para la detección, prevención, investigación o enjuiciamiento de delitos."
        }
      }
    },
    "q_transparency_synthetic": {
      "id": "q_transparency_synthetic",
      "type": "question",
      "stage": "S9",
      "title": "Transparency: Synthetic Content Generation",
      "body": "Does your AI system generate **synthetic audio, image, video, or text content**?",
      "legal_ref": "Art. 50(2)",
      "note": "Providers must ensure outputs are marked in machine-readable format as artificially generated. Exception: assistive function for standard editing that doesn't substantially alter input.",
      "options": [
        {
          "label": "Yes",
          "value": "yes",
          "set_array": {
            "transparency_obligations": "synthetic_content_marking"
          },
          "next": "q_transparency_deepfake",
          "translations": {
            "fr": {
              "label": "Oui"
            },
            "nl": {
              "label": "Ja"
            },
            "de": {
              "label": "Ja"
            },
            "es": {
              "label": "Sí"
            }
          }
        },
        {
          "label": "No",
          "value": "no",
          "next": "q_transparency_deepfake",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_transparency_chatbot",
      "translations": {
        "fr": {
          "title": "Transparence : génération de contenu synthétique",
          "body": "Votre système d'IA génère-t-il du **contenu synthétique audio, image, vidéo ou texte** ?",
          "note": "Les fournisseurs doivent veiller à ce que les résultats soient marqués dans un format lisible par machine comme étant générés artificiellement. Exception : fonction d'assistance pour l'édition standard qui ne modifie pas substantiellement les données d'entrée."
        },
        "nl": {
          "title": "Transparantie: generatie van synthetische inhoud",
          "body": "Genereert uw AI-systeem **synthetische audio-, beeld-, video- of tekstinhoud**?",
          "note": "Aanbieders moeten ervoor zorgen dat de output machineleesbaar wordt gemarkeerd als kunstmatig gegenereerd. Uitzondering: ondersteunende functie voor standaardbewerking die de invoer niet wezenlijk wijzigt."
        },
        "de": {
          "title": "Transparenz: Erzeugung synthetischer Inhalte",
          "body": "Erzeugt Ihr KI-System **synthetische Audio-, Bild-, Video- oder Textinhalte**?",
          "note": "Anbieter müssen sicherstellen, dass die Ergebnisse maschinenlesbar als künstlich erzeugt gekennzeichnet werden. Ausnahme: Assistenzfunktion für Standardbearbeitung, die die Eingabe nicht wesentlich verändert."
        },
        "es": {
          "title": "Transparencia: generación de contenido sintético",
          "body": "¿Su sistema de IA genera **contenido sintético de audio, imagen, vídeo o texto**?",
          "note": "Los proveedores deben garantizar que los resultados estén marcados en un formato legible por máquina como generados artificialmente. Excepción: función de asistencia para la edición estándar que no altera sustancialmente los datos de entrada."
        }
      }
    },
    "q_transparency_deepfake": {
      "id": "q_transparency_deepfake",
      "type": "question",
      "stage": "S9",
      "title": "Transparency: Deep Fakes",
      "body": "Does your AI system generate or manipulate image, audio, or video content that constitutes a **deep fake** (content resembling existing persons/objects/places that would falsely appear authentic)?",
      "legal_ref": "Art. 50(4)",
      "note": "Deployers must disclose that content is AI-generated/manipulated. Limited exception for evidently artistic, creative, satirical, or fictional works.",
      "options": [
        {
          "label": "Yes",
          "value": "yes",
          "set_array": {
            "transparency_obligations": "deepfake_disclosure"
          },
          "next": "q_transparency_ai_text",
          "translations": {
            "fr": {
              "label": "Oui"
            },
            "nl": {
              "label": "Ja"
            },
            "de": {
              "label": "Ja"
            },
            "es": {
              "label": "Sí"
            }
          }
        },
        {
          "label": "No",
          "value": "no",
          "next": "q_transparency_ai_text",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_transparency_synthetic",
      "translations": {
        "fr": {
          "title": "Transparence : hypertrucages (deep fakes)",
          "body": "Votre système d'IA génère-t-il ou manipule-t-il du contenu image, audio ou vidéo constituant un **hypertrucage** (contenu ressemblant à des personnes, objets ou lieux existants qui semblerait faussement authentique) ?",
          "note": "Les déployeurs doivent divulguer que le contenu est généré/manipulé par l'IA. Exception limitée pour les œuvres manifestement artistiques, créatives, satiriques ou fictives."
        },
        "nl": {
          "title": "Transparantie: deepfakes",
          "body": "Genereert of manipuleert uw AI-systeem beeld-, audio- of video-inhoud die een **deepfake** vormt (inhoud die bestaande personen, objecten of plaatsen nabootst en valselijk authentiek zou lijken)?",
          "note": "Gebruiksverantwoordelijken moeten bekendmaken dat de inhoud door AI is gegenereerd/gemanipuleerd. Beperkte uitzondering voor duidelijk artistieke, creatieve, satirische of fictieve werken."
        },
        "de": {
          "title": "Transparenz: Deepfakes",
          "body": "Erzeugt oder manipuliert Ihr KI-System Bild-, Audio- oder Videoinhalte, die einen **Deepfake** darstellen (Inhalte, die bestehenden Personen, Gegenständen oder Orten ähneln und fälschlicherweise authentisch wirken würden)?",
          "note": "Betreiber müssen offenlegen, dass der Inhalt KI-generiert/-manipuliert ist. Begrenzte Ausnahme für offensichtlich künstlerische, kreative, satirische oder fiktive Werke."
        },
        "es": {
          "title": "Transparencia: ultrafalsificaciones (deep fakes)",
          "body": "¿Su sistema de IA genera o manipula contenido de imagen, audio o vídeo que constituye una **ultrafalsificación** (contenido que se asemeja a personas, objetos o lugares existentes y que parecería falsamente auténtico)?",
          "note": "Los responsables del despliegue deben revelar que el contenido es generado/manipulado por IA. Excepción limitada para obras manifiestamente artísticas, creativas, satíricas o ficticias."
        }
      }
    },
    "q_transparency_ai_text": {
      "id": "q_transparency_ai_text",
      "type": "question",
      "stage": "S9",
      "title": "Transparency: AI-Generated Text for Public Interest",
      "body": "Does your AI system generate or manipulate **text** that is published with the purpose of informing the public on **matters of public interest**?",
      "legal_ref": "Art. 50(4), second subparagraph",
      "note": "Exception: content that has undergone human review or editorial control where a person holds editorial responsibility.",
      "options": [
        {
          "label": "Yes — and no human editorial review/control",
          "value": "yes",
          "set_array": {
            "transparency_obligations": "ai_generated_text_disclosure"
          },
          "next": "q_transparency_emotion_biometric",
          "translations": {
            "fr": {
              "label": "Oui — et sans révision/contrôle éditorial humain"
            },
            "nl": {
              "label": "Ja — en zonder menselijke redactionele controle"
            },
            "de": {
              "label": "Ja — und ohne menschliche redaktionelle Überprüfung/Kontrolle"
            },
            "es": {
              "label": "Sí — y sin revisión/control editorial humano"
            }
          }
        },
        {
          "label": "Yes — but with human editorial review/control",
          "value": "exempt",
          "next": "q_transparency_emotion_biometric",
          "translations": {
            "fr": {
              "label": "Oui — mais avec révision/contrôle éditorial humain"
            },
            "nl": {
              "label": "Ja — maar met menselijke redactionele controle"
            },
            "de": {
              "label": "Ja — aber mit menschlicher redaktioneller Überprüfung/Kontrolle"
            },
            "es": {
              "label": "Sí — pero con revisión/control editorial humano"
            }
          }
        },
        {
          "label": "No",
          "value": "no",
          "next": "q_transparency_emotion_biometric",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_transparency_deepfake",
      "translations": {
        "fr": {
          "title": "Transparence : texte généré par l'IA pour l'intérêt public",
          "body": "Votre système d'IA génère-t-il ou manipule-t-il du **texte** publié dans le but d'informer le public sur des **questions d'intérêt public** ?",
          "note": "Exception : contenu ayant fait l'objet d'un examen ou d'un contrôle éditorial humain lorsqu'une personne physique ou morale assume la responsabilité éditoriale."
        },
        "nl": {
          "title": "Transparantie: door AI gegenereerde tekst voor het algemeen belang",
          "body": "Genereert of manipuleert uw AI-systeem **tekst** die wordt gepubliceerd met het doel het publiek te informeren over **aangelegenheden van algemeen belang**?",
          "note": "Uitzondering: inhoud die menselijke redactionele controle heeft ondergaan, waarbij een natuurlijke of rechtspersoon de redactionele verantwoordelijkheid draagt."
        },
        "de": {
          "title": "Transparenz: KI-generierter Text im öffentlichen Interesse",
          "body": "Erzeugt oder manipuliert Ihr KI-System **Text**, der mit dem Zweck veröffentlicht wird, die Öffentlichkeit über **Angelegenheiten von öffentlichem Interesse** zu informieren?",
          "note": "Ausnahme: Inhalte, die einer menschlichen redaktionellen Überprüfung oder Kontrolle unterzogen wurden, wobei eine natürliche oder juristische Person die redaktionelle Verantwortung trägt."
        },
        "es": {
          "title": "Transparencia: texto generado por IA para el interés público",
          "body": "¿Su sistema de IA genera o manipula **texto** publicado con el fin de informar al público sobre **asuntos de interés público**?",
          "note": "Excepción: contenido sometido a revisión o control editorial humano cuando una persona física o jurídica asume la responsabilidad editorial."
        }
      }
    },
    "q_transparency_emotion_biometric": {
      "id": "q_transparency_emotion_biometric",
      "type": "question",
      "stage": "S9",
      "title": "Transparency: Emotion Recognition / Biometric Categorisation",
      "body": "Is your AI system an **emotion recognition system** or a **biometric categorisation system**?\n\nIf YES: **deployers** must inform the natural persons exposed to the system. This obligation falls on whoever uses/deploys the system, not the provider.",
      "legal_ref": "Art. 50(3)",
      "note": "Deployers must inform the natural persons exposed to the system. Exception: systems permitted by law for detecting/preventing/investigating criminal offences.",
      "options": [
        {
          "label": "Yes",
          "value": "yes",
          "set_array": {
            "transparency_obligations": "emotion_biometric_disclosure"
          },
          "next": "q_open_source_check",
          "translations": {
            "fr": {
              "label": "Oui"
            },
            "nl": {
              "label": "Ja"
            },
            "de": {
              "label": "Ja"
            },
            "es": {
              "label": "Sí"
            }
          }
        },
        {
          "label": "No",
          "value": "no",
          "next": "q_open_source_check",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_transparency_ai_text",
      "translations": {
        "fr": {
          "title": "Transparence : reconnaissance des émotions / catégorisation biométrique",
          "body": "Votre système d'IA est-il un **système de reconnaissance des émotions** ou un **système de catégorisation biométrique** ?\n\nSi OUI : les **déployeurs** doivent informer les personnes physiques exposées au système. Cette obligation incombe à celui qui utilise/déploie le système, et non au fournisseur.",
          "note": "Les déployeurs doivent informer les personnes physiques exposées au système. Exception : systèmes autorisés par la loi pour la détection, la prévention, les enquêtes ou la poursuite d'infractions pénales."
        },
        "nl": {
          "title": "Transparantie: emotieherkenning / biometrische categorisering",
          "body": "Is uw AI-systeem een **emotieherkenningssysteem** of een **biometrisch categoriseringssysteem**?\n\nIndien JA: **gebruiksverantwoordelijken** moeten de natuurlijke personen die aan het systeem worden blootgesteld, informeren. Deze verplichting rust op degene die het systeem gebruikt/inzet, niet op de aanbieder.",
          "note": "Gebruiksverantwoordelijken moeten de aan het systeem blootgestelde natuurlijke personen informeren. Uitzondering: systemen die wettelijk zijn toegestaan voor het opsporen, voorkomen, onderzoeken of vervolgen van strafbare feiten."
        },
        "de": {
          "title": "Transparenz: Emotionserkennung / biometrische Kategorisierung",
          "body": "Ist Ihr KI-System ein **Emotionserkennungssystem** oder ein **biometrisches Kategorisierungssystem**?\n\nWenn JA: **Betreiber** müssen die natürlichen Personen, die dem System ausgesetzt sind, informieren. Diese Pflicht obliegt demjenigen, der das System nutzt/einsetzt, nicht dem Anbieter.",
          "note": "Betreiber müssen die dem System ausgesetzten natürlichen Personen informieren. Ausnahme: Systeme, die gesetzlich für die Aufdeckung, Verhütung, Ermittlung oder Verfolgung von Straftaten zugelassen sind."
        },
        "es": {
          "title": "Transparencia: reconocimiento de emociones / categorización biométrica",
          "body": "¿Su sistema de IA es un **sistema de reconocimiento de emociones** o un **sistema de categorización biométrica**?\n\nEn caso afirmativo: los **responsables del despliegue** deben informar a las personas físicas expuestas al sistema. Esta obligación recae sobre quien utiliza/despliega el sistema, no sobre el proveedor.",
          "note": "Los responsables del despliegue deben informar a las personas físicas expuestas al sistema. Excepción: sistemas autorizados por ley para la detección, prevención, investigación o enjuiciamiento de delitos."
        }
      }
    },
    "q_open_source_check": {
      "id": "q_open_source_check",
      "type": "question",
      "stage": "S10",
      "title": "Open-Source Exemption",
      "body": "Is your AI system released under a **free and open-source licence**?",
      "legal_ref": "Art. 2(12)",
      "note": "Open-source AI systems are exempt from the AI Act UNLESS they are: (1) high-risk, (2) prohibited under Art. 5, or (3) subject to Art. 50 transparency obligations. If any of these apply, the AI Act requirements still apply despite the open-source licence.",
      "options": [
        {
          "label": "Yes — open-source",
          "value": "yes",
          "next": "q_open_source_high_risk_check",
          "translations": {
            "fr": {
              "label": "Oui — open source"
            },
            "nl": {
              "label": "Ja — open source"
            },
            "de": {
              "label": "Ja — Open Source"
            },
            "es": {
              "label": "Sí — código abierto"
            }
          }
        },
        {
          "label": "No — not open-source",
          "value": "no",
          "next": "q_role",
          "translations": {
            "fr": {
              "label": "Non — pas open source"
            },
            "nl": {
              "label": "Nee — geen open source"
            },
            "de": {
              "label": "Nein — kein Open Source"
            },
            "es": {
              "label": "No — no es código abierto"
            }
          }
        }
      ],
      "previous": "q_transparency_emotion_biometric",
      "translations": {
        "fr": {
          "title": "Exemption open source",
          "body": "Votre système d'IA est-il diffusé sous une **licence libre et open source** ?",
          "note": "Les systèmes d'IA open source sont exemptés du Règlement IA SAUF s'ils sont : (1) à haut risque, (2) interdits au titre de l'art. 5, ou (3) soumis aux obligations de transparence de l'art. 50. Si l'une de ces conditions s'applique, les exigences du Règlement IA restent applicables malgré la licence open source."
        },
        "nl": {
          "title": "Open-source-vrijstelling",
          "body": "Wordt uw AI-systeem uitgebracht onder een **vrije en open-sourcelicentie**?",
          "note": "Open-source AI-systemen zijn vrijgesteld van de AI-verordening TENZIJ ze: (1) hoog risico zijn, (2) verboden zijn onder art. 5, of (3) onderworpen zijn aan transparantieverplichtingen van art. 50. Als een van deze voorwaarden van toepassing is, gelden de vereisten van de AI-verordening ondanks de open-sourcelicentie."
        },
        "de": {
          "title": "Open-Source-Ausnahme",
          "body": "Wird Ihr KI-System unter einer **freien und quelloffenen Lizenz** veröffentlicht?",
          "note": "Quelloffene KI-Systeme sind von der KI-Verordnung ausgenommen, ES SEI DENN, sie sind: (1) Hochrisiko-Systeme, (2) nach Art. 5 verboten oder (3) Transparenzpflichten nach Art. 50 unterworfen. Wenn eine dieser Bedingungen zutrifft, gelten die Anforderungen der KI-Verordnung trotz der Open-Source-Lizenz."
        },
        "es": {
          "title": "Exención de código abierto",
          "body": "¿Su sistema de IA se publica bajo una **licencia libre y de código abierto**?",
          "note": "Los sistemas de IA de código abierto están exentos del Reglamento de IA SALVO que sean: (1) de alto riesgo, (2) estén prohibidos en virtud del art. 5, o (3) estén sujetos a las obligaciones de transparencia del art. 50. Si se cumple alguna de estas condiciones, los requisitos del Reglamento de IA se aplican a pesar de la licencia de código abierto."
        }
      }
    },
    "q_open_source_high_risk_check": {
      "id": "q_open_source_high_risk_check",
      "type": "logic",
      "stage": "S10",
      "title": "Open-Source: Checking Applicability",
      "description": "Check if the open-source system still falls under AI Act due to being high-risk, prohibited, or having transparency obligations",
      "conditions": [
        {
          "if": "risk_category == 'high_risk_annex_i' || risk_category == 'high_risk_annex_iii' || risk_category == 'prohibited'",
          "then": "q_role",
          "note": "Open-source exemption does NOT apply to high-risk or prohibited systems"
        },
        {
          "if": "transparency_obligations.length > 0",
          "then": "q_role",
          "note": "Open-source exemption does NOT apply to Art. 50 systems"
        },
        {
          "else": "result_open_source_exempt"
        }
      ],
      "previous": "q_open_source_check",
      "translations": {
        "fr": {
          "title": "Open source : vérification de l'applicabilité"
        },
        "nl": {
          "title": "Open source: controle van toepasselijkheid"
        },
        "de": {
          "title": "Open Source: Prüfung der Anwendbarkeit"
        },
        "es": {
          "title": "Código abierto: verificación de la aplicabilidad"
        }
      }
    },
    "q_role": {
      "id": "q_role",
      "type": "question",
      "stage": "S11",
      "title": "Your Role in the AI Value Chain",
      "body": "What is your primary role in relation to this AI system?",
      "legal_ref": "Art. 3(3)-(8)",
      "attribute": "role",
      "options": [
        {
          "label": "Provider — I develop (or commission the development of) the AI system and place it on the market or put it into service under my own name/trademark",
          "value": "provider",
          "next": "q_role_result_router",
          "translations": {
            "fr": {
              "label": "Fournisseur — Je développe (ou commande le développement) le système d'IA et le mets sur le marché ou en service sous mon propre nom/ma propre marque"
            },
            "nl": {
              "label": "Aanbieder — Ik ontwikkel (of laat ontwikkelen) het AI-systeem en breng het op de markt of neem het in gebruik onder mijn eigen naam/merk"
            },
            "de": {
              "label": "Anbieter — Ich entwickle (oder beauftrage die Entwicklung) das KI-System und bringe es unter meinem eigenen Namen/meiner eigenen Marke in Verkehr oder in Betrieb"
            },
            "es": {
              "label": "Proveedor — Desarrollo (o encargo el desarrollo de) el sistema de IA y lo comercializo o pongo en servicio bajo mi propio nombre/marca"
            }
          }
        },
        {
          "label": "Deployer — I use the AI system under my own authority (professional/commercial use)",
          "value": "deployer",
          "next": "q_deployer_becomes_provider",
          "translations": {
            "fr": {
              "label": "Déployeur — J'utilise le système d'IA sous ma propre autorité (usage professionnel/commercial)"
            },
            "nl": {
              "label": "Gebruiksverantwoordelijke — Ik gebruik het AI-systeem onder mijn eigen verantwoordelijkheid (professioneel/commercieel gebruik)"
            },
            "de": {
              "label": "Betreiber — Ich nutze das KI-System unter meiner eigenen Verantwortung (berufliche/gewerbliche Nutzung)"
            },
            "es": {
              "label": "Responsable del despliegue — Utilizo el sistema de IA bajo mi propia autoridad (uso profesional/comercial)"
            }
          }
        },
        {
          "label": "Importer — I bring an AI system from a non-EU provider into the EU market",
          "value": "importer",
          "next": "q_art25_importer_distributor",
          "translations": {
            "fr": {
              "label": "Importateur — J'introduis un système d'IA d'un fournisseur non européen sur le marché de l'UE"
            },
            "nl": {
              "label": "Importeur — Ik breng een AI-systeem van een niet-EU-aanbieder op de EU-markt"
            },
            "de": {
              "label": "Einführer — Ich bringe ein KI-System eines Nicht-EU-Anbieters auf den EU-Markt"
            },
            "es": {
              "label": "Importador — Introduzco un sistema de IA de un proveedor no europeo en el mercado de la UE"
            }
          }
        },
        {
          "label": "Distributor — I make the AI system available on the EU market (but I'm not the provider or importer)",
          "value": "distributor",
          "next": "q_art25_importer_distributor",
          "translations": {
            "fr": {
              "label": "Distributeur — Je mets le système d'IA à disposition sur le marché de l'UE (mais je ne suis ni le fournisseur ni l'importateur)"
            },
            "nl": {
              "label": "Distributeur — Ik maak het AI-systeem beschikbaar op de EU-markt (maar ik ben niet de aanbieder of importeur)"
            },
            "de": {
              "label": "Händler — Ich mache das KI-System auf dem EU-Markt verfügbar (bin aber weder Anbieter noch Einführer)"
            },
            "es": {
              "label": "Distribuidor — Pongo el sistema de IA a disposición en el mercado de la UE (pero no soy el proveedor ni el importador)"
            }
          }
        },
        {
          "label": "Product manufacturer — I integrate AI into my product and place it on the EU market under my name",
          "value": "product_manufacturer",
          "next": "q_role_result_router",
          "note": "Under Art. 25(3), if you integrate a high-risk AI system (Annex I, Section A) into your product and place it on the market under your name, you are considered the PROVIDER of the high-risk AI system and must comply with Art. 16 provider obligations.",
          "translations": {
            "fr": {
              "label": "Fabricant de produits — J'intègre l'IA dans mon produit et le mets sur le marché de l'UE sous mon nom"
            },
            "nl": {
              "label": "Productfabrikant — Ik integreer AI in mijn product en breng het op de EU-markt onder mijn naam"
            },
            "de": {
              "label": "Produkthersteller — Ich integriere KI in mein Produkt und bringe es unter meinem Namen auf den EU-Markt"
            },
            "es": {
              "label": "Fabricante de productos — Integro la IA en mi producto y lo comercializo en el mercado de la UE bajo mi nombre"
            }
          }
        },
        {
          "label": "Authorised representative — I represent a non-EU provider in the EU",
          "value": "authorised_representative",
          "next": "q_role_result_router",
          "translations": {
            "fr": {
              "label": "Mandataire — Je représente un fournisseur non européen dans l'UE"
            },
            "nl": {
              "label": "Gemachtigde — Ik vertegenwoordig een niet-EU-aanbieder in de EU"
            },
            "de": {
              "label": "Bevollmächtigter — Ich vertrete einen Nicht-EU-Anbieter in der EU"
            },
            "es": {
              "label": "Representante autorizado — Represento a un proveedor no europeo en la UE"
            }
          }
        }
      ],
      "previous": [
        "q_open_source_check",
        "q_open_source_high_risk_check"
      ],
      "translations": {
        "fr": {
          "title": "Votre rôle dans la chaîne de valeur de l'IA",
          "body": "Quel est votre rôle principal par rapport à ce système d'IA ?"
        },
        "nl": {
          "title": "Uw rol in de AI-waardeketen",
          "body": "Wat is uw primaire rol met betrekking tot dit AI-systeem?"
        },
        "de": {
          "title": "Ihre Rolle in der KI-Wertschöpfungskette",
          "body": "Was ist Ihre primäre Rolle in Bezug auf dieses KI-System?"
        },
        "es": {
          "title": "Su rol en la cadena de valor de la IA",
          "body": "¿Cuál es su rol principal en relación con este sistema de IA?"
        }
      }
    },
    "q_deployer_becomes_provider": {
      "id": "q_deployer_becomes_provider",
      "type": "question",
      "stage": "S11",
      "title": "Deployer as Provider Check",
      "body": "As a deployer, do any of the following apply to you?\n\n(a) You put your **own name or trademark** on a high-risk AI system already on the market\n(b) You make a **substantial modification** to a high-risk AI system (not foreseen by the original provider), affecting compliance or changing the intended purpose\n(c) You **change the intended purpose** of a non-high-risk AI system so that it becomes high-risk",
      "legal_ref": "Art. 25(1)",
      "attribute": "deployer_becomes_provider",
      "options": [
        {
          "label": "(a) Yes — I put my own name/trademark on it",
          "value": true,
          "set": {
            "deployer_becomes_provider_reason": "own_name_trademark",
            "role": "provider"
          },
          "next": "q_role_result_router",
          "translations": {
            "fr": {
              "label": "(a) Oui — J'appose mon propre nom/ma propre marque"
            },
            "nl": {
              "label": "(a) Ja — Ik plaats mijn eigen naam/merk erop"
            },
            "de": {
              "label": "(a) Ja — Ich versehe es mit meinem eigenen Namen/meiner eigenen Marke"
            },
            "es": {
              "label": "(a) Sí — Pongo mi propio nombre/marca"
            }
          }
        },
        {
          "label": "(b) Yes — I made a substantial modification",
          "value": true,
          "set": {
            "deployer_becomes_provider_reason": "substantial_modification",
            "role": "provider"
          },
          "next": "q_role_result_router",
          "translations": {
            "fr": {
              "label": "(b) Oui — J'ai apporté une modification substantielle"
            },
            "nl": {
              "label": "(b) Ja — Ik heb een substantiële wijziging aangebracht"
            },
            "de": {
              "label": "(b) Ja — Ich habe eine wesentliche Veränderung vorgenommen"
            },
            "es": {
              "label": "(b) Sí — He realizado una modificación sustancial"
            }
          }
        },
        {
          "label": "(c) Yes — I repurposed it for a high-risk use",
          "value": true,
          "set": {
            "deployer_becomes_provider_reason": "repurpose_to_high_risk",
            "role": "provider",
            "risk_category": "high_risk_annex_iii"
          },
          "next": "q_role_result_router",
          "translations": {
            "fr": {
              "label": "(c) Oui — Je l'ai réaffecté à un usage à haut risque"
            },
            "nl": {
              "label": "(c) Ja — Ik heb het herbestemd voor hoog-risicotoepassing"
            },
            "de": {
              "label": "(c) Ja — Ich habe es für einen Hochrisiko-Einsatz umgewidmet"
            },
            "es": {
              "label": "(c) Sí — Lo he reasignado a un uso de alto riesgo"
            }
          }
        },
        {
          "label": "No — none of the above apply",
          "value": false,
          "set": {
            "deployer_becomes_provider": false
          },
          "next": "q_deployer_public_authority",
          "translations": {
            "fr": {
              "label": "Non — aucune des situations ci-dessus ne s'applique"
            },
            "nl": {
              "label": "Nee — geen van bovenstaande situaties is van toepassing"
            },
            "de": {
              "label": "Nein — keine der oben genannten Situationen trifft zu"
            },
            "es": {
              "label": "No — ninguna de las situaciones anteriores se aplica"
            }
          }
        }
      ],
      "previous": "q_role",
      "translations": {
        "fr": {
          "title": "Vérification du déployeur en tant que fournisseur",
          "body": "En tant que déployeur, l'une des situations suivantes s'applique-t-elle à vous ?\n\n(a) Vous apposez votre **propre nom ou marque** sur un système d'IA à haut risque déjà sur le marché\n(b) Vous apportez une **modification substantielle** à un système d'IA à haut risque (non prévue par le fournisseur initial), affectant la conformité ou modifiant la finalité prévue\n(c) Vous **modifiez la finalité** d'un système d'IA non à haut risque de sorte qu'il devient à haut risque"
        },
        "nl": {
          "title": "Controle: gebruiksverantwoordelijke als aanbieder",
          "body": "Is als gebruiksverantwoordelijke een van de volgende situaties op u van toepassing?\n\n(a) U plaatst uw **eigen naam of merk** op een hoog-risico-AI-systeem dat al op de markt is\n(b) U brengt een **substantiële wijziging** aan in een hoog-risico-AI-systeem (niet voorzien door de oorspronkelijke aanbieder), die de conformiteit beïnvloedt of het beoogde doel wijzigt\n(c) U **wijzigt het beoogde doel** van een niet-hoog-risico-AI-systeem zodat het hoog risico wordt"
        },
        "de": {
          "title": "Prüfung: Betreiber als Anbieter",
          "body": "Trifft als Betreiber eine der folgenden Situationen auf Sie zu?\n\n(a) Sie versehen ein bereits auf dem Markt befindliches Hochrisiko-KI-System mit Ihrem **eigenen Namen oder Ihrer eigenen Marke**\n(b) Sie nehmen eine **wesentliche Veränderung** an einem Hochrisiko-KI-System vor (nicht vom ursprünglichen Anbieter vorgesehen), die die Konformität beeinträchtigt oder die Zweckbestimmung ändert\n(c) Sie **ändern die Zweckbestimmung** eines Nicht-Hochrisiko-KI-Systems, sodass es zu einem Hochrisiko-System wird"
        },
        "es": {
          "title": "Verificación: responsable del despliegue como proveedor",
          "body": "Como responsable del despliegue, ¿se aplica alguna de las siguientes situaciones?\n\n(a) Usted pone su **propio nombre o marca** en un sistema de IA de alto riesgo ya comercializado\n(b) Usted realiza una **modificación sustancial** en un sistema de IA de alto riesgo (no prevista por el proveedor original), que afecta la conformidad o modifica la finalidad prevista\n(c) Usted **modifica la finalidad** de un sistema de IA no de alto riesgo de modo que se convierte en alto riesgo"
        }
      }
    },
    "q_deployer_public_authority": {
      "id": "q_deployer_public_authority",
      "type": "question",
      "stage": "S11",
      "title": "Public Authority Deployer",
      "body": "Are you a **public authority** or do you act **on behalf of a public authority**?\n\nOr are you a private entity providing:\n- Public services\n- Credit scoring/creditworthiness assessment\n- Life/health insurance risk assessment?",
      "legal_ref": "Art. 27, Art. 26(8)",
      "note": "Public authority deployers of high-risk AI must register in the EU database and conduct a Fundamental Rights Impact Assessment (FRIA). Note: the FRIA obligation under Art. 27 does not apply to high-risk AI systems in the area of critical infrastructure (Annex III, point 2) per Art. 27(10).",
      "attribute": "is_public_authority",
      "options": [
        {
          "label": "Yes — public authority or acting on behalf of one",
          "value": true,
          "next": "q_role_result_router",
          "translations": {
            "fr": {
              "label": "Oui — autorité publique ou agissant pour le compte d'une autorité publique"
            },
            "nl": {
              "label": "Ja — overheidsinstantie of handelend namens een overheidsinstantie"
            },
            "de": {
              "label": "Ja — öffentliche Stelle oder im Auftrag einer öffentlichen Stelle handelnd"
            },
            "es": {
              "label": "Sí — autoridad pública o que actúa en nombre de una autoridad pública"
            }
          }
        },
        {
          "label": "Yes — private entity providing public services, credit scoring, or insurance risk assessment",
          "value": true,
          "next": "q_role_result_router",
          "translations": {
            "fr": {
              "label": "Oui — entité privée fournissant des services publics, une notation de crédit ou une évaluation des risques d'assurance"
            },
            "nl": {
              "label": "Ja — particuliere entiteit die openbare diensten, kredietscore of beoordeling van verzekeringsrisico's verleent"
            },
            "de": {
              "label": "Ja — privates Unternehmen, das öffentliche Dienste, Kreditwürdigkeitsprüfung oder Versicherungsrisikobewertung erbringt"
            },
            "es": {
              "label": "Sí — entidad privada que presta servicios públicos, puntuación crediticia o evaluación de riesgos de seguros"
            }
          }
        },
        {
          "label": "No",
          "value": false,
          "next": "q_role_result_router",
          "translations": {
            "fr": {
              "label": "Non"
            },
            "nl": {
              "label": "Nee"
            },
            "de": {
              "label": "Nein"
            },
            "es": {
              "label": "No"
            }
          }
        }
      ],
      "previous": "q_deployer_becomes_provider",
      "translations": {
        "fr": {
          "title": "Déployeur : autorité publique",
          "body": "Êtes-vous une **autorité publique** ou agissez-vous **pour le compte d'une autorité publique** ?\n\nOu êtes-vous une entité privée fournissant :\n- Des services publics\n- Une notation de crédit / évaluation de la solvabilité\n- Une évaluation des risques en assurance vie/santé ?",
          "note": "Les déployeurs relevant d'autorités publiques de systèmes d'IA à haut risque doivent s'enregistrer dans la base de données de l'UE et réaliser une analyse d'impact sur les droits fondamentaux (AIDRF)."
        },
        "nl": {
          "title": "Gebruiksverantwoordelijke: overheidsinstantie",
          "body": "Bent u een **overheidsinstantie** of handelt u **namens een overheidsinstantie**?\n\nOf bent u een particuliere entiteit die het volgende verleent:\n- Openbare diensten\n- Kredietscore / beoordeling van kredietwaardigheid\n- Risicobeoordeling voor levens-/ziektekostenverzekeringen?",
          "note": "Gebruiksverantwoordelijken die overheidsinstanties zijn en hoog-risico-AI-systemen inzetten, moeten zich registreren in de EU-database en een effectbeoordeling voor de grondrechten (FRIA) uitvoeren."
        },
        "de": {
          "title": "Betreiber: öffentliche Stelle",
          "body": "Sind Sie eine **öffentliche Stelle** oder handeln Sie **im Auftrag einer öffentlichen Stelle**?\n\nOder sind Sie ein privates Unternehmen, das Folgendes erbringt:\n- Öffentliche Dienste\n- Kreditwürdigkeitsprüfung / Bonitätsbewertung\n- Risikobewertung für Lebens-/Krankenversicherungen?",
          "note": "Betreiber als öffentliche Stellen von Hochrisiko-KI-Systemen müssen sich in der EU-Datenbank registrieren und eine Grundrechte-Folgenabschätzung (FRIA) durchführen."
        },
        "es": {
          "title": "Responsable del despliegue: autoridad pública",
          "body": "¿Es usted una **autoridad pública** o actúa **en nombre de una autoridad pública**?\n\n¿O es una entidad privada que presta:\n- Servicios públicos\n- Puntuación crediticia / evaluación de solvencia\n- Evaluación de riesgos de seguros de vida/salud?",
          "note": "Los responsables del despliegue que sean autoridades públicas de sistemas de IA de alto riesgo deben registrarse en la base de datos de la UE y realizar una evaluación de impacto en los derechos fundamentales (EIDRF)."
        }
      }
    },
    "q_role_result_router": {
      "id": "q_role_result_router",
      "type": "logic",
      "stage": "S12",
      "title": "Route to Result",
      "description": "Routes to the appropriate result page based on risk category",
      "conditions": [
        {
          "if": "(risk_category == 'high_risk_annex_i' || risk_category == 'high_risk_annex_iii') && is_gpai_model == true && gpai_role == 'provider'",
          "then": "result_high_risk_gpai",
          "note": "Combined high-risk + GPAI provider result (includes deployers/importers/distributors who became providers via Art. 25)"
        },
        {
          "if": "risk_category == 'high_risk_annex_i' || risk_category == 'high_risk_annex_iii'",
          "then": "result_high_risk"
        },
        {
          "if": "is_gpai_model == true && gpai_role == 'provider'",
          "then": "result_gpai",
          "note": "GPAI model providers get GPAI-specific obligations; transparency obligations shown on result_gpai page if applicable"
        },
        {
          "if": "transparency_obligations.length > 0",
          "then": "result_transparency",
          "note": "Non-high-risk, non-GPAI-provider systems with transparency obligations"
        },
        {
          "else": "result_minimal_risk"
        }
      ],
      "previous": [
        "q_role",
        "q_deployer_becomes_provider",
        "q_deployer_public_authority",
        "q_art25_importer_distributor"
      ]
    },
    "result_out_of_scope_no_nexus": {
      "id": "result_out_of_scope_no_nexus",
      "type": "result",
      "verdict": "OUT_OF_SCOPE",
      "verdict_class": "out-of-scope",
      "title": "Not in Scope of the EU AI Act",
      "summary": "Based on your answers, your AI system does not have a sufficient connection to the EU market. The EU AI Act does not apply to your situation.",
      "legal_ref": "Art. 2(1)",
      "obligations": [],
      "recommendations": [
        "Monitor changes — if you later place systems on the EU market or your outputs are used in the EU, the AI Act may apply.",
        "Other jurisdictions may have their own AI regulations."
      ],
      "previous": "q_territorial_nexus",
      "translations": {
        "fr": {
          "title": "Hors du champ d’application du règlement européen sur l’IA",
          "summary": "D’après vos réponses, votre système d’IA n’a pas de lien suffisant avec le marché de l’UE. Le règlement européen sur l’IA ne s’applique pas à votre situation.",
          "verdict": "HORS CHAMP",
          "recommendations": [
            "Monitor changes — if you later place systems on the EU market or your outputs are used in the EU, the AI Act may apply.",
            "Other jurisdictions may have their own AI regulations."
          ]
        },
        "nl": {
          "title": "Niet binnen het toepassingsgebied van de EU AI-verordening",
          "summary": "Op basis van uw antwoorden heeft uw AI-systeem geen voldoende band met de EU-markt. De EU AI-verordening is niet van toepassing op uw situatie.",
          "verdict": "BUITEN TOEPASSINGSGEBIED",
          "recommendations": [
            "Monitor changes — if you later place systems on the EU market or your outputs are used in the EU, the AI Act may apply.",
            "Other jurisdictions may have their own AI regulations."
          ]
        },
        "de": {
          "title": "Nicht im Anwendungsbereich der EU-KI-Verordnung",
          "summary": "Basierend auf Ihren Antworten hat Ihr KI-System keine ausreichende Verbindung zum EU-Markt. Die EU-KI-Verordnung gilt nicht für Ihre Situation.",
          "verdict": "NICHT IM ANWENDUNGSBEREICH",
          "recommendations": [
            "Monitor changes — if you later place systems on the EU market or your outputs are used in the EU, the KI-Gesetz may apply.",
            "Other jurisdictions may have their own AI regulations."
          ]
        },
        "es": {
          "title": "Fuera del ámbito de aplicación del Reglamento Europeo de IA",
          "summary": "Según sus respuestas, su sistema de IA no tiene una conexión suficiente con el mercado de la UE. El Reglamento Europeo de IA no se aplica a su situación.",
          "verdict": "FUERA DEL ÁMBITO",
          "recommendations": [
            "Monitor changes — if you later place systems on the EU market or your outputs are used in the EU, the Ley de IA may apply.",
            "Other jurisdictions may have their own AI regulations."
          ]
        }
      }
    },
    "result_excluded_military": {
      "id": "result_excluded_military",
      "type": "result",
      "verdict": "EXCLUDED",
      "verdict_class": "excluded",
      "title": "Excluded — Military, Defence, or National Security",
      "summary": "Your AI system is excluded from the scope of the EU AI Act because it is used exclusively for military, defence, or national security purposes.",
      "legal_ref": "Art. 2(3)",
      "obligations": [],
      "recommendations": [
        "This exclusion only applies to systems used EXCLUSIVELY for these purposes. Dual-use systems may still be in scope.",
        "Member State national security competences are not affected."
      ],
      "previous": "q_exclusion_military",
      "translations": {
        "fr": {
          "title": "Exclu — Militaire, défense ou sécurité nationale",
          "summary": "Votre système d’IA est exclu du champ d’application du règlement européen sur l’IA car il est utilisé exclusivement à des fins militaires, de défense ou de sécurité nationale.",
          "verdict": "EXCLU",
          "recommendations": [
            "This exclusion only applies to systems used EXCLUSIVELY for these purposes. Dual-use systems may still be in scope.",
            "Member State national security competences are not affected."
          ]
        },
        "nl": {
          "title": "Uitgesloten — Militair, defensie of nationale veiligheid",
          "summary": "Uw AI-systeem is uitgesloten van het toepassingsgebied van de EU AI-verordening omdat het uitsluitend wordt gebruikt voor militaire, defensie- of nationale veiligheidsdoeleinden.",
          "verdict": "UITGESLOTEN",
          "recommendations": [
            "This exclusion only applies to systems used EXCLUSIVELY for these purposes. Dual-use systems may still be in scope.",
            "Member State national security competences are not affected."
          ]
        },
        "de": {
          "title": "Ausgenommen — Militär, Verteidigung oder nationale Sicherheit",
          "summary": "Ihr KI-System ist vom Anwendungsbereich der EU-KI-Verordnung ausgenommen, da es ausschließlich für militärische, verteidigungs- oder nationale Sicherheitszwecke eingesetzt wird.",
          "verdict": "AUSGENOMMEN",
          "recommendations": [
            "This exclusion only applies to systems used EXCLUSIVELY for these purposes. Dual-use systems may still be in scope.",
            "Member State national security competences are not affected."
          ]
        },
        "es": {
          "title": "Excluido — Militar, defensa o seguridad nacional",
          "summary": "Su sistema de IA está excluido del ámbito de aplicación del Reglamento Europeo de IA porque se utiliza exclusivamente para fines militares, de defensa o de seguridad nacional.",
          "verdict": "EXCLUIDO",
          "recommendations": [
            "This exclusion only applies to systems used EXCLUSIVELY for these purposes. Dual-use systems may still be in scope.",
            "Member State national security competences are not affected."
          ]
        }
      }
    },
    "result_excluded_research": {
      "id": "result_excluded_research",
      "type": "result",
      "verdict": "EXCLUDED",
      "verdict_class": "excluded",
      "title": "Excluded — Scientific Research & Development",
      "summary": "Your AI system/model is excluded from the scope of the EU AI Act because it is developed and used for the sole purpose of scientific research and development.",
      "legal_ref": "Art. 2(6)",
      "obligations": [],
      "recommendations": [
        "This exclusion only covers pure scientific R&D. If the system is later placed on the market or put into service, the AI Act will apply.",
        "Other legislation (e.g., GDPR) may still apply to your R&D activities."
      ],
      "previous": "q_exclusion_research",
      "translations": {
        "fr": {
          "title": "Exclu — Recherche et développement scientifique",
          "summary": "Votre système/modèle d’IA est exclu du champ d’application du règlement européen sur l’IA car il est développé et utilisé aux seules fins de recherche et de développement scientifique.",
          "verdict": "EXCLU",
          "recommendations": [
            "This exclusion only covers pure scientific R&D. If the system is later placed on the market or put into service, the AI Act will apply.",
            "Other legislation (e.g., GDPR) may still apply to your R&D activities."
          ]
        },
        "nl": {
          "title": "Uitgesloten — Wetenschappelijk onderzoek en ontwikkeling",
          "summary": "Uw AI-systeem/-model is uitgesloten van het toepassingsgebied van de EU AI-verordening omdat het uitsluitend wordt ontwikkeld en gebruikt voor wetenschappelijk onderzoek en ontwikkeling.",
          "verdict": "UITGESLOTEN",
          "recommendations": [
            "This exclusion only covers pure scientific R&D. If the system is later placed on the market or put into service, the AI Act will apply.",
            "Other legislation (e.g., GDPR) may still apply to your R&D activities."
          ]
        },
        "de": {
          "title": "Ausgenommen — Wissenschaftliche Forschung und Entwicklung",
          "summary": "Ihr KI-System/-Modell ist vom Anwendungsbereich der EU-KI-Verordnung ausgenommen, da es ausschließlich für wissenschaftliche Forschung und Entwicklung entwickelt und verwendet wird.",
          "verdict": "AUSGENOMMEN",
          "recommendations": [
            "This exclusion only covers pure scientific R&D. If the system is later placed on the market or put into service, the KI-Gesetz will apply.",
            "Other legislation (e.g., GDPR) may still apply to your R&D activities."
          ]
        },
        "es": {
          "title": "Excluido — Investigación y desarrollo científico",
          "summary": "Su sistema/modelo de IA está excluido del ámbito de aplicación del Reglamento Europeo de IA porque se desarrolla y utiliza con el único fin de investigación y desarrollo científico.",
          "verdict": "EXCLUIDO",
          "recommendations": [
            "This exclusion only covers pure scientific R&D. If the system is later placed on the market or put into service, the Ley de IA will apply.",
            "Other legislation (e.g., GDPR) may still apply to your R&D activities."
          ]
        }
      }
    },
    "result_excluded_pre_market": {
      "id": "result_excluded_pre_market",
      "type": "result",
      "verdict": "EXCLUDED",
      "verdict_class": "excluded",
      "title": "Excluded — Pre-Market R&D",
      "summary": "Your AI system/model is currently excluded from the AI Act because it is still in the research, testing, or development phase prior to being placed on the market or put into service.",
      "legal_ref": "Art. 2(8)",
      "obligations": [],
      "recommendations": [
        "Once you place the system on the market or put it into service, the AI Act will apply. Plan for compliance now.",
        "Testing in real-world conditions is NOT covered by this exclusion — specific rules apply (Art. 57-63).",
        "Consider the AI Act requirements during development to ensure smoother compliance later."
      ],
      "previous": "q_exclusion_pre_market",
      "translations": {
        "fr": {
          "title": "Exclu — R&D pré-commercialisation",
          "summary": "Votre système/modèle d’IA est actuellement exclu du règlement sur l’IA car il est encore en phase de recherche, de test ou de développement avant sa mise sur le marché ou sa mise en service.",
          "verdict": "EXCLU",
          "recommendations": [
            "Once you place the system on the market or put it into service, the AI Act will apply. Plan for compliance now.",
            "Testing in real-world conditions is NOT covered by this exclusion — specific rules apply (Art. 57-63).",
            "Consider the AI Act requirements during development to ensure smoother compliance later."
          ]
        },
        "nl": {
          "title": "Uitgesloten — Pre-markt O&O",
          "summary": "Uw AI-systeem/-model is momenteel uitgesloten van de AI-verordening omdat het zich nog in de onderzoeks-, test- of ontwikkelingsfase bevindt voordat het op de markt wordt gebracht of in gebruik wordt genomen.",
          "verdict": "UITGESLOTEN",
          "recommendations": [
            "Once you place the system on the market or put it into service, the AI Act will apply. Plan for compliance now.",
            "Testing in real-world conditions is NOT covered by this exclusion — specific rules apply (Art. 57-63).",
            "Consider the AI Act requirements during development to ensure smoother compliance later."
          ]
        },
        "de": {
          "title": "Ausgenommen — Vormarktliche Forschung und Entwicklung",
          "summary": "Ihr KI-System/-Modell ist derzeit von der KI-Verordnung ausgenommen, da es sich noch in der Forschungs-, Test- oder Entwicklungsphase befindet, bevor es auf den Markt gebracht oder in Betrieb genommen wird.",
          "verdict": "AUSGENOMMEN",
          "recommendations": [
            "Once you place the system on the market or put it into service, the KI-Gesetz will apply. Plan for compliance now.",
            "Testing in real-world conditions is NOT covered by this exclusion — specific rules apply (Art. 57-63).",
            "Consider the KI-Gesetz requirements during development to ensure smoother compliance later."
          ]
        },
        "es": {
          "title": "Excluido — I+D precomercialización",
          "summary": "Su sistema/modelo de IA está actualmente excluido del Reglamento de IA porque aún se encuentra en fase de investigación, prueba o desarrollo antes de su comercialización o puesta en servicio.",
          "verdict": "EXCLUIDO",
          "recommendations": [
            "Once you place the system on the market or put it into service, the Ley de IA will apply. Plan for compliance now.",
            "Testing in real-world conditions is NOT covered by this exclusion — specific rules apply (Art. 57-63).",
            "Consider the Ley de IA requirements during development to ensure smoother compliance later."
          ]
        }
      }
    },
    "result_excluded_personal": {
      "id": "result_excluded_personal",
      "type": "result",
      "verdict": "EXCLUDED",
      "verdict_class": "excluded",
      "title": "Excluded — Personal Non-Professional Use",
      "summary": "As a natural person using AI systems for purely personal, non-professional activities, you are not subject to deployer obligations under the AI Act.",
      "legal_ref": "Art. 2(10)",
      "obligations": [],
      "recommendations": [
        "Provider obligations still apply to whoever provided the AI system to you.",
        "As an EU-based affected person, you have rights under the AI Act (e.g., right to explanation for high-risk AI decisions)."
      ],
      "previous": "q_exclusion_personal",
      "translations": {
        "fr": {
          "title": "Exclu — Utilisation personnelle non professionnelle",
          "summary": "En tant que personne physique utilisant des systèmes d’IA à des fins purement personnelles et non professionnelles, vous n’êtes pas soumis aux obligations des déployeurs au titre du règlement sur l’IA.",
          "verdict": "EXCLU",
          "recommendations": [
            "Provider obligations still apply to whoever provided the système d'IA to you.",
            "As an EU-based affected person, you have rights under the AI Act (e.g., right to explanation for à haut risque AI decisions)."
          ]
        },
        "nl": {
          "title": "Uitgesloten — Persoonlijk niet-professioneel gebruik",
          "summary": "Als natuurlijke persoon die AI-systemen gebruikt voor zuiver persoonlijke, niet-professionele activiteiten, bent u niet onderworpen aan de verplichtingen voor gebruiksverantwoordelijken krachtens de AI-verordening.",
          "verdict": "UITGESLOTEN",
          "recommendations": [
            "Provider obligations still apply to whoever provided the AI-systeem to you.",
            "As an EU-based affected person, you have rights under the AI Act (e.g., right to explanation for hoog risico AI decisions)."
          ]
        },
        "de": {
          "title": "Ausgenommen — Persönliche, nicht berufliche Nutzung",
          "summary": "Als natürliche Person, die KI-Systeme ausschließlich für persönliche, nicht berufliche Zwecke nutzt, unterliegen Sie nicht den Betreiberpflichten der KI-Verordnung.",
          "verdict": "AUSGENOMMEN",
          "recommendations": [
            "Provider obligations still apply to whoever provided the KI-System to you.",
            "As an EU-based affected person, you have rights under the KI-Gesetz (e.g., right to explanation for Hochrisiko AI decisions)."
          ]
        },
        "es": {
          "title": "Excluido — Uso personal no profesional",
          "summary": "Como persona física que utiliza sistemas de IA para actividades puramente personales y no profesionales, usted no está sujeto a las obligaciones de los responsables del despliegue en virtud del Reglamento de IA.",
          "verdict": "EXCLUIDO",
          "recommendations": [
            "Provider obligations still apply to whoever provided the sistema de IA to you.",
            "As an EU-based affected person, you have rights under the Ley de IA (e.g., right to explanation for alto riesgo AI decisions)."
          ]
        }
      }
    },
    "result_excluded_intl_cooperation": {
      "id": "result_excluded_intl_cooperation",
      "type": "result",
      "verdict": "EXCLUDED",
      "verdict_class": "excluded",
      "title": "Excluded — International Law Enforcement Cooperation",
      "summary": "Your use of AI systems is excluded from the AI Act because you are a third-country public authority or international organisation using AI in the framework of international law enforcement/judicial cooperation with the EU, with adequate fundamental rights safeguards.",
      "legal_ref": "Art. 2(4)",
      "obligations": [],
      "recommendations": [
        "This exclusion requires adequate safeguards for fundamental rights and freedoms.",
        "If safeguards are insufficient, the exclusion does not apply."
      ],
      "previous": "q_exclusion_intl_cooperation",
      "translations": {
        "fr": {
          "title": "Exclu — Coopération internationale en matière répressive",
          "summary": "Votre utilisation de systèmes d’IA est exclue du règlement sur l’IA car vous êtes une autorité publique d’un pays tiers ou une organisation internationale utilisant l’IA dans le cadre de la coopération internationale en matière répressive/judiciaire avec l’UE, avec des garanties adéquates pour les droits fondamentaux.",
          "verdict": "EXCLU",
          "recommendations": [
            "This exclusion requires adequate safeguards for fundamental rights and freedoms.",
            "If safeguards are insufficient, the exclusion does not apply."
          ]
        },
        "nl": {
          "title": "Uitgesloten — Internationale samenwerking op het gebied van rechtshandhaving",
          "summary": "Uw gebruik van AI-systemen is uitgesloten van de AI-verordening omdat u een overheidsinstantie van een derde land of een internationale organisatie bent die AI gebruikt in het kader van internationale samenwerking op het gebied van rechtshandhaving/justitie met de EU, met adequate waarborgen voor de grondrechten.",
          "verdict": "UITGESLOTEN",
          "recommendations": [
            "This exclusion requires adequate safeguards for fundamental rights and freedoms.",
            "If safeguards are insufficient, the exclusion does not apply."
          ]
        },
        "de": {
          "title": "Ausgenommen — Internationale Zusammenarbeit der Strafverfolgungsbehörden",
          "summary": "Ihre Nutzung von KI-Systemen ist von der KI-Verordnung ausgenommen, da Sie eine Behörde eines Drittstaats oder eine internationale Organisation sind, die KI im Rahmen der internationalen Zusammenarbeit bei der Strafverfolgung/Justiz mit der EU einsetzt, mit angemessenen Garantien für die Grundrechte.",
          "verdict": "AUSGENOMMEN",
          "recommendations": [
            "This exclusion requires adequate safeguards for fundamental rights and freedoms.",
            "If safeguards are insufficient, the exclusion does not apply."
          ]
        },
        "es": {
          "title": "Excluido — Cooperación internacional en materia de aplicación de la ley",
          "summary": "Su uso de sistemas de IA está excluido del Reglamento de IA porque usted es una autoridad pública de un tercer país o una organización internacional que utiliza la IA en el marco de la cooperación internacional en materia de aplicación de la ley/judicial con la UE, con garantías adecuadas para los derechos fundamentales.",
          "verdict": "EXCLUIDO",
          "recommendations": [
            "This exclusion requires adequate safeguards for fundamental rights and freedoms.",
            "If safeguards are insufficient, the exclusion does not apply."
          ]
        }
      }
    },
    "result_not_ai_system": {
      "id": "result_not_ai_system",
      "type": "result",
      "verdict": "OUT_OF_SCOPE",
      "verdict_class": "out-of-scope",
      "title": "Not an AI System Under the AI Act",
      "summary": "Based on your answers, your system does not meet the definition of an 'AI system' under Art. 3(1) of the AI Act. The regulation does not apply.",
      "legal_ref": "Art. 3(1)",
      "obligations": [],
      "recommendations": [
        "Other EU legislation may still apply (GDPR, Product Safety, CRA, etc.).",
        "If your system evolves to include AI capabilities (e.g., adding machine learning), reassess.",
        "Consider AI literacy obligations even for non-AI systems if your organisation uses AI elsewhere (Art. 4)."
      ],
      "previous": [
        "q_is_ai_system",
        "q_is_ai_system_retry"
      ],
      "translations": {
        "fr": {
          "title": "Pas un système d’IA au sens du règlement sur l’IA",
          "summary": "D’après vos réponses, votre système ne répond pas à la définition d’un « système d’IA » au sens de l’art. 3(1) du règlement sur l’IA. La réglementation ne s’applique pas.",
          "verdict": "HORS CHAMP",
          "recommendations": [
            "Other EU legislation may still apply (GDPR, Product Safety, CRA, etc.).",
            "If votre système evolves to include AI capabilities (e.g., adding machine learning), reassess.",
            "Consider AI literacy obligations even for non-système d'IAs if your organisation uses AI elsewhere (Art. 4)."
          ]
        },
        "nl": {
          "title": "Geen AI-systeem volgens de AI-verordening",
          "summary": "Op basis van uw antwoorden voldoet uw systeem niet aan de definitie van een ‘AI-systeem’ volgens art. 3(1) van de AI-verordening. De verordening is niet van toepassing.",
          "verdict": "BUITEN TOEPASSINGSGEBIED",
          "recommendations": [
            "Other EU legislation may still apply (GDPR, Product Safety, CRA, etc.).",
            "If uw systeem evolves to include AI capabilities (e.g., adding machine learning), reassess.",
            "Consider AI literacy obligations even for non-AI-systeems if your organisation uses AI elsewhere (Art. 4)."
          ]
        },
        "de": {
          "title": "Kein KI-System im Sinne der KI-Verordnung",
          "summary": "Basierend auf Ihren Antworten erfüllt Ihr System nicht die Definition eines „KI-Systems“ gemäß Art. 3(1) der KI-Verordnung. Die Verordnung gilt nicht.",
          "verdict": "NICHT IM ANWENDUNGSBEREICH",
          "recommendations": [
            "Other EU legislation may still apply (GDPR, Product Safety, CRA, etc.).",
            "If Ihr System evolves to include AI capabilities (e.g., adding machine learning), reassess.",
            "Consider AI literacy obligations even for non-KI-Systems if your organisation uses AI elsewhere (Art. 4)."
          ]
        },
        "es": {
          "title": "No es un sistema de IA según el Reglamento de IA",
          "summary": "Según sus respuestas, su sistema no cumple la definición de «sistema de IA» del art. 3(1) del Reglamento de IA. La normativa no se aplica.",
          "verdict": "FUERA DEL ÁMBITO",
          "recommendations": [
            "Other EU legislation may still apply (GDPR, Product Safety, CRA, etc.).",
            "If su sistema evolves to include AI capabilities (e.g., adding machine learning), reassess.",
            "Consider AI literacy obligations even for non-sistema de IAs if your organisation uses AI elsewhere (Art. 4)."
          ]
        }
      }
    },
    "result_consult_expert_ai_definition": {
      "id": "result_consult_expert_ai_definition",
      "type": "result",
      "verdict": "CONSULT_EXPERT",
      "verdict_class": "consult-expert",
      "title": "Expert Consultation Recommended",
      "summary": "Whether your system qualifies as an 'AI system' under the AI Act is a nuanced question that depends on its specific architecture and functionality. We recommend consulting a legal or technical expert.",
      "legal_ref": "Art. 3(1), EC Guidelines on AI System Definition (2025)",
      "obligations": [],
      "recommendations": [
        "Consult the EC Guidelines on the definition of AI systems (Feb 2025) for detailed examples.",
        "Consider engaging a legal expert specialising in EU AI regulation.",
        "Even if not an AI system, other legislation may apply (GDPR, product safety, etc.)."
      ],
      "previous": "q_is_ai_system_retry",
      "ai_literacy": {
        "title": "AI Literacy (Art. 4)",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "All operators must ensure sufficient AI literacy of staff dealing with AI systems. This applies since 2 February 2025."
      },
      "translations": {
        "fr": {
          "title": "Consultation d’un expert recommandée",
          "summary": "La question de savoir si votre système est un « système d’IA » au sens du règlement sur l’IA est nuancée et dépend de son architecture et de ses fonctionnalités spécifiques. Nous recommandons de consulter un expert juridique ou technique.",
          "verdict": "CONSULTER UN EXPERT",
          "recommendations": [
            "Consult the EC Guidelines on the definition of système d'IAs (Feb 2025) for detailed examples.",
            "Consider engaging a legal expert specialising in EU AI regulation.",
            "Even if not an système d'IA, other legislation may apply (GDPR, product safety, etc.)."
          ],
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          }
        },
        "nl": {
          "title": "Raadpleging van een expert aanbevolen",
          "summary": "Of uw systeem kwalificeert als een ‘AI-systeem’ volgens de AI-verordening is een genuanceerde vraag die afhangt van de specifieke architectuur en functionaliteit. Wij raden aan een juridisch of technisch expert te raadplegen.",
          "verdict": "RAADPLEEG EEN EXPERT",
          "recommendations": [
            "Consult the EC Guidelines on the definition of AI-systeems (Feb 2025) for detailed examples.",
            "Consider engaging a legal expert specialising in EU AI regulation.",
            "Even if not an AI-systeem, other legislation may apply (GDPR, product safety, etc.)."
          ],
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          }
        },
        "de": {
          "title": "Expertenberatung empfohlen",
          "summary": "Ob Ihr System als „KI-System“ im Sinne der KI-Verordnung gilt, ist eine differenzierte Frage, die von seiner spezifischen Architektur und Funktionalität abhängt. Wir empfehlen die Konsultation eines juristischen oder technischen Experten.",
          "verdict": "EXPERTEN KONSULTIEREN",
          "recommendations": [
            "Consult the EC Guidelines on the definition of KI-Systems (Feb 2025) for detailed examples.",
            "Consider engaging a legal expert specialising in EU AI regulation.",
            "Even if not an KI-System, other legislation may apply (GDPR, product safety, etc.)."
          ],
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          }
        },
        "es": {
          "title": "Se recomienda la consulta de un experto",
          "summary": "Si su sistema se considera un «sistema de IA» en virtud del Reglamento de IA es una cuestión matizada que depende de su arquitectura y funcionalidad específicas. Recomendamos consultar a un experto jurídico o técnico.",
          "verdict": "CONSULTAR EXPERTO",
          "recommendations": [
            "Consult the EC Guidelines on the definition of sistema de IAs (Feb 2025) for detailed examples.",
            "Consider engaging a legal expert specialising in EU AI regulation.",
            "Even if not an sistema de IA, other legislation may apply (GDPR, product safety, etc.)."
          ],
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          }
        }
      }
    },
    "result_affected_person": {
      "id": "result_affected_person",
      "type": "result",
      "verdict": "IN_SCOPE",
      "verdict_class": "in-scope",
      "title": "In Scope — As an Affected Person",
      "summary": "As an affected person located in the EU, you have rights under the AI Act but no compliance obligations.",
      "legal_ref": "Art. 2(1)(g)",
      "obligations": [],
      "rights": [
        "Right to lodge complaints with market surveillance authorities",
        "Right to explanation of individual decision-making by high-risk AI systems",
        "Right to be informed when interacting with an AI system (chatbots, etc.)",
        "Right to be informed about deep fakes and AI-generated content",
        {
          "title": "Right to Explanation",
          "ref": "Art. 86",
          "summary": "Any affected person subject to a decision based on the output of a high-risk AI system that produces legal effects or similarly significantly affects them has the right to obtain clear and meaningful explanations of the role of the AI system in the decision-making procedure and the main elements of the decision taken."
        },
        {
          "title": "Right to Lodge a Complaint",
          "ref": "Art. 87",
          "summary": "Without prejudice to other administrative or judicial remedies, any natural or legal person having grounds to consider that there has been an infringement of the AI Act may lodge a complaint with the relevant market surveillance authority."
        }
      ],
      "previous": "q_territorial_nexus",
      "ai_literacy": {
        "title": "AI Literacy (Art. 4)",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "All operators must ensure sufficient AI literacy of staff dealing with AI systems. This applies since 2 February 2025."
      },
      "translations": {
        "fr": {
          "title": "Dans le champ d’application — En tant que personne concernée",
          "summary": "En tant que personne concernée située dans l’UE, vous disposez de droits au titre du règlement sur l’IA mais n’avez pas d’obligations de conformité.",
          "verdict": "DANS LE CHAMP",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          }
        },
        "nl": {
          "title": "Binnen het toepassingsgebied — Als betrokkene",
          "summary": "Als betrokkene die zich in de EU bevindt, hebt u rechten krachtens de AI-verordening maar geen nalevingsverplichtingen.",
          "verdict": "BINNEN TOEPASSINGSGEBIED",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          }
        },
        "de": {
          "title": "Im Anwendungsbereich — Als betroffene Person",
          "summary": "Als betroffene Person in der EU haben Sie Rechte nach der KI-Verordnung, aber keine Compliance-Pflichten.",
          "verdict": "IM ANWENDUNGSBEREICH",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          }
        },
        "es": {
          "title": "Dentro del ámbito — Como persona afectada",
          "summary": "Como persona afectada ubicada en la UE, usted tiene derechos en virtud del Reglamento de IA pero no tiene obligaciones de cumplimiento.",
          "verdict": "DENTRO DEL ÁMBITO",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          }
        }
      }
    },
    "result_open_source_exempt": {
      "id": "result_open_source_exempt",
      "type": "result",
      "verdict": "EXEMPT",
      "verdict_class": "exempt",
      "title": "Open-Source Exemption Applies",
      "summary": "Your AI system is released under a free and open-source licence and is not high-risk, not prohibited, and not subject to transparency obligations. The AI Act does not impose obligations on you.",
      "legal_ref": "Art. 2(12)",
      "obligations": [],
      "recommendations": [
        "If the system is later used for a high-risk purpose by a deployer, they become the provider and must comply with high-risk requirements.",
        "Monitor whether your system's usage changes to covered categories."
      ],
      "previous": "q_open_source_high_risk_check",
      "translations": {
        "fr": {
          "title": "L’exemption open source s’applique",
          "summary": "Votre système d’IA est publié sous une licence libre et open source et n’est ni à haut risque, ni interdit, ni soumis à des obligations de transparence. Le règlement sur l’IA ne vous impose pas d’obligations.",
          "verdict": "EXCLU",
          "recommendations": [
            "If the system is later used for a à haut risque purpose by a deployer, they become the provider and must comply with à haut risque requirements.",
            "Monitor whether votre système's usage changes to covered categories."
          ]
        },
        "nl": {
          "title": "Open-source vrijstelling van toepassing",
          "summary": "Uw AI-systeem is vrijgegeven onder een vrije en open-source licentie en is niet hoog risico, niet verboden en niet onderworpen aan transparantieverplichtingen. De AI-verordening legt u geen verplichtingen op.",
          "verdict": "UITGESLOTEN",
          "recommendations": [
            "If the system is later used for a hoog risico purpose by a deployer, they become the provider and must comply with hoog risico requirements.",
            "Monitor whether uw systeem's usage changes to covered categories."
          ]
        },
        "de": {
          "title": "Open-Source-Ausnahme gilt",
          "summary": "Ihr KI-System wird unter einer freien und quelloffenen Lizenz bereitgestellt und ist weder hochriskant noch verboten und unterliegt keinen Transparenzpflichten. Die KI-Verordnung erlegt Ihnen keine Verpflichtungen auf.",
          "verdict": "AUSGENOMMEN",
          "recommendations": [
            "If the system is later used for a Hochrisiko purpose by a deployer, they become the provider and must comply with Hochrisiko requirements.",
            "Monitor whether Ihr System's usage changes to covered categories."
          ]
        },
        "es": {
          "title": "Se aplica la exención de código abierto",
          "summary": "Su sistema de IA se publica bajo una licencia libre y de código abierto y no es de alto riesgo, no está prohibido ni está sujeto a obligaciones de transparencia. El Reglamento de IA no le impone obligaciones.",
          "verdict": "EXCLUIDO",
          "recommendations": [
            "If the system is later used for a alto riesgo purpose by a deployer, they become the provider and must comply with alto riesgo requirements.",
            "Monitor whether su sistema's usage changes to covered categories."
          ]
        }
      }
    },
    "result_prohibited_manipulation": {
      "id": "result_prohibited_manipulation",
      "type": "result",
      "verdict": "PROHIBITED",
      "verdict_class": "prohibited",
      "title": "PROHIBITED — Subliminal Manipulation",
      "summary": "Your AI system appears to fall under a PROHIBITED AI practice. AI systems that deploy subliminal, manipulative, or deceptive techniques to materially distort behaviour and cause significant harm are banned.",
      "legal_ref": "Art. 5(1)(a)",
      "effective_date": "2 February 2025",
      "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
      "obligations": [
        "CEASE all placing on the market, putting into service, and use of this system."
      ],
      "recommendations": [
        "Immediately review your system's design and intended use.",
        "Consult legal counsel to determine if your system can be redesigned to avoid this prohibition.",
        "Consider whether the system could be redesigned to remove manipulative/deceptive elements."
      ],
      "previous": "q_prohibited_subliminal",
      "ai_literacy": {
        "title": "AI Literacy (Art. 4)",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "All operators must ensure sufficient AI literacy of staff dealing with AI systems. This applies since 2 February 2025."
      },
      "translations": {
        "fr": {
          "title": "INTERDIT — Manipulation subliminale",
          "summary": "Votre système d’IA semble relever d’une pratique d’IA INTERDITE. Les systèmes d’IA qui déploient des techniques subliminales, manipulatrices ou trompeuses pour altérer substantiellement le comportement et causer un préjudice significatif sont interdits.",
          "verdict": "INTERDIT",
          "recommendations": [
            "Cette pratique est absolument interdite par l'AI Act de l'UE.",
            "Cesser immédiatement toute activité impliquant cette pratique d'IA interdite.",
            "Réaliser un audit interne pour identifier toute utilisation de cette pratique."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 février 2025",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          }
        },
        "nl": {
          "title": "VERBODEN — Subliminale manipulatie",
          "summary": "Uw AI-systeem lijkt onder een VERBODEN AI-praktijk te vallen. AI-systemen die subliminale, manipulatieve of misleidende technieken inzetten om gedrag wezenlijk te verstoren en aanzienlijke schade te veroorzaken, zijn verboden.",
          "verdict": "VERBODEN",
          "recommendations": [
            "Deze praktijk is absoluut verboden onder de EU AI Act.",
            "Onmiddellijk alle activiteiten met betrekking tot deze verboden AI-praktijk staken.",
            "Een interne audit uitvoeren om elk gebruik van deze praktijk te identificeren."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 februari 2025",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          }
        },
        "de": {
          "title": "VERBOTEN — Unterschwellige Manipulation",
          "summary": "Ihr KI-System scheint unter eine VERBOTENE KI-Praktik zu fallen. KI-Systeme, die unterschwellige, manipulative oder täuschende Techniken einsetzen, um das Verhalten wesentlich zu beeinflussen und erheblichen Schaden zu verursachen, sind verboten.",
          "verdict": "VERBOTEN",
          "recommendations": [
            "Diese Praxis ist nach dem EU-KI-Gesetz absolut verboten.",
            "Alle Aktivitäten mit dieser verbotenen KI-Praxis sofort einstellen.",
            "Eine interne Prüfung durchführen, um jede Nutzung dieser Praxis zu identifizieren."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2. Februar 2025",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          }
        },
        "es": {
          "title": "PROHIBIDO — Manipulación subliminal",
          "summary": "Su sistema de IA parece estar incluido en una práctica de IA PROHIBIDA. Los sistemas de IA que emplean técnicas subliminales, manipuladoras o engañosas para distorsionar materialmente el comportamiento y causar un daño significativo están prohibidos.",
          "verdict": "PROHIBIDO",
          "recommendations": [
            "Esta práctica está absolutamente prohibida por la Ley de IA de la UE.",
            "Cesar inmediatamente toda actividad que implique esta práctica de IA prohibida.",
            "Realizar una auditoría interna para identificar cualquier uso de esta práctica."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 de febrero de 2025",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          }
        }
      }
    },
    "result_prohibited_vulnerability": {
      "id": "result_prohibited_vulnerability",
      "type": "result",
      "verdict": "PROHIBITED",
      "verdict_class": "prohibited",
      "title": "PROHIBITED — Exploiting Vulnerabilities",
      "summary": "Your AI system appears to fall under a PROHIBITED AI practice. AI systems that exploit vulnerabilities due to age, disability, or social/economic situation to materially distort behaviour and cause significant harm are banned.",
      "legal_ref": "Art. 5(1)(b)",
      "effective_date": "2 February 2025",
      "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
      "obligations": [
        "CEASE all placing on the market, putting into service, and use of this system."
      ],
      "recommendations": [
        "Immediately review your system's design and intended use.",
        "Consult legal counsel to determine if your system can be redesigned to avoid this prohibition.",
        "Consider whether the system could be redesigned to remove the exploitative elements."
      ],
      "previous": "q_prohibited_vulnerability",
      "ai_literacy": {
        "title": "AI Literacy (Art. 4)",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "All operators must ensure sufficient AI literacy of staff dealing with AI systems. This applies since 2 February 2025."
      },
      "translations": {
        "fr": {
          "title": "INTERDIT — Exploitation de vulnérabilités",
          "summary": "Votre système d’IA semble relever d’une pratique d’IA INTERDITE. Les systèmes d’IA qui exploitent les vulnérabilités dues à l’âge, au handicap ou à la situation sociale ou économique pour altérer substantiellement le comportement et causer un préjudice significatif sont interdits.",
          "verdict": "INTERDIT",
          "recommendations": [
            "Cette pratique est absolument interdite par l'AI Act de l'UE.",
            "Cesser immédiatement toute activité impliquant cette pratique d'IA interdite.",
            "Réaliser un audit interne pour identifier toute utilisation de cette pratique."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 février 2025",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          }
        },
        "nl": {
          "title": "VERBODEN — Misbruik van kwetsbaarheden",
          "summary": "Uw AI-systeem lijkt onder een VERBODEN AI-praktijk te vallen. AI-systemen die kwetsbaarheden misbruiken als gevolg van leeftijd, handicap of sociale of economische situatie om gedrag wezenlijk te verstoren en aanzienlijke schade te veroorzaken, zijn verboden.",
          "verdict": "VERBODEN",
          "recommendations": [
            "Deze praktijk is absoluut verboden onder de EU AI Act.",
            "Onmiddellijk alle activiteiten met betrekking tot deze verboden AI-praktijk staken.",
            "Een interne audit uitvoeren om elk gebruik van deze praktijk te identificeren."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 februari 2025",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          }
        },
        "de": {
          "title": "VERBOTEN — Ausnutzung von Schutzbedürftigkeit",
          "summary": "Ihr KI-System scheint unter eine VERBOTENE KI-Praktik zu fallen. KI-Systeme, die Schutzbedürftigkeiten aufgrund von Alter, Behinderung oder sozialer oder wirtschaftlicher Situation ausnutzen, um das Verhalten wesentlich zu beeinflussen und erheblichen Schaden zu verursachen, sind verboten.",
          "verdict": "VERBOTEN",
          "recommendations": [
            "Diese Praxis ist nach dem EU-KI-Gesetz absolut verboten.",
            "Alle Aktivitäten mit dieser verbotenen KI-Praxis sofort einstellen.",
            "Eine interne Prüfung durchführen, um jede Nutzung dieser Praxis zu identifizieren."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2. Februar 2025",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          }
        },
        "es": {
          "title": "PROHIBIDO — Explotación de vulnerabilidades",
          "summary": "Su sistema de IA parece estar incluido en una práctica de IA PROHIBIDA. Los sistemas de IA que explotan vulnerabilidades debidas a la edad, discapacidad o situación social o económica para distorsionar materialmente el comportamiento y causar un daño significativo están prohibidos.",
          "verdict": "PROHIBIDO",
          "recommendations": [
            "Esta práctica está absolutamente prohibida por la Ley de IA de la UE.",
            "Cesar inmediatamente toda actividad que implique esta práctica de IA prohibida.",
            "Realizar una auditoría interna para identificar cualquier uso de esta práctica."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 de febrero de 2025",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          }
        }
      }
    },
    "result_prohibited_social_scoring": {
      "id": "result_prohibited_social_scoring",
      "type": "result",
      "verdict": "PROHIBITED",
      "verdict_class": "prohibited",
      "title": "PROHIBITED — Social Scoring",
      "summary": "Your AI system appears to fall under a PROHIBITED AI practice. Social scoring systems that lead to detrimental treatment in unrelated contexts or disproportionate to behaviour are banned.",
      "legal_ref": "Art. 5(1)(c)",
      "effective_date": "2 February 2025",
      "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
      "obligations": [
        "CEASE all placing on the market, putting into service, and use of this system."
      ],
      "recommendations": [
        "Immediately review your system's design and intended use.",
        "Consult legal counsel to determine if your system can be redesigned to avoid this prohibition.",
        "Consider whether the social scoring mechanism can be removed or limited to avoid detrimental treatment."
      ],
      "previous": "q_prohibited_social_scoring",
      "ai_literacy": {
        "title": "AI Literacy (Art. 4)",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "All operators must ensure sufficient AI literacy of staff dealing with AI systems. This applies since 2 February 2025."
      },
      "translations": {
        "fr": {
          "title": "INTERDIT — Notation sociale",
          "summary": "Votre système d’IA semble relever d’une pratique d’IA INTERDITE. Les systèmes de notation sociale qui conduisent à un traitement préjudiciable dans des contextes sans rapport ou disproportionné par rapport au comportement sont interdits.",
          "verdict": "INTERDIT",
          "recommendations": [
            "Cette pratique est absolument interdite par l'AI Act de l'UE.",
            "Cesser immédiatement toute activité impliquant cette pratique d'IA interdite.",
            "Réaliser un audit interne pour identifier toute utilisation de cette pratique."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 février 2025",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          }
        },
        "nl": {
          "title": "VERBODEN — Sociale scoring",
          "summary": "Uw AI-systeem lijkt onder een VERBODEN AI-praktijk te vallen. Sociale scoringsystemen die leiden tot nadelige behandeling in ongerelateerde contexten of onevenredig aan het gedrag zijn verboden.",
          "verdict": "VERBODEN",
          "recommendations": [
            "Deze praktijk is absoluut verboden onder de EU AI Act.",
            "Onmiddellijk alle activiteiten met betrekking tot deze verboden AI-praktijk staken.",
            "Een interne audit uitvoeren om elk gebruik van deze praktijk te identificeren."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 februari 2025",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          }
        },
        "de": {
          "title": "VERBOTEN — Sozialbewertung",
          "summary": "Ihr KI-System scheint unter eine VERBOTENE KI-Praktik zu fallen. Sozialbewertungssysteme, die zu nachteiliger Behandlung in nicht zusammenhängenden Kontexten oder unverhältnismäßig zum Verhalten führen, sind verboten.",
          "verdict": "VERBOTEN",
          "recommendations": [
            "Diese Praxis ist nach dem EU-KI-Gesetz absolut verboten.",
            "Alle Aktivitäten mit dieser verbotenen KI-Praxis sofort einstellen.",
            "Eine interne Prüfung durchführen, um jede Nutzung dieser Praxis zu identifizieren."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2. Februar 2025",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          }
        },
        "es": {
          "title": "PROHIBIDO — Puntuación social",
          "summary": "Su sistema de IA parece estar incluido en una práctica de IA PROHIBIDA. Los sistemas de puntuación social que conducen a un trato perjudicial en contextos no relacionados o desproporcionado con respecto al comportamiento están prohibidos.",
          "verdict": "PROHIBIDO",
          "recommendations": [
            "Esta práctica está absolutamente prohibida por la Ley de IA de la UE.",
            "Cesar inmediatamente toda actividad que implique esta práctica de IA prohibida.",
            "Realizar una auditoría interna para identificar cualquier uso de esta práctica."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 de febrero de 2025",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          }
        }
      }
    },
    "result_prohibited_predictive_policing": {
      "id": "result_prohibited_predictive_policing",
      "type": "result",
      "verdict": "PROHIBITED",
      "verdict_class": "prohibited",
      "title": "PROHIBITED — Individual Predictive Policing",
      "summary": "Your AI system appears to fall under a PROHIBITED AI practice. AI systems that assess or predict criminal offence risk based solely on profiling or personality traits are banned.",
      "legal_ref": "Art. 5(1)(d)",
      "effective_date": "2 February 2025",
      "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
      "obligations": [
        "CEASE all placing on the market, putting into service, and use of this system."
      ],
      "note": "Exception: AI systems supporting human assessment based on objective, verifiable facts directly linked to criminal activity are permitted.",
      "previous": "q_prohibited_predictive_policing",
      "ai_literacy": {
        "title": "AI Literacy (Art. 4)",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "All operators must ensure sufficient AI literacy of staff dealing with AI systems. This applies since 2 February 2025."
      },
      "translations": {
        "fr": {
          "title": "INTERDIT — Police prédictive individuelle",
          "summary": "Votre système d’IA semble relever d’une pratique d’IA INTERDITE. Les systèmes d’IA qui évaluent ou prédisent le risque d’infraction pénale sur la seule base du profilage ou de traits de personnalité sont interdits.",
          "note": "Exception : les systèmes d’IA soutenant l’évaluation humaine fondée sur des faits objectifs et vérifiables directement liés à une activité criminelle sont autorisés.",
          "verdict": "INTERDIT",
          "recommendations": [
            "Cette pratique est absolument interdite par l'AI Act de l'UE.",
            "Cesser immédiatement toute activité impliquant cette pratique d'IA interdite.",
            "Réaliser un audit interne pour identifier toute utilisation de cette pratique.",
            "Consulter un conseiller juridique spécialisé en réglementation de l'IA.",
            "Documenter les étapes de mise en conformité et les actions correctives."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 février 2025",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          }
        },
        "nl": {
          "title": "VERBODEN — Individuele voorspellende politieactiviteiten",
          "summary": "Uw AI-systeem lijkt onder een VERBODEN AI-praktijk te vallen. AI-systemen die het risico op strafbare feiten beoordelen of voorspellen uitsluitend op basis van profilering of persoonlijkheidskenmerken zijn verboden.",
          "note": "Uitzondering: AI-systemen die menselijke beoordeling ondersteunen op basis van objectieve, verifieerbare feiten die rechtstreeks verband houden met criminele activiteit zijn toegestaan.",
          "verdict": "VERBODEN",
          "recommendations": [
            "Deze praktijk is absoluut verboden onder de EU AI Act.",
            "Onmiddellijk alle activiteiten met betrekking tot deze verboden AI-praktijk staken.",
            "Een interne audit uitvoeren om elk gebruik van deze praktijk te identificeren.",
            "Een juridisch adviseur raadplegen die gespecialiseerd is in AI-regelgeving.",
            "Nalevingsstappen en corrigerende maatregelen documenteren."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 februari 2025",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          }
        },
        "de": {
          "title": "VERBOTEN — Individuelle vorausschauende Polizeiarbeit",
          "summary": "Ihr KI-System scheint unter eine VERBOTENE KI-Praktik zu fallen. KI-Systeme, die das Risiko einer Straftat ausschließlich auf der Grundlage von Profiling oder Persönlichkeitsmerkmalen bewerten oder vorhersagen, sind verboten.",
          "note": "Ausnahme: KI-Systeme, die die menschliche Bewertung auf der Grundlage objektiver, überprüfbarer Fakten unterstützen, die in direktem Zusammenhang mit krimineller Aktivität stehen, sind zulässig.",
          "verdict": "VERBOTEN",
          "recommendations": [
            "Diese Praxis ist nach dem EU-KI-Gesetz absolut verboten.",
            "Alle Aktivitäten mit dieser verbotenen KI-Praxis sofort einstellen.",
            "Eine interne Prüfung durchführen, um jede Nutzung dieser Praxis zu identifizieren.",
            "Einen auf KI-Regulierung spezialisierten Rechtsberater konsultieren.",
            "Compliance-Schritte und Korrekturmaßnahmen dokumentieren."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2. Februar 2025",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          }
        },
        "es": {
          "title": "PROHIBIDO — Vigilancia policial predictiva individual",
          "summary": "Su sistema de IA parece estar incluido en una práctica de IA PROHIBIDA. Los sistemas de IA que evalúan o predicen el riesgo de comisión de delitos basándose únicamente en la elaboración de perfiles o rasgos de personalidad están prohibidos.",
          "note": "Excepción: los sistemas de IA que apoyan la evaluación humana basada en hechos objetivos y verificables directamente vinculados a la actividad delictiva están permitidos.",
          "verdict": "PROHIBIDO",
          "recommendations": [
            "Esta práctica está absolutamente prohibida por la Ley de IA de la UE.",
            "Cesar inmediatamente toda actividad que implique esta práctica de IA prohibida.",
            "Realizar una auditoría interna para identificar cualquier uso de esta práctica.",
            "Consultar a un asesor jurídico especializado en regulación de IA.",
            "Documentar los pasos de cumplimiento y las acciones correctivas."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 de febrero de 2025",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          }
        }
      },
      "recommendations": [
        "Cease any individual risk assessment AI practices based solely on profiling or personality traits that predict criminal offences (Art. 5(1)(d))",
        "Document the exception analysis if your system falls under the narrow exception for augmenting human assessments based on objective, verifiable facts directly linked to criminal activity",
        "Seek legal advice to confirm whether your specific use case qualifies for an exception",
        "Review and document existing AI systems for compliance before the prohibition takes effect (2 February 2025)",
        "Inform affected persons about AI-based risk assessments as required by transparency obligations"
      ]
    },
    "result_prohibited_facial_scraping": {
      "id": "result_prohibited_facial_scraping",
      "type": "result",
      "verdict": "PROHIBITED",
      "verdict_class": "prohibited",
      "title": "PROHIBITED — Facial Recognition DB Scraping",
      "summary": "Your AI system appears to fall under a PROHIBITED AI practice. AI systems that create or expand facial recognition databases through untargeted scraping from the internet or CCTV are banned.",
      "legal_ref": "Art. 5(1)(e)",
      "effective_date": "2 February 2025",
      "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
      "obligations": [
        "CEASE all placing on the market, putting into service, and use of this system."
      ],
      "recommendations": [
        "Immediately review your system's design and intended use.",
        "Consult legal counsel to determine if your system can be redesigned to avoid this prohibition.",
        "Consider alternative approaches to facial recognition that do not involve untargeted scraping."
      ],
      "previous": "q_prohibited_facial_scraping",
      "ai_literacy": {
        "title": "AI Literacy (Art. 4)",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "All operators must ensure sufficient AI literacy of staff dealing with AI systems. This applies since 2 February 2025."
      },
      "translations": {
        "fr": {
          "title": "INTERDIT — Extraction de bases de données de reconnaissance faciale",
          "summary": "Votre système d’IA semble relever d’une pratique d’IA INTERDITE. Les systèmes d’IA qui créent ou élargissent des bases de données de reconnaissance faciale par extraction non ciblée à partir d’internet ou de vidéosurveillance sont interdits.",
          "verdict": "INTERDIT",
          "recommendations": [
            "Cette pratique est absolument interdite par l'AI Act de l'UE.",
            "Cesser immédiatement toute activité impliquant cette pratique d'IA interdite.",
            "Réaliser un audit interne pour identifier toute utilisation de cette pratique."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 février 2025",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          }
        },
        "nl": {
          "title": "VERBODEN — Scraping van gezichtsherkenningsdatabases",
          "summary": "Uw AI-systeem lijkt onder een VERBODEN AI-praktijk te vallen. AI-systemen die gezichtsherkenningsdatabases creëren of uitbreiden door middel van niet-gerichte scraping van het internet of CCTV zijn verboden.",
          "verdict": "VERBODEN",
          "recommendations": [
            "Deze praktijk is absoluut verboden onder de EU AI Act.",
            "Onmiddellijk alle activiteiten met betrekking tot deze verboden AI-praktijk staken.",
            "Een interne audit uitvoeren om elk gebruik van deze praktijk te identificeren."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 februari 2025",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          }
        },
        "de": {
          "title": "VERBOTEN — Scraping von Gesichtserkennungsdatenbanken",
          "summary": "Ihr KI-System scheint unter eine VERBOTENE KI-Praktik zu fallen. KI-Systeme, die Gesichtserkennungsdatenbanken durch ungezieltes Scraping aus dem Internet oder von Videoüberwachungssystemen erstellen oder erweitern, sind verboten.",
          "verdict": "VERBOTEN",
          "recommendations": [
            "Diese Praxis ist nach dem EU-KI-Gesetz absolut verboten.",
            "Alle Aktivitäten mit dieser verbotenen KI-Praxis sofort einstellen.",
            "Eine interne Prüfung durchführen, um jede Nutzung dieser Praxis zu identifizieren."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2. Februar 2025",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          }
        },
        "es": {
          "title": "PROHIBIDO — Extracción de bases de datos de reconocimiento facial",
          "summary": "Su sistema de IA parece estar incluido en una práctica de IA PROHIBIDA. Los sistemas de IA que crean o amplían bases de datos de reconocimiento facial mediante extracción no selectiva de internet o de circuitos cerrados de televisión están prohibidos.",
          "verdict": "PROHIBIDO",
          "recommendations": [
            "Esta práctica está absolutamente prohibida por la Ley de IA de la UE.",
            "Cesar inmediatamente toda actividad que implique esta práctica de IA prohibida.",
            "Realizar una auditoría interna para identificar cualquier uso de esta práctica."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 de febrero de 2025",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          }
        }
      }
    },
    "result_prohibited_emotion": {
      "id": "result_prohibited_emotion",
      "type": "result",
      "verdict": "PROHIBITED",
      "verdict_class": "prohibited",
      "title": "PROHIBITED — Emotion Recognition in Workplace/Education",
      "summary": "Your AI system appears to fall under a PROHIBITED AI practice. AI systems that infer emotions in the workplace or education institutions are banned (unless for medical or safety reasons).",
      "legal_ref": "Art. 5(1)(f)",
      "effective_date": "2 February 2025",
      "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
      "obligations": [
        "CEASE all placing on the market, putting into service, and use of this system."
      ],
      "recommendations": [
        "Immediately review your system's design and intended use.",
        "Consult legal counsel to determine if your system qualifies for the medical/safety exception.",
        "Consider whether emotion recognition functionality can be removed or limited to permitted contexts."
      ],
      "previous": "q_prohibited_emotion_work_edu",
      "ai_literacy": {
        "title": "AI Literacy (Art. 4)",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "All operators must ensure sufficient AI literacy of staff dealing with AI systems. This applies since 2 February 2025."
      },
      "translations": {
        "fr": {
          "title": "INTERDIT — Reconnaissance des émotions sur le lieu de travail/dans l’éducation",
          "summary": "Votre système d’IA semble relever d’une pratique d’IA INTERDITE. Les systèmes d’IA qui infèrent les émotions sur le lieu de travail ou dans les établissements d’enseignement sont interdits (sauf pour des raisons médicales ou de sécurité).",
          "verdict": "INTERDIT",
          "recommendations": [
            "Cette pratique est absolument interdite par l'AI Act de l'UE.",
            "Cesser immédiatement toute activité impliquant cette pratique d'IA interdite.",
            "Réaliser un audit interne pour identifier toute utilisation de cette pratique."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 février 2025",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          }
        },
        "nl": {
          "title": "VERBODEN — Emotieherkenning op de werkplek/in het onderwijs",
          "summary": "Uw AI-systeem lijkt onder een VERBODEN AI-praktijk te vallen. AI-systemen die emoties afleiden op de werkplek of in onderwijsinstellingen zijn verboden (tenzij om medische of veiligheidsredenen).",
          "verdict": "VERBODEN",
          "recommendations": [
            "Deze praktijk is absoluut verboden onder de EU AI Act.",
            "Onmiddellijk alle activiteiten met betrekking tot deze verboden AI-praktijk staken.",
            "Een interne audit uitvoeren om elk gebruik van deze praktijk te identificeren."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 februari 2025",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          }
        },
        "de": {
          "title": "VERBOTEN — Emotionserkennung am Arbeitsplatz/in der Bildung",
          "summary": "Ihr KI-System scheint unter eine VERBOTENE KI-Praktik zu fallen. KI-Systeme, die Emotionen am Arbeitsplatz oder in Bildungseinrichtungen ableiten, sind verboten (es sei denn aus medizinischen oder sicherheitsrelevanten Gründen).",
          "verdict": "VERBOTEN",
          "recommendations": [
            "Diese Praxis ist nach dem EU-KI-Gesetz absolut verboten.",
            "Alle Aktivitäten mit dieser verbotenen KI-Praxis sofort einstellen.",
            "Eine interne Prüfung durchführen, um jede Nutzung dieser Praxis zu identifizieren."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2. Februar 2025",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          }
        },
        "es": {
          "title": "PROHIBIDO — Reconocimiento de emociones en el lugar de trabajo/educación",
          "summary": "Su sistema de IA parece estar incluido en una práctica de IA PROHIBIDA. Los sistemas de IA que infieren emociones en el lugar de trabajo o en centros educativos están prohibidos (salvo por razones médicas o de seguridad).",
          "verdict": "PROHIBIDO",
          "recommendations": [
            "Esta práctica está absolutamente prohibida por la Ley de IA de la UE.",
            "Cesar inmediatamente toda actividad que implique esta práctica de IA prohibida.",
            "Realizar una auditoría interna para identificar cualquier uso de esta práctica."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 de febrero de 2025",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          }
        }
      }
    },
    "result_prohibited_biometric_sensitive": {
      "id": "result_prohibited_biometric_sensitive",
      "type": "result",
      "verdict": "PROHIBITED",
      "verdict_class": "prohibited",
      "title": "PROHIBITED — Biometric Categorisation by Sensitive Attributes",
      "summary": "Your AI system appears to fall under a PROHIBITED AI practice. AI systems that categorise persons based on biometric data to infer race, political opinions, trade union membership, religious beliefs, sex life, or sexual orientation are banned.",
      "legal_ref": "Art. 5(1)(g)",
      "effective_date": "2 February 2025",
      "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
      "obligations": [
        "CEASE all placing on the market, putting into service, and use of this system."
      ],
      "recommendations": [
        "Immediately review your system's design and intended use.",
        "Consult legal counsel to determine if your system can be redesigned to avoid this prohibition.",
        "Consider whether biometric categorisation by sensitive attributes can be removed from the system."
      ],
      "previous": "q_prohibited_biometric_sensitive",
      "ai_literacy": {
        "title": "AI Literacy (Art. 4)",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "All operators must ensure sufficient AI literacy of staff dealing with AI systems. This applies since 2 February 2025."
      },
      "translations": {
        "fr": {
          "title": "INTERDIT — Catégorisation biométrique par attributs sensibles",
          "summary": "Votre système d’IA semble relever d’une pratique d’IA INTERDITE. Les systèmes d’IA qui catégorisent les personnes sur la base de données biométriques pour déduire la race, les opinions politiques, l’appartenance syndicale, les convictions religieuses, la vie sexuelle ou l’orientation sexuelle sont interdits.",
          "verdict": "INTERDIT",
          "recommendations": [
            "Cette pratique est absolument interdite par l'AI Act de l'UE.",
            "Cesser immédiatement toute activité impliquant cette pratique d'IA interdite.",
            "Réaliser un audit interne pour identifier toute utilisation de cette pratique."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 février 2025",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          }
        },
        "nl": {
          "title": "VERBODEN — Biometrische categorisering op basis van gevoelige kenmerken",
          "summary": "Uw AI-systeem lijkt onder een VERBODEN AI-praktijk te vallen. AI-systemen die personen categoriseren op basis van biometrische gegevens om ras, politieke opvattingen, vakbondslidmaatschap, religieuze overtuigingen, seksleven of seksuele geaardheid af te leiden zijn verboden.",
          "verdict": "VERBODEN",
          "recommendations": [
            "Deze praktijk is absoluut verboden onder de EU AI Act.",
            "Onmiddellijk alle activiteiten met betrekking tot deze verboden AI-praktijk staken.",
            "Een interne audit uitvoeren om elk gebruik van deze praktijk te identificeren."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 februari 2025",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          }
        },
        "de": {
          "title": "VERBOTEN — Biometrische Kategorisierung nach sensiblen Merkmalen",
          "summary": "Ihr KI-System scheint unter eine VERBOTENE KI-Praktik zu fallen. KI-Systeme, die Personen auf der Grundlage biometrischer Daten kategorisieren, um Rasse, politische Meinungen, Gewerkschaftszugehörigkeit, religiöse Überzeugungen, Sexualleben oder sexuelle Orientierung abzuleiten, sind verboten.",
          "verdict": "VERBOTEN",
          "recommendations": [
            "Diese Praxis ist nach dem EU-KI-Gesetz absolut verboten.",
            "Alle Aktivitäten mit dieser verbotenen KI-Praxis sofort einstellen.",
            "Eine interne Prüfung durchführen, um jede Nutzung dieser Praxis zu identifizieren."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2. Februar 2025",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          }
        },
        "es": {
          "title": "PROHIBIDO — Categorización biométrica por atributos sensibles",
          "summary": "Su sistema de IA parece estar incluido en una práctica de IA PROHIBIDA. Los sistemas de IA que categorizan a las personas sobre la base de datos biométricos para inferir la raza, las opiniones políticas, la afiliación sindical, las convicciones religiosas, la vida sexual o la orientación sexual están prohibidos.",
          "verdict": "PROHIBIDO",
          "recommendations": [
            "Esta práctica está absolutamente prohibida por la Ley de IA de la UE.",
            "Cesar inmediatamente toda actividad que implique esta práctica de IA prohibida.",
            "Realizar una auditoría interna para identificar cualquier uso de esta práctica."
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 de febrero de 2025",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          }
        }
      }
    },
    "result_prohibited_realtime_rbi": {
      "id": "result_prohibited_realtime_rbi",
      "type": "result",
      "verdict": "PROHIBITED",
      "verdict_class": "prohibited",
      "title": "PROHIBITED — Real-Time Remote Biometric Identification",
      "summary": "Your AI system appears to fall under a PROHIBITED AI practice. Real-time remote biometric identification in publicly accessible spaces for law enforcement is banned, with only narrow exceptions.",
      "legal_ref": "Art. 5(1)(h)",
      "effective_date": "2 February 2025",
      "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
      "obligations": [
        "CEASE all placing on the market, putting into service, and use of this system."
      ],
      "note": "Narrow exceptions exist for: (i) searching for victims of abduction/trafficking, (ii) preventing imminent threats to life, (iii) locating suspects of serious crimes punishable by a custodial sentence of a maximum period of at least four years (Art. 5(1)(h)(iii)) — all requiring prior judicial authorisation.",
      "previous": "q_prohibited_realtime_rbi",
      "ai_literacy": {
        "title": "AI Literacy (Art. 4)",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "All operators must ensure sufficient AI literacy of staff dealing with AI systems. This applies since 2 February 2025."
      },
      "translations": {
        "fr": {
          "title": "INTERDIT — Identification biométrique à distance en temps réel",
          "summary": "Votre système d’IA semble relever d’une pratique d’IA INTERDITE. L’identification biométrique à distance en temps réel dans des espaces accessibles au public à des fins répressives est interdite, avec seulement des exceptions très limitées.",
          "note": "Des exceptions limitées existent pour : (i) la recherche de victimes d'enlèvement/traite, (ii) la prévention de menaces imminentes pour la vie, (iii) la localisation de suspects de crimes graves passibles d'une peine privative de liberté d'une durée maximale d'au moins quatre ans (art. 5(1)(h)(iii)) — toutes nécessitant une autorisation judiciaire préalable.",
          "verdict": "INTERDIT",
          "recommendations": [
            "Cette pratique est absolument interdite par l'AI Act de l'UE.",
            "Cesser immédiatement toute activité impliquant cette pratique d'IA interdite.",
            "Réaliser un audit interne pour identifier toute utilisation de cette pratique.",
            "Consulter un conseiller juridique spécialisé en réglementation de l'IA.",
            "Documenter les étapes de mise en conformité et les actions correctives.",
            "Conduct a fundamental rights impact assessment (FRIA) before putting the system into use"
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 février 2025",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          }
        },
        "nl": {
          "title": "VERBODEN — Real-time biometrische identificatie op afstand",
          "summary": "Uw AI-systeem lijkt onder een VERBODEN AI-praktijk te vallen. Real-time biometrische identificatie op afstand in openbaar toegankelijke ruimten voor rechtshandhaving is verboden, met slechts beperkte uitzonderingen.",
          "note": "Beperkte uitzonderingen bestaan voor: (i) het zoeken naar slachtoffers van ontvoering/mensenhandel, (ii) het voorkomen van onmiddellijke bedreigingen voor het leven, (iii) het lokaliseren van verdachten van ernstige misdrijven die strafbaar zijn met een maximale gevangenisstraf van ten minste vier jaar (art. 5(1)(h)(iii)) — allemaal met voorafgaande rechterlijke toestemming.",
          "verdict": "VERBODEN",
          "recommendations": [
            "Deze praktijk is absoluut verboden onder de EU AI Act.",
            "Onmiddellijk alle activiteiten met betrekking tot deze verboden AI-praktijk staken.",
            "Een interne audit uitvoeren om elk gebruik van deze praktijk te identificeren.",
            "Een juridisch adviseur raadplegen die gespecialiseerd is in AI-regelgeving.",
            "Nalevingsstappen en corrigerende maatregelen documenteren.",
            "Een effectbeoordeling op het gebied van de grondrechten (FRIA) uitvoeren voordat het systeem in gebruik wordt genomen"
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 februari 2025",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          }
        },
        "de": {
          "title": "VERBOTEN — Biometrische Echtzeit-Fernidentifizierung",
          "summary": "Ihr KI-System scheint unter eine VERBOTENE KI-Praktik zu fallen. Die biometrische Echtzeit-Fernidentifizierung in öffentlich zugänglichen Räumen für Strafverfolgungszwecke ist verboten, mit nur engen Ausnahmen.",
          "note": "Enge Ausnahmen bestehen für: (i) die Suche nach Opfern von Entführung/Menschenhandel, (ii) die Abwendung unmittelbarer Bedrohungen für das Leben, (iii) die Lokalisierung von Verdächtigen schwerer Straftaten, die mit einer Freiheitsstrafe mit einer Höchstdauer von mindestens vier Jahren bedroht sind (Art. 5(1)(h)(iii)) — alle erfordern eine vorherige richterliche Genehmigung.",
          "verdict": "VERBOTEN",
          "recommendations": [
            "Diese Praxis ist nach dem EU-KI-Gesetz absolut verboten.",
            "Alle Aktivitäten mit dieser verbotenen KI-Praxis sofort einstellen.",
            "Eine interne Prüfung durchführen, um jede Nutzung dieser Praxis zu identifizieren.",
            "Einen auf KI-Regulierung spezialisierten Rechtsberater konsultieren.",
            "Compliance-Schritte und Korrekturmaßnahmen dokumentieren.",
            "Eine Grundrechte-Folgenabschätzung (FRIA) durchführen, bevor das System in Betrieb genommen wird"
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2. Februar 2025",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          }
        },
        "es": {
          "title": "PROHIBIDO — Identificación biométrica remota en tiempo real",
          "summary": "Su sistema de IA parece estar incluido en una práctica de IA PROHIBIDA. La identificación biométrica remota en tiempo real en espacios de acceso público con fines de aplicación de la ley está prohibida, con solo excepciones muy limitadas.",
          "note": "Existen excepciones limitadas para: (i) la búsqueda de víctimas de secuestro/trata, (ii) la prevención de amenazas inminentes para la vida, (iii) la localización de sospechosos de delitos graves castigados con una pena privativa de libertad de una duración máxima de al menos cuatro años (art. 5(1)(h)(iii)) — todas requieren autorización judicial previa.",
          "verdict": "PROHIBIDO",
          "recommendations": [
            "Esta práctica está absolutamente prohibida por la Ley de IA de la UE.",
            "Cesar inmediatamente toda actividad que implique esta práctica de IA prohibida.",
            "Realizar una auditoría interna para identificar cualquier uso de esta práctica.",
            "Consultar a un asesor jurídico especializado en regulación de IA.",
            "Documentar los pasos de cumplimiento y las acciones correctivas.",
            "Realizar una evaluación de impacto sobre los derechos fundamentales (FRIA) antes de poner el sistema en servicio"
          ],
          "obligations": [
            "CEASE all placing on the market, putting into service, and use of this system."
          ],
          "penalty": "Up to €35 million or 7% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 de febrero de 2025",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          }
        }
      },
      "recommendations": [
        "Cease any real-time remote biometric identification in publicly accessible spaces for law enforcement unless authorised under Art. 5(2)-(3)",
        "If you believe an exception applies, verify authorisation by a judicial authority or independent administrative authority",
        "Document strict necessity and proportionality assessment for any authorised use",
        "Implement safeguards: time/geographic/personal scope limitations as required by Art. 5(2)",
        "Notify the relevant market surveillance authority and data protection authority before deployment",
        "Conduct a fundamental rights impact assessment (FRIA) before putting the system into use"
      ]
    },
    "result_high_risk_section_b": {
      "id": "result_high_risk_section_b",
      "type": "result",
      "verdict": "HIGH_RISK_LIMITED",
      "verdict_class": "high-risk",
      "title": "HIGH-RISK — Annex I Section B (Limited Applicability)",
      "summary": "Your AI system is classified as high-risk under Art. 6(1) as a safety component of a product covered by Annex I, Section B. However, per Art. 2(2), only LIMITED provisions of the AI Act apply to Section B products. The full Chapter III requirements (Art. 9-15 etc.) are handled through the existing sectoral harmonisation legislation, not directly by the AI Act.",
      "legal_ref": "Art. 2(2), Art. 6(1), Annex I Section B",
      "effective_date": "2 August 2027",
      "applicable_provisions": [
        {
          "ref": "Art. 6(1)",
          "summary": "High-risk classification rule"
        },
        {
          "ref": "Art. 102-109",
          "summary": "Amendments to sectoral legislation incorporating AI Act requirements"
        },
        {
          "ref": "Art. 112",
          "summary": "Evaluation and review provisions"
        },
        {
          "ref": "Art. 57",
          "summary": "Regulatory sandboxes — only insofar as AI Act requirements are integrated into the sectoral legislation"
        }
      ],
      "obligations": [],
      "recommendations": [
        "Comply with the AI-specific requirements integrated into your product's sectoral legislation (e.g., motor vehicle type-approval, aviation safety).",
        "The full AI Act high-risk requirements (risk management, data governance, etc.) do NOT apply directly — they apply through the sectoral legislation's own framework.",
        "Monitor developments in your sector's legislation for AI-specific implementing measures.",
        "AI literacy obligations (Art. 4) still apply since 2 February 2025.",
        "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
      ],
      "previous": "q_high_risk_annex_i",
      "note_delegated_acts": {
        "title": "Delegated Acts — High-Risk List Updates",
        "ref": "Art. 7(1)",
        "summary": "The European Commission is empowered to adopt delegated acts to update the list of high-risk AI systems in Annex III by adding or modifying use cases. Monitor updates as they may affect your system's classification."
      },
      "ai_literacy": {
        "title": "AI Literacy (Art. 4)",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "All operators must ensure sufficient AI literacy of staff dealing with AI systems. This applies since 2 February 2025."
      },
      "translations": {
        "fr": {
          "title": "HAUT RISQUE — Annexe I Section B (applicabilité limitée)",
          "summary": "Votre système d’IA est classé à haut risque au titre de l’art. 6(1) en tant que composant de sécurité d’un produit couvert par l’annexe I, section B. Toutefois, conformément à l’art. 2(2), seules des dispositions LIMITÉES du règlement sur l’IA s’appliquent aux produits de la section B.",
          "verdict": "HAUT RISQUE (APPLICABILITÉ LIMITÉE)",
          "recommendations": [
            "Se conformer aux exigences spécifiques à l'IA intégrées dans la législation sectorielle de votre produit (p. ex. réception par type des véhicules à moteur, sécurité aérienne).",
            "Les exigences complètes du règlement sur l'IA pour les systèmes à haut risque (gestion des risques, gouvernance des données, etc.) ne s'appliquent PAS directement — elles s'appliquent via le cadre propre de la législation sectorielle.",
            "Surveiller les évolutions de la législation de votre secteur concernant les mesures d'exécution spécifiques à l'IA.",
            "Les obligations de maîtrise de l'IA (art. 4) s'appliquent toujours depuis le 2 février 2025.",
            "D'autres législations de l'UE peuvent s'appliquer parallèlement ou indépendamment du règlement sur l'IA, notamment : le RGPD (protection des données), le DSA (responsabilité des intermédiaires, art. 2(5)), les règles de protection des consommateurs et de sécurité des produits (art. 2(9)), et les États membres peuvent avoir des lois plus favorables en matière de protection des travailleurs (art. 2(11))."
          ],
          "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 août 2027",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          },
          "note_delegated_acts": {
            "title": "Actes délégués — Mises à jour de la liste des systèmes à haut risque",
            "ref": "Art. 7(1)",
            "summary": "La Commission européenne est habilitée à adopter des actes délégués pour mettre à jour la liste des systèmes d'IA à haut risque de l'annexe III en ajoutant ou modifiant des cas d'utilisation. Surveillez les mises à jour car elles peuvent affecter la classification de votre système."
          },
          "applicable_provisions": [
            {
              "ref": "Art. 6(1)",
              "summary": "High-risk classification rule"
            },
            {
              "ref": "Art. 102-109",
              "summary": "Amendments to sectoral legislation incorporating AI Act requirements"
            },
            {
              "ref": "Art. 112",
              "summary": "Evaluation and review provisions"
            },
            {
              "ref": "Art. 57",
              "summary": "Regulatory sandboxes — only insofar as AI Act requirements are integrated into the sectoral legislation"
            }
          ]
        },
        "nl": {
          "title": "HOOG RISICO — Bijlage I Sectie B (beperkte toepasselijkheid)",
          "summary": "Uw AI-systeem is geclassificeerd als hoog risico krachtens art. 6(1) als veiligheidscomponent van een product dat onder bijlage I, sectie B valt. Echter, op grond van art. 2(2) zijn slechts BEPERKTE bepalingen van de AI-verordening van toepassing op producten van sectie B.",
          "verdict": "HOOG RISICO (BEPERKTE TOEPASSELIJKHEID)",
          "recommendations": [
            "Voldoe aan de AI-specifieke eisen die zijn geïntegreerd in de sectorale wetgeving voor uw product (bijv. typegoedkeuring van motorvoertuigen, luchtvaartveiligheid).",
            "De volledige eisen van de AI-verordening voor systemen met een hoog risico (risicobeheer, datagovernance, enz.) zijn NIET rechtstreeks van toepassing — zij gelden via het eigen kader van de sectorale wetgeving.",
            "Volg de ontwikkelingen in de wetgeving van uw sector met betrekking tot AI-specifieke uitvoeringsmaatregelen.",
            "De verplichtingen inzake AI-geletterdheid (art. 4) blijven van toepassing sinds 2 februari 2025.",
            "Andere EU-wetgeving kan naast of onafhankelijk van de AI-verordening van toepassing zijn, waaronder: de AVG (gegevensbescherming), de DSA (aansprakelijkheid van tussenpersonen, art. 2(5)), regels inzake consumentenbescherming en productveiligheid (art. 2(9)), en lidstaten kunnen gunstigere wetgeving ter bescherming van werknemers hebben (art. 2(11))."
          ],
          "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 augustus 2027",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          },
          "note_delegated_acts": {
            "title": "Gedelegeerde handelingen — Updates van de lijst van AI-systemen met een hoog risico",
            "ref": "Art. 7(1)",
            "summary": "De Europese Commissie is bevoegd om gedelegeerde handelingen vast te stellen om de lijst van AI-systemen met een hoog risico in bijlage III bij te werken door gebruikscategorieën toe te voegen of te wijzigen. Houd updates in de gaten, want deze kunnen de classificatie van uw systeem beïnvloeden."
          },
          "applicable_provisions": [
            {
              "ref": "Art. 6(1)",
              "summary": "High-risk classification rule"
            },
            {
              "ref": "Art. 102-109",
              "summary": "Amendments to sectoral legislation incorporating AI Act requirements"
            },
            {
              "ref": "Art. 112",
              "summary": "Evaluation and review provisions"
            },
            {
              "ref": "Art. 57",
              "summary": "Regulatory sandboxes — only insofar as AI Act requirements are integrated into the sectoral legislation"
            }
          ]
        },
        "de": {
          "title": "HOCHRISIKO — Anhang I Abschnitt B (eingeschränkte Anwendbarkeit)",
          "summary": "Ihr KI-System ist gemäß Art. 6(1) als Hochrisiko-System eingestuft, da es eine Sicherheitskomponente eines unter Anhang I, Abschnitt B fallenden Produkts ist. Gemäß Art. 2(2) gelten jedoch nur EINGESCHRÄNKTE Bestimmungen der KI-Verordnung für Produkte des Abschnitts B.",
          "verdict": "HOCHRISIKO (EINGESCHRÄNKTE ANWENDBARKEIT)",
          "recommendations": [
            "Erfüllen Sie die KI-spezifischen Anforderungen, die in die sektoralen Rechtsvorschriften für Ihr Produkt integriert sind (z. B. Typgenehmigung von Kraftfahrzeugen, Flugsicherheit).",
            "Die vollständigen Hochrisiko-Anforderungen der KI-Verordnung (Risikomanagement, Daten-Governance usw.) gelten NICHT unmittelbar — sie gelten über den eigenen Rahmen der sektoralen Rechtsvorschriften.",
            "Beobachten Sie die Entwicklungen in den Rechtsvorschriften Ihres Sektors hinsichtlich KI-spezifischer Durchführungsmaßnahmen.",
            "Die Pflichten zur KI-Kompetenz (Art. 4) gelten weiterhin seit dem 2. Februar 2025.",
            "Andere EU-Rechtsvorschriften können neben oder unabhängig von der KI-Verordnung gelten, darunter: die DSGVO (Datenschutz), der DSA (Haftung von Vermittlern, Art. 2(5)), Verbraucherschutz- und Produktsicherheitsvorschriften (Art. 2(9)); zudem können Mitgliedstaaten günstigere Arbeitnehmerschutzgesetze haben (Art. 2(11))."
          ],
          "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2. August 2027",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          },
          "note_delegated_acts": {
            "title": "Delegierte Rechtsakte — Aktualisierungen der Hochrisiko-Liste",
            "ref": "Art. 7(1)",
            "summary": "Die Europäische Kommission ist ermächtigt, delegierte Rechtsakte zu erlassen, um die Liste der Hochrisiko-KI-Systeme in Anhang III durch Hinzufügen oder Ändern von Anwendungsfällen zu aktualisieren. Überwachen Sie Aktualisierungen, da diese die Klassifizierung Ihres Systems beeinflussen können."
          },
          "applicable_provisions": [
            {
              "ref": "Art. 6(1)",
              "summary": "High-risk classification rule"
            },
            {
              "ref": "Art. 102-109",
              "summary": "Amendments to sectoral legislation incorporating KI-Gesetz requirements"
            },
            {
              "ref": "Art. 112",
              "summary": "Evaluation and review provisions"
            },
            {
              "ref": "Art. 57",
              "summary": "Regulatory sandboxes — only insofar as KI-Gesetz requirements are integrated into the sectoral legislation"
            }
          ]
        },
        "es": {
          "title": "ALTO RIESGO — Anexo I Sección B (aplicabilidad limitada)",
          "summary": "Su sistema de IA está clasificado como de alto riesgo en virtud del art. 6(1) como componente de seguridad de un producto cubierto por el anexo I, sección B. Sin embargo, conforme al art. 2(2), solo se aplican disposiciones LIMITADAS del Reglamento de IA a los productos de la sección B.",
          "verdict": "ALTO RIESGO (APLICABILIDAD LIMITADA)",
          "recommendations": [
            "Cumplir con los requisitos específicos de IA integrados en la legislación sectorial de su producto (p. ej., homologación de tipo de vehículos de motor, seguridad aérea).",
            "Los requisitos completos del Reglamento de IA para sistemas de alto riesgo (gestión de riesgos, gobernanza de datos, etc.) NO se aplican directamente — se aplican a través del propio marco de la legislación sectorial.",
            "Supervisar la evolución de la legislación de su sector en relación con las medidas de ejecución específicas de IA.",
            "Las obligaciones de alfabetización en IA (art. 4) siguen siendo aplicables desde el 2 de febrero de 2025.",
            "Otra legislación de la UE puede aplicarse junto con el Reglamento de IA o de forma independiente, incluyendo: el RGPD (protección de datos), la DSA (responsabilidad de intermediarios, art. 2(5)), las normas de protección del consumidor y seguridad de los productos (art. 2(9)), y los Estados miembros pueden tener leyes más favorables de protección de los trabajadores (art. 2(11))."
          ],
          "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 de agosto de 2027",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          },
          "note_delegated_acts": {
            "title": "Actos delegados — Actualizaciones de la lista de alto riesgo",
            "ref": "Art. 7(1)",
            "summary": "La Comisión Europea está facultada para adoptar actos delegados para actualizar la lista de sistemas de IA de alto riesgo del anexo III, añadiendo o modificando casos de uso. Supervise las actualizaciones, ya que pueden afectar la clasificación de su sistema."
          },
          "applicable_provisions": [
            {
              "ref": "Art. 6(1)",
              "summary": "High-risk classification rule"
            },
            {
              "ref": "Art. 102-109",
              "summary": "Amendments to sectoral legislation incorporating Ley de IA requirements"
            },
            {
              "ref": "Art. 112",
              "summary": "Evaluation and review provisions"
            },
            {
              "ref": "Art. 57",
              "summary": "Regulatory sandboxes — only insofar as Ley de IA requirements are integrated into the sectoral legislation"
            }
          ]
        }
      },
      "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)"
    },
    "result_high_risk": {
      "id": "result_high_risk",
      "type": "result",
      "verdict": "HIGH_RISK",
      "verdict_class": "high-risk",
      "title": "HIGH-RISK AI System",
      "summary": "Your AI system is classified as HIGH-RISK under the EU AI Act. Significant compliance obligations apply.",
      "legal_ref": "Art. 6, Chapter III",
      "effective_date_annex_i": "2 August 2027 (Annex I / product safety route)",
      "effective_date_annex_iii": "2 August 2026 (Annex III / application areas route)",
      "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)",
      "obligations_provider": [
        {
          "id": "HR-P1",
          "title": "Risk Management System",
          "ref": "Art. 9",
          "summary": "Establish and maintain a risk management system throughout the AI system's lifecycle"
        },
        {
          "id": "HR-P2",
          "title": "Data & Data Governance",
          "ref": "Art. 10",
          "summary": "Ensure training, validation, and testing datasets meet quality criteria"
        },
        {
          "id": "HR-P3",
          "title": "Technical Documentation",
          "ref": "Art. 11, Annex IV",
          "summary": "Draw up and maintain technical documentation demonstrating compliance"
        },
        {
          "id": "HR-P4",
          "title": "Record-Keeping (Logging)",
          "ref": "Art. 12",
          "summary": "Enable automatic recording of events (logs) for traceability"
        },
        {
          "id": "HR-P5",
          "title": "Transparency & Instructions for Use",
          "ref": "Art. 13",
          "summary": "Design for transparency; provide instructions for use to deployers"
        },
        {
          "id": "HR-P6",
          "title": "Human Oversight",
          "ref": "Art. 14",
          "summary": "Design for effective human oversight during use"
        },
        {
          "id": "HR-P7",
          "title": "Accuracy, Robustness & Cybersecurity",
          "ref": "Art. 15",
          "summary": "Achieve appropriate levels of accuracy, robustness, and cybersecurity"
        },
        {
          "id": "HR-P8",
          "title": "Quality Management System",
          "ref": "Art. 17",
          "summary": "Put in place a quality management system"
        },
        {
          "id": "HR-P9",
          "title": "Conformity Assessment",
          "ref": "Art. 43, Annex VI/VII",
          "summary": "Undergo conformity assessment before placing on market"
        },
        {
          "id": "HR-P10",
          "title": "EU Declaration of Conformity",
          "ref": "Art. 47, Annex V",
          "summary": "Draw up EU declaration of conformity"
        },
        {
          "id": "HR-P11",
          "title": "CE Marking",
          "ref": "Art. 48",
          "summary": "Affix CE marking"
        },
        {
          "id": "HR-P12",
          "title": "EU Database Registration",
          "ref": "Art. 49, Annex VIII",
          "summary": "Register in the EU database before placing on market"
        },
        {
          "id": "HR-P13",
          "title": "Post-Market Monitoring",
          "ref": "Art. 72",
          "summary": "Establish post-market monitoring system"
        },
        {
          "id": "HR-P14",
          "title": "Serious Incident Reporting",
          "ref": "Art. 73",
          "summary": "Report serious incidents to market surveillance authorities"
        },
        {
          "id": "HR-P15",
          "title": "Documentation Retention",
          "ref": "Art. 18",
          "summary": "Keep documentation for 10 years after last system on market"
        },
        {
          "id": "P-ACC",
          "title": "Accessibility Requirements",
          "ref": "Art. 16(j)",
          "summary": "Ensure the AI system complies with accessibility requirements in accordance with Directives (EU) 2016/2102 and (EU) 2019/882, where applicable."
        }
      ],
      "obligations_deployer": [
        {
          "id": "HR-D1",
          "title": "Use per Instructions",
          "ref": "Art. 26(1)",
          "summary": "Use the system in accordance with provider's instructions"
        },
        {
          "id": "HR-D2",
          "title": "Human Oversight",
          "ref": "Art. 26(2)",
          "summary": "Assign competent, trained natural persons for human oversight"
        },
        {
          "id": "HR-D3",
          "title": "Input Data Quality",
          "ref": "Art. 26(4)",
          "summary": "Ensure input data is relevant and representative for intended purpose"
        },
        {
          "id": "HR-D4",
          "title": "Monitor Operation",
          "ref": "Art. 26(5)",
          "summary": "Monitor operation and inform provider/distributor of risks"
        },
        {
          "id": "HR-D5",
          "title": "Log Retention",
          "ref": "Art. 26(6)",
          "summary": "Keep automatically generated logs for minimum 6 months"
        },
        {
          "id": "HR-D6",
          "title": "Worker Information",
          "ref": "Art. 26(7)",
          "summary": "Inform workers' representatives and affected workers before deployment"
        },
        {
          "id": "HR-D7",
          "title": "Inform Affected Persons",
          "ref": "Art. 26(8)",
          "summary": "Inform natural persons subject to decisions by the AI system"
        },
        {
          "id": "HR-D8",
          "title": "FRIA (if public authority)",
          "ref": "Art. 27",
          "summary": "Conduct fundamental rights impact assessment before deployment (public authorities and certain private deployers). Note: Art. 27(10) excludes high-risk AI systems used in the area of critical infrastructure (Annex III, point 2) from the FRIA obligation."
        },
        {
          "id": "HR-D9",
          "title": "EU Database Registration (if public authority)",
          "ref": "Art. 49",
          "summary": "Register use in the EU database (public authorities)"
        },
        {
          "id": "D-DPIA",
          "title": "Data Protection Impact Assessment",
          "ref": "Art. 26(10)",
          "summary": "Carry out a DPIA under Regulation (EU) 2016/679 or Directive (EU) 2016/680, using the output of the FRIA as input where applicable."
        }
      ],
      "obligations_importer": [
        {
          "id": "HR-I1",
          "title": "Verify Compliance",
          "ref": "Art. 23",
          "summary": "Verify conformity assessment, CE marking, documentation, and provider obligations before placing on market"
        }
      ],
      "obligations_distributor": [
        {
          "id": "HR-Dist1",
          "title": "Verify Compliance",
          "ref": "Art. 24",
          "summary": "Verify CE marking, documentation; ensure storage/transport conditions don't jeopardise compliance"
        }
      ],
      "obligations_transparency_cumulative": {
        "note": "Art. 50 transparency obligations apply CUMULATIVELY with high-risk requirements (Art. 50(6)). Check the transparency_obligations attribute for applicable Art. 50 obligations.",
        "possible_obligations": [
          {
            "id": "HR-T1",
            "title": "AI Interaction Disclosure",
            "ref": "Art. 50(1)",
            "condition": "chatbot_disclosure",
            "who": "Provider",
            "summary": "Inform persons they are interacting with AI"
          },
          {
            "id": "HR-T2",
            "title": "Synthetic Content Marking",
            "ref": "Art. 50(2)",
            "condition": "synthetic_content_marking",
            "who": "Provider",
            "summary": "Mark outputs in machine-readable format as AI-generated"
          },
          {
            "id": "HR-T3",
            "title": "Emotion/Biometric Disclosure",
            "ref": "Art. 50(3)",
            "condition": "emotion_biometric_disclosure",
            "who": "Deployer",
            "summary": "Inform persons exposed to emotion recognition or biometric categorisation"
          },
          {
            "id": "HR-T4",
            "title": "Deep Fake Disclosure",
            "ref": "Art. 50(4)",
            "condition": "deepfake_disclosure",
            "who": "Deployer",
            "summary": "Disclose that content is AI-generated or manipulated"
          },
          {
            "id": "HR-T5",
            "title": "AI-Generated Text Disclosure",
            "ref": "Art. 50(4)",
            "condition": "ai_generated_text_disclosure",
            "who": "Deployer",
            "summary": "Disclose AI-generated text on public interest matters"
          }
        ]
      },
      "gpai_obligations_if_applicable": {
        "note": "If this high-risk AI system integrates a GPAI model and you are also the GPAI model provider, additional GPAI obligations apply under Art. 53-55. Check is_gpai_model attribute.",
        "cross_reference": "result_gpai"
      },
      "ai_literacy": {
        "title": "AI Literacy",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "Ensure sufficient AI literacy of staff dealing with AI systems — applies to ALL operators"
      },
      "timeline": {
        "annex_iii_systems": "Obligations apply from 2 August 2026",
        "annex_i_systems": "Obligations apply from 2 August 2027",
        "existing_systems": "Systems on market before applicable date: only if significant design changes after that date",
        "large_scale_it": "Large-scale IT systems (Annex X): compliance by 31 December 2030",
        "public_authority_existing_systems": "Public authority deployers of existing high-risk AI systems must comply by 2 August 2030 (Art. 111(2))"
      },
      "previous": "q_role_result_router",
      "note_delegated_acts": {
        "title": "Delegated Acts — High-Risk List Updates",
        "ref": "Art. 7(1)",
        "summary": "The European Commission is empowered to adopt delegated acts to update the list of high-risk AI systems in Annex III by adding or modifying use cases. The Commission must take into account criteria including the intended purpose, severity of harm, degree of autonomy, and number of affected persons. Monitor updates to Annex III as they may affect your system's classification."
      },
      "recommendations": [
        "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
      ],
      "sector_specific_guidance": {
        "healthcare_medical_devices": {
          "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
          "documents": [
            "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual conformity assessment"
          ],
          "key_points": [
            "Medical device AI requiring third-party CA under MDR/IVDR is high-risk under Art. 6(1)",
            "A single notified body can handle both MDR and AI Act conformity assessment",
            "Clinical evaluation data may satisfy some AI Act requirements (Art. 9)"
          ]
        },
        "financial_services": {
          "title": "Sector-Specific Guidance: Banking / Financial Services",
          "documents": [
            "E1: AI Act implications for banking/payments (EBA, 2025)",
            "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
            "E5: AI Act overview for financial services (Eurofi, 2024)"
          ],
          "key_points": [
            "Credit scoring / creditworthiness AI is high-risk under Annex III, point 5(b)",
            "Existing banking regulation (CRR/CRD, DORA) partially covers AI Act requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
            "Fraud detection AI is explicitly excluded from the credit scoring high-risk category"
          ]
        },
        "insurance": {
          "title": "Sector-Specific Guidance: Insurance",
          "documents": [
            "E4: Insurance AI governance opinion (EIOPA, 2025)"
          ],
          "key_points": [
            "Life/health insurance risk assessment and pricing AI is high-risk under Annex III, point 5(c)",
            "EIOPA recommends risk-based governance for ALL insurance AI, including non-high-risk",
            "Fairness metrics required: demographic parity, equalized odds, predictive parity"
          ]
        },
        "energy": {
          "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
          "documents": [
            "F1: Energy sector high-risk classification consultation (Eurelectric, 2025)",
            "F2: AI and the energy sector briefing (EPRS, 2025)"
          ],
          "key_points": [
            "'Safety component' should be limited to systems that directly impact infrastructure safety",
            "Predictive maintenance typically NOT high-risk (enhances existing safety, doesn't replace it)",
            "Smart grid management may be high-risk if it has direct safety impact — case-by-case"
          ]
        }
      },
      "fria_guidance": {
        "title": "FRIA Guidance",
        "documents": [
          "G3: Practical guide to FRIA (DIHR/ECNL, 2025)"
        ],
        "who_must_conduct": [
          "Public authorities / bodies governed by public law",
          "Private entities providing essential public services",
          "Banking/insurance companies using AI under Annex III points 5(b)-(c)"
        ],
        "process": "5 phases: Planning & Scoping, Assess & Mitigate, Deployment Decision, Monitoring, Consulting Affected Groups"
      },
      "translations": {
        "fr": {
          "title": "Système d’IA à HAUT RISQUE",
          "summary": "Votre système d’IA est classé à HAUT RISQUE au titre du règlement européen sur l’IA. Des obligations de conformité importantes s’appliquent.",
          "verdict": "HAUT RISQUE",
          "recommendations": [
            "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)",
          "effective_date_annex_i": "2 août 2027 (Annex I / product safety route)",
          "effective_date_annex_iii": "2 août 2026 (Annex III / application areas route)",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          },
          "note_delegated_acts": {
            "title": "Actes délégués — Mises à jour de la liste des systèmes à haut risque",
            "ref": "Art. 7(1)",
            "summary": "La Commission européenne est habilitée à adopter des actes délégués pour mettre à jour la liste des systèmes d'IA à haut risque de l'annexe III en ajoutant ou modifiant des cas d'utilisation. Surveillez les mises à jour car elles peuvent affecter la classification de votre système."
          },
          "obligations_provider": [
            {
              "id": "HR-P1",
              "title": "Système de gestion des risques",
              "ref": "Art. 9",
              "summary": "Établir et maintenir un système de gestion des risques tout au long du cycle de vie du système d'IA"
            },
            {
              "id": "HR-P2",
              "title": "Gouvernance des données",
              "ref": "Art. 10",
              "summary": "Veiller à ce que les données d'entraînement, de validation et de test respectent les critères de qualité"
            },
            {
              "id": "HR-P3",
              "title": "Documentation technique",
              "ref": "Art. 11, Annex IV",
              "summary": "Rédiger la documentation technique avant la mise sur le marché et la tenir à jour"
            },
            {
              "id": "HR-P4",
              "title": "Journalisation automatique",
              "ref": "Art. 12",
              "summary": "Veiller à ce que le système d'IA dispose de capacités de journalisation automatique"
            },
            {
              "id": "HR-P5",
              "title": "Transparence et information",
              "ref": "Art. 13",
              "summary": "S'assurer que le système est transparent et fournir des instructions d'utilisation aux déployeurs"
            },
            {
              "id": "HR-P6",
              "title": "Contrôle humain",
              "ref": "Art. 14",
              "summary": "Concevoir le système de manière à permettre un contrôle humain effectif"
            },
            {
              "id": "HR-P7",
              "title": "Exactitude, robustesse et cybersécurité",
              "ref": "Art. 15",
              "summary": "Concevoir et développer le système pour atteindre des niveaux appropriés d'exactitude, de robustesse et de cybersécurité"
            },
            {
              "id": "HR-P8",
              "title": "Système de gestion de la qualité",
              "ref": "Art. 17",
              "summary": "Mettre en place un système de gestion de la qualité"
            },
            {
              "id": "HR-P9",
              "title": "Évaluation de conformité",
              "ref": "Art. 43, Annex VI/VII",
              "summary": "Effectuer l'évaluation de conformité applicable avant la mise sur le marché"
            },
            {
              "id": "HR-P10",
              "title": "Déclaration de conformité UE",
              "ref": "Art. 47, Annex V",
              "summary": "Rédiger la déclaration de conformité UE et apposer le marquage CE"
            },
            {
              "id": "HR-P11",
              "title": "Enregistrement dans la base de données de l'UE",
              "ref": "Art. 48",
              "summary": "Enregistrer le système dans la base de données de l'UE avant la mise sur le marché"
            },
            {
              "id": "HR-P12",
              "title": "Mesures correctives et rappel",
              "ref": "Art. 49, Annex VIII",
              "summary": "Prendre des mesures correctives, informer les déployeurs et les autorités de tout non-conformité"
            },
            {
              "id": "HR-P13",
              "title": "Coopération avec les autorités",
              "ref": "Art. 72",
              "summary": "Coopérer avec les autorités nationales compétentes et fournir les informations demandées"
            },
            {
              "id": "HR-P14",
              "title": "Accessibilité",
              "ref": "Art. 73",
              "summary": "Respecter les exigences d'accessibilité conformément aux directives applicables"
            },
            {
              "id": "HR-P15",
              "title": "Surveillance après commercialisation",
              "ref": "Art. 18",
              "summary": "Mettre en place un système de surveillance après commercialisation proportionné"
            },
            {
              "id": "P-ACC",
              "title": "Accessibilité (Art. 16(k))",
              "ref": "Art. 16(j)",
              "summary": "Veiller à ce que le système d'IA respecte les exigences d'accessibilité"
            }
          ],
          "obligations_deployer": [
            {
              "id": "HR-D1",
              "title": "Utilisation conforme aux instructions",
              "ref": "Art. 26(1)",
              "summary": "Utiliser le système conformément aux instructions d'utilisation du fournisseur"
            },
            {
              "id": "HR-D2",
              "title": "Contrôle humain",
              "ref": "Art. 26(2)",
              "summary": "S'assurer que les personnes chargées du contrôle humain sont compétentes et habilitées"
            },
            {
              "id": "HR-D3",
              "title": "Pertinence des données d'entrée",
              "ref": "Art. 26(4)",
              "summary": "Veiller à la pertinence des données d'entrée au regard de la finalité du système"
            },
            {
              "id": "HR-D4",
              "title": "Surveillance du fonctionnement",
              "ref": "Art. 26(5)",
              "summary": "Surveiller le fonctionnement du système sur la base des instructions d'utilisation"
            },
            {
              "id": "HR-D5",
              "title": "Conservation des journaux",
              "ref": "Art. 26(6)",
              "summary": "Conserver les journaux générés automatiquement dans la mesure où ils sont sous leur contrôle"
            },
            {
              "id": "HR-D6",
              "title": "Information des travailleurs",
              "ref": "Art. 26(7)",
              "summary": "Informer les représentants des travailleurs et les travailleurs concernés de l'utilisation du système"
            },
            {
              "id": "HR-D7",
              "title": "Coopération avec les autorités",
              "ref": "Art. 26(8)",
              "summary": "Coopérer avec les autorités nationales compétentes"
            },
            {
              "id": "HR-D8",
              "title": "Analyse d'impact sur les droits fondamentaux (FRIA)",
              "ref": "Art. 27",
              "summary": "Réaliser une analyse d'impact sur les droits fondamentaux avant le déploiement (Art. 27). Remarque : l'Art. 27(10) exclut les systèmes de l'annexe III, point 2 (infrastructures critiques) de cette obligation."
            },
            {
              "id": "HR-D9",
              "title": "Enregistrement d'utilisation dans la base de données de l'UE",
              "ref": "Art. 49",
              "summary": "Enregistrer l'utilisation du système dans la base de données de l'UE"
            },
            {
              "id": "D-DPIA",
              "title": "Analyse d'impact relative à la protection des données (AIPD)",
              "ref": "Art. 26(10)",
              "summary": "Réaliser une AIPD conformément au règlement (UE) 2016/679 ou à la directive (UE) 2016/680"
            }
          ],
          "obligations_importer": [
            {
              "id": "HR-I1",
              "title": "Vérifier la conformité",
              "ref": "Art. 23",
              "summary": "Vérifier que le fournisseur a effectué l'évaluation de conformité et établi la documentation technique"
            }
          ],
          "obligations_distributor": [
            {
              "id": "HR-Dist1",
              "title": "Vérifier la conformité",
              "ref": "Art. 24",
              "summary": "Vérifier que le système porte le marquage CE et est accompagné de la documentation requise"
            }
          ],
          "obligations_transparency_cumulative": {
            "note": "Art. 50 obligations de transparence apply CUMULATIVELY with à haut risque requirements (Art. 50(6)). Check the transparency_obligations attribute for applicable Art. 50 obligations.",
            "possible_obligations": [
              {
                "id": "HR-T1",
                "title": "Divulgation d'interaction avec l'IA",
                "ref": "Art. 50(1)",
                "condition": "chatbot_disclosure",
                "who": "Fournisseur",
                "summary": "Concevoir le système pour informer les personnes qu'elles interagissent avec une IA (sauf si c'est évident du contexte)"
              },
              {
                "id": "HR-T2",
                "title": "Marquage du contenu synthétique",
                "ref": "Art. 50(2)",
                "condition": "synthetic_content_marking",
                "who": "Fournisseur",
                "summary": "Marquer les sorties de contenu synthétique (images, audio, vidéo) dans un format lisible par machine"
              },
              {
                "id": "HR-T3",
                "title": "Divulgation de reconnaissance d'émotions / biométrique",
                "ref": "Art. 50(3)",
                "condition": "emotion_biometric_disclosure",
                "who": "Déployeur",
                "summary": "Informer les personnes de l'utilisation de systèmes de reconnaissance d'émotions ou de catégorisation biométrique"
              },
              {
                "id": "HR-T4",
                "title": "Divulgation de deepfake",
                "ref": "Art. 50(4)",
                "condition": "deepfake_disclosure",
                "who": "Déployeur",
                "summary": "Divulguer que le contenu a été généré ou manipulé artificiellement (deepfakes)"
              },
              {
                "id": "HR-T5",
                "title": "Divulgation de texte généré par IA",
                "ref": "Art. 50(4)",
                "condition": "ai_generated_text_disclosure",
                "who": "Déployeur",
                "summary": "Indiquer que le texte a été généré par IA lorsqu'il est publié pour informer le public"
              }
            ]
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual évaluation de conformité"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is à haut risque under Art. 6(1)",
                "A single notified body can handle both MDR and AI Act évaluation de conformité",
                "Clinical evaluation data may satisfy some AI Act requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: AI Act implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: AI Act overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is à haut risque under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers AI Act requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring à haut risque category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is à haut risque under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-à haut risque",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector à haut risque classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT à haut risque (enhances existing safety, doesn't replace it)",
                "Smart grid management may be à haut risque if it has direct safety impact — case-by-case"
              ]
            }
          },
          "fria_guidance": {
            "title": "FRIA Guidance",
            "documents": [
              "G3: Practical guide to FRIA (DIHR/ECNL, 2025)"
            ],
            "who_must_conduct": [
              "Public authorities / bodies governed by public law",
              "Private entities providing essential public services",
              "Banking/insurance companies using AI under Annex III points 5(b)-(c)"
            ],
            "process": "5 phases: Planning & Scoping, Assess & Mitigate, Deployment Decision, Monitoring, Consulting Affected Groups"
          }
        },
        "nl": {
          "title": "HOOG RISICO AI-systeem",
          "summary": "Uw AI-systeem is geclassificeerd als HOOG RISICO krachtens de EU AI-verordening. Er gelden aanzienlijke nalevingsverplichtingen.",
          "verdict": "HOOG RISICO",
          "recommendations": [
            "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)",
          "effective_date_annex_i": "2 augustus 2027 (Annex I / product safety route)",
          "effective_date_annex_iii": "2 augustus 2026 (Annex III / application areas route)",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          },
          "note_delegated_acts": {
            "title": "Gedelegeerde handelingen — Updates van de lijst van AI-systemen met een hoog risico",
            "ref": "Art. 7(1)",
            "summary": "De Europese Commissie is bevoegd om gedelegeerde handelingen vast te stellen om de lijst van AI-systemen met een hoog risico in bijlage III bij te werken door gebruikscategorieën toe te voegen of te wijzigen. Houd updates in de gaten, want deze kunnen de classificatie van uw systeem beïnvloeden."
          },
          "obligations_provider": [
            {
              "id": "HR-P1",
              "title": "Risicobeheersysteem",
              "ref": "Art. 9",
              "summary": "Een risicobeheersysteem opzetten en onderhouden gedurende de gehele levenscyclus van het AI-systeem"
            },
            {
              "id": "HR-P2",
              "title": "Datagovernance",
              "ref": "Art. 10",
              "summary": "Ervoor zorgen dat trainings-, validatie- en testgegevens voldoen aan kwaliteitscriteria"
            },
            {
              "id": "HR-P3",
              "title": "Technische documentatie",
              "ref": "Art. 11, Annex IV",
              "summary": "Technische documentatie opstellen vóór het in de handel brengen en deze bijwerken"
            },
            {
              "id": "HR-P4",
              "title": "Automatische registratie",
              "ref": "Art. 12",
              "summary": "Ervoor zorgen dat het AI-systeem over mogelijkheden voor automatische registratie beschikt"
            },
            {
              "id": "HR-P5",
              "title": "Transparantie en informatieverstrekking",
              "ref": "Art. 13",
              "summary": "Ervoor zorgen dat het systeem transparant is en gebruiksinstructies aan gebruiksverantwoordelijken verstrekken"
            },
            {
              "id": "HR-P6",
              "title": "Menselijk toezicht",
              "ref": "Art. 14",
              "summary": "Het systeem zodanig ontwerpen dat effectief menselijk toezicht mogelijk is"
            },
            {
              "id": "HR-P7",
              "title": "Nauwkeurigheid, robuustheid en cyberveiligheid",
              "ref": "Art. 15",
              "summary": "Het systeem ontwerpen en ontwikkelen om passende niveaus van nauwkeurigheid, robuustheid en cyberveiligheid te bereiken"
            },
            {
              "id": "HR-P8",
              "title": "Kwaliteitsmanagementsysteem",
              "ref": "Art. 17",
              "summary": "Een kwaliteitsmanagementsysteem opzetten"
            },
            {
              "id": "HR-P9",
              "title": "Conformiteitsbeoordeling",
              "ref": "Art. 43, Annex VI/VII",
              "summary": "De toepasselijke conformiteitsbeoordeling uitvoeren vóór het in de handel brengen"
            },
            {
              "id": "HR-P10",
              "title": "EU-conformiteitsverklaring",
              "ref": "Art. 47, Annex V",
              "summary": "De EU-conformiteitsverklaring opstellen en de CE-markering aanbrengen"
            },
            {
              "id": "HR-P11",
              "title": "Registratie in de EU-databank",
              "ref": "Art. 48",
              "summary": "Het systeem registreren in de EU-databank vóór het in de handel brengen"
            },
            {
              "id": "HR-P12",
              "title": "Corrigerende maatregelen en terugroeping",
              "ref": "Art. 49, Annex VIII",
              "summary": "Corrigerende maatregelen nemen, gebruiksverantwoordelijken en autoriteiten informeren over non-conformiteit"
            },
            {
              "id": "HR-P13",
              "title": "Samenwerking met autoriteiten",
              "ref": "Art. 72",
              "summary": "Samenwerken met de bevoegde nationale autoriteiten en gevraagde informatie verstrekken"
            },
            {
              "id": "HR-P14",
              "title": "Toegankelijkheid",
              "ref": "Art. 73",
              "summary": "Voldoen aan de toegankelijkheidsvereisten overeenkomstig de toepasselijke richtlijnen"
            },
            {
              "id": "HR-P15",
              "title": "Monitoring na het in de handel brengen",
              "ref": "Art. 18",
              "summary": "Een evenredig monitoringsysteem na het in de handel brengen opzetten"
            },
            {
              "id": "P-ACC",
              "title": "Toegankelijkheid (Art. 16(k))",
              "ref": "Art. 16(j)",
              "summary": "Ervoor zorgen dat het AI-systeem voldoet aan de toegankelijkheidsvereisten"
            }
          ],
          "obligations_deployer": [
            {
              "id": "HR-D1",
              "title": "Gebruik volgens instructies",
              "ref": "Art. 26(1)",
              "summary": "Het systeem gebruiken in overeenstemming met de gebruiksinstructies van de aanbieder"
            },
            {
              "id": "HR-D2",
              "title": "Menselijk toezicht",
              "ref": "Art. 26(2)",
              "summary": "Ervoor zorgen dat de personen belast met menselijk toezicht bekwaam en bevoegd zijn"
            },
            {
              "id": "HR-D3",
              "title": "Relevantie invoergegevens",
              "ref": "Art. 26(4)",
              "summary": "Zorgen voor de relevantie van invoergegevens met betrekking tot het beoogde doel van het systeem"
            },
            {
              "id": "HR-D4",
              "title": "Monitoring van de werking",
              "ref": "Art. 26(5)",
              "summary": "De werking van het systeem monitoren op basis van de gebruiksinstructies"
            },
            {
              "id": "HR-D5",
              "title": "Bewaring van logbestanden",
              "ref": "Art. 26(6)",
              "summary": "De automatisch gegenereerde logbestanden bewaren voor zover deze onder hun controle vallen"
            },
            {
              "id": "HR-D6",
              "title": "Informatie aan werknemers",
              "ref": "Art. 26(7)",
              "summary": "De werknemersvertegenwoordigers en betrokken werknemers informeren over het gebruik van het systeem"
            },
            {
              "id": "HR-D7",
              "title": "Samenwerking met autoriteiten",
              "ref": "Art. 26(8)",
              "summary": "Samenwerken met de bevoegde nationale autoriteiten"
            },
            {
              "id": "HR-D8",
              "title": "Effectbeoordeling grondrechten (FRIA)",
              "ref": "Art. 27",
              "summary": "Een effectbeoordeling grondrechten uitvoeren vóór de inzet (Art. 27). Opmerking: Art. 27(10) sluit systemen van bijlage III, punt 2 (kritieke infrastructuur) uit van deze verplichting."
            },
            {
              "id": "HR-D9",
              "title": "Registratie van gebruik in de EU-databank",
              "ref": "Art. 49",
              "summary": "Het gebruik van het systeem registreren in de EU-databank"
            },
            {
              "id": "D-DPIA",
              "title": "Gegevensbeschermingseffectbeoordeling (DPIA)",
              "ref": "Art. 26(10)",
              "summary": "Een DPIA uitvoeren overeenkomstig Verordening (EU) 2016/679 of Richtlijn (EU) 2016/680"
            }
          ],
          "obligations_importer": [
            {
              "id": "HR-I1",
              "title": "Conformiteit verifiëren",
              "ref": "Art. 23",
              "summary": "Controleren of de aanbieder de conformiteitsbeoordeling heeft uitgevoerd en de technische documentatie heeft opgesteld"
            }
          ],
          "obligations_distributor": [
            {
              "id": "HR-Dist1",
              "title": "Conformiteit verifiëren",
              "ref": "Art. 24",
              "summary": "Controleren of het systeem de CE-markering draagt en vergezeld gaat van de vereiste documentatie"
            }
          ],
          "obligations_transparency_cumulative": {
            "note": "Art. 50 transparantieverplichtingen apply CUMULATIVELY with hoog risico requirements (Art. 50(6)). Check the transparency_obligations attribute for applicable Art. 50 obligations.",
            "possible_obligations": [
              {
                "id": "HR-T1",
                "title": "AI-interactie-openbaarmaking",
                "ref": "Art. 50(1)",
                "condition": "chatbot_disclosure",
                "who": "Aanbieder",
                "summary": "Het systeem ontwerpen om personen te informeren dat zij met een AI communiceren (tenzij dit uit de context duidelijk is)"
              },
              {
                "id": "HR-T2",
                "title": "Markering van synthetische inhoud",
                "ref": "Art. 50(2)",
                "condition": "synthetic_content_marking",
                "who": "Aanbieder",
                "summary": "Synthetische inhoud (afbeeldingen, audio, video) markeren in een machineleesbaar formaat"
              },
              {
                "id": "HR-T3",
                "title": "Openbaarmaking emotieherkenning / biometrie",
                "ref": "Art. 50(3)",
                "condition": "emotion_biometric_disclosure",
                "who": "Gebruiksverantwoordelijke",
                "summary": "Personen informeren over het gebruik van emotieherkennings- of biometrische categoriseringssystemen"
              },
              {
                "id": "HR-T4",
                "title": "Deepfake-openbaarmaking",
                "ref": "Art. 50(4)",
                "condition": "deepfake_disclosure",
                "who": "Gebruiksverantwoordelijke",
                "summary": "Bekendmaken dat de inhoud kunstmatig is gegenereerd of gemanipuleerd (deepfakes)"
              },
              {
                "id": "HR-T5",
                "title": "Openbaarmaking van AI-gegenereerde tekst",
                "ref": "Art. 50(4)",
                "condition": "ai_generated_text_disclosure",
                "who": "Gebruiksverantwoordelijke",
                "summary": "Vermelden dat de tekst door AI is gegenereerd wanneer deze wordt gepubliceerd om het publiek te informeren"
              }
            ]
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual conformiteitsbeoordeling"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is hoog risico under Art. 6(1)",
                "A single notified body can handle both MDR and AI Act conformiteitsbeoordeling",
                "Clinical evaluation data may satisfy some AI Act requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: AI Act implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: AI Act overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is hoog risico under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers AI Act requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring hoog risico category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is hoog risico under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-hoog risico",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector hoog risico classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT hoog risico (enhances existing safety, doesn't replace it)",
                "Smart grid management may be hoog risico if it has direct safety impact — case-by-case"
              ]
            }
          },
          "fria_guidance": {
            "title": "FRIA Guidance",
            "documents": [
              "G3: Practical guide to FRIA (DIHR/ECNL, 2025)"
            ],
            "who_must_conduct": [
              "Public authorities / bodies governed by public law",
              "Private entities providing essential public services",
              "Banking/insurance companies using AI under Annex III points 5(b)-(c)"
            ],
            "process": "5 phases: Planning & Scoping, Assess & Mitigate, Deployment Decision, Monitoring, Consulting Affected Groups"
          }
        },
        "de": {
          "title": "HOCHRISIKO-KI-System",
          "summary": "Ihr KI-System ist als HOCHRISIKO gemäß der EU-KI-Verordnung eingestuft. Es gelten erhebliche Compliance-Pflichten.",
          "verdict": "HOCHRISIKO",
          "recommendations": [
            "Other EU legislation may apply alongside or independently of the KI-Gesetz, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)",
          "effective_date_annex_i": "2. August 2027 (Annex I / product safety route)",
          "effective_date_annex_iii": "2. August 2026 (Annex III / application areas route)",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          },
          "note_delegated_acts": {
            "title": "Delegierte Rechtsakte — Aktualisierungen der Hochrisiko-Liste",
            "ref": "Art. 7(1)",
            "summary": "Die Europäische Kommission ist ermächtigt, delegierte Rechtsakte zu erlassen, um die Liste der Hochrisiko-KI-Systeme in Anhang III durch Hinzufügen oder Ändern von Anwendungsfällen zu aktualisieren. Überwachen Sie Aktualisierungen, da diese die Klassifizierung Ihres Systems beeinflussen können."
          },
          "obligations_provider": [
            {
              "id": "HR-P1",
              "title": "Risikomanagementsystem",
              "ref": "Art. 9",
              "summary": "Ein Risikomanagementsystem während des gesamten Lebenszyklus des KI-Systems einrichten und aufrechterhalten"
            },
            {
              "id": "HR-P2",
              "title": "Daten-Governance",
              "ref": "Art. 10",
              "summary": "Sicherstellen, dass Trainings-, Validierungs- und Testdaten die Qualitätskriterien erfüllen"
            },
            {
              "id": "HR-P3",
              "title": "Technische Dokumentation",
              "ref": "Art. 11, Annex IV",
              "summary": "Technische Dokumentation vor dem Inverkehrbringen erstellen und aktuell halten"
            },
            {
              "id": "HR-P4",
              "title": "Automatische Protokollierung",
              "ref": "Art. 12",
              "summary": "Sicherstellen, dass das KI-System über automatische Protokollierungsfunktionen verfügt"
            },
            {
              "id": "HR-P5",
              "title": "Transparenz und Informationsbereitstellung",
              "ref": "Art. 13",
              "summary": "Sicherstellen, dass das System transparent ist, und den Betreibern Gebrauchsanweisungen bereitstellen"
            },
            {
              "id": "HR-P6",
              "title": "Menschliche Aufsicht",
              "ref": "Art. 14",
              "summary": "Das System so zu gestalten, dass eine wirksame menschliche Aufsicht ermöglicht wird"
            },
            {
              "id": "HR-P7",
              "title": "Genauigkeit, Robustheit und Cybersicherheit",
              "ref": "Art. 15",
              "summary": "Das System so zu konzipieren und zu entwickeln, dass angemessene Genauigkeit, Robustheit und Cybersicherheit erreicht werden"
            },
            {
              "id": "HR-P8",
              "title": "Qualitätsmanagementsystem",
              "ref": "Art. 17",
              "summary": "Ein Qualitätsmanagementsystem einrichten"
            },
            {
              "id": "HR-P9",
              "title": "Konformitätsbewertung",
              "ref": "Art. 43, Annex VI/VII",
              "summary": "Die anwendbare Konformitätsbewertung vor dem Inverkehrbringen durchführen"
            },
            {
              "id": "HR-P10",
              "title": "EU-Konformitätserklärung",
              "ref": "Art. 47, Annex V",
              "summary": "Die EU-Konformitätserklärung erstellen und die CE-Kennzeichnung anbringen"
            },
            {
              "id": "HR-P11",
              "title": "Registrierung in der EU-Datenbank",
              "ref": "Art. 48",
              "summary": "Das System vor dem Inverkehrbringen in der EU-Datenbank registrieren"
            },
            {
              "id": "HR-P12",
              "title": "Korrekturmaßnahmen und Rückruf",
              "ref": "Art. 49, Annex VIII",
              "summary": "Korrekturmaßnahmen ergreifen, Betreiber und Behörden über Nichtkonformität informieren"
            },
            {
              "id": "HR-P13",
              "title": "Zusammenarbeit mit Behörden",
              "ref": "Art. 72",
              "summary": "Mit den zuständigen nationalen Behörden zusammenarbeiten und angeforderte Informationen bereitstellen"
            },
            {
              "id": "HR-P14",
              "title": "Barrierefreiheit",
              "ref": "Art. 73",
              "summary": "Barrierefreiheitsanforderungen gemäß den geltenden Richtlinien einhalten"
            },
            {
              "id": "HR-P15",
              "title": "Überwachung nach dem Inverkehrbringen",
              "ref": "Art. 18",
              "summary": "Ein verhältnismäßiges Überwachungssystem nach dem Inverkehrbringen einrichten"
            },
            {
              "id": "P-ACC",
              "title": "Barrierefreiheit (Art. 16(k))",
              "ref": "Art. 16(j)",
              "summary": "Sicherstellen, dass das KI-System die Barrierefreiheitsanforderungen erfüllt"
            }
          ],
          "obligations_deployer": [
            {
              "id": "HR-D1",
              "title": "Nutzung gemäß Anweisungen",
              "ref": "Art. 26(1)",
              "summary": "Das System gemäß den Gebrauchsanweisungen des Anbieters verwenden"
            },
            {
              "id": "HR-D2",
              "title": "Menschliche Aufsicht",
              "ref": "Art. 26(2)",
              "summary": "Sicherstellen, dass die mit der menschlichen Aufsicht betrauten Personen kompetent und befugt sind"
            },
            {
              "id": "HR-D3",
              "title": "Relevanz der Eingabedaten",
              "ref": "Art. 26(4)",
              "summary": "Die Relevanz der Eingabedaten im Hinblick auf den Zweck des Systems sicherstellen"
            },
            {
              "id": "HR-D4",
              "title": "Überwachung des Betriebs",
              "ref": "Art. 26(5)",
              "summary": "Den Betrieb des Systems auf Grundlage der Gebrauchsanweisungen überwachen"
            },
            {
              "id": "HR-D5",
              "title": "Aufbewahrung von Protokollen",
              "ref": "Art. 26(6)",
              "summary": "Die automatisch generierten Protokolle aufbewahren, soweit diese unter ihrer Kontrolle stehen"
            },
            {
              "id": "HR-D6",
              "title": "Information der Arbeitnehmer",
              "ref": "Art. 26(7)",
              "summary": "Arbeitnehmervertreter und betroffene Arbeitnehmer über die Nutzung des Systems informieren"
            },
            {
              "id": "HR-D7",
              "title": "Zusammenarbeit mit Behörden",
              "ref": "Art. 26(8)",
              "summary": "Mit den zuständigen nationalen Behörden zusammenarbeiten"
            },
            {
              "id": "HR-D8",
              "title": "Grundrechte-Folgenabschätzung (FRIA)",
              "ref": "Art. 27",
              "summary": "Vor dem Einsatz eine Grundrechte-Folgenabschätzung durchführen (Art. 27). Hinweis: Art. 27(10) nimmt Systeme nach Anhang III Nr. 2 (kritische Infrastruktur) von dieser Pflicht aus."
            },
            {
              "id": "HR-D9",
              "title": "Nutzungsregistrierung in der EU-Datenbank",
              "ref": "Art. 49",
              "summary": "Die Nutzung des Systems in der EU-Datenbank registrieren"
            },
            {
              "id": "D-DPIA",
              "title": "Datenschutz-Folgenabschätzung (DSFA)",
              "ref": "Art. 26(10)",
              "summary": "Eine DSFA gemäß Verordnung (EU) 2016/679 oder Richtlinie (EU) 2016/680 durchführen"
            }
          ],
          "obligations_importer": [
            {
              "id": "HR-I1",
              "title": "Konformität überprüfen",
              "ref": "Art. 23",
              "summary": "Überprüfen, dass der Anbieter die Konformitätsbewertung durchgeführt und die technische Dokumentation erstellt hat"
            }
          ],
          "obligations_distributor": [
            {
              "id": "HR-Dist1",
              "title": "Konformität überprüfen",
              "ref": "Art. 24",
              "summary": "Überprüfen, dass das System die CE-Kennzeichnung trägt und die erforderliche Dokumentation beiliegt"
            }
          ],
          "obligations_transparency_cumulative": {
            "note": "Art. 50 Transparenzpflichten apply CUMULATIVELY with Hochrisiko requirements (Art. 50(6)). Check the transparency_obligations attribute for applicable Art. 50 obligations.",
            "possible_obligations": [
              {
                "id": "HR-T1",
                "title": "KI-Interaktionsoffenlegung",
                "ref": "Art. 50(1)",
                "condition": "chatbot_disclosure",
                "who": "Anbieter",
                "summary": "Das System so zu gestalten, dass Personen darüber informiert werden, dass sie mit einer KI interagieren (sofern dies nicht aus dem Kontext ersichtlich ist)"
              },
              {
                "id": "HR-T2",
                "title": "Kennzeichnung synthetischer Inhalte",
                "ref": "Art. 50(2)",
                "condition": "synthetic_content_marking",
                "who": "Anbieter",
                "summary": "Synthetische Inhalte (Bilder, Audio, Video) in einem maschinenlesbaren Format kennzeichnen"
              },
              {
                "id": "HR-T3",
                "title": "Offenlegung von Emotionserkennung / Biometrie",
                "ref": "Art. 50(3)",
                "condition": "emotion_biometric_disclosure",
                "who": "Betreiber",
                "summary": "Personen über die Verwendung von Emotionserkennungs- oder biometrischen Kategorisierungssystemen informieren"
              },
              {
                "id": "HR-T4",
                "title": "Deepfake-Offenlegung",
                "ref": "Art. 50(4)",
                "condition": "deepfake_disclosure",
                "who": "Betreiber",
                "summary": "Offenlegen, dass der Inhalt künstlich erzeugt oder manipuliert wurde (Deepfakes)"
              },
              {
                "id": "HR-T5",
                "title": "Offenlegung von KI-generiertem Text",
                "ref": "Art. 50(4)",
                "condition": "ai_generated_text_disclosure",
                "who": "Betreiber",
                "summary": "Angeben, dass der Text von KI generiert wurde, wenn er veröffentlicht wird, um die Öffentlichkeit zu informieren"
              }
            ]
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: KI-Gesetz + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual Konformitätsbewertung"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is Hochrisiko under Art. 6(1)",
                "A single notified body can handle both MDR and KI-Gesetz Konformitätsbewertung",
                "Clinical evaluation data may satisfy some KI-Gesetz requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: KI-Gesetz implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: KI-Gesetz overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is Hochrisiko under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers KI-Gesetz requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring Hochrisiko category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is Hochrisiko under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-Hochrisiko",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector Hochrisiko classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT Hochrisiko (enhances existing safety, doesn't replace it)",
                "Smart grid management may be Hochrisiko if it has direct safety impact — case-by-case"
              ]
            }
          },
          "fria_guidance": {
            "title": "FRIA Guidance",
            "documents": [
              "G3: Practical guide to FRIA (DIHR/ECNL, 2025)"
            ],
            "who_must_conduct": [
              "Public authorities / bodies governed by public law",
              "Private entities providing essential public services",
              "Banking/insurance companies using AI under Annex III points 5(b)-(c)"
            ],
            "process": "5 phases: Planning & Scoping, Assess & Mitigate, Deployment Decision, Monitoring, Consulting Affected Groups"
          }
        },
        "es": {
          "title": "Sistema de IA de ALTO RIESGO",
          "summary": "Su sistema de IA está clasificado como de ALTO RIESGO en virtud del Reglamento Europeo de IA. Se aplican obligaciones de cumplimiento significativas.",
          "verdict": "ALTO RIESGO",
          "recommendations": [
            "Other EU legislation may apply alongside or independently of the Ley de IA, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)",
          "effective_date_annex_i": "2 de agosto de 2027 (Annex I / product safety route)",
          "effective_date_annex_iii": "2 de agosto de 2026 (Annex III / application areas route)",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          },
          "note_delegated_acts": {
            "title": "Actos delegados — Actualizaciones de la lista de alto riesgo",
            "ref": "Art. 7(1)",
            "summary": "La Comisión Europea está facultada para adoptar actos delegados para actualizar la lista de sistemas de IA de alto riesgo del anexo III, añadiendo o modificando casos de uso. Supervise las actualizaciones, ya que pueden afectar la clasificación de su sistema."
          },
          "obligations_provider": [
            {
              "id": "HR-P1",
              "title": "Sistema de gestión de riesgos",
              "ref": "Art. 9",
              "summary": "Establecer y mantener un sistema de gestión de riesgos a lo largo del ciclo de vida del sistema de IA"
            },
            {
              "id": "HR-P2",
              "title": "Gobernanza de datos",
              "ref": "Art. 10",
              "summary": "Garantizar que los datos de entrenamiento, validación y prueba cumplan los criterios de calidad"
            },
            {
              "id": "HR-P3",
              "title": "Documentación técnica",
              "ref": "Art. 11, Annex IV",
              "summary": "Elaborar la documentación técnica antes de la comercialización y mantenerla actualizada"
            },
            {
              "id": "HR-P4",
              "title": "Registro automático",
              "ref": "Art. 12",
              "summary": "Garantizar que el sistema de IA disponga de capacidades de registro automático"
            },
            {
              "id": "HR-P5",
              "title": "Transparencia e información",
              "ref": "Art. 13",
              "summary": "Asegurar que el sistema sea transparente y proporcionar instrucciones de uso a los implementadores"
            },
            {
              "id": "HR-P6",
              "title": "Supervisión humana",
              "ref": "Art. 14",
              "summary": "Diseñar el sistema de manera que permita una supervisión humana efectiva"
            },
            {
              "id": "HR-P7",
              "title": "Precisión, robustez y ciberseguridad",
              "ref": "Art. 15",
              "summary": "Diseñar y desarrollar el sistema para alcanzar niveles apropiados de precisión, robustez y ciberseguridad"
            },
            {
              "id": "HR-P8",
              "title": "Sistema de gestión de calidad",
              "ref": "Art. 17",
              "summary": "Implementar un sistema de gestión de calidad"
            },
            {
              "id": "HR-P9",
              "title": "Evaluación de conformidad",
              "ref": "Art. 43, Annex VI/VII",
              "summary": "Realizar la evaluación de conformidad aplicable antes de la comercialización"
            },
            {
              "id": "HR-P10",
              "title": "Declaración de conformidad de la UE",
              "ref": "Art. 47, Annex V",
              "summary": "Elaborar la declaración de conformidad de la UE y colocar el marcado CE"
            },
            {
              "id": "HR-P11",
              "title": "Registro en la base de datos de la UE",
              "ref": "Art. 48",
              "summary": "Registrar el sistema en la base de datos de la UE antes de su comercialización"
            },
            {
              "id": "HR-P12",
              "title": "Medidas correctivas y retirada",
              "ref": "Art. 49, Annex VIII",
              "summary": "Adoptar medidas correctivas, informar a los implementadores y autoridades de cualquier no conformidad"
            },
            {
              "id": "HR-P13",
              "title": "Cooperación con las autoridades",
              "ref": "Art. 72",
              "summary": "Cooperar con las autoridades nacionales competentes y proporcionar la información solicitada"
            },
            {
              "id": "HR-P14",
              "title": "Accesibilidad",
              "ref": "Art. 73",
              "summary": "Cumplir los requisitos de accesibilidad de conformidad con las directivas aplicables"
            },
            {
              "id": "HR-P15",
              "title": "Vigilancia poscomercialización",
              "ref": "Art. 18",
              "summary": "Establecer un sistema de vigilancia poscomercialización proporcionado"
            },
            {
              "id": "P-ACC",
              "title": "Accesibilidad (Art. 16(k))",
              "ref": "Art. 16(j)",
              "summary": "Garantizar que el sistema de IA cumpla los requisitos de accesibilidad"
            }
          ],
          "obligations_deployer": [
            {
              "id": "HR-D1",
              "title": "Uso conforme a las instrucciones",
              "ref": "Art. 26(1)",
              "summary": "Utilizar el sistema de conformidad con las instrucciones de uso del proveedor"
            },
            {
              "id": "HR-D2",
              "title": "Supervisión humana",
              "ref": "Art. 26(2)",
              "summary": "Asegurar que las personas encargadas de la supervisión humana sean competentes y estén autorizadas"
            },
            {
              "id": "HR-D3",
              "title": "Pertinencia de los datos de entrada",
              "ref": "Art. 26(4)",
              "summary": "Garantizar la pertinencia de los datos de entrada respecto a la finalidad del sistema"
            },
            {
              "id": "HR-D4",
              "title": "Supervisión del funcionamiento",
              "ref": "Art. 26(5)",
              "summary": "Supervisar el funcionamiento del sistema con base en las instrucciones de uso"
            },
            {
              "id": "HR-D5",
              "title": "Conservación de registros",
              "ref": "Art. 26(6)",
              "summary": "Conservar los registros generados automáticamente en la medida en que estén bajo su control"
            },
            {
              "id": "HR-D6",
              "title": "Información a los trabajadores",
              "ref": "Art. 26(7)",
              "summary": "Informar a los representantes de los trabajadores y a los trabajadores afectados sobre el uso del sistema"
            },
            {
              "id": "HR-D7",
              "title": "Cooperación con las autoridades",
              "ref": "Art. 26(8)",
              "summary": "Cooperar con las autoridades nacionales competentes"
            },
            {
              "id": "HR-D8",
              "title": "Evaluación de impacto en derechos fundamentales (FRIA)",
              "ref": "Art. 27",
              "summary": "Realizar una evaluación de impacto en derechos fundamentales antes del despliegue (Art. 27). Nota: el Art. 27(10) excluye los sistemas del anexo III, punto 2 (infraestructuras críticas) de esta obligación."
            },
            {
              "id": "HR-D9",
              "title": "Registro de uso en la base de datos de la UE",
              "ref": "Art. 49",
              "summary": "Registrar el uso del sistema en la base de datos de la UE"
            },
            {
              "id": "D-DPIA",
              "title": "Evaluación de impacto relativa a la protección de datos (EIPD)",
              "ref": "Art. 26(10)",
              "summary": "Realizar una EIPD de conformidad con el Reglamento (UE) 2016/679 o la Directiva (UE) 2016/680"
            }
          ],
          "obligations_importer": [
            {
              "id": "HR-I1",
              "title": "Verificar la conformidad",
              "ref": "Art. 23",
              "summary": "Verificar que el proveedor ha realizado la evaluación de conformidad y elaborado la documentación técnica"
            }
          ],
          "obligations_distributor": [
            {
              "id": "HR-Dist1",
              "title": "Verificar la conformidad",
              "ref": "Art. 24",
              "summary": "Verificar que el sistema lleva el marcado CE y va acompañado de la documentación requerida"
            }
          ],
          "obligations_transparency_cumulative": {
            "note": "Art. 50 obligaciones de transparencia apply CUMULATIVELY with alto riesgo requirements (Art. 50(6)). Check the transparency_obligations attribute for applicable Art. 50 obligations.",
            "possible_obligations": [
              {
                "id": "HR-T1",
                "title": "Divulgación de interacción con IA",
                "ref": "Art. 50(1)",
                "condition": "chatbot_disclosure",
                "who": "Proveedor",
                "summary": "Diseñar el sistema para informar a las personas de que interactúan con una IA (salvo que sea evidente por el contexto)"
              },
              {
                "id": "HR-T2",
                "title": "Marcado de contenido sintético",
                "ref": "Art. 50(2)",
                "condition": "synthetic_content_marking",
                "who": "Proveedor",
                "summary": "Marcar las salidas de contenido sintético (imágenes, audio, vídeo) en un formato legible por máquina"
              },
              {
                "id": "HR-T3",
                "title": "Divulgación de reconocimiento de emociones / biometría",
                "ref": "Art. 50(3)",
                "condition": "emotion_biometric_disclosure",
                "who": "Implementador",
                "summary": "Informar a las personas del uso de sistemas de reconocimiento de emociones o categorización biométrica"
              },
              {
                "id": "HR-T4",
                "title": "Divulgación de deepfake",
                "ref": "Art. 50(4)",
                "condition": "deepfake_disclosure",
                "who": "Implementador",
                "summary": "Divulgar que el contenido ha sido generado o manipulado artificialmente (deepfakes)"
              },
              {
                "id": "HR-T5",
                "title": "Divulgación de texto generado por IA",
                "ref": "Art. 50(4)",
                "condition": "ai_generated_text_disclosure",
                "who": "Implementador",
                "summary": "Indicar que el texto ha sido generado por IA cuando se publique para informar al público"
              }
            ]
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: Ley de IA + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual evaluación de conformidad"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is alto riesgo under Art. 6(1)",
                "A single notified body can handle both MDR and Ley de IA evaluación de conformidad",
                "Clinical evaluation data may satisfy some Ley de IA requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: Ley de IA implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: Ley de IA overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is alto riesgo under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers Ley de IA requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring alto riesgo category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is alto riesgo under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-alto riesgo",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector alto riesgo classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT alto riesgo (enhances existing safety, doesn't replace it)",
                "Smart grid management may be alto riesgo if it has direct safety impact — case-by-case"
              ]
            }
          },
          "fria_guidance": {
            "title": "FRIA Guidance",
            "documents": [
              "G3: Practical guide to FRIA (DIHR/ECNL, 2025)"
            ],
            "who_must_conduct": [
              "Public authorities / bodies governed by public law",
              "Private entities providing essential public services",
              "Banking/insurance companies using AI under Annex III points 5(b)-(c)"
            ],
            "process": "5 phases: Planning & Scoping, Assess & Mitigate, Deployment Decision, Monitoring, Consulting Affected Groups"
          }
        }
      }
    },
    "result_transparency": {
      "id": "result_transparency",
      "type": "result",
      "verdict": "TRANSPARENCY",
      "verdict_class": "transparency",
      "title": "Transparency Obligations Apply",
      "summary": "Your AI system is not classified as high-risk but is subject to transparency obligations under Art. 50.",
      "legal_ref": "Art. 50",
      "effective_date": "2 August 2026",
      "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)",
      "obligations_by_type": {
        "chatbot_disclosure": {
          "title": "AI Interaction Disclosure",
          "ref": "Art. 50(1)",
          "who": "Provider",
          "summary": "Design the system to inform persons they are interacting with AI (unless obvious from context)"
        },
        "synthetic_content_marking": {
          "title": "Synthetic Content Marking",
          "ref": "Art. 50(2)",
          "who": "Provider",
          "summary": "Mark outputs in machine-readable format as artificially generated/manipulated"
        },
        "emotion_biometric_disclosure": {
          "title": "Emotion/Biometric System Disclosure",
          "ref": "Art. 50(3)",
          "who": "Deployer",
          "summary": "Inform natural persons exposed to the system about its operation"
        },
        "deepfake_disclosure": {
          "title": "Deep Fake Disclosure",
          "ref": "Art. 50(4)",
          "who": "Deployer",
          "summary": "Disclose that content has been artificially generated or manipulated"
        },
        "ai_generated_text_disclosure": {
          "title": "AI-Generated Text Disclosure",
          "ref": "Art. 50(4)",
          "who": "Deployer",
          "summary": "Disclose that published text on public interest matters was AI-generated"
        }
      },
      "ai_literacy": {
        "title": "AI Literacy",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "Ensure sufficient AI literacy of staff dealing with AI systems — applies to ALL operators"
      },
      "recommendations": [
        "Even without high-risk classification, consider implementing good AI governance practices.",
        "AI literacy obligations apply to all operators (Art. 4) since 2 February 2025.",
        "Monitor whether your use case could be reclassified as high-risk in future.",
        "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
      ],
      "previous": "q_role_result_router",
      "sector_specific_guidance": {
        "healthcare_medical_devices": {
          "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
          "documents": [
            "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual conformity assessment"
          ],
          "key_points": [
            "Medical device AI requiring third-party CA under MDR/IVDR is high-risk under Art. 6(1)",
            "A single notified body can handle both MDR and AI Act conformity assessment",
            "Clinical evaluation data may satisfy some AI Act requirements (Art. 9)"
          ]
        },
        "financial_services": {
          "title": "Sector-Specific Guidance: Banking / Financial Services",
          "documents": [
            "E1: AI Act implications for banking/payments (EBA, 2025)",
            "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
            "E5: AI Act overview for financial services (Eurofi, 2024)"
          ],
          "key_points": [
            "Credit scoring / creditworthiness AI is high-risk under Annex III, point 5(b)",
            "Existing banking regulation (CRR/CRD, DORA) partially covers AI Act requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
            "Fraud detection AI is explicitly excluded from the credit scoring high-risk category"
          ]
        },
        "insurance": {
          "title": "Sector-Specific Guidance: Insurance",
          "documents": [
            "E4: Insurance AI governance opinion (EIOPA, 2025)"
          ],
          "key_points": [
            "Life/health insurance risk assessment and pricing AI is high-risk under Annex III, point 5(c)",
            "EIOPA recommends risk-based governance for ALL insurance AI, including non-high-risk",
            "Fairness metrics required: demographic parity, equalized odds, predictive parity"
          ]
        },
        "energy": {
          "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
          "documents": [
            "F1: Energy sector high-risk classification consultation (Eurelectric, 2025)",
            "F2: AI and the energy sector briefing (EPRS, 2025)"
          ],
          "key_points": [
            "'Safety component' should be limited to systems that directly impact infrastructure safety",
            "Predictive maintenance typically NOT high-risk (enhances existing safety, doesn't replace it)",
            "Smart grid management may be high-risk if it has direct safety impact — case-by-case"
          ]
        }
      },
      "translations": {
        "fr": {
          "title": "Obligations de transparence applicables",
          "summary": "Votre système d’IA n’est pas classé à haut risque mais est soumis à des obligations de transparence au titre de l’art. 50.",
          "verdict": "TRANSPARENCE",
          "recommendations": [
            "Even without à haut risque classification, consider implementing good AI governance practices.",
            "AI literacy obligations s'appliquent to all operators (Art. 4) since 2 February 2025.",
            "Monitor whether your use case could be reclassified as à haut risque in future.",
            "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 août 2026",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          },
          "obligations_by_type": {
            "chatbot_disclosure": {
              "title": "Divulgation d'interaction avec l'IA",
              "ref": "Art. 50(1)",
              "who": "Fournisseur",
              "summary": "Concevoir le système pour informer les personnes qu'elles interagissent avec une IA (sauf si c'est évident du contexte)"
            },
            "synthetic_content_marking": {
              "title": "Marquage du contenu synthétique",
              "ref": "Art. 50(2)",
              "who": "Fournisseur",
              "summary": "Marquer les sorties de contenu synthétique (images, audio, vidéo) dans un format lisible par machine"
            },
            "emotion_biometric_disclosure": {
              "title": "Divulgation de reconnaissance d'émotions / biométrique",
              "ref": "Art. 50(3)",
              "who": "Déployeur",
              "summary": "Informer les personnes de l'utilisation de systèmes de reconnaissance d'émotions ou de catégorisation biométrique"
            },
            "deepfake_disclosure": {
              "title": "Divulgation de deepfake",
              "ref": "Art. 50(4)",
              "who": "Déployeur",
              "summary": "Divulguer que le contenu a été généré ou manipulé artificiellement (deepfakes)"
            },
            "ai_generated_text_disclosure": {
              "title": "Divulgation de texte généré par IA",
              "ref": "Art. 50(4)",
              "who": "Déployeur",
              "summary": "Indiquer que le texte a été généré par IA lorsqu'il est publié pour informer le public"
            }
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual évaluation de conformité"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is à haut risque under Art. 6(1)",
                "A single notified body can handle both MDR and AI Act évaluation de conformité",
                "Clinical evaluation data may satisfy some AI Act requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: AI Act implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: AI Act overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is à haut risque under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers AI Act requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring à haut risque category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is à haut risque under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-à haut risque",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector à haut risque classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT à haut risque (enhances existing safety, doesn't replace it)",
                "Smart grid management may be à haut risque if it has direct safety impact — case-by-case"
              ]
            }
          }
        },
        "nl": {
          "title": "Transparantieverplichtingen van toepassing",
          "summary": "Uw AI-systeem is niet als hoog risico geclassificeerd maar is onderworpen aan transparantieverplichtingen krachtens art. 50.",
          "verdict": "TRANSPARANTIE",
          "recommendations": [
            "Even without hoog risico classification, consider implementing good AI governance practices.",
            "AI literacy verplichtingen zijn van toepassing to all operators (Art. 4) since 2 February 2025.",
            "Monitor whether your use case could be reclassified as hoog risico in future.",
            "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 augustus 2026",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          },
          "obligations_by_type": {
            "chatbot_disclosure": {
              "title": "AI-interactie-openbaarmaking",
              "ref": "Art. 50(1)",
              "who": "Aanbieder",
              "summary": "Het systeem ontwerpen om personen te informeren dat zij met een AI communiceren (tenzij dit uit de context duidelijk is)"
            },
            "synthetic_content_marking": {
              "title": "Markering van synthetische inhoud",
              "ref": "Art. 50(2)",
              "who": "Aanbieder",
              "summary": "Synthetische inhoud (afbeeldingen, audio, video) markeren in een machineleesbaar formaat"
            },
            "emotion_biometric_disclosure": {
              "title": "Openbaarmaking emotieherkenning / biometrie",
              "ref": "Art. 50(3)",
              "who": "Gebruiksverantwoordelijke",
              "summary": "Personen informeren over het gebruik van emotieherkennings- of biometrische categoriseringssystemen"
            },
            "deepfake_disclosure": {
              "title": "Deepfake-openbaarmaking",
              "ref": "Art. 50(4)",
              "who": "Gebruiksverantwoordelijke",
              "summary": "Bekendmaken dat de inhoud kunstmatig is gegenereerd of gemanipuleerd (deepfakes)"
            },
            "ai_generated_text_disclosure": {
              "title": "Openbaarmaking van AI-gegenereerde tekst",
              "ref": "Art. 50(4)",
              "who": "Gebruiksverantwoordelijke",
              "summary": "Vermelden dat de tekst door AI is gegenereerd wanneer deze wordt gepubliceerd om het publiek te informeren"
            }
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual conformiteitsbeoordeling"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is hoog risico under Art. 6(1)",
                "A single notified body can handle both MDR and AI Act conformiteitsbeoordeling",
                "Clinical evaluation data may satisfy some AI Act requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: AI Act implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: AI Act overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is hoog risico under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers AI Act requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring hoog risico category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is hoog risico under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-hoog risico",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector hoog risico classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT hoog risico (enhances existing safety, doesn't replace it)",
                "Smart grid management may be hoog risico if it has direct safety impact — case-by-case"
              ]
            }
          }
        },
        "de": {
          "title": "Transparenzpflichten anwendbar",
          "summary": "Ihr KI-System ist nicht als Hochrisiko eingestuft, unterliegt jedoch Transparenzpflichten gemäß Art. 50.",
          "verdict": "TRANSPARENZ",
          "recommendations": [
            "Even without Hochrisiko classification, consider implementing good AI governance practices.",
            "AI literacy Pflichten gelten to all operators (Art. 4) since 2 February 2025.",
            "Monitor whether your use case could be reclassified as Hochrisiko in future.",
            "Other EU legislation may apply alongside or independently of the KI-Gesetz, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2. August 2026",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          },
          "obligations_by_type": {
            "chatbot_disclosure": {
              "title": "KI-Interaktionsoffenlegung",
              "ref": "Art. 50(1)",
              "who": "Anbieter",
              "summary": "Das System so zu gestalten, dass Personen darüber informiert werden, dass sie mit einer KI interagieren (sofern dies nicht aus dem Kontext ersichtlich ist)"
            },
            "synthetic_content_marking": {
              "title": "Kennzeichnung synthetischer Inhalte",
              "ref": "Art. 50(2)",
              "who": "Anbieter",
              "summary": "Synthetische Inhalte (Bilder, Audio, Video) in einem maschinenlesbaren Format kennzeichnen"
            },
            "emotion_biometric_disclosure": {
              "title": "Offenlegung von Emotionserkennung / Biometrie",
              "ref": "Art. 50(3)",
              "who": "Betreiber",
              "summary": "Personen über die Verwendung von Emotionserkennungs- oder biometrischen Kategorisierungssystemen informieren"
            },
            "deepfake_disclosure": {
              "title": "Deepfake-Offenlegung",
              "ref": "Art. 50(4)",
              "who": "Betreiber",
              "summary": "Offenlegen, dass der Inhalt künstlich erzeugt oder manipuliert wurde (Deepfakes)"
            },
            "ai_generated_text_disclosure": {
              "title": "Offenlegung von KI-generiertem Text",
              "ref": "Art. 50(4)",
              "who": "Betreiber",
              "summary": "Angeben, dass der Text von KI generiert wurde, wenn er veröffentlicht wird, um die Öffentlichkeit zu informieren"
            }
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: KI-Gesetz + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual Konformitätsbewertung"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is Hochrisiko under Art. 6(1)",
                "A single notified body can handle both MDR and KI-Gesetz Konformitätsbewertung",
                "Clinical evaluation data may satisfy some KI-Gesetz requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: KI-Gesetz implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: KI-Gesetz overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is Hochrisiko under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers KI-Gesetz requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring Hochrisiko category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is Hochrisiko under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-Hochrisiko",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector Hochrisiko classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT Hochrisiko (enhances existing safety, doesn't replace it)",
                "Smart grid management may be Hochrisiko if it has direct safety impact — case-by-case"
              ]
            }
          }
        },
        "es": {
          "title": "Se aplican obligaciones de transparencia",
          "summary": "Su sistema de IA no está clasificado como de alto riesgo pero está sujeto a obligaciones de transparencia en virtud del art. 50.",
          "verdict": "TRANSPARENCIA",
          "recommendations": [
            "Even without alto riesgo classification, consider implementing good AI governance practices.",
            "AI literacy obligations se aplican to all operators (Art. 4) since 2 February 2025.",
            "Monitor whether your use case could be reclassified as alto riesgo in future.",
            "Other EU legislation may apply alongside or independently of the Ley de IA, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty": "Up to €15 million or 3% of worldwide annual turnover (whichever is higher)",
          "effective_date": "2 de agosto de 2026",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          },
          "obligations_by_type": {
            "chatbot_disclosure": {
              "title": "Divulgación de interacción con IA",
              "ref": "Art. 50(1)",
              "who": "Proveedor",
              "summary": "Diseñar el sistema para informar a las personas de que interactúan con una IA (salvo que sea evidente por el contexto)"
            },
            "synthetic_content_marking": {
              "title": "Marcado de contenido sintético",
              "ref": "Art. 50(2)",
              "who": "Proveedor",
              "summary": "Marcar las salidas de contenido sintético (imágenes, audio, vídeo) en un formato legible por máquina"
            },
            "emotion_biometric_disclosure": {
              "title": "Divulgación de reconocimiento de emociones / biometría",
              "ref": "Art. 50(3)",
              "who": "Implementador",
              "summary": "Informar a las personas del uso de sistemas de reconocimiento de emociones o categorización biométrica"
            },
            "deepfake_disclosure": {
              "title": "Divulgación de deepfake",
              "ref": "Art. 50(4)",
              "who": "Implementador",
              "summary": "Divulgar que el contenido ha sido generado o manipulado artificialmente (deepfakes)"
            },
            "ai_generated_text_disclosure": {
              "title": "Divulgación de texto generado por IA",
              "ref": "Art. 50(4)",
              "who": "Implementador",
              "summary": "Indicar que el texto ha sido generado por IA cuando se publique para informar al público"
            }
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: Ley de IA + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual evaluación de conformidad"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is alto riesgo under Art. 6(1)",
                "A single notified body can handle both MDR and Ley de IA evaluación de conformidad",
                "Clinical evaluation data may satisfy some Ley de IA requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: Ley de IA implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: Ley de IA overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is alto riesgo under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers Ley de IA requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring alto riesgo category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is alto riesgo under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-alto riesgo",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector alto riesgo classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT alto riesgo (enhances existing safety, doesn't replace it)",
                "Smart grid management may be alto riesgo if it has direct safety impact — case-by-case"
              ]
            }
          }
        }
      }
    },
    "result_gpai": {
      "id": "result_gpai",
      "type": "result",
      "verdict": "GPAI",
      "verdict_class": "gpai",
      "title": "General-Purpose AI Model Obligations",
      "summary": "You are a provider of a general-purpose AI model. Specific obligations apply under Chapter V of the AI Act.",
      "legal_ref": "Art. 51-56",
      "effective_date": "2 August 2025",
      "penalty_gpai": "Up to €15 million or 3% of worldwide annual turnover (Commission-imposed)",
      "obligations_all_gpai": [
        {
          "id": "GPAI-1",
          "title": "Technical Documentation",
          "ref": "Art. 53(1)(a), Annex XI",
          "summary": "Draw up and maintain technical documentation of the model"
        },
        {
          "id": "GPAI-2",
          "title": "Downstream Provider Information",
          "ref": "Art. 53(1)(b), Annex XII",
          "summary": "Provide information and documentation to downstream AI system providers"
        },
        {
          "id": "GPAI-3",
          "title": "Copyright Compliance Policy",
          "ref": "Art. 53(1)(c)",
          "summary": "Put in place a policy to comply with EU copyright law"
        },
        {
          "id": "GPAI-4",
          "title": "Training Data Summary",
          "ref": "Art. 53(1)(d)",
          "summary": "Publish a sufficiently detailed summary of training content"
        }
      ],
      "obligations_open_source_exempt": [
        "GPAI-1",
        "GPAI-2"
      ],
      "obligations_open_source_exempt_note": "Open-source GPAI models (Art. 53(2)) are exempt from GPAI-1 and GPAI-2, but must still comply with GPAI-3 and GPAI-4. This exemption does NOT apply to GPAI models with systemic risk.",
      "obligations_notification": {
        "id": "GPAI-NOTIFY",
        "title": "Commission Notification (Systemic Risk)",
        "ref": "Art. 52",
        "summary": "Providers of GPAI models with systemic risk must notify the European Commission within two weeks of meeting the systemic risk criteria (e.g., reaching the 10^25 FLOPs training compute threshold). This notification obligation applies specifically to GPAI models classified as posing systemic risk, not to all GPAI providers. The notification concerns classification as a systemic risk model."
      },
      "obligations_systemic_risk": [
        {
          "id": "GPAI-SR1",
          "title": "Model Evaluation",
          "ref": "Art. 55(1)(a)",
          "summary": "Perform model evaluation including adversarial testing"
        },
        {
          "id": "GPAI-SR2",
          "title": "Systemic Risk Assessment & Mitigation",
          "ref": "Art. 55(1)(b)",
          "summary": "Assess and mitigate possible systemic risks"
        },
        {
          "id": "GPAI-SR3",
          "title": "Serious Incident Reporting",
          "ref": "Art. 55(1)(c)",
          "summary": "Track and report serious incidents to the AI Office and national authorities"
        },
        {
          "id": "GPAI-SR4",
          "title": "Cybersecurity Protection",
          "ref": "Art. 55(1)(d)",
          "summary": "Ensure adequate level of cybersecurity protection"
        }
      ],
      "systemic_risk_threshold": "10^25 FLOPs cumulative training compute (Art. 51(2))",
      "obligations_by_type": {
        "chatbot_disclosure": {
          "title": "AI Interaction Disclosure",
          "ref": "Art. 50(1)",
          "who": "Provider",
          "summary": "Design the system to inform persons they are interacting with AI (unless obvious from context)"
        },
        "synthetic_content_marking": {
          "title": "Synthetic Content Marking",
          "ref": "Art. 50(2)",
          "who": "Provider",
          "summary": "Mark outputs in machine-readable format as artificially generated/manipulated"
        },
        "emotion_biometric_disclosure": {
          "title": "Emotion/Biometric System Disclosure",
          "ref": "Art. 50(3)",
          "who": "Deployer",
          "summary": "Inform natural persons exposed to the system about its operation"
        },
        "deepfake_disclosure": {
          "title": "Deep Fake Disclosure",
          "ref": "Art. 50(4)",
          "who": "Deployer",
          "summary": "Disclose that content has been artificially generated or manipulated"
        },
        "ai_generated_text_disclosure": {
          "title": "AI-Generated Text Disclosure",
          "ref": "Art. 50(4)",
          "who": "Deployer",
          "summary": "Disclose that published text on public interest matters was AI-generated"
        }
      },
      "ai_literacy": {
        "title": "AI Literacy",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "Ensure sufficient AI literacy of staff dealing with AI systems — applies to ALL operators"
      },
      "recommendations": [
        "GPAI models already placed on the market before 2 August 2025 must comply with GPAI obligations by 2 August 2027 (Art. 111(3)).",
        "GPAI obligations apply from 2 August 2025.",
        "Consider adhering to the GPAI Code of Practice for presumption of compliance (Art. 56).",
        "If downstream providers use your model in high-risk AI systems, your documentation obligations are critical.",
        "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
      ],
      "previous": "q_role_result_router",
      "translations": {
        "fr": {
          "title": "Obligations relatives aux modèles d’IA à usage général",
          "summary": "Vous êtes un fournisseur d’un modèle d’IA à usage général. Des obligations spécifiques s’appliquent au titre du chapitre V du règlement sur l’IA.",
          "verdict": "MODÈLE GPAI",
          "recommendations": [
            "modèle GPAIs already placed on the market before 2 August 2025 must comply with GPAI obligations by 2 August 2027 (Art. 111(3)).",
            "GPAI obligations s'appliquent from 2 August 2025.",
            "Consider adhering to the GPAI Code of Practice for presumption of compliance (Art. 56).",
            "If downstream providers use your model in à haut risque système d'IAs, your documentation obligations are critical.",
            "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty_gpai": "Up to €15 million or 3% of worldwide annual turnover (Commission-imposed)",
          "effective_date": "2 août 2025",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          },
          "obligations_all_gpai": [
            {
              "id": "GPAI-1",
              "title": "Documentation technique",
              "ref": "Art. 53(1)(a), Annex XI",
              "summary": "Rédiger et tenir à jour la documentation technique du modèle GPAI"
            },
            {
              "id": "GPAI-2",
              "title": "Information aux fournisseurs en aval",
              "ref": "Art. 53(1)(b), Annex XII",
              "summary": "Fournir des informations et de la documentation aux fournisseurs de systèmes d'IA en aval"
            },
            {
              "id": "GPAI-3",
              "title": "Politique de respect du droit d'auteur",
              "ref": "Art. 53(1)(c)",
              "summary": "Mettre en place une politique de conformité au droit d'auteur de l'UE"
            },
            {
              "id": "GPAI-4",
              "title": "Résumé des données d'entraînement",
              "ref": "Art. 53(1)(d)",
              "summary": "Rédiger et publier un résumé suffisamment détaillé des données d'entraînement"
            }
          ],
          "obligations_systemic_risk": [
            {
              "id": "GPAI-SR1",
              "title": "Évaluation du modèle",
              "ref": "Art. 55(1)(a)",
              "summary": "Effectuer des évaluations du modèle, y compris la réalisation de tests adverses"
            },
            {
              "id": "GPAI-SR2",
              "title": "Évaluation et atténuation des risques systémiques",
              "ref": "Art. 55(1)(b)",
              "summary": "Évaluer et atténuer les risques systémiques possibles"
            },
            {
              "id": "GPAI-SR3",
              "title": "Signalement des incidents",
              "ref": "Art. 55(1)(c)",
              "summary": "Suivre, documenter et signaler les incidents graves à l'AI Office et aux autorités nationales"
            },
            {
              "id": "GPAI-SR4",
              "title": "Protection de cybersécurité",
              "ref": "Art. 55(1)(d)",
              "summary": "Assurer un niveau adéquat de protection en matière de cybersécurité pour le modèle GPAI"
            }
          ],
          "obligations_notification": [
            "id",
            "title",
            "ref",
            "summary"
          ],
          "obligations_by_type": {
            "chatbot_disclosure": {
              "title": "Divulgation d'interaction avec l'IA",
              "ref": "Art. 50(1)",
              "who": "Fournisseur",
              "summary": "Concevoir le système pour informer les personnes qu'elles interagissent avec une IA (sauf si c'est évident du contexte)"
            },
            "synthetic_content_marking": {
              "title": "Marquage du contenu synthétique",
              "ref": "Art. 50(2)",
              "who": "Fournisseur",
              "summary": "Marquer les sorties de contenu synthétique (images, audio, vidéo) dans un format lisible par machine"
            },
            "emotion_biometric_disclosure": {
              "title": "Divulgation de reconnaissance d'émotions / biométrique",
              "ref": "Art. 50(3)",
              "who": "Déployeur",
              "summary": "Informer les personnes de l'utilisation de systèmes de reconnaissance d'émotions ou de catégorisation biométrique"
            },
            "deepfake_disclosure": {
              "title": "Divulgation de deepfake",
              "ref": "Art. 50(4)",
              "who": "Déployeur",
              "summary": "Divulguer que le contenu a été généré ou manipulé artificiellement (deepfakes)"
            },
            "ai_generated_text_disclosure": {
              "title": "Divulgation de texte généré par IA",
              "ref": "Art. 50(4)",
              "who": "Déployeur",
              "summary": "Indiquer que le texte a été généré par IA lorsqu'il est publié pour informer le public"
            }
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual évaluation de conformité"
              ],
              "key_points": [
                "modèle GPAIs integrated into medical device système d'IAs may trigger dual obligations de conformité",
                "Model providers must supply documentation technique sufficient for downstream providers to meet MDR/AI Act requirements",
                "Systemic risk models used in health contexts face enhanced scrutiny under both frameworks"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Financial Services",
              "documents": [
                "D6: EIOPA Staff Paper on AI Governance (2025) — supervisory expectations for AI in insurance and pensions",
                "D9: EBA Report on AI in Banking (2025) — expectations for credit scoring, fraud detection, and gestion des risques AI"
              ],
              "key_points": [
                "modèle GPAIs used in creditworthiness assessment or fraud detection may create à haut risque obligations for downstream deployers",
                "Model providers should document training data characteristics relevant to financial fairness and non-discrimination",
                "Financial supervisory authorities may request modèle GPAI documentation through market surveillance channels"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "D6: EIOPA Staff Paper on AI Governance (2025) — supervisory expectations for AI in insurance"
              ],
              "key_points": [
                "modèle GPAIs used for risk assessment, pricing, or claims processing may create à haut risque obligations for deployers",
                "Transparency obligations for modèle GPAIs are cumulative with insurance-specific disclosure requirements",
                "Model cards should document potential impacts on insurance pricing fairness"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy",
              "documents": [
                "AI Act applies to AI in critical infrastructure including energy — Annex III, Area 2"
              ],
              "key_points": [
                "modèle GPAIs integrated into energy grid management or safety systems may trigger à haut risque classification for deployers",
                "Model providers should document reliability and safety characteristics relevant to critical infrastructure",
                "NIS2 Directive obligations may apply in parallel for energy sector AI deployments"
              ]
            }
          }
        },
        "nl": {
          "title": "Verplichtingen voor AI-modellen voor algemene doeleinden",
          "summary": "U bent een aanbieder van een AI-model voor algemene doeleinden. Specifieke verplichtingen zijn van toepassing krachtens hoofdstuk V van de AI-verordening.",
          "verdict": "GPAI-MODEL",
          "recommendations": [
            "GPAI-models already placed on the market before 2 August 2025 must comply with GPAI obligations by 2 August 2027 (Art. 111(3)).",
            "GPAI verplichtingen zijn van toepassing from 2 August 2025.",
            "Consider adhering to the GPAI Code of Practice for presumption of compliance (Art. 56).",
            "If downstream providers use your model in hoog risico AI-systeems, your documentation obligations are critical.",
            "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty_gpai": "Up to €15 million or 3% of worldwide annual turnover (Commission-imposed)",
          "effective_date": "2 augustus 2025",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          },
          "obligations_all_gpai": [
            {
              "id": "GPAI-1",
              "title": "Technische documentatie",
              "ref": "Art. 53(1)(a), Annex XI",
              "summary": "De technische documentatie van het GPAI-model opstellen en bijhouden"
            },
            {
              "id": "GPAI-2",
              "title": "Informatie aan downstream-aanbieders",
              "ref": "Art. 53(1)(b), Annex XII",
              "summary": "Informatie en documentatie verstrekken aan downstream AI-systeemaanbieders"
            },
            {
              "id": "GPAI-3",
              "title": "Auteursrechtbeleid",
              "ref": "Art. 53(1)(c)",
              "summary": "Een beleid opstellen ter naleving van het EU-auteursrecht"
            },
            {
              "id": "GPAI-4",
              "title": "Samenvatting trainingsgegevens",
              "ref": "Art. 53(1)(d)",
              "summary": "Een voldoende gedetailleerde samenvatting van de trainingsgegevens opstellen en publiceren"
            }
          ],
          "obligations_systemic_risk": [
            {
              "id": "GPAI-SR1",
              "title": "Modelevaluatie",
              "ref": "Art. 55(1)(a)",
              "summary": "Modelevaluaties uitvoeren, inclusief vijandige tests"
            },
            {
              "id": "GPAI-SR2",
              "title": "Beoordeling en beperking van systeemrisico's",
              "ref": "Art. 55(1)(b)",
              "summary": "Mogelijke systeemrisico's beoordelen en beperken"
            },
            {
              "id": "GPAI-SR3",
              "title": "Incidentrapportage",
              "ref": "Art. 55(1)(c)",
              "summary": "Ernstige incidenten volgen, documenteren en melden bij het AI-bureau en nationale autoriteiten"
            },
            {
              "id": "GPAI-SR4",
              "title": "Cyberbeveiligingsbescherming",
              "ref": "Art. 55(1)(d)",
              "summary": "Een adequaat niveau van cyberbeveiligingsbescherming voor het GPAI-model waarborgen"
            }
          ],
          "obligations_notification": [
            "id",
            "title",
            "ref",
            "summary"
          ],
          "obligations_by_type": {
            "chatbot_disclosure": {
              "title": "AI-interactie-openbaarmaking",
              "ref": "Art. 50(1)",
              "who": "Aanbieder",
              "summary": "Het systeem ontwerpen om personen te informeren dat zij met een AI communiceren (tenzij dit uit de context duidelijk is)"
            },
            "synthetic_content_marking": {
              "title": "Markering van synthetische inhoud",
              "ref": "Art. 50(2)",
              "who": "Aanbieder",
              "summary": "Synthetische inhoud (afbeeldingen, audio, video) markeren in een machineleesbaar formaat"
            },
            "emotion_biometric_disclosure": {
              "title": "Openbaarmaking emotieherkenning / biometrie",
              "ref": "Art. 50(3)",
              "who": "Gebruiksverantwoordelijke",
              "summary": "Personen informeren over het gebruik van emotieherkennings- of biometrische categoriseringssystemen"
            },
            "deepfake_disclosure": {
              "title": "Deepfake-openbaarmaking",
              "ref": "Art. 50(4)",
              "who": "Gebruiksverantwoordelijke",
              "summary": "Bekendmaken dat de inhoud kunstmatig is gegenereerd of gemanipuleerd (deepfakes)"
            },
            "ai_generated_text_disclosure": {
              "title": "Openbaarmaking van AI-gegenereerde tekst",
              "ref": "Art. 50(4)",
              "who": "Gebruiksverantwoordelijke",
              "summary": "Vermelden dat de tekst door AI is gegenereerd wanneer deze wordt gepubliceerd om het publiek te informeren"
            }
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual conformiteitsbeoordeling"
              ],
              "key_points": [
                "GPAI-models integrated into medical device AI-systeems may trigger dual nalevingsverplichtingen",
                "Model providers must supply technische documentatie sufficient for downstream providers to meet MDR/AI Act requirements",
                "Systemic risk models used in health contexts face enhanced scrutiny under both frameworks"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Financial Services",
              "documents": [
                "D6: EIOPA Staff Paper on AI Governance (2025) — supervisory expectations for AI in insurance and pensions",
                "D9: EBA Report on AI in Banking (2025) — expectations for credit scoring, fraud detection, and risicobeheer AI"
              ],
              "key_points": [
                "GPAI-models used in creditworthiness assessment or fraud detection may create hoog risico obligations for downstream deployers",
                "Model providers should document training data characteristics relevant to financial fairness and non-discrimination",
                "Financial supervisory authorities may request GPAI-model documentation through market surveillance channels"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "D6: EIOPA Staff Paper on AI Governance (2025) — supervisory expectations for AI in insurance"
              ],
              "key_points": [
                "GPAI-models used for risk assessment, pricing, or claims processing may create hoog risico obligations for deployers",
                "Transparency obligations for GPAI-models are cumulative with insurance-specific disclosure requirements",
                "Model cards should document potential impacts on insurance pricing fairness"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy",
              "documents": [
                "AI Act applies to AI in critical infrastructure including energy — Annex III, Area 2"
              ],
              "key_points": [
                "GPAI-models integrated into energy grid management or safety systems may trigger hoog risico classification for deployers",
                "Model providers should document reliability and safety characteristics relevant to critical infrastructure",
                "NIS2 Directive obligations may apply in parallel for energy sector AI deployments"
              ]
            }
          }
        },
        "de": {
          "title": "Pflichten für KI-Modelle mit allgemeinem Verwendungszweck",
          "summary": "Sie sind Anbieter eines KI-Modells mit allgemeinem Verwendungszweck. Spezifische Pflichten gelten gemäß Kapitel V der KI-Verordnung.",
          "verdict": "GPAI-MODELL",
          "recommendations": [
            "GPAI-Modells already placed on the market before 2 August 2025 must comply with GPAI obligations by 2 August 2027 (Art. 111(3)).",
            "GPAI Pflichten gelten from 2 August 2025.",
            "Consider adhering to the GPAI Code of Practice for presumption of compliance (Art. 56).",
            "If downstream providers use your model in Hochrisiko KI-Systems, your documentation obligations are critical.",
            "Other EU legislation may apply alongside or independently of the KI-Gesetz, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty_gpai": "Up to €15 million or 3% of worldwide annual turnover (Commission-imposed)",
          "effective_date": "2. August 2025",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          },
          "obligations_all_gpai": [
            {
              "id": "GPAI-1",
              "title": "Technische Dokumentation",
              "ref": "Art. 53(1)(a), Annex XI",
              "summary": "Die technische Dokumentation des GPAI-Modells erstellen und pflegen"
            },
            {
              "id": "GPAI-2",
              "title": "Information an nachgelagerte Anbieter",
              "ref": "Art. 53(1)(b), Annex XII",
              "summary": "Informationen und Dokumentation an nachgelagerte KI-Systemanbieter bereitstellen"
            },
            {
              "id": "GPAI-3",
              "title": "Urheberrechtspolitik",
              "ref": "Art. 53(1)(c)",
              "summary": "Eine Richtlinie zur Einhaltung des EU-Urheberrechts aufstellen"
            },
            {
              "id": "GPAI-4",
              "title": "Zusammenfassung der Trainingsdaten",
              "ref": "Art. 53(1)(d)",
              "summary": "Eine hinreichend detaillierte Zusammenfassung der Trainingsdaten erstellen und veröffentlichen"
            }
          ],
          "obligations_systemic_risk": [
            {
              "id": "GPAI-SR1",
              "title": "Modellbewertung",
              "ref": "Art. 55(1)(a)",
              "summary": "Modellbewertungen durchführen, einschließlich adversarialer Tests"
            },
            {
              "id": "GPAI-SR2",
              "title": "Bewertung und Minderung systemischer Risiken",
              "ref": "Art. 55(1)(b)",
              "summary": "Mögliche systemische Risiken bewerten und mindern"
            },
            {
              "id": "GPAI-SR3",
              "title": "Meldung von Vorfällen",
              "ref": "Art. 55(1)(c)",
              "summary": "Schwerwiegende Vorfälle verfolgen, dokumentieren und dem KI-Büro und den nationalen Behörden melden"
            },
            {
              "id": "GPAI-SR4",
              "title": "Cybersicherheitsschutz",
              "ref": "Art. 55(1)(d)",
              "summary": "Ein angemessenes Maß an Cybersicherheitsschutz für das GPAI-Modell gewährleisten"
            }
          ],
          "obligations_notification": [
            "id",
            "title",
            "ref",
            "summary"
          ],
          "obligations_by_type": {
            "chatbot_disclosure": {
              "title": "KI-Interaktionsoffenlegung",
              "ref": "Art. 50(1)",
              "who": "Anbieter",
              "summary": "Das System so zu gestalten, dass Personen darüber informiert werden, dass sie mit einer KI interagieren (sofern dies nicht aus dem Kontext ersichtlich ist)"
            },
            "synthetic_content_marking": {
              "title": "Kennzeichnung synthetischer Inhalte",
              "ref": "Art. 50(2)",
              "who": "Anbieter",
              "summary": "Synthetische Inhalte (Bilder, Audio, Video) in einem maschinenlesbaren Format kennzeichnen"
            },
            "emotion_biometric_disclosure": {
              "title": "Offenlegung von Emotionserkennung / Biometrie",
              "ref": "Art. 50(3)",
              "who": "Betreiber",
              "summary": "Personen über die Verwendung von Emotionserkennungs- oder biometrischen Kategorisierungssystemen informieren"
            },
            "deepfake_disclosure": {
              "title": "Deepfake-Offenlegung",
              "ref": "Art. 50(4)",
              "who": "Betreiber",
              "summary": "Offenlegen, dass der Inhalt künstlich erzeugt oder manipuliert wurde (Deepfakes)"
            },
            "ai_generated_text_disclosure": {
              "title": "Offenlegung von KI-generiertem Text",
              "ref": "Art. 50(4)",
              "who": "Betreiber",
              "summary": "Angeben, dass der Text von KI generiert wurde, wenn er veröffentlicht wird, um die Öffentlichkeit zu informieren"
            }
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: KI-Gesetz + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual Konformitätsbewertung"
              ],
              "key_points": [
                "GPAI-Modells integrated into medical device KI-Systems may trigger dual Compliance-Pflichten",
                "Model providers must supply technische Dokumentation sufficient for downstream providers to meet MDR/KI-Gesetz requirements",
                "Systemic risk models used in health contexts face enhanced scrutiny under both frameworks"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Financial Services",
              "documents": [
                "D6: EIOPA Staff Paper on AI Governance (2025) — supervisory expectations for AI in insurance and pensions",
                "D9: EBA Report on AI in Banking (2025) — expectations for credit scoring, fraud detection, and Risikomanagement AI"
              ],
              "key_points": [
                "GPAI-Modells used in creditworthiness assessment or fraud detection may create Hochrisiko obligations for downstream deployers",
                "Model providers should document training data characteristics relevant to financial fairness and non-discrimination",
                "Financial supervisory authorities may request GPAI-Modell documentation through market surveillance channels"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "D6: EIOPA Staff Paper on AI Governance (2025) — supervisory expectations for AI in insurance"
              ],
              "key_points": [
                "GPAI-Modells used for risk assessment, pricing, or claims processing may create Hochrisiko obligations for deployers",
                "Transparency obligations for GPAI-Modells are cumulative with insurance-specific disclosure requirements",
                "Model cards should document potential impacts on insurance pricing fairness"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy",
              "documents": [
                "KI-Gesetz applies to AI in critical infrastructure including energy — Annex III, Area 2"
              ],
              "key_points": [
                "GPAI-Modells integrated into energy grid management or safety systems may trigger Hochrisiko classification for deployers",
                "Model providers should document reliability and safety characteristics relevant to critical infrastructure",
                "NIS2 Directive obligations may apply in parallel for energy sector AI deployments"
              ]
            }
          }
        },
        "es": {
          "title": "Obligaciones de los modelos de IA de uso general",
          "summary": "Usted es un proveedor de un modelo de IA de uso general. Se aplican obligaciones específicas en virtud del capítulo V del Reglamento de IA.",
          "verdict": "MODELO GPAI",
          "recommendations": [
            "modelo GPAIs already placed on the market before 2 August 2025 must comply with GPAI obligations by 2 August 2027 (Art. 111(3)).",
            "GPAI se aplican las obligaciones from 2 August 2025.",
            "Consider adhering to the GPAI Code of Practice for presumption of compliance (Art. 56).",
            "If downstream providers use your model in alto riesgo sistema de IAs, your documentation obligations are critical.",
            "Other EU legislation may apply alongside or independently of the Ley de IA, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty_gpai": "Up to €15 million or 3% of worldwide annual turnover (Commission-imposed)",
          "effective_date": "2 de agosto de 2025",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          },
          "obligations_all_gpai": [
            {
              "id": "GPAI-1",
              "title": "Documentación técnica",
              "ref": "Art. 53(1)(a), Annex XI",
              "summary": "Elaborar y mantener la documentación técnica del modelo GPAI"
            },
            {
              "id": "GPAI-2",
              "title": "Información a proveedores posteriores",
              "ref": "Art. 53(1)(b), Annex XII",
              "summary": "Proporcionar información y documentación a los proveedores de sistemas de IA posteriores"
            },
            {
              "id": "GPAI-3",
              "title": "Política de derechos de autor",
              "ref": "Art. 53(1)(c)",
              "summary": "Establecer una política de cumplimiento del derecho de autor de la UE"
            },
            {
              "id": "GPAI-4",
              "title": "Resumen de datos de entrenamiento",
              "ref": "Art. 53(1)(d)",
              "summary": "Elaborar y publicar un resumen suficientemente detallado de los datos de entrenamiento"
            }
          ],
          "obligations_systemic_risk": [
            {
              "id": "GPAI-SR1",
              "title": "Evaluación del modelo",
              "ref": "Art. 55(1)(a)",
              "summary": "Realizar evaluaciones del modelo, incluidas pruebas adversarias"
            },
            {
              "id": "GPAI-SR2",
              "title": "Evaluación y mitigación de riesgos sistémicos",
              "ref": "Art. 55(1)(b)",
              "summary": "Evaluar y mitigar los posibles riesgos sistémicos"
            },
            {
              "id": "GPAI-SR3",
              "title": "Notificación de incidentes",
              "ref": "Art. 55(1)(c)",
              "summary": "Seguir, documentar y notificar los incidentes graves a la Oficina de IA y a las autoridades nacionales"
            },
            {
              "id": "GPAI-SR4",
              "title": "Protección de ciberseguridad",
              "ref": "Art. 55(1)(d)",
              "summary": "Garantizar un nivel adecuado de protección de ciberseguridad para el modelo GPAI"
            }
          ],
          "obligations_notification": [
            "id",
            "title",
            "ref",
            "summary"
          ],
          "obligations_by_type": {
            "chatbot_disclosure": {
              "title": "Divulgación de interacción con IA",
              "ref": "Art. 50(1)",
              "who": "Proveedor",
              "summary": "Diseñar el sistema para informar a las personas de que interactúan con una IA (salvo que sea evidente por el contexto)"
            },
            "synthetic_content_marking": {
              "title": "Marcado de contenido sintético",
              "ref": "Art. 50(2)",
              "who": "Proveedor",
              "summary": "Marcar las salidas de contenido sintético (imágenes, audio, vídeo) en un formato legible por máquina"
            },
            "emotion_biometric_disclosure": {
              "title": "Divulgación de reconocimiento de emociones / biometría",
              "ref": "Art. 50(3)",
              "who": "Implementador",
              "summary": "Informar a las personas del uso de sistemas de reconocimiento de emociones o categorización biométrica"
            },
            "deepfake_disclosure": {
              "title": "Divulgación de deepfake",
              "ref": "Art. 50(4)",
              "who": "Implementador",
              "summary": "Divulgar que el contenido ha sido generado o manipulado artificialmente (deepfakes)"
            },
            "ai_generated_text_disclosure": {
              "title": "Divulgación de texto generado por IA",
              "ref": "Art. 50(4)",
              "who": "Implementador",
              "summary": "Indicar que el texto ha sido generado por IA cuando se publique para informar al público"
            }
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: Ley de IA + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual evaluación de conformidad"
              ],
              "key_points": [
                "modelo GPAIs integrated into medical device sistema de IAs may trigger dual obligaciones de cumplimiento",
                "Model providers must supply documentación técnica sufficient for downstream providers to meet MDR/Ley de IA requirements",
                "Systemic risk models used in health contexts face enhanced scrutiny under both frameworks"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Financial Services",
              "documents": [
                "D6: EIOPA Staff Paper on AI Governance (2025) — supervisory expectations for AI in insurance and pensions",
                "D9: EBA Report on AI in Banking (2025) — expectations for credit scoring, fraud detection, and gestión de riesgos AI"
              ],
              "key_points": [
                "modelo GPAIs used in creditworthiness assessment or fraud detection may create alto riesgo obligations for downstream deployers",
                "Model providers should document training data characteristics relevant to financial fairness and non-discrimination",
                "Financial supervisory authorities may request modelo GPAI documentation through market surveillance channels"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "D6: EIOPA Staff Paper on AI Governance (2025) — supervisory expectations for AI in insurance"
              ],
              "key_points": [
                "modelo GPAIs used for risk assessment, pricing, or claims processing may create alto riesgo obligations for deployers",
                "Transparency obligations for modelo GPAIs are cumulative with insurance-specific disclosure requirements",
                "Model cards should document potential impacts on insurance pricing fairness"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy",
              "documents": [
                "Ley de IA applies to AI in critical infrastructure including energy — Annex III, Area 2"
              ],
              "key_points": [
                "modelo GPAIs integrated into energy grid management or safety systems may trigger alto riesgo classification for deployers",
                "Model providers should document reliability and safety characteristics relevant to critical infrastructure",
                "NIS2 Directive obligations may apply in parallel for energy sector AI deployments"
              ]
            }
          }
        }
      },
      "sector_specific_guidance": {
        "healthcare_medical_devices": {
          "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
          "documents": [
            "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual conformity assessment"
          ],
          "key_points": [
            "GPAI models integrated into medical device AI systems may trigger dual compliance obligations",
            "Model providers must supply technical documentation sufficient for downstream providers to meet MDR/AI Act requirements",
            "Systemic risk models used in health contexts face enhanced scrutiny under both frameworks"
          ]
        },
        "financial_services": {
          "title": "Sector-Specific Guidance: Financial Services",
          "documents": [
            "D6: EIOPA Staff Paper on AI Governance (2025) — supervisory expectations for AI in insurance and pensions",
            "D9: EBA Report on AI in Banking (2025) — expectations for credit scoring, fraud detection, and risk management AI"
          ],
          "key_points": [
            "GPAI models used in creditworthiness assessment or fraud detection may create high-risk obligations for downstream deployers",
            "Model providers should document training data characteristics relevant to financial fairness and non-discrimination",
            "Financial supervisory authorities may request GPAI model documentation through market surveillance channels"
          ]
        },
        "insurance": {
          "title": "Sector-Specific Guidance: Insurance",
          "documents": [
            "D6: EIOPA Staff Paper on AI Governance (2025) — supervisory expectations for AI in insurance"
          ],
          "key_points": [
            "GPAI models used for risk assessment, pricing, or claims processing may create high-risk obligations for deployers",
            "Transparency obligations for GPAI models are cumulative with insurance-specific disclosure requirements",
            "Model cards should document potential impacts on insurance pricing fairness"
          ]
        },
        "energy": {
          "title": "Sector-Specific Guidance: Energy",
          "documents": [
            "AI Act applies to AI in critical infrastructure including energy — Annex III, Area 2"
          ],
          "key_points": [
            "GPAI models integrated into energy grid management or safety systems may trigger high-risk classification for deployers",
            "Model providers should document reliability and safety characteristics relevant to critical infrastructure",
            "NIS2 Directive obligations may apply in parallel for energy sector AI deployments"
          ]
        }
      },
      "timeline": {
        "gpai_obligations": "Obligations apply from 2 August 2025",
        "gpai_transitional": "GPAI models placed on market before 2 August 2025 must comply by 2 August 2027 (Art. 111(3))"
      }
    },
    "result_minimal_risk": {
      "id": "result_minimal_risk",
      "type": "result",
      "verdict": "MINIMAL_RISK",
      "verdict_class": "minimal-risk",
      "title": "Minimal Risk — No Specific AI Act Obligations",
      "summary": "Your AI system is in scope of the AI Act but is classified as minimal risk. No specific compliance obligations under the AI Act apply, although general principles and voluntary measures are encouraged.",
      "legal_ref": "Art. 95, Art. 4",
      "effective_date": "N/A (voluntary)",
      "penalty": "N/A",
      "obligations": [],
      "recommendations": [
        "AI literacy obligations apply to ALL operators since 2 February 2025 (Art. 4).",
        "Consider adopting voluntary codes of conduct (Art. 95) as a good practice.",
        "Monitor whether your system's use case could evolve into a high-risk category.",
        "Other legislation may still apply: GDPR, Product Safety, Cyber Resilience Act, NIS2, etc.",
        "If a deployer uses your minimal-risk system for a high-risk purpose (Annex III), they become the provider under Art. 25(1)(c).",
        "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
      ],
      "previous": "q_role_result_router",
      "ai_literacy": {
        "title": "AI Literacy (Art. 4)",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "All operators must ensure sufficient AI literacy of staff dealing with AI systems. This applies since 2 February 2025."
      },
      "sector_specific_guidance": {
        "healthcare_medical_devices": {
          "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
          "documents": [
            "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual conformity assessment"
          ],
          "key_points": [
            "Medical device AI requiring third-party CA under MDR/IVDR is high-risk under Art. 6(1)",
            "A single notified body can handle both MDR and AI Act conformity assessment",
            "Clinical evaluation data may satisfy some AI Act requirements (Art. 9)"
          ]
        },
        "financial_services": {
          "title": "Sector-Specific Guidance: Banking / Financial Services",
          "documents": [
            "E1: AI Act implications for banking/payments (EBA, 2025)",
            "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
            "E5: AI Act overview for financial services (Eurofi, 2024)"
          ],
          "key_points": [
            "Credit scoring / creditworthiness AI is high-risk under Annex III, point 5(b)",
            "Existing banking regulation (CRR/CRD, DORA) partially covers AI Act requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
            "Fraud detection AI is explicitly excluded from the credit scoring high-risk category"
          ]
        },
        "insurance": {
          "title": "Sector-Specific Guidance: Insurance",
          "documents": [
            "E4: Insurance AI governance opinion (EIOPA, 2025)"
          ],
          "key_points": [
            "Life/health insurance risk assessment and pricing AI is high-risk under Annex III, point 5(c)",
            "EIOPA recommends risk-based governance for ALL insurance AI, including non-high-risk",
            "Fairness metrics required: demographic parity, equalized odds, predictive parity"
          ]
        },
        "energy": {
          "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
          "documents": [
            "F1: Energy sector high-risk classification consultation (Eurelectric, 2025)",
            "F2: AI and the energy sector briefing (EPRS, 2025)"
          ],
          "key_points": [
            "'Safety component' should be limited to systems that directly impact infrastructure safety",
            "Predictive maintenance typically NOT high-risk (enhances existing safety, doesn't replace it)",
            "Smart grid management may be high-risk if it has direct safety impact — case-by-case"
          ]
        }
      },
      "translations": {
        "fr": {
          "title": "Risque minimal — Pas d’obligations spécifiques au titre du règlement sur l’IA",
          "summary": "Votre système d’IA entre dans le champ d’application du règlement sur l’IA mais est classé à risque minimal. Aucune obligation de conformité spécifique ne s’applique, bien que des principes généraux et des mesures volontaires soient encouragés.",
          "verdict": "RISQUE MINIMAL",
          "recommendations": [
            "AI literacy obligations s'appliquent to ALL operators since 2 February 2025 (Art. 4).",
            "Consider adopting voluntary codes of conduct (Art. 95) as a good practice.",
            "Monitor whether votre système's use case could evolve into a à haut risque category.",
            "Other legislation may still apply: GDPR, Product Safety, Cyber Resilience Act, NIS2, etc.",
            "If a deployer uses your minimal-risk system for a à haut risque purpose (Annex III), they become the provider under Art. 25(1)(c).",
            "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty": "N/A",
          "effective_date": "N/A (voluntary)",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual évaluation de conformité"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is à haut risque under Art. 6(1)",
                "A single notified body can handle both MDR and AI Act évaluation de conformité",
                "Clinical evaluation data may satisfy some AI Act requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: AI Act implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: AI Act overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is à haut risque under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers AI Act requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring à haut risque category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is à haut risque under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-à haut risque",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector à haut risque classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT à haut risque (enhances existing safety, doesn't replace it)",
                "Smart grid management may be à haut risque if it has direct safety impact — case-by-case"
              ]
            }
          }
        },
        "nl": {
          "title": "Minimaal risico — Geen specifieke verplichtingen krachtens de AI-verordening",
          "summary": "Uw AI-systeem valt binnen het toepassingsgebied van de AI-verordening maar is geclassificeerd als minimaal risico. Er gelden geen specifieke nalevingsverplichtingen, hoewel algemene beginselen en vrijwillige maatregelen worden aangemoedigd.",
          "verdict": "MINIMAAL RISICO",
          "recommendations": [
            "AI literacy verplichtingen zijn van toepassing to ALL operators since 2 February 2025 (Art. 4).",
            "Consider adopting voluntary codes of conduct (Art. 95) as a good practice.",
            "Monitor whether uw systeem's use case could evolve into a hoog risico category.",
            "Other legislation may still apply: GDPR, Product Safety, Cyber Resilience Act, NIS2, etc.",
            "If a deployer uses your minimal-risk system for a hoog risico purpose (Annex III), they become the provider under Art. 25(1)(c).",
            "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty": "N/A",
          "effective_date": "N/A (voluntary)",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual conformiteitsbeoordeling"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is hoog risico under Art. 6(1)",
                "A single notified body can handle both MDR and AI Act conformiteitsbeoordeling",
                "Clinical evaluation data may satisfy some AI Act requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: AI Act implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: AI Act overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is hoog risico under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers AI Act requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring hoog risico category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is hoog risico under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-hoog risico",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector hoog risico classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT hoog risico (enhances existing safety, doesn't replace it)",
                "Smart grid management may be hoog risico if it has direct safety impact — case-by-case"
              ]
            }
          }
        },
        "de": {
          "title": "Minimales Risiko — Keine spezifischen Pflichten nach der KI-Verordnung",
          "summary": "Ihr KI-System fällt in den Anwendungsbereich der KI-Verordnung, ist aber als System mit minimalem Risiko eingestuft. Es gelten keine spezifischen Compliance-Pflichten, obwohl allgemeine Grundsätze und freiwillige Maßnahmen empfohlen werden.",
          "verdict": "MINIMALES RISIKO",
          "recommendations": [
            "AI literacy Pflichten gelten to ALL operators since 2 February 2025 (Art. 4).",
            "Consider adopting voluntary codes of conduct (Art. 95) as a good practice.",
            "Monitor whether Ihr System's use case could evolve into a Hochrisiko category.",
            "Other legislation may still apply: GDPR, Product Safety, Cyber Resilience Act, NIS2, etc.",
            "If a deployer uses your minimal-risk system for a Hochrisiko purpose (Annex III), they become the provider under Art. 25(1)(c).",
            "Other EU legislation may apply alongside or independently of the KI-Gesetz, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty": "N/A",
          "effective_date": "N/A (voluntary)",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: KI-Gesetz + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual Konformitätsbewertung"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is Hochrisiko under Art. 6(1)",
                "A single notified body can handle both MDR and KI-Gesetz Konformitätsbewertung",
                "Clinical evaluation data may satisfy some KI-Gesetz requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: KI-Gesetz implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: KI-Gesetz overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is Hochrisiko under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers KI-Gesetz requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring Hochrisiko category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is Hochrisiko under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-Hochrisiko",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector Hochrisiko classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT Hochrisiko (enhances existing safety, doesn't replace it)",
                "Smart grid management may be Hochrisiko if it has direct safety impact — case-by-case"
              ]
            }
          }
        },
        "es": {
          "title": "Riesgo mínimo — Sin obligaciones específicas en virtud del Reglamento de IA",
          "summary": "Su sistema de IA está dentro del ámbito de aplicación del Reglamento de IA pero está clasificado como de riesgo mínimo. No se aplican obligaciones de cumplimiento específicas, aunque se fomentan los principios generales y las medidas voluntarias.",
          "verdict": "RIESGO MÍNIMO",
          "recommendations": [
            "AI literacy se aplican las obligaciones to ALL operators since 2 February 2025 (Art. 4).",
            "Consider adopting voluntary codes of conduct (Art. 95) as a good practice.",
            "Monitor whether su sistema's use case could evolve into a alto riesgo category.",
            "Other legislation may still apply: GDPR, Product Safety, Cyber Resilience Act, NIS2, etc.",
            "If a deployer uses your minimal-risk system for a alto riesgo purpose (Annex III), they become the provider under Art. 25(1)(c).",
            "Other EU legislation may apply alongside or independently of the Ley de IA, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty": "N/A",
          "effective_date": "N/A (voluntary)",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: Ley de IA + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual evaluación de conformidad"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is alto riesgo under Art. 6(1)",
                "A single notified body can handle both MDR and Ley de IA evaluación de conformidad",
                "Clinical evaluation data may satisfy some Ley de IA requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: Ley de IA implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: Ley de IA overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is alto riesgo under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers Ley de IA requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring alto riesgo category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is alto riesgo under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-alto riesgo",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector alto riesgo classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT alto riesgo (enhances existing safety, doesn't replace it)",
                "Smart grid management may be alto riesgo if it has direct safety impact — case-by-case"
              ]
            }
          }
        }
      }
    },
    "q_annex_i_also_annex_iii": {
      "id": "q_annex_i_also_annex_iii",
      "type": "question",
      "stage": "S6",
      "title": "Dual Classification Check: Also Annex III?",
      "body": "Your system is classified as high-risk under Annex I (product safety route). However, it may ALSO fall under one of the Annex III high-risk application areas, which could affect your obligations and compliance timeline.\n\nDoes your AI system also fall under any of the Annex III areas?\n(e.g., a medical device AI that also assesses creditworthiness, or a machinery AI used for worker monitoring)",
      "legal_ref": "Art. 6(1) + Art. 6(2)",
      "note": "Dual classification is possible. Annex I obligations apply from 2 Aug 2027, while Annex III obligations apply from 2 Aug 2026. If both apply, you must comply with the earlier deadline for Annex III obligations.",
      "options": [
        {
          "label": "Yes — it may also fall under an Annex III area",
          "value": "yes",
          "next": "q_high_risk_annex_iii_area",
          "translations": {
            "fr": {
              "label": "Oui — il peut également relever d'un domaine de l'Annexe III"
            },
            "nl": {
              "label": "Ja — het kan ook onder een Bijlage III-gebied vallen"
            },
            "de": {
              "label": "Ja — es kann auch unter einen Anhang-III-Bereich fallen"
            },
            "es": {
              "label": "Sí — también puede estar incluido en un área del Anexo III"
            }
          }
        },
        {
          "label": "No — only the product safety (Annex I) route applies",
          "value": "no",
          "next": "q_sector",
          "translations": {
            "fr": {
              "label": "Non — seule la voie de la sécurité des produits (Annexe I) s'applique"
            },
            "nl": {
              "label": "Nee — alleen de productveiligheidsroute (Bijlage I) is van toepassing"
            },
            "de": {
              "label": "Nein — nur der Produktsicherheitsweg (Anhang I) gilt"
            },
            "es": {
              "label": "No — solo se aplica la vía de seguridad de productos (Anexo I)"
            }
          }
        },
        {
          "label": "I'm not sure",
          "value": "unsure",
          "flag": "consult_expert",
          "next": "q_high_risk_annex_iii_area",
          "translations": {
            "fr": {
              "label": "Je ne suis pas sûr"
            },
            "nl": {
              "label": "Ik weet het niet zeker"
            },
            "de": {
              "label": "Ich bin nicht sicher"
            },
            "es": {
              "label": "No estoy seguro/a"
            }
          }
        }
      ],
      "previous": "q_annex_i_third_party_ca",
      "translations": {
        "fr": {
          "title": "Vérification de double classification : également Annexe III ?",
          "body": "Votre système est classé à haut risque au titre de l'Annexe I (voie de la sécurité des produits). Cependant, il peut ÉGALEMENT relever d'un des domaines d'application à haut risque de l'Annexe III, ce qui pourrait affecter vos obligations et votre calendrier de conformité.\n\nVotre système d'IA relève-t-il également d'un des domaines de l'Annexe III ?\n(ex. : un dispositif médical IA qui évalue aussi la solvabilité, ou une machine IA utilisée pour la surveillance des travailleurs)",
          "note": "La double classification est possible. Les obligations de l'Annexe I s'appliquent à partir du 2 août 2027, tandis que les obligations de l'Annexe III s'appliquent à partir du 2 août 2026. Si les deux s'appliquent, vous devez respecter le délai le plus court pour les obligations de l'Annexe III."
        },
        "nl": {
          "title": "Controle op dubbele classificatie: ook Bijlage III?",
          "body": "Uw systeem is geclassificeerd als hoog risico onder Bijlage I (productveiligheidsroute). Het kan echter OOK onder een van de hoog-risicotoepassingsgebieden van Bijlage III vallen, wat invloed kan hebben op uw verplichtingen en nalevingstijdlijn.\n\nValt uw AI-systeem ook onder een van de Bijlage III-gebieden?\n(bv. een AI-medisch hulpmiddel dat ook kredietwaardigheid beoordeelt, of een AI-machine die wordt gebruikt voor werknemersmonitoring)",
          "note": "Dubbele classificatie is mogelijk. Bijlage I-verplichtingen gelden vanaf 2 augustus 2027, terwijl Bijlage III-verplichtingen gelden vanaf 2 augustus 2026. Als beide van toepassing zijn, moet u de eerdere deadline voor Bijlage III-verplichtingen naleven."
        },
        "de": {
          "title": "Prüfung der Doppelklassifizierung: auch Anhang III?",
          "body": "Ihr System ist als Hochrisiko-System gemäß Anhang I (Produktsicherheitsweg) eingestuft. Es kann jedoch AUCH unter einen der Hochrisiko-Anwendungsbereiche des Anhangs III fallen, was Ihre Pflichten und Ihren Compliance-Zeitplan beeinflussen könnte.\n\nFällt Ihr KI-System auch unter einen der Anhang-III-Bereiche?\n(z. B. ein KI-Medizinprodukt, das auch die Kreditwürdigkeit bewertet, oder eine KI-Maschine zur Überwachung von Arbeitnehmern)",
          "note": "Eine Doppelklassifizierung ist möglich. Anhang-I-Pflichten gelten ab dem 2. August 2027, während Anhang-III-Pflichten ab dem 2. August 2026 gelten. Wenn beide zutreffen, müssen Sie die frühere Frist für die Anhang-III-Pflichten einhalten."
        },
        "es": {
          "title": "Verificación de doble clasificación: ¿también Anexo III?",
          "body": "Su sistema está clasificado como de alto riesgo según el Anexo I (vía de seguridad de productos). Sin embargo, también PUEDE estar incluido en una de las áreas de aplicación de alto riesgo del Anexo III, lo que podría afectar sus obligaciones y plazos de cumplimiento.\n\n¿Su sistema de IA también está incluido en alguna de las áreas del Anexo III?\n(ej.: un producto sanitario con IA que también evalúa la solvencia, o una máquina con IA utilizada para la supervisión de trabajadores)",
          "note": "La doble clasificación es posible. Las obligaciones del Anexo I se aplican a partir del 2 de agosto de 2027, mientras que las del Anexo III se aplican a partir del 2 de agosto de 2026. Si ambas se aplican, debe cumplir con el plazo más temprano para las obligaciones del Anexo III."
        }
      }
    },
    "q_sector": {
      "id": "q_sector",
      "type": "question",
      "stage": "S8",
      "title": "Your Sector",
      "body": "Which sector does your AI system primarily operate in? This helps us provide sector-specific guidance.",
      "legal_ref": "Annex III, sector-specific guidance",
      "attribute": "sector",
      "options": [
        {
          "label": "Healthcare / Medical devices",
          "value": "healthcare_medical_devices",
          "next": "q_transparency_chatbot",
          "translations": {
            "fr": {
              "label": "Santé / Dispositifs médicaux"
            },
            "nl": {
              "label": "Gezondheidszorg / Medische hulpmiddelen"
            },
            "de": {
              "label": "Gesundheitswesen / Medizinprodukte"
            },
            "es": {
              "label": "Salud / Productos sanitarios"
            }
          }
        },
        {
          "label": "Banking / Payments / Credit",
          "value": "financial_services",
          "next": "q_transparency_chatbot",
          "translations": {
            "fr": {
              "label": "Banque / Paiements / Crédit"
            },
            "nl": {
              "label": "Bankwezen / Betalingen / Krediet"
            },
            "de": {
              "label": "Banken / Zahlungsverkehr / Kredit"
            },
            "es": {
              "label": "Banca / Pagos / Crédito"
            }
          }
        },
        {
          "label": "Insurance",
          "value": "insurance",
          "next": "q_transparency_chatbot",
          "translations": {
            "fr": {
              "label": "Assurance"
            },
            "nl": {
              "label": "Verzekeringen"
            },
            "de": {
              "label": "Versicherung"
            },
            "es": {
              "label": "Seguros"
            }
          }
        },
        {
          "label": "Energy / Critical infrastructure",
          "value": "energy",
          "next": "q_transparency_chatbot",
          "translations": {
            "fr": {
              "label": "Énergie / Infrastructures critiques"
            },
            "nl": {
              "label": "Energie / Kritieke infrastructuur"
            },
            "de": {
              "label": "Energie / Kritische Infrastruktur"
            },
            "es": {
              "label": "Energía / Infraestructuras críticas"
            }
          }
        },
        {
          "label": "Transport / Automotive / Aviation",
          "value": "transport",
          "next": "q_transparency_chatbot",
          "translations": {
            "fr": {
              "label": "Transport / Automobile / Aviation"
            },
            "nl": {
              "label": "Vervoer / Auto-industrie / Luchtvaart"
            },
            "de": {
              "label": "Verkehr / Automobil / Luftfahrt"
            },
            "es": {
              "label": "Transporte / Automoción / Aviación"
            }
          }
        },
        {
          "label": "Education / Training",
          "value": "education",
          "next": "q_transparency_chatbot",
          "translations": {
            "fr": {
              "label": "Éducation / Formation"
            },
            "nl": {
              "label": "Onderwijs / Opleiding"
            },
            "de": {
              "label": "Bildung / Ausbildung"
            },
            "es": {
              "label": "Educación / Formación"
            }
          }
        },
        {
          "label": "Employment / HR",
          "value": "employment_hr",
          "next": "q_transparency_chatbot",
          "translations": {
            "fr": {
              "label": "Emploi / RH"
            },
            "nl": {
              "label": "Werkgelegenheid / HR"
            },
            "de": {
              "label": "Beschäftigung / Personalwesen"
            },
            "es": {
              "label": "Empleo / RRHH"
            }
          }
        },
        {
          "label": "Law enforcement / Security",
          "value": "law_enforcement",
          "next": "q_transparency_chatbot",
          "translations": {
            "fr": {
              "label": "Répression / Sécurité"
            },
            "nl": {
              "label": "Rechtshandhaving / Beveiliging"
            },
            "de": {
              "label": "Strafverfolgung / Sicherheit"
            },
            "es": {
              "label": "Aplicación de la ley / Seguridad"
            }
          }
        },
        {
          "label": "Migration / Border control",
          "value": "migration_border",
          "next": "q_transparency_chatbot",
          "translations": {
            "fr": {
              "label": "Migration / Contrôle aux frontières"
            },
            "nl": {
              "label": "Migratie / Grenscontrole"
            },
            "de": {
              "label": "Migration / Grenzkontrolle"
            },
            "es": {
              "label": "Migración / Control fronterizo"
            }
          }
        },
        {
          "label": "Justice / Legal",
          "value": "justice",
          "next": "q_transparency_chatbot",
          "translations": {
            "fr": {
              "label": "Justice / Juridique"
            },
            "nl": {
              "label": "Justitie / Juridisch"
            },
            "de": {
              "label": "Justiz / Recht"
            },
            "es": {
              "label": "Justicia / Jurídico"
            }
          }
        },
        {
          "label": "Public services / Government",
          "value": "public_services",
          "next": "q_transparency_chatbot",
          "translations": {
            "fr": {
              "label": "Services publics / Administration"
            },
            "nl": {
              "label": "Overheidsdiensten / Overheid"
            },
            "de": {
              "label": "Öffentliche Dienste / Verwaltung"
            },
            "es": {
              "label": "Servicios públicos / Administración"
            }
          }
        },
        {
          "label": "Other / Cross-sector",
          "value": "other",
          "next": "q_transparency_chatbot",
          "translations": {
            "fr": {
              "label": "Autre / Intersectoriel"
            },
            "nl": {
              "label": "Anders / Sectoroverschrijdend"
            },
            "de": {
              "label": "Sonstiges / Branchenübergreifend"
            },
            "es": {
              "label": "Otro / Intersectorial"
            }
          }
        }
      ],
      "previous": [
        "q_high_risk_annex_iii_area",
        "q_annex_iii_biometrics",
        "q_annex_iii_critical_infra",
        "q_annex_iii_education",
        "q_annex_iii_employment",
        "q_annex_iii_essential_services",
        "q_annex_iii_law_enforcement",
        "q_annex_iii_migration",
        "q_annex_iii_justice",
        "q_art6_3_profiling",
        "q_art6_3_filter",
        "q_annex_i_also_annex_iii"
      ],
      "translations": {
        "fr": {
          "title": "Votre secteur",
          "body": "Quel secteur votre système d'IA opère-t-il principalement ? Cela nous aide à fournir des recommandations sectorielles spécifiques."
        },
        "nl": {
          "title": "Uw sector",
          "body": "In welke sector wordt uw AI-systeem voornamelijk ingezet? Dit helpt ons sectorspecifieke begeleiding te bieden."
        },
        "de": {
          "title": "Ihre Branche",
          "body": "In welcher Branche wird Ihr KI-System hauptsächlich eingesetzt? Dies hilft uns, branchenspezifische Hinweise zu geben."
        },
        "es": {
          "title": "Su sector",
          "body": "¿En qué sector opera principalmente su sistema de IA? Esto nos ayuda a proporcionar orientación específica del sector."
        }
      }
    },
    "q_art25_importer_distributor": {
      "id": "q_art25_importer_distributor",
      "type": "question",
      "stage": "S11",
      "title": "Importer/Distributor as Provider Check",
      "body": "As an importer or distributor, do any of the following apply?\n\n(a) You put your **own name or trademark** on a high-risk AI system already on the market\n(b) You make a **substantial modification** to a high-risk AI system\n(c) You **change the intended purpose** of an AI system so that it becomes high-risk",
      "legal_ref": "Art. 25(1)",
      "note": "Art. 25(1) applies to 'any distributor, importer, deployer or other third-party'. If any condition is met, you assume PROVIDER obligations.",
      "options": [
        {
          "label": "(a) Yes — I put my own name/trademark on it",
          "value": "own_name",
          "set": {
            "deployer_becomes_provider": true,
            "deployer_becomes_provider_reason": "own_name_trademark",
            "role": "provider"
          },
          "next": "q_role_result_router",
          "translations": {
            "fr": {
              "label": "(a) Oui — J'appose mon propre nom/ma propre marque"
            },
            "nl": {
              "label": "(a) Ja — Ik plaats mijn eigen naam/merk erop"
            },
            "de": {
              "label": "(a) Ja — Ich versehe es mit meinem eigenen Namen/meiner eigenen Marke"
            },
            "es": {
              "label": "(a) Sí — Pongo mi propio nombre/marca"
            }
          }
        },
        {
          "label": "(b) Yes — I made a substantial modification",
          "value": "substantial_mod",
          "set": {
            "deployer_becomes_provider": true,
            "deployer_becomes_provider_reason": "substantial_modification",
            "role": "provider"
          },
          "next": "q_role_result_router",
          "translations": {
            "fr": {
              "label": "(b) Oui — J'ai apporté une modification substantielle"
            },
            "nl": {
              "label": "(b) Ja — Ik heb een substantiële wijziging aangebracht"
            },
            "de": {
              "label": "(b) Ja — Ich habe eine wesentliche Veränderung vorgenommen"
            },
            "es": {
              "label": "(b) Sí — He realizado una modificación sustancial"
            }
          }
        },
        {
          "label": "(c) Yes — I repurposed it for a high-risk use",
          "value": "repurpose",
          "set": {
            "deployer_becomes_provider": true,
            "deployer_becomes_provider_reason": "repurpose_to_high_risk",
            "role": "provider",
            "risk_category": "high_risk_annex_iii"
          },
          "next": "q_role_result_router",
          "translations": {
            "fr": {
              "label": "(c) Oui — Je l'ai réaffecté à un usage à haut risque"
            },
            "nl": {
              "label": "(c) Ja — Ik heb het herbestemd voor hoog-risicotoepassing"
            },
            "de": {
              "label": "(c) Ja — Ich habe es für einen Hochrisiko-Einsatz umgewidmet"
            },
            "es": {
              "label": "(c) Sí — Lo he reasignado a un uso de alto riesgo"
            }
          }
        },
        {
          "label": "No — none of the above apply",
          "value": "no",
          "set": {
            "deployer_becomes_provider": false
          },
          "next": "q_role_result_router",
          "translations": {
            "fr": {
              "label": "Non — aucune des situations ci-dessus ne s'applique"
            },
            "nl": {
              "label": "Nee — geen van bovenstaande situaties is van toepassing"
            },
            "de": {
              "label": "Nein — keine der oben genannten Situationen trifft zu"
            },
            "es": {
              "label": "No — ninguna de las situaciones anteriores se aplica"
            }
          }
        }
      ],
      "previous": "q_role",
      "translations": {
        "fr": {
          "title": "Vérification de l'importateur/distributeur en tant que fournisseur",
          "body": "En tant qu'importateur ou distributeur, l'une des situations suivantes s'applique-t-elle ?\n\n(a) Vous apposez votre **propre nom ou marque** sur un système d'IA à haut risque déjà sur le marché\n(b) Vous apportez une **modification substantielle** à un système d'IA à haut risque\n(c) Vous **modifiez la finalité** d'un système d'IA de sorte qu'il devient à haut risque",
          "note": "L'art. 25(1) s'applique à « tout distributeur, importateur, déployeur ou autre tiers ». Si l'une des conditions est remplie, vous assumez les obligations du FOURNISSEUR."
        },
        "nl": {
          "title": "Controle: importeur/distributeur als aanbieder",
          "body": "Is als importeur of distributeur een van de volgende situaties van toepassing?\n\n(a) U plaatst uw **eigen naam of merk** op een hoog-risico-AI-systeem dat al op de markt is\n(b) U brengt een **substantiële wijziging** aan in een hoog-risico-AI-systeem\n(c) U **wijzigt het beoogde doel** van een AI-systeem zodat het hoog risico wordt",
          "note": "Art. 25(1) is van toepassing op 'elke distributeur, importeur, gebruiksverantwoordelijke of andere derde partij'. Als aan een van de voorwaarden is voldaan, neemt u de verplichtingen van de AANBIEDER over."
        },
        "de": {
          "title": "Prüfung: Einführer/Händler als Anbieter",
          "body": "Trifft als Einführer oder Händler eine der folgenden Situationen zu?\n\n(a) Sie versehen ein bereits auf dem Markt befindliches Hochrisiko-KI-System mit Ihrem **eigenen Namen oder Ihrer eigenen Marke**\n(b) Sie nehmen eine **wesentliche Veränderung** an einem Hochrisiko-KI-System vor\n(c) Sie **ändern die Zweckbestimmung** eines KI-Systems, sodass es zu einem Hochrisiko-System wird",
          "note": "Art. 25(1) gilt für „jeden Händler, Einführer, Betreiber oder sonstigen Dritten“. Wenn eine der Bedingungen erfüllt ist, übernehmen Sie die Pflichten des ANBIETERS."
        },
        "es": {
          "title": "Verificación: importador/distribuidor como proveedor",
          "body": "Como importador o distribuidor, ¿se aplica alguna de las siguientes situaciones?\n\n(a) Usted pone su **propio nombre o marca** en un sistema de IA de alto riesgo ya comercializado\n(b) Usted realiza una **modificación sustancial** en un sistema de IA de alto riesgo\n(c) Usted **modifica la finalidad** de un sistema de IA de modo que se convierte en alto riesgo",
          "note": "El art. 25(1) se aplica a «todo distribuidor, importador, responsable del despliegue u otro tercero». Si se cumple alguna de las condiciones, usted asume las obligaciones del PROVEEDOR."
        }
      }
    },
    "result_high_risk_gpai": {
      "id": "result_high_risk_gpai",
      "type": "result",
      "verdict": "HIGH_RISK_AND_GPAI",
      "verdict_class": "high-risk",
      "title": "HIGH-RISK AI System + GPAI Model Provider",
      "summary": "Your AI system is classified as HIGH-RISK under the EU AI Act, AND you are also a provider of a general-purpose AI model. Both sets of obligations apply cumulatively.",
      "legal_ref": "Art. 6 + Art. 51-56",
      "effective_date_high_risk_annex_iii": "2 August 2026",
      "effective_date_high_risk_annex_i": "2 August 2027",
      "effective_date_gpai": "2 August 2025",
      "penalty_high_risk": "Up to EUR 15 million or 3% of worldwide annual turnover",
      "penalty_gpai": "Up to EUR 15 million or 3% of worldwide annual turnover (Commission-imposed)",
      "penalty_prohibited": "Up to EUR 35 million or 7% of worldwide annual turnover",
      "systemic_risk_threshold": "10^25 FLOPs cumulative training compute (Art. 51(2))",
      "note": "GPAI obligations apply from 2 Aug 2025 (already in effect). High-risk obligations have their own timeline.",
      "cross_reference_high_risk": "Full high-risk obligations are listed above. For additional detail, see Art. 9-18, Art. 26, Art. 43-49, Art. 72-73.",
      "cross_reference_gpai": "Full GPAI obligations are listed above. For additional detail, see Art. 53-56, Annex XI-XII.",
      "note_delegated_acts": {
        "title": "Delegated Acts — High-Risk List Updates",
        "ref": "Art. 7(1)",
        "summary": "The European Commission is empowered to adopt delegated acts to update the list of high-risk AI systems in Annex III by adding or modifying use cases. The Commission must take into account criteria including the intended purpose, severity of harm, degree of autonomy, and number of affected persons. Monitor updates to Annex III as they may affect your system's classification."
      },
      "ai_literacy": {
        "title": "AI Literacy",
        "ref": "Art. 4",
        "date": "2 February 2025",
        "summary": "Ensure sufficient AI literacy of staff dealing with AI systems — applies to ALL operators"
      },
      "obligations_provider_high_risk": [
        {
          "id": "HR-P1",
          "title": "Risk Management System",
          "ref": "Art. 9",
          "summary": "Establish and maintain a risk management system throughout the AI system's lifecycle"
        },
        {
          "id": "HR-P2",
          "title": "Data & Data Governance",
          "ref": "Art. 10",
          "summary": "Ensure training, validation, and testing datasets meet quality criteria"
        },
        {
          "id": "HR-P3",
          "title": "Technical Documentation",
          "ref": "Art. 11, Annex IV",
          "summary": "Draw up and maintain technical documentation demonstrating compliance"
        },
        {
          "id": "HR-P4",
          "title": "Record-Keeping (Logging)",
          "ref": "Art. 12",
          "summary": "Enable automatic recording of events (logs) for traceability"
        },
        {
          "id": "HR-P5",
          "title": "Transparency & Instructions for Use",
          "ref": "Art. 13",
          "summary": "Design for transparency; provide instructions for use to deployers"
        },
        {
          "id": "HR-P6",
          "title": "Human Oversight",
          "ref": "Art. 14",
          "summary": "Design for effective human oversight during use"
        },
        {
          "id": "HR-P7",
          "title": "Accuracy, Robustness & Cybersecurity",
          "ref": "Art. 15",
          "summary": "Achieve appropriate levels of accuracy, robustness, and cybersecurity"
        },
        {
          "id": "HR-P8",
          "title": "Quality Management System",
          "ref": "Art. 17",
          "summary": "Put in place a quality management system"
        },
        {
          "id": "HR-P9",
          "title": "Conformity Assessment",
          "ref": "Art. 43, Annex VI/VII",
          "summary": "Undergo conformity assessment before placing on market"
        },
        {
          "id": "HR-P10",
          "title": "EU Declaration of Conformity",
          "ref": "Art. 47, Annex V",
          "summary": "Draw up EU declaration of conformity"
        },
        {
          "id": "HR-P11",
          "title": "CE Marking",
          "ref": "Art. 48",
          "summary": "Affix CE marking"
        },
        {
          "id": "HR-P12",
          "title": "EU Database Registration",
          "ref": "Art. 49, Annex VIII",
          "summary": "Register in the EU database before placing on market"
        },
        {
          "id": "HR-P13",
          "title": "Post-Market Monitoring",
          "ref": "Art. 72",
          "summary": "Establish post-market monitoring system"
        },
        {
          "id": "HR-P14",
          "title": "Serious Incident Reporting",
          "ref": "Art. 73",
          "summary": "Report serious incidents to market surveillance authorities"
        },
        {
          "id": "HR-P15",
          "title": "Documentation Retention",
          "ref": "Art. 18",
          "summary": "Keep documentation for 10 years after last system on market"
        },
        {
          "id": "P-ACC",
          "title": "Accessibility Requirements",
          "ref": "Art. 16(j)",
          "summary": "Ensure the AI system complies with accessibility requirements in accordance with Directives (EU) 2016/2102 and (EU) 2019/882, where applicable."
        }
      ],
      "obligations_deployer_high_risk": [
        {
          "id": "HR-D1",
          "title": "Use per Instructions",
          "ref": "Art. 26(1)",
          "summary": "Use the system in accordance with provider's instructions"
        },
        {
          "id": "HR-D2",
          "title": "Human Oversight",
          "ref": "Art. 26(2)",
          "summary": "Assign competent, trained natural persons for human oversight"
        },
        {
          "id": "HR-D3",
          "title": "Input Data Quality",
          "ref": "Art. 26(4)",
          "summary": "Ensure input data is relevant and representative for intended purpose"
        },
        {
          "id": "HR-D4",
          "title": "Monitor Operation",
          "ref": "Art. 26(5)",
          "summary": "Monitor operation and inform provider/distributor of risks"
        },
        {
          "id": "HR-D5",
          "title": "Log Retention",
          "ref": "Art. 26(6)",
          "summary": "Keep automatically generated logs for minimum 6 months"
        },
        {
          "id": "HR-D6",
          "title": "Worker Information",
          "ref": "Art. 26(7)",
          "summary": "Inform workers' representatives and affected workers before deployment"
        },
        {
          "id": "HR-D7",
          "title": "Inform Affected Persons",
          "ref": "Art. 26(8)",
          "summary": "Inform natural persons subject to decisions by the AI system"
        },
        {
          "id": "HR-D8",
          "title": "FRIA (if public authority)",
          "ref": "Art. 27",
          "summary": "Conduct fundamental rights impact assessment before deployment (public authorities and certain private deployers). Note: Art. 27(10) excludes high-risk AI systems used in the area of critical infrastructure (Annex III, point 2) from the FRIA obligation."
        },
        {
          "id": "HR-D9",
          "title": "EU Database Registration (if public authority)",
          "ref": "Art. 49",
          "summary": "Register use in the EU database (public authorities)"
        },
        {
          "id": "D-DPIA",
          "title": "Data Protection Impact Assessment",
          "ref": "Art. 26(10)",
          "summary": "Carry out a DPIA under Regulation (EU) 2016/679 or Directive (EU) 2016/680, using the output of the FRIA as input where applicable."
        }
      ],
      "obligations_all_gpai": [
        {
          "id": "GPAI-1",
          "title": "Technical Documentation",
          "ref": "Art. 53(1)(a), Annex XI",
          "summary": "Draw up and maintain technical documentation of the model"
        },
        {
          "id": "GPAI-2",
          "title": "Downstream Provider Information",
          "ref": "Art. 53(1)(b), Annex XII",
          "summary": "Provide information and documentation to downstream AI system providers"
        },
        {
          "id": "GPAI-3",
          "title": "Copyright Compliance Policy",
          "ref": "Art. 53(1)(c)",
          "summary": "Put in place a policy to comply with EU copyright law"
        },
        {
          "id": "GPAI-4",
          "title": "Training Data Summary",
          "ref": "Art. 53(1)(d)",
          "summary": "Publish a sufficiently detailed summary of training content"
        }
      ],
      "obligations_systemic_risk": [
        {
          "id": "GPAI-SR1",
          "title": "Model Evaluation",
          "ref": "Art. 55(1)(a)",
          "summary": "Perform model evaluation including adversarial testing"
        },
        {
          "id": "GPAI-SR2",
          "title": "Systemic Risk Assessment & Mitigation",
          "ref": "Art. 55(1)(b)",
          "summary": "Assess and mitigate possible systemic risks"
        },
        {
          "id": "GPAI-SR3",
          "title": "Serious Incident Reporting",
          "ref": "Art. 55(1)(c)",
          "summary": "Track and report serious incidents to the AI Office and national authorities"
        },
        {
          "id": "GPAI-SR4",
          "title": "Cybersecurity Protection",
          "ref": "Art. 55(1)(d)",
          "summary": "Ensure adequate level of cybersecurity protection"
        }
      ],
      "obligations_importer": [
        {
          "id": "HR-I1",
          "title": "Verify Compliance",
          "ref": "Art. 23",
          "summary": "Verify conformity assessment, CE marking, documentation, and provider obligations before placing on market"
        }
      ],
      "obligations_distributor": [
        {
          "id": "HR-Dist1",
          "title": "Verify Compliance",
          "ref": "Art. 24",
          "summary": "Verify CE marking, documentation; ensure storage/transport conditions don't jeopardise compliance"
        }
      ],
      "obligations_open_source_exempt": [
        "GPAI-3",
        "GPAI-4"
      ],
      "obligations_open_source_exempt_note": "Open-source GPAI models (Art. 53(2)) are exempt from GPAI-1 and GPAI-2, but must still comply with GPAI-3 and GPAI-4. This exemption does NOT apply to GPAI models with systemic risk.",
      "obligations_notification": {
        "id": "GPAI-NOTIFY",
        "title": "Commission Notification (Systemic Risk)",
        "ref": "Art. 52",
        "summary": "Providers of GPAI models with systemic risk must notify the European Commission within two weeks of meeting the systemic risk criteria (e.g., reaching the 10^25 FLOPs training compute threshold). This notification obligation applies specifically to GPAI models classified as posing systemic risk, not to all GPAI providers. The notification concerns classification as a systemic risk model."
      },
      "obligations_transparency_cumulative": {
        "note": "Art. 50 transparency obligations apply CUMULATIVELY with high-risk requirements (Art. 50(6)). Check the transparency_obligations attribute for applicable Art. 50 obligations.",
        "possible_obligations": [
          {
            "id": "HR-T1",
            "title": "AI Interaction Disclosure",
            "ref": "Art. 50(1)",
            "condition": "chatbot_disclosure",
            "who": "Provider",
            "summary": "Inform persons they are interacting with AI"
          },
          {
            "id": "HR-T2",
            "title": "Synthetic Content Marking",
            "ref": "Art. 50(2)",
            "condition": "synthetic_content_marking",
            "who": "Provider",
            "summary": "Mark outputs in machine-readable format as AI-generated"
          },
          {
            "id": "HR-T3",
            "title": "Emotion/Biometric Disclosure",
            "ref": "Art. 50(3)",
            "condition": "emotion_biometric_disclosure",
            "who": "Deployer",
            "summary": "Inform persons exposed to emotion recognition or biometric categorisation"
          },
          {
            "id": "HR-T4",
            "title": "Deep Fake Disclosure",
            "ref": "Art. 50(4)",
            "condition": "deepfake_disclosure",
            "who": "Deployer",
            "summary": "Disclose that content is AI-generated or manipulated"
          },
          {
            "id": "HR-T5",
            "title": "AI-Generated Text Disclosure",
            "ref": "Art. 50(4)",
            "condition": "ai_generated_text_disclosure",
            "who": "Deployer",
            "summary": "Disclose AI-generated text on public interest matters"
          }
        ]
      },
      "timeline": {
        "annex_iii_systems": "Obligations apply from 2 August 2026",
        "annex_i_systems": "Obligations apply from 2 August 2027",
        "existing_systems": "Systems on market before applicable date: only if significant design changes after that date",
        "large_scale_it": "Large-scale IT systems (Annex X): compliance by 31 December 2030",
        "public_authority_existing_systems": "Public authority deployers of existing high-risk AI systems must comply by 2 August 2030 (Art. 111(2))"
      },
      "recommendations": [
        "GPAI models already placed on the market before 2 August 2025 must comply with GPAI obligations by 2 August 2027 (Art. 111(3)).",
        "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
      ],
      "sector_specific_guidance": {
        "healthcare_medical_devices": {
          "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
          "documents": [
            "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual conformity assessment"
          ],
          "key_points": [
            "Medical device AI requiring third-party CA under MDR/IVDR is high-risk under Art. 6(1)",
            "A single notified body can handle both MDR and AI Act conformity assessment",
            "Clinical evaluation data may satisfy some AI Act requirements (Art. 9)"
          ]
        },
        "financial_services": {
          "title": "Sector-Specific Guidance: Banking / Financial Services",
          "documents": [
            "E1: AI Act implications for banking/payments (EBA, 2025)",
            "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
            "E5: AI Act overview for financial services (Eurofi, 2024)"
          ],
          "key_points": [
            "Credit scoring / creditworthiness AI is high-risk under Annex III, point 5(b)",
            "Existing banking regulation (CRR/CRD, DORA) partially covers AI Act requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
            "Fraud detection AI is explicitly excluded from the credit scoring high-risk category"
          ]
        },
        "insurance": {
          "title": "Sector-Specific Guidance: Insurance",
          "documents": [
            "E4: Insurance AI governance opinion (EIOPA, 2025)"
          ],
          "key_points": [
            "Life/health insurance risk assessment and pricing AI is high-risk under Annex III, point 5(c)",
            "EIOPA recommends risk-based governance for ALL insurance AI, including non-high-risk",
            "Fairness metrics required: demographic parity, equalized odds, predictive parity"
          ]
        },
        "energy": {
          "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
          "documents": [
            "F1: Energy sector high-risk classification consultation (Eurelectric, 2025)",
            "F2: AI and the energy sector briefing (EPRS, 2025)"
          ],
          "key_points": [
            "'Safety component' should be limited to systems that directly impact infrastructure safety",
            "Predictive maintenance typically NOT high-risk (enhances existing safety, doesn't replace it)",
            "Smart grid management may be high-risk if it has direct safety impact — case-by-case"
          ]
        }
      },
      "fria_guidance": {
        "title": "FRIA Guidance",
        "documents": [
          "G3: Practical guide to FRIA (DIHR/ECNL, 2025)"
        ],
        "who_must_conduct": [
          "Public authorities / bodies governed by public law",
          "Private entities providing essential public services",
          "Banking/insurance companies using AI under Annex III points 5(b)-(c)"
        ],
        "process": "5 phases: Planning & Scoping, Assess & Mitigate, Deployment Decision, Monitoring, Consulting Affected Groups"
      },
      "previous": "q_role_result_router",
      "translations": {
        "fr": {
          "title": "Système d’IA à HAUT RISQUE + Fournisseur de modèle GPAI",
          "summary": "Votre système d’IA est classé à HAUT RISQUE au titre du règlement européen sur l’IA, ET vous êtes également fournisseur d’un modèle d’IA à usage général. Les deux ensembles d’obligations s’appliquent de manière cumulative.",
          "note": "Les obligations GPAI s’appliquent à compter du 2 août 2025 (déjà en vigueur). Les obligations relatives aux systèmes à haut risque ont leur propre calendrier.",
          "verdict": "HAUT RISQUE + GPAI",
          "recommendations": [
            "modèle GPAIs already placed on the market before 2 August 2025 must comply with GPAI obligations by 2 August 2027 (Art. 111(3)).",
            "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty_high_risk": "Up to EUR 15 million or 3% of worldwide annual turnover",
          "penalty_prohibited": "Up to EUR 35 million or 7% of worldwide annual turnover",
          "penalty_gpai": "Up to EUR 15 million or 3% of worldwide annual turnover (Commission-imposed)",
          "effective_date_gpai": "2 août 2025",
          "effective_date_high_risk_annex_i": "2 août 2027",
          "effective_date_high_risk_annex_iii": "2 août 2026",
          "ai_literacy": {
            "title": "Maîtrise de l'IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 février 2025",
            "summary": "Tous les opérateurs doivent veiller à ce que le personnel travaillant avec des systèmes d'IA dispose d'un niveau suffisant de maîtrise de l'IA. Cela s'applique depuis le 2 février 2025."
          },
          "note_delegated_acts": {
            "title": "Actes délégués — Mises à jour de la liste des systèmes à haut risque",
            "ref": "Art. 7(1)",
            "summary": "La Commission européenne est habilitée à adopter des actes délégués pour mettre à jour la liste des systèmes d'IA à haut risque de l'annexe III en ajoutant ou modifiant des cas d'utilisation. Surveillez les mises à jour car elles peuvent affecter la classification de votre système."
          },
          "obligations_provider_high_risk": [
            {
              "id": "HR-P1",
              "title": "Système de gestion des risques",
              "ref": "Art. 9",
              "summary": "Établir et maintenir un système de gestion des risques tout au long du cycle de vie du système d'IA"
            },
            {
              "id": "HR-P2",
              "title": "Gouvernance des données",
              "ref": "Art. 10",
              "summary": "Veiller à ce que les données d'entraînement, de validation et de test respectent les critères de qualité"
            },
            {
              "id": "HR-P3",
              "title": "Documentation technique",
              "ref": "Art. 11, Annex IV",
              "summary": "Rédiger la documentation technique avant la mise sur le marché et la tenir à jour"
            },
            {
              "id": "HR-P4",
              "title": "Journalisation automatique",
              "ref": "Art. 12",
              "summary": "Veiller à ce que le système d'IA dispose de capacités de journalisation automatique"
            },
            {
              "id": "HR-P5",
              "title": "Transparence et information",
              "ref": "Art. 13",
              "summary": "S'assurer que le système est transparent et fournir des instructions d'utilisation aux déployeurs"
            },
            {
              "id": "HR-P6",
              "title": "Contrôle humain",
              "ref": "Art. 14",
              "summary": "Concevoir le système de manière à permettre un contrôle humain effectif"
            },
            {
              "id": "HR-P7",
              "title": "Exactitude, robustesse et cybersécurité",
              "ref": "Art. 15",
              "summary": "Concevoir et développer le système pour atteindre des niveaux appropriés d'exactitude, de robustesse et de cybersécurité"
            },
            {
              "id": "HR-P8",
              "title": "Système de gestion de la qualité",
              "ref": "Art. 17",
              "summary": "Mettre en place un système de gestion de la qualité"
            },
            {
              "id": "HR-P9",
              "title": "Évaluation de conformité",
              "ref": "Art. 43, Annex VI/VII",
              "summary": "Effectuer l'évaluation de conformité applicable avant la mise sur le marché"
            },
            {
              "id": "HR-P10",
              "title": "Déclaration de conformité UE",
              "ref": "Art. 47, Annex V",
              "summary": "Rédiger la déclaration de conformité UE et apposer le marquage CE"
            },
            {
              "id": "HR-P11",
              "title": "Enregistrement dans la base de données de l'UE",
              "ref": "Art. 48",
              "summary": "Enregistrer le système dans la base de données de l'UE avant la mise sur le marché"
            },
            {
              "id": "HR-P12",
              "title": "Mesures correctives et rappel",
              "ref": "Art. 49, Annex VIII",
              "summary": "Prendre des mesures correctives, informer les déployeurs et les autorités de tout non-conformité"
            },
            {
              "id": "HR-P13",
              "title": "Coopération avec les autorités",
              "ref": "Art. 72",
              "summary": "Coopérer avec les autorités nationales compétentes et fournir les informations demandées"
            },
            {
              "id": "HR-P14",
              "title": "Accessibilité",
              "ref": "Art. 73",
              "summary": "Respecter les exigences d'accessibilité conformément aux directives applicables"
            },
            {
              "id": "HR-P15",
              "title": "Surveillance après commercialisation",
              "ref": "Art. 18",
              "summary": "Mettre en place un système de surveillance après commercialisation proportionné"
            },
            {
              "id": "P-ACC",
              "title": "Accessibilité (Art. 16(k))",
              "ref": "Art. 16(j)",
              "summary": "Veiller à ce que le système d'IA respecte les exigences d'accessibilité"
            }
          ],
          "obligations_deployer_high_risk": [
            {
              "id": "HR-D1",
              "title": "Utilisation conforme aux instructions",
              "ref": "Art. 26(1)",
              "summary": "Utiliser le système conformément aux instructions d'utilisation du fournisseur"
            },
            {
              "id": "HR-D2",
              "title": "Contrôle humain",
              "ref": "Art. 26(2)",
              "summary": "S'assurer que les personnes chargées du contrôle humain sont compétentes et habilitées"
            },
            {
              "id": "HR-D3",
              "title": "Pertinence des données d'entrée",
              "ref": "Art. 26(4)",
              "summary": "Veiller à la pertinence des données d'entrée au regard de la finalité du système"
            },
            {
              "id": "HR-D4",
              "title": "Surveillance du fonctionnement",
              "ref": "Art. 26(5)",
              "summary": "Surveiller le fonctionnement du système sur la base des instructions d'utilisation"
            },
            {
              "id": "HR-D5",
              "title": "Conservation des journaux",
              "ref": "Art. 26(6)",
              "summary": "Conserver les journaux générés automatiquement dans la mesure où ils sont sous leur contrôle"
            },
            {
              "id": "HR-D6",
              "title": "Information des travailleurs",
              "ref": "Art. 26(7)",
              "summary": "Informer les représentants des travailleurs et les travailleurs concernés de l'utilisation du système"
            },
            {
              "id": "HR-D7",
              "title": "Coopération avec les autorités",
              "ref": "Art. 26(8)",
              "summary": "Coopérer avec les autorités nationales compétentes"
            },
            {
              "id": "HR-D8",
              "title": "Analyse d'impact sur les droits fondamentaux (FRIA)",
              "ref": "Art. 27",
              "summary": "Réaliser une analyse d'impact sur les droits fondamentaux avant le déploiement (Art. 27). Remarque : l'Art. 27(10) exclut les systèmes de l'annexe III, point 2 (infrastructures critiques) de cette obligation."
            },
            {
              "id": "HR-D9",
              "title": "Enregistrement d'utilisation dans la base de données de l'UE",
              "ref": "Art. 49",
              "summary": "Enregistrer l'utilisation du système dans la base de données de l'UE"
            },
            {
              "id": "D-DPIA",
              "title": "Analyse d'impact relative à la protection des données (AIPD)",
              "ref": "Art. 26(10)",
              "summary": "Réaliser une AIPD conformément au règlement (UE) 2016/679 ou à la directive (UE) 2016/680"
            }
          ],
          "obligations_importer": [
            {
              "id": "HR-I1",
              "title": "Vérifier la conformité",
              "ref": "Art. 23",
              "summary": "Vérifier que le fournisseur a effectué l'évaluation de conformité et établi la documentation technique"
            }
          ],
          "obligations_distributor": [
            {
              "id": "HR-Dist1",
              "title": "Vérifier la conformité",
              "ref": "Art. 24",
              "summary": "Vérifier que le système porte le marquage CE et est accompagné de la documentation requise"
            }
          ],
          "obligations_all_gpai": [
            {
              "id": "GPAI-1",
              "title": "Documentation technique",
              "ref": "Art. 53(1)(a), Annex XI",
              "summary": "Rédiger et tenir à jour la documentation technique du modèle GPAI"
            },
            {
              "id": "GPAI-2",
              "title": "Information aux fournisseurs en aval",
              "ref": "Art. 53(1)(b), Annex XII",
              "summary": "Fournir des informations et de la documentation aux fournisseurs de systèmes d'IA en aval"
            },
            {
              "id": "GPAI-3",
              "title": "Politique de respect du droit d'auteur",
              "ref": "Art. 53(1)(c)",
              "summary": "Mettre en place une politique de conformité au droit d'auteur de l'UE"
            },
            {
              "id": "GPAI-4",
              "title": "Résumé des données d'entraînement",
              "ref": "Art. 53(1)(d)",
              "summary": "Rédiger et publier un résumé suffisamment détaillé des données d'entraînement"
            }
          ],
          "obligations_systemic_risk": [
            {
              "id": "GPAI-SR1",
              "title": "Évaluation du modèle",
              "ref": "Art. 55(1)(a)",
              "summary": "Effectuer des évaluations du modèle, y compris la réalisation de tests adverses"
            },
            {
              "id": "GPAI-SR2",
              "title": "Évaluation et atténuation des risques systémiques",
              "ref": "Art. 55(1)(b)",
              "summary": "Évaluer et atténuer les risques systémiques possibles"
            },
            {
              "id": "GPAI-SR3",
              "title": "Signalement des incidents",
              "ref": "Art. 55(1)(c)",
              "summary": "Suivre, documenter et signaler les incidents graves à l'AI Office et aux autorités nationales"
            },
            {
              "id": "GPAI-SR4",
              "title": "Protection de cybersécurité",
              "ref": "Art. 55(1)(d)",
              "summary": "Assurer un niveau adéquat de protection en matière de cybersécurité pour le modèle GPAI"
            }
          ],
          "obligations_notification": [
            "id",
            "title",
            "ref",
            "summary"
          ],
          "obligations_transparency_cumulative": {
            "note": "Art. 50 obligations de transparence apply CUMULATIVELY with à haut risque requirements (Art. 50(6)). Check the transparency_obligations attribute for applicable Art. 50 obligations.",
            "possible_obligations": [
              {
                "id": "HR-T1",
                "title": "Divulgation d'interaction avec l'IA",
                "ref": "Art. 50(1)",
                "condition": "chatbot_disclosure",
                "who": "Fournisseur",
                "summary": "Concevoir le système pour informer les personnes qu'elles interagissent avec une IA (sauf si c'est évident du contexte)"
              },
              {
                "id": "HR-T2",
                "title": "Marquage du contenu synthétique",
                "ref": "Art. 50(2)",
                "condition": "synthetic_content_marking",
                "who": "Fournisseur",
                "summary": "Marquer les sorties de contenu synthétique (images, audio, vidéo) dans un format lisible par machine"
              },
              {
                "id": "HR-T3",
                "title": "Divulgation de reconnaissance d'émotions / biométrique",
                "ref": "Art. 50(3)",
                "condition": "emotion_biometric_disclosure",
                "who": "Déployeur",
                "summary": "Informer les personnes de l'utilisation de systèmes de reconnaissance d'émotions ou de catégorisation biométrique"
              },
              {
                "id": "HR-T4",
                "title": "Divulgation de deepfake",
                "ref": "Art. 50(4)",
                "condition": "deepfake_disclosure",
                "who": "Déployeur",
                "summary": "Divulguer que le contenu a été généré ou manipulé artificiellement (deepfakes)"
              },
              {
                "id": "HR-T5",
                "title": "Divulgation de texte généré par IA",
                "ref": "Art. 50(4)",
                "condition": "ai_generated_text_disclosure",
                "who": "Déployeur",
                "summary": "Indiquer que le texte a été généré par IA lorsqu'il est publié pour informer le public"
              }
            ]
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual évaluation de conformité"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is à haut risque under Art. 6(1)",
                "A single notified body can handle both MDR and AI Act évaluation de conformité",
                "Clinical evaluation data may satisfy some AI Act requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: AI Act implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: AI Act overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is à haut risque under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers AI Act requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring à haut risque category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is à haut risque under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-à haut risque",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector à haut risque classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT à haut risque (enhances existing safety, doesn't replace it)",
                "Smart grid management may be à haut risque if it has direct safety impact — case-by-case"
              ]
            }
          },
          "fria_guidance": {
            "title": "FRIA Guidance",
            "documents": [
              "G3: Practical guide to FRIA (DIHR/ECNL, 2025)"
            ],
            "who_must_conduct": [
              "Public authorities / bodies governed by public law",
              "Private entities providing essential public services",
              "Banking/insurance companies using AI under Annex III points 5(b)-(c)"
            ],
            "process": "5 phases: Planning & Scoping, Assess & Mitigate, Deployment Decision, Monitoring, Consulting Affected Groups"
          }
        },
        "nl": {
          "title": "HOOG RISICO AI-systeem + GPAI-modelaanbieder",
          "summary": "Uw AI-systeem is geclassificeerd als HOOG RISICO krachtens de EU AI-verordening, EN u bent ook aanbieder van een AI-model voor algemene doeleinden. Beide reeksen verplichtingen zijn cumulatief van toepassing.",
          "note": "GPAI-verplichtingen gelden vanaf 2 augustus 2025 (reeds van kracht). Verplichtingen voor hoog-risico systemen hebben hun eigen tijdlijn.",
          "verdict": "HOOG RISICO + GPAI",
          "recommendations": [
            "GPAI-models already placed on the market before 2 August 2025 must comply with GPAI obligations by 2 August 2027 (Art. 111(3)).",
            "Other EU legislation may apply alongside or independently of the AI Act, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty_high_risk": "Up to EUR 15 million or 3% of worldwide annual turnover",
          "penalty_prohibited": "Up to EUR 35 million or 7% of worldwide annual turnover",
          "penalty_gpai": "Up to EUR 15 million or 3% of worldwide annual turnover (Commission-imposed)",
          "effective_date_gpai": "2 augustus 2025",
          "effective_date_high_risk_annex_i": "2 augustus 2027",
          "effective_date_high_risk_annex_iii": "2 augustus 2026",
          "ai_literacy": {
            "title": "AI-geletterdheid (Art. 4)",
            "ref": "Art. 4",
            "date": "2 februari 2025",
            "summary": "Alle operatoren moeten ervoor zorgen dat personeel dat met AI-systemen werkt over voldoende AI-geletterdheid beschikt. Dit is van toepassing sinds 2 februari 2025."
          },
          "note_delegated_acts": {
            "title": "Gedelegeerde handelingen — Updates van de lijst van AI-systemen met een hoog risico",
            "ref": "Art. 7(1)",
            "summary": "De Europese Commissie is bevoegd om gedelegeerde handelingen vast te stellen om de lijst van AI-systemen met een hoog risico in bijlage III bij te werken door gebruikscategorieën toe te voegen of te wijzigen. Houd updates in de gaten, want deze kunnen de classificatie van uw systeem beïnvloeden."
          },
          "obligations_provider_high_risk": [
            {
              "id": "HR-P1",
              "title": "Risicobeheersysteem",
              "ref": "Art. 9",
              "summary": "Een risicobeheersysteem opzetten en onderhouden gedurende de gehele levenscyclus van het AI-systeem"
            },
            {
              "id": "HR-P2",
              "title": "Datagovernance",
              "ref": "Art. 10",
              "summary": "Ervoor zorgen dat trainings-, validatie- en testgegevens voldoen aan kwaliteitscriteria"
            },
            {
              "id": "HR-P3",
              "title": "Technische documentatie",
              "ref": "Art. 11, Annex IV",
              "summary": "Technische documentatie opstellen vóór het in de handel brengen en deze bijwerken"
            },
            {
              "id": "HR-P4",
              "title": "Automatische registratie",
              "ref": "Art. 12",
              "summary": "Ervoor zorgen dat het AI-systeem over mogelijkheden voor automatische registratie beschikt"
            },
            {
              "id": "HR-P5",
              "title": "Transparantie en informatieverstrekking",
              "ref": "Art. 13",
              "summary": "Ervoor zorgen dat het systeem transparant is en gebruiksinstructies aan gebruiksverantwoordelijken verstrekken"
            },
            {
              "id": "HR-P6",
              "title": "Menselijk toezicht",
              "ref": "Art. 14",
              "summary": "Het systeem zodanig ontwerpen dat effectief menselijk toezicht mogelijk is"
            },
            {
              "id": "HR-P7",
              "title": "Nauwkeurigheid, robuustheid en cyberveiligheid",
              "ref": "Art. 15",
              "summary": "Het systeem ontwerpen en ontwikkelen om passende niveaus van nauwkeurigheid, robuustheid en cyberveiligheid te bereiken"
            },
            {
              "id": "HR-P8",
              "title": "Kwaliteitsmanagementsysteem",
              "ref": "Art. 17",
              "summary": "Een kwaliteitsmanagementsysteem opzetten"
            },
            {
              "id": "HR-P9",
              "title": "Conformiteitsbeoordeling",
              "ref": "Art. 43, Annex VI/VII",
              "summary": "De toepasselijke conformiteitsbeoordeling uitvoeren vóór het in de handel brengen"
            },
            {
              "id": "HR-P10",
              "title": "EU-conformiteitsverklaring",
              "ref": "Art. 47, Annex V",
              "summary": "De EU-conformiteitsverklaring opstellen en de CE-markering aanbrengen"
            },
            {
              "id": "HR-P11",
              "title": "Registratie in de EU-databank",
              "ref": "Art. 48",
              "summary": "Het systeem registreren in de EU-databank vóór het in de handel brengen"
            },
            {
              "id": "HR-P12",
              "title": "Corrigerende maatregelen en terugroeping",
              "ref": "Art. 49, Annex VIII",
              "summary": "Corrigerende maatregelen nemen, gebruiksverantwoordelijken en autoriteiten informeren over non-conformiteit"
            },
            {
              "id": "HR-P13",
              "title": "Samenwerking met autoriteiten",
              "ref": "Art. 72",
              "summary": "Samenwerken met de bevoegde nationale autoriteiten en gevraagde informatie verstrekken"
            },
            {
              "id": "HR-P14",
              "title": "Toegankelijkheid",
              "ref": "Art. 73",
              "summary": "Voldoen aan de toegankelijkheidsvereisten overeenkomstig de toepasselijke richtlijnen"
            },
            {
              "id": "HR-P15",
              "title": "Monitoring na het in de handel brengen",
              "ref": "Art. 18",
              "summary": "Een evenredig monitoringsysteem na het in de handel brengen opzetten"
            },
            {
              "id": "P-ACC",
              "title": "Toegankelijkheid (Art. 16(k))",
              "ref": "Art. 16(j)",
              "summary": "Ervoor zorgen dat het AI-systeem voldoet aan de toegankelijkheidsvereisten"
            }
          ],
          "obligations_deployer_high_risk": [
            {
              "id": "HR-D1",
              "title": "Gebruik volgens instructies",
              "ref": "Art. 26(1)",
              "summary": "Het systeem gebruiken in overeenstemming met de gebruiksinstructies van de aanbieder"
            },
            {
              "id": "HR-D2",
              "title": "Menselijk toezicht",
              "ref": "Art. 26(2)",
              "summary": "Ervoor zorgen dat de personen belast met menselijk toezicht bekwaam en bevoegd zijn"
            },
            {
              "id": "HR-D3",
              "title": "Relevantie invoergegevens",
              "ref": "Art. 26(4)",
              "summary": "Zorgen voor de relevantie van invoergegevens met betrekking tot het beoogde doel van het systeem"
            },
            {
              "id": "HR-D4",
              "title": "Monitoring van de werking",
              "ref": "Art. 26(5)",
              "summary": "De werking van het systeem monitoren op basis van de gebruiksinstructies"
            },
            {
              "id": "HR-D5",
              "title": "Bewaring van logbestanden",
              "ref": "Art. 26(6)",
              "summary": "De automatisch gegenereerde logbestanden bewaren voor zover deze onder hun controle vallen"
            },
            {
              "id": "HR-D6",
              "title": "Informatie aan werknemers",
              "ref": "Art. 26(7)",
              "summary": "De werknemersvertegenwoordigers en betrokken werknemers informeren over het gebruik van het systeem"
            },
            {
              "id": "HR-D7",
              "title": "Samenwerking met autoriteiten",
              "ref": "Art. 26(8)",
              "summary": "Samenwerken met de bevoegde nationale autoriteiten"
            },
            {
              "id": "HR-D8",
              "title": "Effectbeoordeling grondrechten (FRIA)",
              "ref": "Art. 27",
              "summary": "Een effectbeoordeling grondrechten uitvoeren vóór de inzet (Art. 27). Opmerking: Art. 27(10) sluit systemen van bijlage III, punt 2 (kritieke infrastructuur) uit van deze verplichting."
            },
            {
              "id": "HR-D9",
              "title": "Registratie van gebruik in de EU-databank",
              "ref": "Art. 49",
              "summary": "Het gebruik van het systeem registreren in de EU-databank"
            },
            {
              "id": "D-DPIA",
              "title": "Gegevensbeschermingseffectbeoordeling (DPIA)",
              "ref": "Art. 26(10)",
              "summary": "Een DPIA uitvoeren overeenkomstig Verordening (EU) 2016/679 of Richtlijn (EU) 2016/680"
            }
          ],
          "obligations_importer": [
            {
              "id": "HR-I1",
              "title": "Conformiteit verifiëren",
              "ref": "Art. 23",
              "summary": "Controleren of de aanbieder de conformiteitsbeoordeling heeft uitgevoerd en de technische documentatie heeft opgesteld"
            }
          ],
          "obligations_distributor": [
            {
              "id": "HR-Dist1",
              "title": "Conformiteit verifiëren",
              "ref": "Art. 24",
              "summary": "Controleren of het systeem de CE-markering draagt en vergezeld gaat van de vereiste documentatie"
            }
          ],
          "obligations_all_gpai": [
            {
              "id": "GPAI-1",
              "title": "Technische documentatie",
              "ref": "Art. 53(1)(a), Annex XI",
              "summary": "De technische documentatie van het GPAI-model opstellen en bijhouden"
            },
            {
              "id": "GPAI-2",
              "title": "Informatie aan downstream-aanbieders",
              "ref": "Art. 53(1)(b), Annex XII",
              "summary": "Informatie en documentatie verstrekken aan downstream AI-systeemaanbieders"
            },
            {
              "id": "GPAI-3",
              "title": "Auteursrechtbeleid",
              "ref": "Art. 53(1)(c)",
              "summary": "Een beleid opstellen ter naleving van het EU-auteursrecht"
            },
            {
              "id": "GPAI-4",
              "title": "Samenvatting trainingsgegevens",
              "ref": "Art. 53(1)(d)",
              "summary": "Een voldoende gedetailleerde samenvatting van de trainingsgegevens opstellen en publiceren"
            }
          ],
          "obligations_systemic_risk": [
            {
              "id": "GPAI-SR1",
              "title": "Modelevaluatie",
              "ref": "Art. 55(1)(a)",
              "summary": "Modelevaluaties uitvoeren, inclusief vijandige tests"
            },
            {
              "id": "GPAI-SR2",
              "title": "Beoordeling en beperking van systeemrisico's",
              "ref": "Art. 55(1)(b)",
              "summary": "Mogelijke systeemrisico's beoordelen en beperken"
            },
            {
              "id": "GPAI-SR3",
              "title": "Incidentrapportage",
              "ref": "Art. 55(1)(c)",
              "summary": "Ernstige incidenten volgen, documenteren en melden bij het AI-bureau en nationale autoriteiten"
            },
            {
              "id": "GPAI-SR4",
              "title": "Cyberbeveiligingsbescherming",
              "ref": "Art. 55(1)(d)",
              "summary": "Een adequaat niveau van cyberbeveiligingsbescherming voor het GPAI-model waarborgen"
            }
          ],
          "obligations_notification": [
            "id",
            "title",
            "ref",
            "summary"
          ],
          "obligations_transparency_cumulative": {
            "note": "Art. 50 transparantieverplichtingen apply CUMULATIVELY with hoog risico requirements (Art. 50(6)). Check the transparency_obligations attribute for applicable Art. 50 obligations.",
            "possible_obligations": [
              {
                "id": "HR-T1",
                "title": "AI-interactie-openbaarmaking",
                "ref": "Art. 50(1)",
                "condition": "chatbot_disclosure",
                "who": "Aanbieder",
                "summary": "Het systeem ontwerpen om personen te informeren dat zij met een AI communiceren (tenzij dit uit de context duidelijk is)"
              },
              {
                "id": "HR-T2",
                "title": "Markering van synthetische inhoud",
                "ref": "Art. 50(2)",
                "condition": "synthetic_content_marking",
                "who": "Aanbieder",
                "summary": "Synthetische inhoud (afbeeldingen, audio, video) markeren in een machineleesbaar formaat"
              },
              {
                "id": "HR-T3",
                "title": "Openbaarmaking emotieherkenning / biometrie",
                "ref": "Art. 50(3)",
                "condition": "emotion_biometric_disclosure",
                "who": "Gebruiksverantwoordelijke",
                "summary": "Personen informeren over het gebruik van emotieherkennings- of biometrische categoriseringssystemen"
              },
              {
                "id": "HR-T4",
                "title": "Deepfake-openbaarmaking",
                "ref": "Art. 50(4)",
                "condition": "deepfake_disclosure",
                "who": "Gebruiksverantwoordelijke",
                "summary": "Bekendmaken dat de inhoud kunstmatig is gegenereerd of gemanipuleerd (deepfakes)"
              },
              {
                "id": "HR-T5",
                "title": "Openbaarmaking van AI-gegenereerde tekst",
                "ref": "Art. 50(4)",
                "condition": "ai_generated_text_disclosure",
                "who": "Gebruiksverantwoordelijke",
                "summary": "Vermelden dat de tekst door AI is gegenereerd wanneer deze wordt gepubliceerd om het publiek te informeren"
              }
            ]
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: AI Act + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual conformiteitsbeoordeling"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is hoog risico under Art. 6(1)",
                "A single notified body can handle both MDR and AI Act conformiteitsbeoordeling",
                "Clinical evaluation data may satisfy some AI Act requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: AI Act implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: AI Act overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is hoog risico under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers AI Act requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring hoog risico category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is hoog risico under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-hoog risico",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector hoog risico classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT hoog risico (enhances existing safety, doesn't replace it)",
                "Smart grid management may be hoog risico if it has direct safety impact — case-by-case"
              ]
            }
          },
          "fria_guidance": {
            "title": "FRIA Guidance",
            "documents": [
              "G3: Practical guide to FRIA (DIHR/ECNL, 2025)"
            ],
            "who_must_conduct": [
              "Public authorities / bodies governed by public law",
              "Private entities providing essential public services",
              "Banking/insurance companies using AI under Annex III points 5(b)-(c)"
            ],
            "process": "5 phases: Planning & Scoping, Assess & Mitigate, Deployment Decision, Monitoring, Consulting Affected Groups"
          }
        },
        "de": {
          "title": "HOCHRISIKO-KI-System + GPAI-Modellanbieter",
          "summary": "Ihr KI-System ist als HOCHRISIKO gemäß der EU-KI-Verordnung eingestuft, UND Sie sind auch Anbieter eines KI-Modells mit allgemeinem Verwendungszweck. Beide Pflichtenkataloge gelten kumulativ.",
          "note": "GPAI-Pflichten gelten ab dem 2. August 2025 (bereits in Kraft). Die Pflichten für Hochrisiko-Systeme haben ihren eigenen Zeitplan.",
          "verdict": "HOCHRISIKO + GPAI",
          "recommendations": [
            "GPAI-Modells already placed on the market before 2 August 2025 must comply with GPAI obligations by 2 August 2027 (Art. 111(3)).",
            "Other EU legislation may apply alongside or independently of the KI-Gesetz, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty_high_risk": "Up to EUR 15 million or 3% of worldwide annual turnover",
          "penalty_prohibited": "Up to EUR 35 million or 7% of worldwide annual turnover",
          "penalty_gpai": "Up to EUR 15 million or 3% of worldwide annual turnover (Commission-imposed)",
          "effective_date_gpai": "2. August 2025",
          "effective_date_high_risk_annex_i": "2. August 2027",
          "effective_date_high_risk_annex_iii": "2. August 2026",
          "ai_literacy": {
            "title": "KI-Kompetenz (Art. 4)",
            "ref": "Art. 4",
            "date": "2. Februar 2025",
            "summary": "Alle Betreiber müssen sicherstellen, dass Mitarbeiter, die mit KI-Systemen arbeiten, über ausreichende KI-Kompetenz verfügen. Dies gilt seit dem 2. Februar 2025."
          },
          "note_delegated_acts": {
            "title": "Delegierte Rechtsakte — Aktualisierungen der Hochrisiko-Liste",
            "ref": "Art. 7(1)",
            "summary": "Die Europäische Kommission ist ermächtigt, delegierte Rechtsakte zu erlassen, um die Liste der Hochrisiko-KI-Systeme in Anhang III durch Hinzufügen oder Ändern von Anwendungsfällen zu aktualisieren. Überwachen Sie Aktualisierungen, da diese die Klassifizierung Ihres Systems beeinflussen können."
          },
          "obligations_provider_high_risk": [
            {
              "id": "HR-P1",
              "title": "Risikomanagementsystem",
              "ref": "Art. 9",
              "summary": "Ein Risikomanagementsystem während des gesamten Lebenszyklus des KI-Systems einrichten und aufrechterhalten"
            },
            {
              "id": "HR-P2",
              "title": "Daten-Governance",
              "ref": "Art. 10",
              "summary": "Sicherstellen, dass Trainings-, Validierungs- und Testdaten die Qualitätskriterien erfüllen"
            },
            {
              "id": "HR-P3",
              "title": "Technische Dokumentation",
              "ref": "Art. 11, Annex IV",
              "summary": "Technische Dokumentation vor dem Inverkehrbringen erstellen und aktuell halten"
            },
            {
              "id": "HR-P4",
              "title": "Automatische Protokollierung",
              "ref": "Art. 12",
              "summary": "Sicherstellen, dass das KI-System über automatische Protokollierungsfunktionen verfügt"
            },
            {
              "id": "HR-P5",
              "title": "Transparenz und Informationsbereitstellung",
              "ref": "Art. 13",
              "summary": "Sicherstellen, dass das System transparent ist, und den Betreibern Gebrauchsanweisungen bereitstellen"
            },
            {
              "id": "HR-P6",
              "title": "Menschliche Aufsicht",
              "ref": "Art. 14",
              "summary": "Das System so zu gestalten, dass eine wirksame menschliche Aufsicht ermöglicht wird"
            },
            {
              "id": "HR-P7",
              "title": "Genauigkeit, Robustheit und Cybersicherheit",
              "ref": "Art. 15",
              "summary": "Das System so zu konzipieren und zu entwickeln, dass angemessene Genauigkeit, Robustheit und Cybersicherheit erreicht werden"
            },
            {
              "id": "HR-P8",
              "title": "Qualitätsmanagementsystem",
              "ref": "Art. 17",
              "summary": "Ein Qualitätsmanagementsystem einrichten"
            },
            {
              "id": "HR-P9",
              "title": "Konformitätsbewertung",
              "ref": "Art. 43, Annex VI/VII",
              "summary": "Die anwendbare Konformitätsbewertung vor dem Inverkehrbringen durchführen"
            },
            {
              "id": "HR-P10",
              "title": "EU-Konformitätserklärung",
              "ref": "Art. 47, Annex V",
              "summary": "Die EU-Konformitätserklärung erstellen und die CE-Kennzeichnung anbringen"
            },
            {
              "id": "HR-P11",
              "title": "Registrierung in der EU-Datenbank",
              "ref": "Art. 48",
              "summary": "Das System vor dem Inverkehrbringen in der EU-Datenbank registrieren"
            },
            {
              "id": "HR-P12",
              "title": "Korrekturmaßnahmen und Rückruf",
              "ref": "Art. 49, Annex VIII",
              "summary": "Korrekturmaßnahmen ergreifen, Betreiber und Behörden über Nichtkonformität informieren"
            },
            {
              "id": "HR-P13",
              "title": "Zusammenarbeit mit Behörden",
              "ref": "Art. 72",
              "summary": "Mit den zuständigen nationalen Behörden zusammenarbeiten und angeforderte Informationen bereitstellen"
            },
            {
              "id": "HR-P14",
              "title": "Barrierefreiheit",
              "ref": "Art. 73",
              "summary": "Barrierefreiheitsanforderungen gemäß den geltenden Richtlinien einhalten"
            },
            {
              "id": "HR-P15",
              "title": "Überwachung nach dem Inverkehrbringen",
              "ref": "Art. 18",
              "summary": "Ein verhältnismäßiges Überwachungssystem nach dem Inverkehrbringen einrichten"
            },
            {
              "id": "P-ACC",
              "title": "Barrierefreiheit (Art. 16(k))",
              "ref": "Art. 16(j)",
              "summary": "Sicherstellen, dass das KI-System die Barrierefreiheitsanforderungen erfüllt"
            }
          ],
          "obligations_deployer_high_risk": [
            {
              "id": "HR-D1",
              "title": "Nutzung gemäß Anweisungen",
              "ref": "Art. 26(1)",
              "summary": "Das System gemäß den Gebrauchsanweisungen des Anbieters verwenden"
            },
            {
              "id": "HR-D2",
              "title": "Menschliche Aufsicht",
              "ref": "Art. 26(2)",
              "summary": "Sicherstellen, dass die mit der menschlichen Aufsicht betrauten Personen kompetent und befugt sind"
            },
            {
              "id": "HR-D3",
              "title": "Relevanz der Eingabedaten",
              "ref": "Art. 26(4)",
              "summary": "Die Relevanz der Eingabedaten im Hinblick auf den Zweck des Systems sicherstellen"
            },
            {
              "id": "HR-D4",
              "title": "Überwachung des Betriebs",
              "ref": "Art. 26(5)",
              "summary": "Den Betrieb des Systems auf Grundlage der Gebrauchsanweisungen überwachen"
            },
            {
              "id": "HR-D5",
              "title": "Aufbewahrung von Protokollen",
              "ref": "Art. 26(6)",
              "summary": "Die automatisch generierten Protokolle aufbewahren, soweit diese unter ihrer Kontrolle stehen"
            },
            {
              "id": "HR-D6",
              "title": "Information der Arbeitnehmer",
              "ref": "Art. 26(7)",
              "summary": "Arbeitnehmervertreter und betroffene Arbeitnehmer über die Nutzung des Systems informieren"
            },
            {
              "id": "HR-D7",
              "title": "Zusammenarbeit mit Behörden",
              "ref": "Art. 26(8)",
              "summary": "Mit den zuständigen nationalen Behörden zusammenarbeiten"
            },
            {
              "id": "HR-D8",
              "title": "Grundrechte-Folgenabschätzung (FRIA)",
              "ref": "Art. 27",
              "summary": "Vor dem Einsatz eine Grundrechte-Folgenabschätzung durchführen (Art. 27). Hinweis: Art. 27(10) nimmt Systeme nach Anhang III Nr. 2 (kritische Infrastruktur) von dieser Pflicht aus."
            },
            {
              "id": "HR-D9",
              "title": "Nutzungsregistrierung in der EU-Datenbank",
              "ref": "Art. 49",
              "summary": "Die Nutzung des Systems in der EU-Datenbank registrieren"
            },
            {
              "id": "D-DPIA",
              "title": "Datenschutz-Folgenabschätzung (DSFA)",
              "ref": "Art. 26(10)",
              "summary": "Eine DSFA gemäß Verordnung (EU) 2016/679 oder Richtlinie (EU) 2016/680 durchführen"
            }
          ],
          "obligations_importer": [
            {
              "id": "HR-I1",
              "title": "Konformität überprüfen",
              "ref": "Art. 23",
              "summary": "Überprüfen, dass der Anbieter die Konformitätsbewertung durchgeführt und die technische Dokumentation erstellt hat"
            }
          ],
          "obligations_distributor": [
            {
              "id": "HR-Dist1",
              "title": "Konformität überprüfen",
              "ref": "Art. 24",
              "summary": "Überprüfen, dass das System die CE-Kennzeichnung trägt und die erforderliche Dokumentation beiliegt"
            }
          ],
          "obligations_all_gpai": [
            {
              "id": "GPAI-1",
              "title": "Technische Dokumentation",
              "ref": "Art. 53(1)(a), Annex XI",
              "summary": "Die technische Dokumentation des GPAI-Modells erstellen und pflegen"
            },
            {
              "id": "GPAI-2",
              "title": "Information an nachgelagerte Anbieter",
              "ref": "Art. 53(1)(b), Annex XII",
              "summary": "Informationen und Dokumentation an nachgelagerte KI-Systemanbieter bereitstellen"
            },
            {
              "id": "GPAI-3",
              "title": "Urheberrechtspolitik",
              "ref": "Art. 53(1)(c)",
              "summary": "Eine Richtlinie zur Einhaltung des EU-Urheberrechts aufstellen"
            },
            {
              "id": "GPAI-4",
              "title": "Zusammenfassung der Trainingsdaten",
              "ref": "Art. 53(1)(d)",
              "summary": "Eine hinreichend detaillierte Zusammenfassung der Trainingsdaten erstellen und veröffentlichen"
            }
          ],
          "obligations_systemic_risk": [
            {
              "id": "GPAI-SR1",
              "title": "Modellbewertung",
              "ref": "Art. 55(1)(a)",
              "summary": "Modellbewertungen durchführen, einschließlich adversarialer Tests"
            },
            {
              "id": "GPAI-SR2",
              "title": "Bewertung und Minderung systemischer Risiken",
              "ref": "Art. 55(1)(b)",
              "summary": "Mögliche systemische Risiken bewerten und mindern"
            },
            {
              "id": "GPAI-SR3",
              "title": "Meldung von Vorfällen",
              "ref": "Art. 55(1)(c)",
              "summary": "Schwerwiegende Vorfälle verfolgen, dokumentieren und dem KI-Büro und den nationalen Behörden melden"
            },
            {
              "id": "GPAI-SR4",
              "title": "Cybersicherheitsschutz",
              "ref": "Art. 55(1)(d)",
              "summary": "Ein angemessenes Maß an Cybersicherheitsschutz für das GPAI-Modell gewährleisten"
            }
          ],
          "obligations_notification": [
            "id",
            "title",
            "ref",
            "summary"
          ],
          "obligations_transparency_cumulative": {
            "note": "Art. 50 Transparenzpflichten apply CUMULATIVELY with Hochrisiko requirements (Art. 50(6)). Check the transparency_obligations attribute for applicable Art. 50 obligations.",
            "possible_obligations": [
              {
                "id": "HR-T1",
                "title": "KI-Interaktionsoffenlegung",
                "ref": "Art. 50(1)",
                "condition": "chatbot_disclosure",
                "who": "Anbieter",
                "summary": "Das System so zu gestalten, dass Personen darüber informiert werden, dass sie mit einer KI interagieren (sofern dies nicht aus dem Kontext ersichtlich ist)"
              },
              {
                "id": "HR-T2",
                "title": "Kennzeichnung synthetischer Inhalte",
                "ref": "Art. 50(2)",
                "condition": "synthetic_content_marking",
                "who": "Anbieter",
                "summary": "Synthetische Inhalte (Bilder, Audio, Video) in einem maschinenlesbaren Format kennzeichnen"
              },
              {
                "id": "HR-T3",
                "title": "Offenlegung von Emotionserkennung / Biometrie",
                "ref": "Art. 50(3)",
                "condition": "emotion_biometric_disclosure",
                "who": "Betreiber",
                "summary": "Personen über die Verwendung von Emotionserkennungs- oder biometrischen Kategorisierungssystemen informieren"
              },
              {
                "id": "HR-T4",
                "title": "Deepfake-Offenlegung",
                "ref": "Art. 50(4)",
                "condition": "deepfake_disclosure",
                "who": "Betreiber",
                "summary": "Offenlegen, dass der Inhalt künstlich erzeugt oder manipuliert wurde (Deepfakes)"
              },
              {
                "id": "HR-T5",
                "title": "Offenlegung von KI-generiertem Text",
                "ref": "Art. 50(4)",
                "condition": "ai_generated_text_disclosure",
                "who": "Betreiber",
                "summary": "Angeben, dass der Text von KI generiert wurde, wenn er veröffentlicht wird, um die Öffentlichkeit zu informieren"
              }
            ]
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: KI-Gesetz + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual Konformitätsbewertung"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is Hochrisiko under Art. 6(1)",
                "A single notified body can handle both MDR and KI-Gesetz Konformitätsbewertung",
                "Clinical evaluation data may satisfy some KI-Gesetz requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: KI-Gesetz implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: KI-Gesetz overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is Hochrisiko under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers KI-Gesetz requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring Hochrisiko category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is Hochrisiko under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-Hochrisiko",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector Hochrisiko classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT Hochrisiko (enhances existing safety, doesn't replace it)",
                "Smart grid management may be Hochrisiko if it has direct safety impact — case-by-case"
              ]
            }
          },
          "fria_guidance": {
            "title": "FRIA Guidance",
            "documents": [
              "G3: Practical guide to FRIA (DIHR/ECNL, 2025)"
            ],
            "who_must_conduct": [
              "Public authorities / bodies governed by public law",
              "Private entities providing essential public services",
              "Banking/insurance companies using AI under Annex III points 5(b)-(c)"
            ],
            "process": "5 phases: Planning & Scoping, Assess & Mitigate, Deployment Decision, Monitoring, Consulting Affected Groups"
          }
        },
        "es": {
          "title": "Sistema de IA de ALTO RIESGO + Proveedor de modelo GPAI",
          "summary": "Su sistema de IA está clasificado como de ALTO RIESGO en virtud del Reglamento Europeo de IA, Y usted también es proveedor de un modelo de IA de uso general. Ambos conjuntos de obligaciones se aplican acumulativamente.",
          "note": "Las obligaciones GPAI se aplican a partir del 2 de agosto de 2025 (ya en vigor). Las obligaciones de alto riesgo tienen su propio calendario.",
          "verdict": "ALTO RIESGO + GPAI",
          "recommendations": [
            "modelo GPAIs already placed on the market before 2 August 2025 must comply with GPAI obligations by 2 August 2027 (Art. 111(3)).",
            "Other EU legislation may apply alongside or independently of the Ley de IA, including: GDPR (data protection), DSA (intermediary liability, Art. 2(5)), consumer protection and product safety rules (Art. 2(9)), and Member States may have more favourable worker protection laws (Art. 2(11))."
          ],
          "penalty_high_risk": "Up to EUR 15 million or 3% of worldwide annual turnover",
          "penalty_prohibited": "Up to EUR 35 million or 7% of worldwide annual turnover",
          "penalty_gpai": "Up to EUR 15 million or 3% of worldwide annual turnover (Commission-imposed)",
          "effective_date_gpai": "2 de agosto de 2025",
          "effective_date_high_risk_annex_i": "2 de agosto de 2027",
          "effective_date_high_risk_annex_iii": "2 de agosto de 2026",
          "ai_literacy": {
            "title": "Alfabetización en IA (Art. 4)",
            "ref": "Art. 4",
            "date": "2 de febrero de 2025",
            "summary": "Todos los operadores deben garantizar que el personal que trabaja con sistemas de IA tenga un nivel suficiente de alfabetización en IA. Esto se aplica desde el 2 de febrero de 2025."
          },
          "note_delegated_acts": {
            "title": "Actos delegados — Actualizaciones de la lista de alto riesgo",
            "ref": "Art. 7(1)",
            "summary": "La Comisión Europea está facultada para adoptar actos delegados para actualizar la lista de sistemas de IA de alto riesgo del anexo III, añadiendo o modificando casos de uso. Supervise las actualizaciones, ya que pueden afectar la clasificación de su sistema."
          },
          "obligations_provider_high_risk": [
            {
              "id": "HR-P1",
              "title": "Sistema de gestión de riesgos",
              "ref": "Art. 9",
              "summary": "Establecer y mantener un sistema de gestión de riesgos a lo largo del ciclo de vida del sistema de IA"
            },
            {
              "id": "HR-P2",
              "title": "Gobernanza de datos",
              "ref": "Art. 10",
              "summary": "Garantizar que los datos de entrenamiento, validación y prueba cumplan los criterios de calidad"
            },
            {
              "id": "HR-P3",
              "title": "Documentación técnica",
              "ref": "Art. 11, Annex IV",
              "summary": "Elaborar la documentación técnica antes de la comercialización y mantenerla actualizada"
            },
            {
              "id": "HR-P4",
              "title": "Registro automático",
              "ref": "Art. 12",
              "summary": "Garantizar que el sistema de IA disponga de capacidades de registro automático"
            },
            {
              "id": "HR-P5",
              "title": "Transparencia e información",
              "ref": "Art. 13",
              "summary": "Asegurar que el sistema sea transparente y proporcionar instrucciones de uso a los implementadores"
            },
            {
              "id": "HR-P6",
              "title": "Supervisión humana",
              "ref": "Art. 14",
              "summary": "Diseñar el sistema de manera que permita una supervisión humana efectiva"
            },
            {
              "id": "HR-P7",
              "title": "Precisión, robustez y ciberseguridad",
              "ref": "Art. 15",
              "summary": "Diseñar y desarrollar el sistema para alcanzar niveles apropiados de precisión, robustez y ciberseguridad"
            },
            {
              "id": "HR-P8",
              "title": "Sistema de gestión de calidad",
              "ref": "Art. 17",
              "summary": "Implementar un sistema de gestión de calidad"
            },
            {
              "id": "HR-P9",
              "title": "Evaluación de conformidad",
              "ref": "Art. 43, Annex VI/VII",
              "summary": "Realizar la evaluación de conformidad aplicable antes de la comercialización"
            },
            {
              "id": "HR-P10",
              "title": "Declaración de conformidad de la UE",
              "ref": "Art. 47, Annex V",
              "summary": "Elaborar la declaración de conformidad de la UE y colocar el marcado CE"
            },
            {
              "id": "HR-P11",
              "title": "Registro en la base de datos de la UE",
              "ref": "Art. 48",
              "summary": "Registrar el sistema en la base de datos de la UE antes de su comercialización"
            },
            {
              "id": "HR-P12",
              "title": "Medidas correctivas y retirada",
              "ref": "Art. 49, Annex VIII",
              "summary": "Adoptar medidas correctivas, informar a los implementadores y autoridades de cualquier no conformidad"
            },
            {
              "id": "HR-P13",
              "title": "Cooperación con las autoridades",
              "ref": "Art. 21",
              "summary": "Cooperar con las autoridades nacionales competentes y proporcionar la información solicitada"
            },
            {
              "id": "HR-P14",
              "title": "Accesibilidad",
              "ref": "Art. 16(l)",
              "summary": "Cumplir los requisitos de accesibilidad de conformidad con las directivas aplicables"
            },
            {
              "id": "HR-P15",
              "title": "Vigilancia poscomercialización",
              "ref": "Art. 72",
              "summary": "Establecer un sistema de vigilancia poscomercialización proporcionado"
            },
            {
              "id": "P-ACC",
              "title": "Accesibilidad (Art. 16(l))",
              "ref": "Art. 16(l)",
              "summary": "Garantizar que el sistema de IA cumpla los requisitos de accesibilidad"
            }
          ],
          "obligations_deployer_high_risk": [
            {
              "id": "HR-D1",
              "title": "Uso conforme a las instrucciones",
              "ref": "Art. 26(1)",
              "summary": "Utilizar el sistema de conformidad con las instrucciones de uso del proveedor"
            },
            {
              "id": "HR-D2",
              "title": "Supervisión humana",
              "ref": "Art. 26(2)",
              "summary": "Asegurar que las personas encargadas de la supervisión humana sean competentes y estén autorizadas"
            },
            {
              "id": "HR-D3",
              "title": "Pertinencia de los datos de entrada",
              "ref": "Art. 26(4)",
              "summary": "Garantizar la pertinencia de los datos de entrada respecto a la finalidad del sistema"
            },
            {
              "id": "HR-D4",
              "title": "Supervisión del funcionamiento",
              "ref": "Art. 26(5)",
              "summary": "Supervisar el funcionamiento del sistema con base en las instrucciones de uso"
            },
            {
              "id": "HR-D5",
              "title": "Conservación de registros",
              "ref": "Art. 26(6)",
              "summary": "Conservar los registros generados automáticamente en la medida en que estén bajo su control"
            },
            {
              "id": "HR-D6",
              "title": "Información a los trabajadores",
              "ref": "Art. 26(7)",
              "summary": "Informar a los representantes de los trabajadores y a los trabajadores afectados sobre el uso del sistema"
            },
            {
              "id": "HR-D7",
              "title": "Cooperación con las autoridades",
              "ref": "Art. 26(12)",
              "summary": "Cooperar con las autoridades nacionales competentes"
            },
            {
              "id": "HR-D8",
              "title": "Evaluación de impacto en derechos fundamentales (FRIA)",
              "ref": "Art. 27",
              "summary": "Realizar una evaluación de impacto en derechos fundamentales antes del despliegue (Art. 27). Nota: el Art. 27(1) excluye los sistemas del anexo III, punto 2 (infraestructuras críticas) de esta obligación."
            },
            {
              "id": "HR-D9",
              "title": "Registro de uso en la base de datos de la UE",
              "ref": "Art. 49",
              "summary": "Registrar el uso del sistema en la base de datos de la UE"
            },
            {
              "id": "D-DPIA",
              "title": "Evaluación de impacto relativa a la protección de datos (EIPD)",
              "ref": "Art. 26(9)",
              "summary": "Realizar una EIPD de conformidad con el Reglamento (UE) 2016/679 o la Directiva (UE) 2016/680"
            }
          ],
          "obligations_importer": [
            {
              "id": "HR-I1",
              "title": "Verificar la conformidad",
              "ref": "Art. 23",
              "summary": "Verificar que el proveedor ha realizado la evaluación de conformidad y elaborado la documentación técnica"
            }
          ],
          "obligations_distributor": [
            {
              "id": "HR-Dist1",
              "title": "Verificar la conformidad",
              "ref": "Art. 24",
              "summary": "Verificar que el sistema lleva el marcado CE y va acompañado de la documentación requerida"
            }
          ],
          "obligations_all_gpai": [
            {
              "id": "GPAI-1",
              "title": "Documentación técnica",
              "ref": "Art. 53(1)(a), Annex XI",
              "summary": "Elaborar y mantener la documentación técnica del modelo GPAI"
            },
            {
              "id": "GPAI-2",
              "title": "Información a proveedores posteriores",
              "ref": "Art. 53(1)(b), Annex XII",
              "summary": "Proporcionar información y documentación a los proveedores de sistemas de IA posteriores"
            },
            {
              "id": "GPAI-3",
              "title": "Política de derechos de autor",
              "ref": "Art. 53(1)(c)",
              "summary": "Establecer una política de cumplimiento del derecho de autor de la UE"
            },
            {
              "id": "GPAI-4",
              "title": "Resumen de datos de entrenamiento",
              "ref": "Art. 53(1)(d)",
              "summary": "Elaborar y publicar un resumen suficientemente detallado de los datos de entrenamiento"
            }
          ],
          "obligations_systemic_risk": [
            {
              "id": "GPAI-SR1",
              "title": "Evaluación del modelo",
              "ref": "Art. 55(1)(a)",
              "summary": "Realizar evaluaciones del modelo, incluidas pruebas adversarias"
            },
            {
              "id": "GPAI-SR2",
              "title": "Evaluación y mitigación de riesgos sistémicos",
              "ref": "Art. 55(1)(b)",
              "summary": "Evaluar y mitigar los posibles riesgos sistémicos"
            },
            {
              "id": "GPAI-SR3",
              "title": "Notificación de incidentes",
              "ref": "Art. 55(1)(c)",
              "summary": "Seguir, documentar y notificar los incidentes graves a la Oficina de IA y a las autoridades nacionales"
            },
            {
              "id": "GPAI-SR4",
              "title": "Protección de ciberseguridad",
              "ref": "Art. 55(1)(d)",
              "summary": "Garantizar un nivel adecuado de protección de ciberseguridad para el modelo GPAI"
            }
          ],
          "obligations_notification": [
            "id",
            "title",
            "ref",
            "summary"
          ],
          "obligations_transparency_cumulative": {
            "note": "Art. 50 obligaciones de transparencia apply CUMULATIVELY with alto riesgo requirements (Art. 50(6)). Check the transparency_obligations attribute for applicable Art. 50 obligations.",
            "possible_obligations": [
              {
                "id": "HR-T1",
                "title": "Divulgación de interacción con IA",
                "ref": "Art. 50(1)",
                "condition": "chatbot_disclosure",
                "who": "Proveedor",
                "summary": "Diseñar el sistema para informar a las personas de que interactúan con una IA (salvo que sea evidente por el contexto)"
              },
              {
                "id": "HR-T2",
                "title": "Marcado de contenido sintético",
                "ref": "Art. 50(2)",
                "condition": "synthetic_content_marking",
                "who": "Proveedor",
                "summary": "Marcar las salidas de contenido sintético (imágenes, audio, vídeo) en un formato legible por máquina"
              },
              {
                "id": "HR-T3",
                "title": "Divulgación de reconocimiento de emociones / biometría",
                "ref": "Art. 50(3)",
                "condition": "emotion_biometric_disclosure",
                "who": "Implementador",
                "summary": "Informar a las personas del uso de sistemas de reconocimiento de emociones o categorización biométrica"
              },
              {
                "id": "HR-T4",
                "title": "Divulgación de deepfake",
                "ref": "Art. 50(4)",
                "condition": "deepfake_disclosure",
                "who": "Implementador",
                "summary": "Divulgar que el contenido ha sido generado o manipulado artificialmente (deepfakes)"
              },
              {
                "id": "HR-T5",
                "title": "Divulgación de texto generado por IA",
                "ref": "Art. 50(4)",
                "condition": "ai_generated_text_disclosure",
                "who": "Implementador",
                "summary": "Indicar que el texto ha sido generado por IA cuando se publique para informar al público"
              }
            ]
          },
          "sector_specific_guidance": {
            "healthcare_medical_devices": {
              "title": "Sector-Specific Guidance: Healthcare / Medical Devices",
              "documents": [
                "D1: Ley de IA + MDR/IVDR Interplay FAQ (MDCG, 2025) — authoritative guidance on medical device AI classification and dual evaluación de conformidad"
              ],
              "key_points": [
                "Medical device AI requiring third-party CA under MDR/IVDR is alto riesgo under Art. 6(1)",
                "A single notified body can handle both MDR and Ley de IA evaluación de conformidad",
                "Clinical evaluation data may satisfy some Ley de IA requirements (Art. 9)"
              ]
            },
            "financial_services": {
              "title": "Sector-Specific Guidance: Banking / Financial Services",
              "documents": [
                "E1: Ley de IA implications for banking/payments (EBA, 2025)",
                "E2: AI in investment services — MiFID II guidance (ESMA, 2024)",
                "E5: Ley de IA overview for financial services (Eurofi, 2024)"
              ],
              "key_points": [
                "Credit scoring / creditworthiness AI is alto riesgo under Annex III, point 5(b)",
                "Existing banking regulation (CRR/CRD, DORA) partially covers Ley de IA requirements but gaps exist in data governance, human oversight, and accuracy/robustness",
                "Fraud detection AI is explicitly excluded from the credit scoring alto riesgo category"
              ]
            },
            "insurance": {
              "title": "Sector-Specific Guidance: Insurance",
              "documents": [
                "E4: Insurance AI governance opinion (EIOPA, 2025)"
              ],
              "key_points": [
                "Life/health insurance risk assessment and pricing AI is alto riesgo under Annex III, point 5(c)",
                "EIOPA recommends risk-based governance for ALL insurance AI, including non-alto riesgo",
                "Fairness metrics required: demographic parity, equalized odds, predictive parity"
              ]
            },
            "energy": {
              "title": "Sector-Specific Guidance: Energy / Critical Infrastructure",
              "documents": [
                "F1: Energy sector alto riesgo classification consultation (Eurelectric, 2025)",
                "F2: AI and the energy sector briefing (EPRS, 2025)"
              ],
              "key_points": [
                "'Safety component' should be limited to systems that directly impact infrastructure safety",
                "Predictive maintenance typically NOT alto riesgo (enhances existing safety, doesn't replace it)",
                "Smart grid management may be alto riesgo if it has direct safety impact — case-by-case"
              ]
            }
          },
          "fria_guidance": {
            "title": "FRIA Guidance",
            "documents": [
              "G3: Practical guide to FRIA (DIHR/ECNL, 2025)"
            ],
            "who_must_conduct": [
              "Public authorities / bodies governed by public law",
              "Private entities providing essential public services",
              "Banking/insurance companies using AI under Annex III points 5(b)-(c)"
            ],
            "process": "5 phases: Planning & Scoping, Assess & Mitigate, Deployment Decision, Monitoring, Consulting Affected Groups"
          }
        }
      }
    }
  },
  "timeline": {
    "entry_into_force": "1 August 2024",
    "milestones": [
      {
        "date": "2 February 2025",
        "description": "Prohibited practices (Art. 5) + AI literacy (Art. 4) apply",
        "status": "active"
      },
      {
        "date": "2 August 2025",
        "description": "GPAI model obligations (Art. 51-56), governance (Ch. VII), penalties (Ch. XII, except Art. 101) apply",
        "status": "active"
      },
      {
        "date": "2 August 2026",
        "description": "General date of application — High-risk Annex III, transparency (Art. 50), all remaining provisions apply",
        "status": "upcoming"
      },
      {
        "date": "2 August 2027",
        "description": "High-risk Annex I (product safety) obligations apply",
        "status": "upcoming"
      },
      {
        "date": "31 December 2030",
        "description": "Large-scale IT systems (Annex X) compliance deadline",
        "status": "upcoming"
      }
    ]
  },
  "penalties_summary": {
    "prohibited_practices": {
      "amount": "Up to €35,000,000 or 7% worldwide annual turnover",
      "ref": "Art. 99(3)"
    },
    "high_risk_and_transparency": {
      "amount": "Up to €15,000,000 or 3% worldwide annual turnover",
      "ref": "Art. 99(4)"
    },
    "incorrect_information": {
      "amount": "Up to €7,500,000 or 1% worldwide annual turnover",
      "ref": "Art. 99(5)"
    },
    "sme_provision": {
      "description": "For SMEs including start-ups, each fine is capped at the lower of the percentage or the fixed amount",
      "ref": "Art. 99(6)"
    },
    "eu_institutions": {
      "prohibited": "Up to €1,500,000",
      "other": "Up to €750,000",
      "ref": "Art. 100"
    },
    "gpai_providers": {
      "amount": "Up to €15,000,000 or 3% of worldwide annual turnover, whichever is higher (Commission-imposed)",
      "ref": "Art. 101"
    }
  }
}