From 8a476c504aaed59178fb34402929f764d04ca4ce Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Wed, 25 Mar 2026 15:55:05 +0100 Subject: [PATCH 001/157] feat(validation): add v1 framework structure, schemas, and central config Validation framework v1 directory layout alongside existing release-automation assets (shared schemas, validate-release-plan.py interface frozen). New schemas: common findings model, rule metadata definitions, central validation config. Module placeholders for engines, bundling, context, post-filter, and output pipeline. Initial validation-config.yaml with ReleaseTest as first advisory-stage repo. --- validation/bundling/__init__.py | 3 + validation/config/validation-config.yaml | 16 +++ validation/context/__init__.py | 3 + validation/engines/__init__.py | 3 + validation/output/__init__.py | 3 + validation/postfilter/__init__.py | 3 + validation/rules/README.md | 23 +++ validation/schemas/findings-schema.yaml | 81 +++++++++++ validation/schemas/rule-metadata-schema.yaml | 132 ++++++++++++++++++ .../schemas/validation-config-schema.yaml | 56 ++++++++ validation/scripts/validate-release-plan.py | 7 + 11 files changed, 330 insertions(+) create mode 100644 validation/bundling/__init__.py create mode 100644 validation/config/validation-config.yaml create mode 100644 validation/context/__init__.py create mode 100644 validation/engines/__init__.py create mode 100644 validation/output/__init__.py create mode 100644 validation/postfilter/__init__.py create mode 100644 validation/rules/README.md create mode 100644 validation/schemas/findings-schema.yaml create mode 100644 validation/schemas/rule-metadata-schema.yaml create mode 100644 validation/schemas/validation-config-schema.yaml diff --git a/validation/bundling/__init__.py b/validation/bundling/__init__.py new file mode 100644 index 00000000..ecf543a5 --- /dev/null +++ b/validation/bundling/__init__.py @@ -0,0 +1,3 @@ +# Bundling pipeline. +# External $ref resolution via Redocly CLI, source map generation, +# and cache sync validation. diff --git a/validation/config/validation-config.yaml b/validation/config/validation-config.yaml new file mode 100644 index 00000000..bf552f3f --- /dev/null +++ b/validation/config/validation-config.yaml @@ -0,0 +1,16 @@ +# Central validation configuration. +# Read by the reusable workflow on every run. +# Schema: validation/schemas/validation-config-schema.yaml + +version: 1 + +defaults: + stage: disabled + +fork_owners: + - hdamker + - rartych + +repositories: + ReleaseTest: + stage: advisory diff --git a/validation/context/__init__.py b/validation/context/__init__.py new file mode 100644 index 00000000..9187ed0b --- /dev/null +++ b/validation/context/__init__.py @@ -0,0 +1,3 @@ +# Context building. +# Assembles the unified validation context from branch type, trigger, +# release-plan.yaml, PR metadata, and central config. diff --git a/validation/engines/__init__.py b/validation/engines/__init__.py new file mode 100644 index 00000000..37d0d99b --- /dev/null +++ b/validation/engines/__init__.py @@ -0,0 +1,3 @@ +# Validation engine adapters. +# Each adapter invokes its engine and normalizes output to the common +# findings model (see schemas/findings-schema.yaml). diff --git a/validation/output/__init__.py b/validation/output/__init__.py new file mode 100644 index 00000000..2068a35f --- /dev/null +++ b/validation/output/__init__.py @@ -0,0 +1,3 @@ +# Output pipeline. +# Formats findings for workflow summary, check annotations, PR comments, +# commit status, and diagnostic artifacts. diff --git a/validation/postfilter/__init__.py b/validation/postfilter/__init__.py new file mode 100644 index 00000000..0cba252a --- /dev/null +++ b/validation/postfilter/__init__.py @@ -0,0 +1,3 @@ +# Post-filter pipeline. +# Applies rule metadata lookup, applicability evaluation, conditional +# severity resolution, and profile-based blocking decisions. diff --git a/validation/rules/README.md b/validation/rules/README.md new file mode 100644 index 00000000..d6b17cd3 --- /dev/null +++ b/validation/rules/README.md @@ -0,0 +1,23 @@ +# Rule Metadata + +Rule definitions mapping engine-level checks to framework-level applicability, +conditional severity, and fix guidance. + +Schema: [../schemas/rule-metadata-schema.yaml](../schemas/rule-metadata-schema.yaml) + +## Files + +- `spectral-rules.yaml` — Spectral rule metadata (WP-06.14) +- `gherkin-rules.yaml` — gherkin-lint rule metadata (WP-06.14) +- `python-rules.yaml` — Python check rule metadata (WP-06.14) + +## ID Assignment + +Rule IDs are three-digit, zero-padded, sequentially assigned: + +- `001`–`099`: Spectral rules +- `100`–`149`: gherkin-lint rules +- `150`–`199`: Python checks +- `200`+: reserved for future engines + +Once assigned, an ID is never reused even if the rule is retired. diff --git a/validation/schemas/findings-schema.yaml b/validation/schemas/findings-schema.yaml new file mode 100644 index 00000000..b8bc9924 --- /dev/null +++ b/validation/schemas/findings-schema.yaml @@ -0,0 +1,81 @@ +# JSON Schema for the common findings model. +# Every validation engine (Spectral, yamllint, gherkin-lint, Python checks) +# normalizes its output into this format before the post-filter stage. + +$schema: "https://json-schema.org/draft/2020-12/schema" +title: CAMARA Validation Finding +description: > + A single finding produced by a validation engine and consumed by the + post-filter pipeline. Arrays of these objects form the interchange + format between engine adapters and the post-filter/output stages. + +type: object +required: + - engine + - engine_rule + - level + - message + - path + - line + +properties: + rule_id: + type: string + pattern: "^[0-9]{3}$" + description: > + Framework rule ID (three-digit, zero-padded). Looked up from rule + metadata by engine + engine_rule. Auto-assigned for engine rules + without explicit metadata. + + engine: + type: string + enum: [spectral, yamllint, gherkin, python] + description: Validation engine that produced this finding. + + engine_rule: + type: string + description: > + Native rule identifier within the engine (e.g. Spectral rule name, + Python check function name). + + level: + type: string + enum: [error, warn, hint] + description: > + Severity as reported by the engine, before post-filter remapping. + The post-filter may change this based on conditional_level overrides + and may suppress the finding entirely (level "off"). + + message: + type: string + description: Human-readable description of the issue. + + path: + type: string + description: > + File path relative to the repository root + (e.g. "code/API_definitions/quality-on-demand.yaml"). + + line: + type: integer + minimum: 1 + description: > + 1-indexed line number in the source file. For findings on bundled + output, this is mapped back to the source file via the source map. + + column: + type: ["integer", "null"] + minimum: 1 + description: Column number, if available from the engine. + + api_name: + type: ["string", "null"] + description: > + API name this finding belongs to, derived from the spec file path. + Null for repo-level findings (e.g. release-plan checks). + + hint: + type: string + description: > + Actionable fix guidance. From rule metadata if available, otherwise + falls back to the engine message. diff --git a/validation/schemas/rule-metadata-schema.yaml b/validation/schemas/rule-metadata-schema.yaml new file mode 100644 index 00000000..e154ee19 --- /dev/null +++ b/validation/schemas/rule-metadata-schema.yaml @@ -0,0 +1,132 @@ +# JSON Schema for validation rule metadata definitions. +# Each rule maps an engine-level check to framework-level applicability, +# conditional severity, and fix guidance. + +$schema: "https://json-schema.org/draft/2020-12/schema" +title: CAMARA Validation Rule Metadata +description: > + Defines one validation rule within the framework. Rule metadata files + contain an array of these objects. + +type: object +required: + - id + - name + - engine + - engine_rule + - hint + - conditional_level + +properties: + id: + type: string + pattern: "^[0-9]{3}$" + description: > + Stable sequential ID (three-digit, zero-padded). Survives engine + migrations — once assigned, never reused. + + name: + type: string + pattern: "^[a-z][a-z0-9-]+$" + description: Human-readable kebab-case name. + + engine: + type: string + enum: [spectral, yamllint, gherkin, python, manual] + description: > + Engine responsible for producing the finding. "manual" means the + rule is documented but not machine-checkable. + + engine_rule: + type: string + description: > + Native rule identifier within the engine. For Spectral this is the + rule name in .spectral.yaml; for Python checks the function name. + + hint: + type: string + description: Actionable fix guidance shown to the developer. + + applicability: + type: object + description: > + Conditions under which this rule fires. Omitted fields are + unconstrained (always match). All present fields must match (AND). + Array fields use OR logic (value must be in the array). + properties: + branch_types: + type: array + items: + type: string + enum: [main, release, maintenance, feature] + + trigger_types: + type: array + items: + type: string + enum: [pr, dispatch, release-automation, local] + + target_release_type: + type: array + items: + type: string + + target_api_status: + type: array + items: + type: string + + target_api_maturity: + type: array + items: + type: string + enum: [initial, stable] + + api_pattern: + type: array + items: + type: string + enum: [request-response, implicit-subscription, explicit-subscription] + + commonalities_release: + type: string + description: > + Range expression for Commonalities version (e.g. ">=r4.1"). + + is_release_review_pr: + type: boolean + + release_plan_changed: + type: boolean + + additionalProperties: false + + conditional_level: + type: object + required: [default] + properties: + default: + type: string + enum: [error, warn, hint, off] + description: > + Base severity level. "off" means the rule is suppressed in + contexts where no override matches. + + overrides: + type: array + description: > + Ordered list of condition/level pairs. First match wins. + items: + type: object + required: [condition, level] + properties: + condition: + type: object + description: > + Same field vocabulary as applicability. All present + fields must match; array fields use OR logic. + level: + type: string + enum: [error, warn, hint, off] + +additionalProperties: false diff --git a/validation/schemas/validation-config-schema.yaml b/validation/schemas/validation-config-schema.yaml new file mode 100644 index 00000000..97e0821d --- /dev/null +++ b/validation/schemas/validation-config-schema.yaml @@ -0,0 +1,56 @@ +# JSON Schema for the central validation configuration file. +# Located at validation-config.yaml in the tooling repository root. +# Read by the reusable workflow on every run to determine per-repo behavior. + +$schema: "https://json-schema.org/draft/2020-12/schema" +title: CAMARA Validation Config +description: > + Central configuration controlling which repositories have validation + enabled and at what rollout stage. + +type: object +required: + - version + - defaults + +properties: + version: + type: integer + const: 1 + description: Schema version. Allows future evolution. + + defaults: + type: object + required: [stage] + properties: + stage: + type: string + enum: [disabled, advisory, standard] + description: > + Default stage for repositories not listed under "repositories". + disabled = dark (exit immediately), advisory = dispatch only, + standard = PRs and dispatch. + additionalProperties: false + + fork_owners: + type: array + items: + type: string + description: > + GitHub usernames allowed to test in forks. When the workflow runs + in a fork owned by a listed user, the stage is overridden to + "standard" regardless of the upstream repo's configured stage. + + repositories: + type: object + description: Per-repository stage overrides. + additionalProperties: + type: object + required: [stage] + properties: + stage: + type: string + enum: [disabled, advisory, standard] + additionalProperties: false + +additionalProperties: false diff --git a/validation/scripts/validate-release-plan.py b/validation/scripts/validate-release-plan.py index 81f40f78..e2d453b1 100755 --- a/validation/scripts/validate-release-plan.py +++ b/validation/scripts/validate-release-plan.py @@ -3,6 +3,13 @@ CAMARA Release Plan Validator Validates release-plan.yaml files against JSON schema and semantic rules. +Called by the pr_validation workflow via the shared-actions/validate-release-plan/ +action. Do not modify the CLI interface or exit code semantics without checking +that action for compatibility. + +The validation framework v1 will implement its own release-plan checks with a +different architecture (common findings model, context-aware severity, profile +filtering). Do not use this script as a blueprint for v1 checks. Usage: python3 validate-release-plan.py [--schema ] [--check-files] From 574336ecc832c3fe77a224439391d0fd5cf18995 Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Wed, 25 Mar 2026 16:08:55 +0100 Subject: [PATCH 002/157] fix(validation): use engine-prefixed rule IDs (S-001, P-001, G-001) Replaces flat numeric IDs with engine-prefixed format for readability and unlimited per-engine growth. Pattern: {prefix}-{NNN}. --- validation/rules/README.md | 11 ++++++----- validation/schemas/findings-schema.yaml | 8 ++++---- validation/schemas/rule-metadata-schema.yaml | 6 +++--- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/validation/rules/README.md b/validation/rules/README.md index d6b17cd3..06adb8fd 100644 --- a/validation/rules/README.md +++ b/validation/rules/README.md @@ -13,11 +13,12 @@ Schema: [../schemas/rule-metadata-schema.yaml](../schemas/rule-metadata-schema.y ## ID Assignment -Rule IDs are three-digit, zero-padded, sequentially assigned: +Rule IDs use an engine prefix and a three-digit sequential number: -- `001`–`099`: Spectral rules -- `100`–`149`: gherkin-lint rules -- `150`–`199`: Python checks -- `200`+: reserved for future engines +- `S-nnn`: Spectral rules (e.g. `S-001`, `S-042`) +- `P-nnn`: Python checks (e.g. `P-001`, `P-012`) +- `G-nnn`: gherkin-lint rules (e.g. `G-001`) +- `Y-nnn`: yamllint rules (e.g. `Y-001`) +- `M-nnn`: manual rules — documented but not machine-checkable Once assigned, an ID is never reused even if the rule is retired. diff --git a/validation/schemas/findings-schema.yaml b/validation/schemas/findings-schema.yaml index b8bc9924..74201f63 100644 --- a/validation/schemas/findings-schema.yaml +++ b/validation/schemas/findings-schema.yaml @@ -21,11 +21,11 @@ required: properties: rule_id: type: string - pattern: "^[0-9]{3}$" + pattern: "^[A-Z]-[0-9]{3}$" description: > - Framework rule ID (three-digit, zero-padded). Looked up from rule - metadata by engine + engine_rule. Auto-assigned for engine rules - without explicit metadata. + Framework rule ID (engine prefix + three-digit number, e.g. "S-042"). + Looked up from rule metadata by engine + engine_rule. Auto-assigned + for engine rules without explicit metadata. engine: type: string diff --git a/validation/schemas/rule-metadata-schema.yaml b/validation/schemas/rule-metadata-schema.yaml index e154ee19..d10ffeb3 100644 --- a/validation/schemas/rule-metadata-schema.yaml +++ b/validation/schemas/rule-metadata-schema.yaml @@ -20,10 +20,10 @@ required: properties: id: type: string - pattern: "^[0-9]{3}$" + pattern: "^[A-Z]-[0-9]{3}$" description: > - Stable sequential ID (three-digit, zero-padded). Survives engine - migrations — once assigned, never reused. + Stable ID with engine prefix and three-digit number (e.g. "S-042"). + Once assigned, never reused even if the rule is retired. name: type: string From b3db22760772775fe4bfce46231c7318f69129c9 Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Wed, 25 Mar 2026 17:11:33 +0100 Subject: [PATCH 003/157] feat(validation): add central config loader and stage gate Implement config_gate module that reads validation-config.yaml, validates it against its JSON Schema (2020-12), and resolves the effective rollout stage for a given repository. Handles fork owner override logic and advisory/disabled gate decisions. Includes 24 unit tests covering schema validation, stage resolution, fork detection, and gate logic for all stage/trigger combinations. --- validation/__init__.py | 2 + validation/config/__init__.py | 2 + validation/config/config_gate.py | 200 ++++++++++++++++++++ validation/tests/__init__.py | 1 + validation/tests/test_config_gate.py | 267 +++++++++++++++++++++++++++ 5 files changed, 472 insertions(+) create mode 100644 validation/__init__.py create mode 100644 validation/config/__init__.py create mode 100644 validation/config/config_gate.py create mode 100644 validation/tests/__init__.py create mode 100644 validation/tests/test_config_gate.py diff --git a/validation/__init__.py b/validation/__init__.py new file mode 100644 index 00000000..e03cec9d --- /dev/null +++ b/validation/__init__.py @@ -0,0 +1,2 @@ +# CAMARA Validation Framework +# See validation/schemas/ for data models, validation/config/ for configuration. diff --git a/validation/config/__init__.py b/validation/config/__init__.py new file mode 100644 index 00000000..11f8a196 --- /dev/null +++ b/validation/config/__init__.py @@ -0,0 +1,2 @@ +# Configuration loading and stage gate resolution. +# See config_gate.py for the public API. diff --git a/validation/config/config_gate.py b/validation/config/config_gate.py new file mode 100644 index 00000000..bee25f27 --- /dev/null +++ b/validation/config/config_gate.py @@ -0,0 +1,200 @@ +"""Central config gate for the CAMARA validation framework. + +Reads validation-config.yaml, validates it against its JSON Schema, +and resolves the effective rollout stage for a given repository. + +Design doc references: + - Section 6.2: central config schema + - Section 8.1 steps 1-7: config gate logic +""" + +from dataclasses import dataclass +from pathlib import Path +from typing import List + +import yaml +from jsonschema import Draft202012Validator + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- + +# Upstream GitHub org names. Repos owned by these orgs are "upstream"; +# all others are forks. +UPSTREAM_ORGS = frozenset({"camaraproject", "GSMA-Open-Gateway"}) + +STAGE_DISABLED = "disabled" +STAGE_ADVISORY = "advisory" +STAGE_STANDARD = "standard" + +# --------------------------------------------------------------------------- +# Exceptions +# --------------------------------------------------------------------------- + + +class ConfigValidationError(Exception): + """Raised when validation-config.yaml fails schema validation.""" + + def __init__(self, errors: List[str]): + self.errors = errors + summary = "; ".join(errors[:3]) + if len(errors) > 3: + summary += f" (and {len(errors) - 3} more)" + super().__init__(f"Invalid validation config: {summary}") + + +# --------------------------------------------------------------------------- +# Result +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class StageGateResult: + """Result of stage gate resolution. + + Attributes: + stage: The resolved stage (disabled, advisory, standard). + should_continue: Whether the validation pipeline should proceed. + reason: Human-readable explanation when should_continue is False. + is_fork: Whether the workflow is running in a fork. + fork_override_applied: Whether the fork owner override changed the stage. + """ + + stage: str + should_continue: bool + reason: str = "" + is_fork: bool = False + fork_override_applied: bool = False + + +# --------------------------------------------------------------------------- +# Config loading +# --------------------------------------------------------------------------- + + +def load_and_validate_config( + config_path: Path, schema_path: Path +) -> dict: + """Load validation-config.yaml and validate against its JSON Schema. + + Args: + config_path: Path to validation-config.yaml. + schema_path: Path to validation-config-schema.yaml. + + Returns: + Parsed and validated config dict. + + Raises: + ConfigValidationError: If the config fails schema validation. + FileNotFoundError: If either file does not exist. + yaml.YAMLError: If the YAML is malformed. + """ + config_data = yaml.safe_load(config_path.read_text(encoding="utf-8")) + if config_data is None: + raise ConfigValidationError(["Config file is empty"]) + + schema = yaml.safe_load(schema_path.read_text(encoding="utf-8")) + validator = Draft202012Validator(schema) + + errors = [] + for error in validator.iter_errors(config_data): + path = ".".join(str(p) for p in error.absolute_path) or "(root)" + errors.append(f"{path}: {error.message}") + + if errors: + raise ConfigValidationError(errors) + + return config_data + + +# --------------------------------------------------------------------------- +# Stage resolution +# --------------------------------------------------------------------------- + + +def resolve_stage( + config: dict, + repo_full_name: str, + repo_owner: str, + trigger_type: str, +) -> StageGateResult: + """Resolve the effective rollout stage for a repository. + + Pure function — no I/O. Implements design doc section 8.1, steps 2-7. + + Args: + config: Validated config dict (output of load_and_validate_config). + repo_full_name: Full GitHub repository name (e.g. "camaraproject/QoD"). + repo_owner: GitHub repository owner (e.g. "camaraproject"). + trigger_type: Raw GitHub event name (e.g. "pull_request", + "workflow_dispatch"). + + Returns: + StageGateResult with the resolved stage and gate decision. + """ + # Step 2: extract repo name (strip owner prefix) + repo_name = repo_full_name.split("/", 1)[-1] + + # Step 3: look up stage (fall back to defaults.stage) + repositories = config.get("repositories") or {} + repo_entry = repositories.get(repo_name) + stage = repo_entry["stage"] if repo_entry else config["defaults"]["stage"] + + # Step 4: fork override + is_fork = repo_owner not in UPSTREAM_ORGS + fork_override_applied = False + + if is_fork: + fork_owners = config.get("fork_owners") or [] + if repo_owner in fork_owners: + stage = STAGE_STANDARD + fork_override_applied = True + + # Steps 5-7: gate decisions + if stage == STAGE_DISABLED: + return StageGateResult( + stage=stage, + should_continue=False, + reason="Validation is not enabled for this repository", + is_fork=is_fork, + fork_override_applied=fork_override_applied, + ) + + if stage == STAGE_ADVISORY and trigger_type == "pull_request": + return StageGateResult( + stage=stage, + should_continue=False, + reason=( + "Validation is in advisory mode " + "\u2014 use workflow_dispatch to run" + ), + is_fork=is_fork, + fork_override_applied=fork_override_applied, + ) + + return StageGateResult( + stage=stage, + should_continue=True, + is_fork=is_fork, + fork_override_applied=fork_override_applied, + ) + + +# --------------------------------------------------------------------------- +# Convenience +# --------------------------------------------------------------------------- + + +def resolve_stage_from_files( + config_path: Path, + schema_path: Path, + repo_full_name: str, + repo_owner: str, + trigger_type: str, +) -> StageGateResult: + """Load config, validate, and resolve stage in one call. + + Composes load_and_validate_config() + resolve_stage(). + """ + config = load_and_validate_config(config_path, schema_path) + return resolve_stage(config, repo_full_name, repo_owner, trigger_type) diff --git a/validation/tests/__init__.py b/validation/tests/__init__.py new file mode 100644 index 00000000..b6a2d933 --- /dev/null +++ b/validation/tests/__init__.py @@ -0,0 +1 @@ +# Validation framework tests. diff --git a/validation/tests/test_config_gate.py b/validation/tests/test_config_gate.py new file mode 100644 index 00000000..1be22da8 --- /dev/null +++ b/validation/tests/test_config_gate.py @@ -0,0 +1,267 @@ +"""Unit tests for validation.config.config_gate.""" + +from pathlib import Path + +import pytest +import yaml + +from validation.config.config_gate import ( + ConfigValidationError, + StageGateResult, + load_and_validate_config, + resolve_stage, +) + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +SCHEMA_PATH = ( + Path(__file__).resolve().parent.parent / "schemas" / "validation-config-schema.yaml" +) + + +@pytest.fixture +def schema_path(): + """Path to the real validation-config schema.""" + return SCHEMA_PATH + + +@pytest.fixture +def sample_config(): + """A realistic validated config dict.""" + return { + "version": 1, + "defaults": {"stage": "disabled"}, + "fork_owners": ["hdamker", "rartych"], + "repositories": { + "ReleaseTest": {"stage": "advisory"}, + "QualityOnDemand": {"stage": "standard"}, + }, + } + + +def _write_yaml(path: Path, data) -> Path: + """Write a Python object as YAML to *path* and return the path.""" + path.write_text(yaml.dump(data, default_flow_style=False), encoding="utf-8") + return path + + +# --------------------------------------------------------------------------- +# TestLoadAndValidateConfig +# --------------------------------------------------------------------------- + + +class TestLoadAndValidateConfig: + """Tests for config file loading and schema validation.""" + + def test_valid_config(self, tmp_path, schema_path, sample_config): + cfg_path = _write_yaml(tmp_path / "config.yaml", sample_config) + result = load_and_validate_config(cfg_path, schema_path) + assert result["version"] == 1 + assert result["defaults"]["stage"] == "disabled" + assert "ReleaseTest" in result["repositories"] + + def test_minimal_config(self, tmp_path, schema_path): + """Only required fields — no fork_owners, no repositories.""" + cfg = {"version": 1, "defaults": {"stage": "disabled"}} + cfg_path = _write_yaml(tmp_path / "config.yaml", cfg) + result = load_and_validate_config(cfg_path, schema_path) + assert result["version"] == 1 + + def test_invalid_version_rejected(self, tmp_path, schema_path): + cfg = {"version": 2, "defaults": {"stage": "disabled"}} + cfg_path = _write_yaml(tmp_path / "config.yaml", cfg) + with pytest.raises(ConfigValidationError) as exc_info: + load_and_validate_config(cfg_path, schema_path) + assert "version" in str(exc_info.value).lower() + + def test_invalid_stage_rejected(self, tmp_path, schema_path): + cfg = {"version": 1, "defaults": {"stage": "blocking"}} + cfg_path = _write_yaml(tmp_path / "config.yaml", cfg) + with pytest.raises(ConfigValidationError): + load_and_validate_config(cfg_path, schema_path) + + def test_missing_defaults_rejected(self, tmp_path, schema_path): + cfg = {"version": 1} + cfg_path = _write_yaml(tmp_path / "config.yaml", cfg) + with pytest.raises(ConfigValidationError) as exc_info: + load_and_validate_config(cfg_path, schema_path) + assert "defaults" in str(exc_info.value).lower() + + def test_extra_properties_rejected(self, tmp_path, schema_path): + cfg = {"version": 1, "defaults": {"stage": "disabled"}, "unknown_key": True} + cfg_path = _write_yaml(tmp_path / "config.yaml", cfg) + with pytest.raises(ConfigValidationError): + load_and_validate_config(cfg_path, schema_path) + + def test_empty_file_rejected(self, tmp_path, schema_path): + cfg_path = tmp_path / "config.yaml" + cfg_path.write_text("", encoding="utf-8") + with pytest.raises(ConfigValidationError) as exc_info: + load_and_validate_config(cfg_path, schema_path) + assert "empty" in str(exc_info.value).lower() + + def test_multiple_errors_collected(self, tmp_path, schema_path): + """Config with several violations — all should be reported.""" + cfg = {"version": 99, "extra": True} # wrong version, missing defaults, extra key + cfg_path = _write_yaml(tmp_path / "config.yaml", cfg) + with pytest.raises(ConfigValidationError) as exc_info: + load_and_validate_config(cfg_path, schema_path) + assert len(exc_info.value.errors) >= 2 + + +# --------------------------------------------------------------------------- +# TestResolveStage +# --------------------------------------------------------------------------- + + +class TestResolveStage: + """Tests for stage resolution logic (pure function, dict input).""" + + def test_known_repo_uses_repo_stage(self, sample_config): + result = resolve_stage( + sample_config, "camaraproject/ReleaseTest", "camaraproject", "pull_request" + ) + assert result.stage == "advisory" + + def test_unknown_repo_falls_back_to_default(self, sample_config): + result = resolve_stage( + sample_config, "camaraproject/UnknownRepo", "camaraproject", "pull_request" + ) + assert result.stage == "disabled" + + def test_disabled_does_not_continue(self, sample_config): + result = resolve_stage( + sample_config, "camaraproject/UnknownRepo", "camaraproject", "pull_request" + ) + assert result.should_continue is False + assert "not enabled" in result.reason + + def test_advisory_pr_does_not_continue(self, sample_config): + result = resolve_stage( + sample_config, "camaraproject/ReleaseTest", "camaraproject", "pull_request" + ) + assert result.should_continue is False + assert "advisory" in result.reason.lower() + + def test_advisory_dispatch_continues(self, sample_config): + result = resolve_stage( + sample_config, + "camaraproject/ReleaseTest", + "camaraproject", + "workflow_dispatch", + ) + assert result.should_continue is True + assert result.stage == "advisory" + + def test_standard_continues(self, sample_config): + result = resolve_stage( + sample_config, + "camaraproject/QualityOnDemand", + "camaraproject", + "pull_request", + ) + assert result.should_continue is True + assert result.stage == "standard" + + def test_fork_listed_owner_overrides_to_standard(self, sample_config): + result = resolve_stage( + sample_config, + "hdamker/QualityOnDemand", + "hdamker", + "pull_request", + ) + assert result.stage == "standard" + assert result.is_fork is True + assert result.fork_override_applied is True + assert result.should_continue is True + + def test_fork_unlisted_owner_keeps_resolved_stage(self, sample_config): + result = resolve_stage( + sample_config, + "unknown-user/QualityOnDemand", + "unknown-user", + "pull_request", + ) + # QualityOnDemand is "standard" in config, but no fork override for + # an unlisted owner — the stage stays as looked up. + assert result.stage == "standard" + assert result.is_fork is True + assert result.fork_override_applied is False + + def test_upstream_owner_not_fork(self, sample_config): + result = resolve_stage( + sample_config, "camaraproject/ReleaseTest", "camaraproject", "workflow_dispatch" + ) + assert result.is_fork is False + assert result.fork_override_applied is False + + def test_gsma_owner_is_upstream(self, sample_config): + result = resolve_stage( + sample_config, + "GSMA-Open-Gateway/SomeAPI", + "GSMA-Open-Gateway", + "pull_request", + ) + assert result.is_fork is False + + def test_repo_name_extracted_from_full_name(self, sample_config): + """'camaraproject/QualityOnDemand' should look up 'QualityOnDemand'.""" + result = resolve_stage( + sample_config, + "camaraproject/QualityOnDemand", + "camaraproject", + "pull_request", + ) + assert result.stage == "standard" + + def test_fork_disabled_unlisted_stays_disabled(self, sample_config): + """Unlisted fork owner of a disabled repo → stays disabled.""" + result = resolve_stage( + sample_config, + "stranger/UnknownRepo", + "stranger", + "workflow_dispatch", + ) + assert result.stage == "disabled" + assert result.should_continue is False + assert result.is_fork is True + assert result.fork_override_applied is False + + def test_fork_disabled_listed_overrides_to_standard(self, sample_config): + """Listed fork owner of a disabled repo → overrides to standard.""" + result = resolve_stage( + sample_config, + "hdamker/UnknownRepo", + "hdamker", + "workflow_dispatch", + ) + assert result.stage == "standard" + assert result.should_continue is True + assert result.fork_override_applied is True + + def test_empty_fork_owners(self): + cfg = { + "version": 1, + "defaults": {"stage": "disabled"}, + "fork_owners": [], + "repositories": {}, + } + result = resolve_stage(cfg, "hdamker/Repo", "hdamker", "workflow_dispatch") + assert result.stage == "disabled" + assert result.fork_override_applied is False + + def test_no_fork_owners_key(self): + cfg = {"version": 1, "defaults": {"stage": "disabled"}, "repositories": {}} + result = resolve_stage(cfg, "hdamker/Repo", "hdamker", "workflow_dispatch") + assert result.stage == "disabled" + assert result.fork_override_applied is False + + def test_no_repositories_key(self): + cfg = {"version": 1, "defaults": {"stage": "standard"}} + result = resolve_stage( + cfg, "camaraproject/AnyRepo", "camaraproject", "pull_request" + ) + assert result.stage == "standard" + assert result.should_continue is True From c0acdf18d693ab747e50ebf1e24d4fc654bbd3d1 Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Wed, 25 Mar 2026 17:30:02 +0100 Subject: [PATCH 004/157] feat(validation): add context building module Implement unified validation context assembly from workflow inputs, release-plan.yaml, and OpenAPI spec files. Includes branch type and trigger type derivation, profile auto-selection, release plan parsing, and API pattern detection (request-response, implicit/explicit subscription). Three focused modules: context_builder (dataclasses + pure derivation), release_plan_parser (Draft 7 schema validation + field extraction), api_pattern_detector (3-tier heuristic from spec paths/callbacks). Includes 47 unit tests covering all derivation paths, graceful degradation for missing files, and dataclass serialization. --- validation/context/__init__.py | 6 + validation/context/api_pattern_detector.py | 90 +++++ validation/context/context_builder.py | 323 ++++++++++++++++++ validation/context/release_plan_parser.py | 126 +++++++ validation/tests/test_api_pattern_detector.py | 104 ++++++ validation/tests/test_context_builder.py | 210 ++++++++++++ validation/tests/test_release_plan_parser.py | 142 ++++++++ 7 files changed, 1001 insertions(+) create mode 100644 validation/context/api_pattern_detector.py create mode 100644 validation/context/context_builder.py create mode 100644 validation/context/release_plan_parser.py create mode 100644 validation/tests/test_api_pattern_detector.py create mode 100644 validation/tests/test_context_builder.py create mode 100644 validation/tests/test_release_plan_parser.py diff --git a/validation/context/__init__.py b/validation/context/__init__.py index 9187ed0b..317f074b 100644 --- a/validation/context/__init__.py +++ b/validation/context/__init__.py @@ -1,3 +1,9 @@ # Context building. # Assembles the unified validation context from branch type, trigger, # release-plan.yaml, PR metadata, and central config. + +from .context_builder import ( # noqa: F401 + ApiContext, + ValidationContext, + build_validation_context, +) diff --git a/validation/context/api_pattern_detector.py b/validation/context/api_pattern_detector.py new file mode 100644 index 00000000..2bab0341 --- /dev/null +++ b/validation/context/api_pattern_detector.py @@ -0,0 +1,90 @@ +"""API pattern detection from OpenAPI spec content. + +Classifies a CAMARA API as request-response, implicit-subscription, +or explicit-subscription by inspecting the OpenAPI paths and operations. + +Design doc references: + - Section 1.4: api_pattern detection logic + - Section 8.3: api_pattern field in context object +""" + +from __future__ import annotations + +import logging +from pathlib import Path + +import yaml + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- + +PATTERN_REQUEST_RESPONSE = "request-response" +PATTERN_IMPLICIT_SUBSCRIPTION = "implicit-subscription" +PATTERN_EXPLICIT_SUBSCRIPTION = "explicit-subscription" + +# --------------------------------------------------------------------------- +# Pure detection +# --------------------------------------------------------------------------- + + +def detect_api_pattern(spec: dict) -> str: + """Detect the API pattern from a parsed OpenAPI spec. + + Three-tier heuristic: + 1. Any path containing ``/subscriptions`` → explicit-subscription + 2. Any operation with a ``callbacks`` key → implicit-subscription + 3. Default → request-response + + Args: + spec: Parsed OpenAPI spec dict (from yaml.safe_load). + + Returns: + One of PATTERN_REQUEST_RESPONSE, PATTERN_IMPLICIT_SUBSCRIPTION, + or PATTERN_EXPLICIT_SUBSCRIPTION. + """ + paths = spec.get("paths") or {} + + # Check 1: explicit subscription endpoints + for path_key in paths: + if "/subscriptions" in path_key: + return PATTERN_EXPLICIT_SUBSCRIPTION + + # Check 2: implicit subscription via callbacks + for path_key, path_item in paths.items(): + if not isinstance(path_item, dict): + continue + for method in ("get", "post", "put", "patch", "delete"): + operation = path_item.get(method) + if isinstance(operation, dict) and "callbacks" in operation: + return PATTERN_IMPLICIT_SUBSCRIPTION + + return PATTERN_REQUEST_RESPONSE + + +# --------------------------------------------------------------------------- +# I/O wrapper +# --------------------------------------------------------------------------- + + +def detect_api_pattern_from_file(spec_path: Path) -> str: + """Load an OpenAPI spec and detect its API pattern. + + Returns ``"request-response"`` if the file is missing or unparseable. + """ + if not spec_path.is_file(): + logger.debug("Spec file not found: %s", spec_path) + return PATTERN_REQUEST_RESPONSE + + try: + data = yaml.safe_load(spec_path.read_text(encoding="utf-8")) + except yaml.YAMLError: + logger.warning("Failed to parse spec file %s", spec_path) + return PATTERN_REQUEST_RESPONSE + + if not isinstance(data, dict): + return PATTERN_REQUEST_RESPONSE + + return detect_api_pattern(data) diff --git a/validation/context/context_builder.py b/validation/context/context_builder.py new file mode 100644 index 00000000..52da7f1b --- /dev/null +++ b/validation/context/context_builder.py @@ -0,0 +1,323 @@ +"""Context building for the CAMARA validation framework. + +Assembles the unified validation context object from workflow inputs, +release-plan.yaml, and OpenAPI spec files. All fields are always present +in the output — downstream consumers never need to handle missing keys. + +Design doc references: + - Section 8.3: context object structure + - Section 8.1 step 4: context assembly + - Section 1.1: trigger and branch type enums + - Section 1.4: derived fields (maturity, api_pattern) +""" + +from __future__ import annotations + +import dataclasses +import logging +import re +from dataclasses import dataclass +from pathlib import Path +from typing import Optional, Tuple + +from .api_pattern_detector import detect_api_pattern_from_file +from .release_plan_parser import load_release_plan + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Constants — branch types +# --------------------------------------------------------------------------- + +BRANCH_MAIN = "main" +BRANCH_RELEASE = "release" +BRANCH_MAINTENANCE = "maintenance" +BRANCH_FEATURE = "feature" + +# --------------------------------------------------------------------------- +# Constants — trigger types +# --------------------------------------------------------------------------- + +TRIGGER_PR = "pr" +TRIGGER_DISPATCH = "dispatch" +TRIGGER_RELEASE_AUTOMATION = "release-automation" +TRIGGER_LOCAL = "local" + +# --------------------------------------------------------------------------- +# Constants — profiles +# --------------------------------------------------------------------------- + +PROFILE_ADVISORY = "advisory" +PROFILE_STANDARD = "standard" +PROFILE_STRICT = "strict" + +_VALID_PROFILES = frozenset({PROFILE_ADVISORY, PROFILE_STANDARD, PROFILE_STRICT}) + +# --------------------------------------------------------------------------- +# Constants — API maturity +# --------------------------------------------------------------------------- + +MATURITY_INITIAL = "initial" +MATURITY_STABLE = "stable" + +# --------------------------------------------------------------------------- +# Dataclasses +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class ApiContext: + """Per-API validation context. + + Attributes: + api_name: Kebab-case API name (e.g. "qos-booking"). + target_api_version: Semantic version from release-plan (e.g. "1.0.0"). + target_api_status: Status from release-plan (draft/alpha/rc/public). + target_api_maturity: Derived — "initial" (0.x) or "stable" (>=1.x). + api_pattern: Detected from spec — request-response, implicit-subscription, + or explicit-subscription. + spec_file: Relative path to the spec file (e.g. + "code/API_definitions/qos-booking.yaml"). + """ + + api_name: str + target_api_version: str + target_api_status: str + target_api_maturity: str + api_pattern: str + spec_file: str + + +@dataclass(frozen=True) +class ValidationContext: + """Unified validation context. All fields always present. + + None values represent absent data (e.g. no release-plan.yaml or + non-PR trigger). + """ + + # Repository identification + repository: str + branch_type: str + trigger_type: str + profile: str + stage: str + + # Release context (from release-plan.yaml; None if absent) + target_release_type: Optional[str] + commonalities_release: Optional[str] + icm_release: Optional[str] + + # PR-specific (None / False for non-PR triggers) + is_release_review_pr: bool + release_plan_changed: Optional[bool] + pr_number: Optional[int] + + # Per-API contexts (empty tuple if no release-plan.yaml) + apis: Tuple[ApiContext, ...] + + # Workflow metadata + workflow_run_url: str + tooling_ref: str + + def to_dict(self) -> dict: + """Serialize to dict with all keys present. + + ``apis`` is converted from a tuple of dataclasses to a list of dicts. + """ + d = dataclasses.asdict(self) + # asdict converts nested dataclasses to dicts but keeps tuples as + # tuples — convert apis to a list for JSON serialization. + d["apis"] = list(d["apis"]) + return d + + +# --------------------------------------------------------------------------- +# Pure derivation functions +# --------------------------------------------------------------------------- + + +def derive_branch_type(branch_name: str) -> str: + """Derive the branch type from a branch name. + + Args: + branch_name: Target branch for PRs (base_ref) or checked-out branch + for dispatch (ref_name). + + Returns: + One of BRANCH_MAIN, BRANCH_RELEASE, BRANCH_MAINTENANCE, BRANCH_FEATURE. + """ + if branch_name == "main": + return BRANCH_MAIN + if branch_name.startswith("release-snapshot/"): + return BRANCH_RELEASE + if branch_name.startswith("maintenance/"): + return BRANCH_MAINTENANCE + return BRANCH_FEATURE + + +def derive_trigger_type(event_name: str, mode: str = "") -> str: + """Map a GitHub event name (+ optional mode) to a trigger type. + + Args: + event_name: ``github.event_name`` value. + mode: Workflow input — ``"pre-snapshot"`` sets release-automation. + + Returns: + One of TRIGGER_PR, TRIGGER_DISPATCH, TRIGGER_RELEASE_AUTOMATION, + TRIGGER_LOCAL. + """ + if mode == "pre-snapshot": + return TRIGGER_RELEASE_AUTOMATION + if event_name == "pull_request": + return TRIGGER_PR + if event_name == "workflow_dispatch": + return TRIGGER_DISPATCH + # Safe fallback for unknown events. + return TRIGGER_DISPATCH + + +def derive_target_branch( + event_name: str, base_ref: str, ref_name: str +) -> str: + """Return the relevant branch name for branch type derivation. + + For PRs the target branch (base_ref) determines which rules apply. + For dispatch the checked-out branch (ref_name) is used. + """ + if event_name == "pull_request": + return base_ref + return ref_name + + +def select_profile( + trigger_type: str, + branch_type: str, + is_release_review_pr: bool, + profile_override: str = "", +) -> str: + """Auto-select the validation profile. + + If *profile_override* is a valid profile name it takes precedence. + + Profile selection table (design doc section 8.1): + dispatch / local → advisory + release-automation → strict + pr + release + review → strict + pr + any other → standard + """ + if profile_override and profile_override in _VALID_PROFILES: + return profile_override + + if trigger_type in (TRIGGER_DISPATCH, TRIGGER_LOCAL): + return PROFILE_ADVISORY + if trigger_type == TRIGGER_RELEASE_AUTOMATION: + return PROFILE_STRICT + # trigger_type == TRIGGER_PR + if branch_type == BRANCH_RELEASE and is_release_review_pr: + return PROFILE_STRICT + return PROFILE_STANDARD + + +def derive_api_maturity(target_api_version: str) -> str: + """Derive API maturity from the semantic version. + + Major version 0 → initial; ≥ 1 → stable. + """ + match = re.match(r"^(\d+)\.", target_api_version) + if match: + major = int(match.group(1)) + return MATURITY_STABLE if major >= 1 else MATURITY_INITIAL + return MATURITY_INITIAL + + +def is_release_review_pr_check(base_ref: str) -> bool: + """True when the PR targets a release-snapshot branch.""" + return base_ref.startswith("release-snapshot/") + + +# --------------------------------------------------------------------------- +# Top-level builder +# --------------------------------------------------------------------------- + + +def build_validation_context( + repo_name: str, + event_name: str, + ref_name: str, + base_ref: str, + mode: str = "", + profile_override: str = "", + stage: str = "", + pr_number: Optional[int] = None, + release_plan_changed: Optional[bool] = None, + repo_path: Optional[Path] = None, + release_plan_schema_path: Optional[Path] = None, + workflow_run_url: str = "", + tooling_ref: str = "", +) -> ValidationContext: + """Assemble the unified validation context. + + Composes all derivation functions and I/O loaders into a single + immutable context object. + """ + # Derive branch and trigger + target_branch = derive_target_branch(event_name, base_ref, ref_name) + branch_type = derive_branch_type(target_branch) + trigger_type = derive_trigger_type(event_name, mode) + + # Release review detection (only meaningful for PRs) + is_review = is_release_review_pr_check(base_ref) if base_ref else False + + # Profile selection + profile = select_profile(trigger_type, branch_type, is_review, profile_override) + + # Release plan + target_release_type: Optional[str] = None + commonalities_release: Optional[str] = None + icm_release: Optional[str] = None + api_contexts: Tuple[ApiContext, ...] = () + + if repo_path is not None and release_plan_schema_path is not None: + plan_path = repo_path / "release-plan.yaml" + release_plan = load_release_plan(plan_path, release_plan_schema_path) + if release_plan is not None: + target_release_type = release_plan.target_release_type + commonalities_release = release_plan.commonalities_release + icm_release = release_plan.icm_release + + # Build per-API contexts + api_list = [] + for api in release_plan.apis: + spec_file = f"code/API_definitions/{api.api_name}.yaml" + spec_path = repo_path / spec_file + api_pattern = detect_api_pattern_from_file(spec_path) + maturity = derive_api_maturity(api.target_api_version) + api_list.append( + ApiContext( + api_name=api.api_name, + target_api_version=api.target_api_version, + target_api_status=api.target_api_status, + target_api_maturity=maturity, + api_pattern=api_pattern, + spec_file=spec_file, + ) + ) + api_contexts = tuple(api_list) + + return ValidationContext( + repository=repo_name, + branch_type=branch_type, + trigger_type=trigger_type, + profile=profile, + stage=stage, + target_release_type=target_release_type, + commonalities_release=commonalities_release, + icm_release=icm_release, + is_release_review_pr=is_review, + release_plan_changed=release_plan_changed, + pr_number=pr_number, + apis=api_contexts, + workflow_run_url=workflow_run_url, + tooling_ref=tooling_ref, + ) diff --git a/validation/context/release_plan_parser.py b/validation/context/release_plan_parser.py new file mode 100644 index 00000000..c38bf4f5 --- /dev/null +++ b/validation/context/release_plan_parser.py @@ -0,0 +1,126 @@ +"""Release-plan.yaml parser for the CAMARA validation framework. + +Loads and extracts fields from the repository's release-plan.yaml file. +Returns None when the file is absent — the context builder treats this +as "no release context" (null fields, empty APIs list). + +Design doc references: + - Section 8.3: release context fields in the validation context object + - release-plan-schema.yaml: authoritative schema (Draft 7) +""" + +from __future__ import annotations + +import logging +from dataclasses import dataclass +from pathlib import Path +from typing import Optional, Tuple + +import yaml +from jsonschema import Draft7Validator + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Dataclasses +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class ReleasePlanApi: + """Single API entry from release-plan.yaml.""" + + api_name: str + target_api_version: str + target_api_status: str + + +@dataclass(frozen=True) +class ReleasePlanData: + """Parsed release-plan.yaml content.""" + + target_release_type: str + commonalities_release: Optional[str] + icm_release: Optional[str] + apis: Tuple[ReleasePlanApi, ...] + + +# --------------------------------------------------------------------------- +# Pure parsing +# --------------------------------------------------------------------------- + + +def parse_release_plan(data: dict) -> ReleasePlanData: + """Extract validation-relevant fields from a parsed release-plan dict. + + This is a pure function — no I/O. Expects a dict that has already + been loaded from YAML (optionally schema-validated). + """ + repo = data.get("repository", {}) + deps = data.get("dependencies") or {} + + apis_raw = data.get("apis") or [] + apis = tuple( + ReleasePlanApi( + api_name=a["api_name"], + target_api_version=a["target_api_version"], + target_api_status=a["target_api_status"], + ) + for a in apis_raw + ) + + return ReleasePlanData( + target_release_type=repo.get("target_release_type", "none"), + commonalities_release=deps.get("commonalities_release"), + icm_release=deps.get("identity_consent_management_release"), + apis=apis, + ) + + +# --------------------------------------------------------------------------- +# I/O wrapper +# --------------------------------------------------------------------------- + + +def load_release_plan( + plan_path: Path, schema_path: Path +) -> Optional[ReleasePlanData]: + """Load release-plan.yaml, validate, and extract fields. + + Returns None if the file is missing or empty. If schema validation + fails, logs a warning and returns what can be parsed (graceful + degradation). + + Args: + plan_path: Path to release-plan.yaml in the repo checkout. + schema_path: Path to release-plan-schema.yaml. + """ + if not plan_path.is_file(): + logger.debug("release-plan.yaml not found at %s", plan_path) + return None + + try: + data = yaml.safe_load(plan_path.read_text(encoding="utf-8")) + except yaml.YAMLError: + logger.warning("Failed to parse %s as YAML", plan_path) + return None + + if not data: + logger.debug("release-plan.yaml is empty at %s", plan_path) + return None + + # Schema validation (warn on failure, continue with best-effort parse) + try: + schema = yaml.safe_load(schema_path.read_text(encoding="utf-8")) + validator = Draft7Validator(schema) + errors = list(validator.iter_errors(data)) + if errors: + for err in errors[:3]: + path = ".".join(str(p) for p in err.absolute_path) or "(root)" + logger.warning( + "release-plan.yaml validation: %s: %s", path, err.message + ) + except Exception: + logger.warning("Could not validate release-plan.yaml against schema") + + return parse_release_plan(data) diff --git a/validation/tests/test_api_pattern_detector.py b/validation/tests/test_api_pattern_detector.py new file mode 100644 index 00000000..b5a3fac8 --- /dev/null +++ b/validation/tests/test_api_pattern_detector.py @@ -0,0 +1,104 @@ +"""Unit tests for validation.context.api_pattern_detector.""" + +from pathlib import Path + +import pytest +import yaml + +from validation.context.api_pattern_detector import ( + PATTERN_EXPLICIT_SUBSCRIPTION, + PATTERN_IMPLICIT_SUBSCRIPTION, + PATTERN_REQUEST_RESPONSE, + detect_api_pattern, + detect_api_pattern_from_file, +) + + +# --------------------------------------------------------------------------- +# TestDetectApiPattern +# --------------------------------------------------------------------------- + + +class TestDetectApiPattern: + def test_request_response_api(self): + spec = { + "paths": { + "/sessions": {"post": {"operationId": "createSession"}}, + "/sessions/{id}": {"get": {"operationId": "getSession"}}, + } + } + assert detect_api_pattern(spec) == PATTERN_REQUEST_RESPONSE + + def test_explicit_subscription_has_subscriptions_path(self): + spec = { + "paths": { + "/subscriptions": {"post": {"operationId": "createSubscription"}}, + "/subscriptions/{id}": {"delete": {"operationId": "deleteSubscription"}}, + } + } + assert detect_api_pattern(spec) == PATTERN_EXPLICIT_SUBSCRIPTION + + def test_implicit_subscription_has_callbacks(self): + spec = { + "paths": { + "/sessions": { + "post": { + "operationId": "createSession", + "callbacks": { + "sessionNotification": { + "{$request.body#/sink}": { + "post": {"operationId": "notify"} + } + } + }, + } + } + } + } + assert detect_api_pattern(spec) == PATTERN_IMPLICIT_SUBSCRIPTION + + def test_empty_spec(self): + assert detect_api_pattern({}) == PATTERN_REQUEST_RESPONSE + + def test_no_paths(self): + spec = {"info": {"title": "Test"}, "components": {"schemas": {}}} + assert detect_api_pattern(spec) == PATTERN_REQUEST_RESPONSE + + def test_explicit_wins_over_implicit(self): + """When both subscription path and callbacks exist, explicit wins.""" + spec = { + "paths": { + "/subscriptions": {"post": {"operationId": "createSub"}}, + "/sessions": { + "post": { + "operationId": "createSession", + "callbacks": {"cb": {}}, + } + }, + } + } + assert detect_api_pattern(spec) == PATTERN_EXPLICIT_SUBSCRIPTION + + +# --------------------------------------------------------------------------- +# TestDetectApiPatternFromFile +# --------------------------------------------------------------------------- + + +class TestDetectApiPatternFromFile: + def test_real_spec_file(self, tmp_path): + spec = { + "openapi": "3.0.3", + "paths": { + "/subscriptions": {"post": {"operationId": "createSubscription"}} + }, + } + spec_path = tmp_path / "api.yaml" + spec_path.write_text(yaml.dump(spec), encoding="utf-8") + assert detect_api_pattern_from_file(spec_path) == PATTERN_EXPLICIT_SUBSCRIPTION + + def test_missing_file_returns_default(self, tmp_path): + assert ( + detect_api_pattern_from_file(tmp_path / "nonexistent.yaml") + == PATTERN_REQUEST_RESPONSE + ) diff --git a/validation/tests/test_context_builder.py b/validation/tests/test_context_builder.py new file mode 100644 index 00000000..8c00ce81 --- /dev/null +++ b/validation/tests/test_context_builder.py @@ -0,0 +1,210 @@ +"""Unit tests for validation.context.context_builder.""" + +from pathlib import Path + +import pytest + +from validation.context.context_builder import ( + ApiContext, + ValidationContext, + derive_api_maturity, + derive_branch_type, + derive_target_branch, + derive_trigger_type, + is_release_review_pr_check, + select_profile, +) + + +# --------------------------------------------------------------------------- +# TestDeriveBranchType +# --------------------------------------------------------------------------- + + +class TestDeriveBranchType: + def test_main_branch(self): + assert derive_branch_type("main") == "main" + + def test_release_snapshot_branch(self): + assert derive_branch_type("release-snapshot/r4.1") == "release" + + def test_release_snapshot_nested(self): + assert derive_branch_type("release-snapshot/r4.1/alpha") == "release" + + def test_maintenance_branch(self): + assert derive_branch_type("maintenance/4.x") == "maintenance" + + def test_feature_branch(self): + assert derive_branch_type("fix/some-issue") == "feature" + + def test_develop_branch(self): + assert derive_branch_type("develop") == "feature" + + def test_empty_string(self): + assert derive_branch_type("") == "feature" + + +# --------------------------------------------------------------------------- +# TestDeriveTriggerType +# --------------------------------------------------------------------------- + + +class TestDeriveTriggerType: + def test_pull_request(self): + assert derive_trigger_type("pull_request") == "pr" + + def test_workflow_dispatch(self): + assert derive_trigger_type("workflow_dispatch") == "dispatch" + + def test_pre_snapshot_mode_overrides_event(self): + assert ( + derive_trigger_type("workflow_dispatch", mode="pre-snapshot") + == "release-automation" + ) + + def test_unknown_event_fallback(self): + assert derive_trigger_type("push") == "dispatch" + + +# --------------------------------------------------------------------------- +# TestSelectProfile +# --------------------------------------------------------------------------- + + +class TestSelectProfile: + def test_dispatch_gets_advisory(self): + assert select_profile("dispatch", "main", False) == "advisory" + + def test_release_automation_gets_strict(self): + assert select_profile("release-automation", "main", False) == "strict" + + def test_pr_release_review_gets_strict(self): + assert select_profile("pr", "release", True) == "strict" + + def test_pr_main_gets_standard(self): + assert select_profile("pr", "main", False) == "standard" + + def test_pr_feature_gets_standard(self): + assert select_profile("pr", "feature", False) == "standard" + + def test_pr_maintenance_gets_standard(self): + assert select_profile("pr", "maintenance", False) == "standard" + + def test_profile_override_wins(self): + assert ( + select_profile("dispatch", "main", False, profile_override="strict") + == "strict" + ) + + def test_invalid_profile_override_ignored(self): + assert ( + select_profile("dispatch", "main", False, profile_override="invalid") + == "advisory" + ) + + def test_local_gets_advisory(self): + assert select_profile("local", "main", False) == "advisory" + + +# --------------------------------------------------------------------------- +# TestDeriveApiMaturity +# --------------------------------------------------------------------------- + + +class TestDeriveApiMaturity: + def test_zero_major_is_initial(self): + assert derive_api_maturity("0.5.0") == "initial" + + def test_one_major_is_stable(self): + assert derive_api_maturity("1.0.0") == "stable" + + def test_high_major_is_stable(self): + assert derive_api_maturity("3.2.1") == "stable" + + def test_unparseable_defaults_initial(self): + assert derive_api_maturity("invalid") == "initial" + + +# --------------------------------------------------------------------------- +# TestIsReleaseReviewPr +# --------------------------------------------------------------------------- + + +class TestIsReleaseReviewPr: + def test_release_snapshot_target(self): + assert is_release_review_pr_check("release-snapshot/r4.1") is True + + def test_main_target(self): + assert is_release_review_pr_check("main") is False + + def test_empty_base_ref(self): + assert is_release_review_pr_check("") is False + + +# --------------------------------------------------------------------------- +# TestDeriveTargetBranch +# --------------------------------------------------------------------------- + + +class TestDeriveTargetBranch: + def test_pr_uses_base_ref(self): + assert derive_target_branch("pull_request", "main", "feature/x") == "main" + + def test_dispatch_uses_ref_name(self): + assert derive_target_branch("workflow_dispatch", "", "main") == "main" + + +# --------------------------------------------------------------------------- +# TestValidationContextToDict +# --------------------------------------------------------------------------- + + +class TestValidationContextToDict: + @pytest.fixture + def sample_context(self): + return ValidationContext( + repository="QualityOnDemand", + branch_type="main", + trigger_type="pr", + profile="standard", + stage="standard", + target_release_type="pre-release-rc", + commonalities_release="r4.1", + icm_release=None, + is_release_review_pr=False, + release_plan_changed=True, + pr_number=42, + apis=( + ApiContext( + api_name="qos-booking", + target_api_version="1.0.0", + target_api_status="rc", + target_api_maturity="stable", + api_pattern="request-response", + spec_file="code/API_definitions/qos-booking.yaml", + ), + ), + workflow_run_url="https://github.com/example/run/1", + tooling_ref="abc123", + ) + + def test_all_keys_present(self, sample_context): + d = sample_context.to_dict() + expected_keys = { + "repository", "branch_type", "trigger_type", "profile", "stage", + "target_release_type", "commonalities_release", "icm_release", + "is_release_review_pr", "release_plan_changed", "pr_number", + "apis", "workflow_run_url", "tooling_ref", + } + assert set(d.keys()) == expected_keys + + def test_apis_serialized_as_list(self, sample_context): + d = sample_context.to_dict() + assert isinstance(d["apis"], list) + assert len(d["apis"]) == 1 + assert isinstance(d["apis"][0], dict) + assert d["apis"][0]["api_name"] == "qos-booking" + + def test_none_values_preserved(self, sample_context): + d = sample_context.to_dict() + assert d["icm_release"] is None diff --git a/validation/tests/test_release_plan_parser.py b/validation/tests/test_release_plan_parser.py new file mode 100644 index 00000000..679de737 --- /dev/null +++ b/validation/tests/test_release_plan_parser.py @@ -0,0 +1,142 @@ +"""Unit tests for validation.context.release_plan_parser.""" + +from pathlib import Path + +import pytest +import yaml + +from validation.context.release_plan_parser import ( + ReleasePlanData, + load_release_plan, + parse_release_plan, +) + +SCHEMA_PATH = ( + Path(__file__).resolve().parent.parent / "schemas" / "release-plan-schema.yaml" +) + + +def _write_yaml(path: Path, data) -> Path: + path.write_text(yaml.dump(data, default_flow_style=False), encoding="utf-8") + return path + + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + + +@pytest.fixture +def schema_path(): + return SCHEMA_PATH + + +@pytest.fixture +def full_plan_dict(): + return { + "repository": { + "release_track": "meta-release", + "meta_release": "Spring26", + "target_release_tag": "r4.1", + "target_release_type": "pre-release-rc", + }, + "dependencies": { + "commonalities_release": "r4.2", + "identity_consent_management_release": "r4.3", + }, + "apis": [ + { + "api_name": "quality-on-demand", + "target_api_version": "1.0.0", + "target_api_status": "rc", + }, + { + "api_name": "qos-booking", + "target_api_version": "0.5.0", + "target_api_status": "alpha", + }, + ], + } + + +# --------------------------------------------------------------------------- +# TestParseReleasePlan +# --------------------------------------------------------------------------- + + +class TestParseReleasePlan: + def test_full_plan(self, full_plan_dict): + result = parse_release_plan(full_plan_dict) + assert result.target_release_type == "pre-release-rc" + assert result.commonalities_release == "r4.2" + assert result.icm_release == "r4.3" + assert len(result.apis) == 2 + assert result.apis[0].api_name == "quality-on-demand" + assert result.apis[1].api_name == "qos-booking" + + def test_plan_without_dependencies(self): + data = { + "repository": { + "release_track": "independent", + "target_release_tag": "r4.1", + "target_release_type": "public-release", + }, + "apis": [ + { + "api_name": "some-api", + "target_api_version": "2.0.0", + "target_api_status": "public", + }, + ], + } + result = parse_release_plan(data) + assert result.commonalities_release is None + assert result.icm_release is None + + def test_plan_with_multiple_apis(self, full_plan_dict): + result = parse_release_plan(full_plan_dict) + assert len(result.apis) == 2 + assert result.apis[0].target_api_version == "1.0.0" + assert result.apis[1].target_api_version == "0.5.0" + + def test_plan_with_none_release_type(self): + data = { + "repository": { + "release_track": "independent", + "target_release_tag": None, + "target_release_type": "none", + }, + "apis": [ + { + "api_name": "draft-api", + "target_api_version": "0.1.0", + "target_api_status": "draft", + }, + ], + } + result = parse_release_plan(data) + assert result.target_release_type == "none" + + +# --------------------------------------------------------------------------- +# TestLoadReleasePlan +# --------------------------------------------------------------------------- + + +class TestLoadReleasePlan: + def test_valid_file(self, tmp_path, schema_path, full_plan_dict): + plan_path = _write_yaml(tmp_path / "release-plan.yaml", full_plan_dict) + result = load_release_plan(plan_path, schema_path) + assert result is not None + assert result.target_release_type == "pre-release-rc" + assert len(result.apis) == 2 + + def test_missing_file_returns_none(self, tmp_path, schema_path): + result = load_release_plan(tmp_path / "release-plan.yaml", schema_path) + assert result is None + + def test_empty_file_returns_none(self, tmp_path, schema_path): + plan_path = tmp_path / "release-plan.yaml" + plan_path.write_text("", encoding="utf-8") + result = load_release_plan(plan_path, schema_path) + assert result is None From 2b5b5a08d53b4a4a9b9b2031b10090962f1b9dd2 Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Wed, 25 Mar 2026 21:07:53 +0100 Subject: [PATCH 005/157] feat(validation): add Spectral engine adapter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Invoke Spectral CLI, parse JSON output, and normalize findings into the common findings model. Includes version-specific ruleset selection with fallback, severity mapping (0→error, 1→warn, 2/3→hint), and graceful error handling for missing CLI, timeouts, and runtime errors. 40 unit tests covering pure functions and mocked subprocess invocation. --- validation/engines/__init__.py | 4 + validation/engines/spectral_adapter.py | 335 +++++++++++++++++++ validation/tests/test_spectral_adapter.py | 383 ++++++++++++++++++++++ 3 files changed, 722 insertions(+) create mode 100644 validation/engines/spectral_adapter.py create mode 100644 validation/tests/test_spectral_adapter.py diff --git a/validation/engines/__init__.py b/validation/engines/__init__.py index 37d0d99b..e3aeea2a 100644 --- a/validation/engines/__init__.py +++ b/validation/engines/__init__.py @@ -1,3 +1,7 @@ # Validation engine adapters. # Each adapter invokes its engine and normalizes output to the common # findings model (see schemas/findings-schema.yaml). + +from .spectral_adapter import run_spectral_engine + +__all__ = ["run_spectral_engine"] diff --git a/validation/engines/spectral_adapter.py b/validation/engines/spectral_adapter.py new file mode 100644 index 00000000..3b0bff5a --- /dev/null +++ b/validation/engines/spectral_adapter.py @@ -0,0 +1,335 @@ +"""Spectral engine adapter for the CAMARA validation framework. + +Invokes Spectral CLI on OpenAPI spec files, parses the JSON output, and +normalizes findings into the common findings model. + +Design doc references: + - Section 8.1 step 7: full validation (Spectral invocation) + - Section 7.5: Spectral pre-selection (version-specific rulesets) + - Section 2.2: check areas (Spectral coverage) +""" + +from __future__ import annotations + +import json +import logging +import subprocess +from dataclasses import dataclass +from pathlib import Path, PurePosixPath +from typing import List, Optional + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- + +ENGINE_NAME = "spectral" + +# Spectral severity (integer) -> framework level (string). +# Spectral: 0=error, 1=warn, 2=info, 3=hint. +# Framework collapses info and hint into "hint". +SEVERITY_MAP: dict[int, str] = { + 0: "error", + 1: "warn", + 2: "hint", + 3: "hint", +} + +DEFAULT_SPEC_GLOB = "code/API_definitions/*.yaml" + +# Fallback ruleset when version-specific file is not found. +DEFAULT_RULESET = ".spectral.yaml" + +# Version-line prefix -> ruleset filename. +_VERSION_RULESET_MAP: dict[str, str] = { + "r3": ".spectral-r3.4.yaml", + "r4": ".spectral-r4.yaml", +} + +# Latest version line used when commonalities_release is absent. +_LATEST_VERSION_LINE = "r4" + +# Sentinel rule name for adapter-level errors. +_EXECUTION_ERROR_RULE = "spectral-execution-error" + + +# --------------------------------------------------------------------------- +# Pure functions +# --------------------------------------------------------------------------- + + +def map_severity(spectral_severity: int) -> str: + """Map a Spectral severity integer to a framework level string. + + Args: + spectral_severity: Spectral severity (0=error, 1=warn, 2=info, 3=hint). + + Returns: + Framework level: "error", "warn", or "hint". + + Raises: + KeyError: If *spectral_severity* is not in the range 0-3. + """ + return SEVERITY_MAP[spectral_severity] + + +def derive_api_name(file_path: str) -> Optional[str]: + """Extract the API name from a spec file path. + + Expects paths like ``code/API_definitions/quality-on-demand.yaml``. + Returns the file stem (without extension) as the API name, or ``None`` + for paths that are not under ``API_definitions``. + """ + if not file_path: + return None + parts = PurePosixPath(file_path).parts + try: + idx = parts.index("API_definitions") + except ValueError: + return None + # The file name should follow immediately after API_definitions. + if idx + 1 < len(parts): + return PurePosixPath(parts[idx + 1]).stem + return None + + +def select_ruleset_path( + commonalities_release: Optional[str], + config_dir: Path, +) -> Path: + """Select the Spectral ruleset based on the Commonalities release version. + + Resolution order: + 1. Map *commonalities_release* prefix to a version-specific filename + (e.g. ``r4.1`` -> ``.spectral-r4.yaml``). + 2. If *commonalities_release* is absent or unrecognised, default to the + latest version line (currently r4). + 3. If the version-specific file does not exist on disk, fall back to + ``.spectral.yaml``. + + Args: + commonalities_release: Version string from release-plan.yaml + (e.g. "r4.1", "r3.4") or ``None``. + config_dir: Directory containing Spectral ruleset files. + + Returns: + Absolute path to the selected ruleset file. + """ + # Determine target version line. + version_line = _LATEST_VERSION_LINE + if commonalities_release: + for prefix in _VERSION_RULESET_MAP: + if commonalities_release.startswith(prefix): + version_line = prefix + break + + # Try version-specific ruleset. + ruleset_name = _VERSION_RULESET_MAP[version_line] + candidate = config_dir / ruleset_name + if candidate.is_file(): + return candidate + + # Fallback to default. + fallback = config_dir / DEFAULT_RULESET + logger.info( + "Version-specific ruleset %s not found; falling back to %s", + ruleset_name, + DEFAULT_RULESET, + ) + return fallback + + +def normalize_finding(raw: dict) -> dict: + """Convert one Spectral JSON finding to the common findings model. + + Critical field mapping: + - ``raw["source"]`` -> ``finding["path"]`` (file path, NOT ``raw["path"]`` + which is the JSONPath within the document). + - ``raw["range"]["start"]["line"]`` is 0-indexed; add 1 for the framework. + - ``raw["range"]["start"]["character"]`` is 0-indexed; add 1. + """ + source = raw.get("source", "") + start = raw.get("range", {}).get("start", {}) + + line = start.get("line", 0) + 1 + character = start.get("character") + column = (character + 1) if character is not None else None + + finding: dict = { + "engine": ENGINE_NAME, + "engine_rule": raw.get("code", "unknown"), + "level": map_severity(raw.get("severity", 1)), + "message": raw.get("message", ""), + "path": source, + "line": line, + "api_name": derive_api_name(source), + } + + if column is not None: + finding["column"] = column + + return finding + + +def parse_spectral_output(raw_json: str) -> List[dict]: + """Parse Spectral ``--format json`` stdout into normalised findings. + + Args: + raw_json: Raw JSON string from Spectral stdout. + + Returns: + List of findings conforming to the common findings model. + Returns an empty list if *raw_json* is empty or not valid JSON. + """ + if not raw_json.strip(): + return [] + + try: + data = json.loads(raw_json) + except json.JSONDecodeError as exc: + logger.warning("Failed to parse Spectral JSON output: %s", exc) + return [] + + if not isinstance(data, list): + logger.warning("Spectral output is not a JSON array") + return [] + + findings = [] + for item in data: + try: + findings.append(normalize_finding(item)) + except (KeyError, TypeError) as exc: + logger.warning("Skipping malformed Spectral finding: %s", exc) + return findings + + +# --------------------------------------------------------------------------- +# I/O wrappers +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class SpectralResult: + """Result of a Spectral CLI invocation.""" + + findings: List[dict] + success: bool + error_message: str = "" + + +def run_spectral( + ruleset_path: Path, + spec_patterns: List[str], + cwd: Path, +) -> SpectralResult: + """Invoke Spectral CLI and capture structured output. + + Uses ``--format json`` for machine-readable output. The default + ``--fail-severity error`` means exit 0 for warnings-only and exit 1 + when errors are present — both are normal operation with valid JSON + on stdout. + + Args: + ruleset_path: Path to the Spectral ruleset file. + spec_patterns: Glob patterns for input files (e.g. + ``["code/API_definitions/*.yaml"]``). + cwd: Working directory for the subprocess (normally the repo root + so that ``source`` paths in the output are repo-relative). + + Returns: + :class:`SpectralResult` with parsed findings and status. + """ + cmd = [ + "spectral", + "lint", + "--format", "json", + "--ruleset", str(ruleset_path), + *spec_patterns, + ] + + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + cwd=str(cwd), + timeout=300, + ) + except FileNotFoundError: + return SpectralResult( + findings=[], + success=False, + error_message="Spectral CLI not found — is @stoplight/spectral-cli installed?", + ) + except subprocess.TimeoutExpired: + return SpectralResult( + findings=[], + success=False, + error_message="Spectral timed out after 300 seconds", + ) + + # Exit 0 or 1: normal operation (findings may or may not exist). + if result.returncode in (0, 1): + findings = parse_spectral_output(result.stdout) + return SpectralResult(findings=findings, success=True) + + # Exit 2+: Spectral runtime error. + stderr = result.stderr.strip() if result.stderr else "unknown error" + return SpectralResult( + findings=[], + success=False, + error_message=f"Spectral exited with code {result.returncode}: {stderr}", + ) + + +def _make_error_finding(message: str) -> dict: + """Create an error finding for adapter-level failures.""" + return { + "engine": ENGINE_NAME, + "engine_rule": _EXECUTION_ERROR_RULE, + "level": "error", + "message": message, + "path": "", + "line": 1, + "api_name": None, + } + + +def run_spectral_engine( + repo_path: Path, + config_dir: Path, + commonalities_release: Optional[str] = None, + spec_patterns: Optional[List[str]] = None, +) -> List[dict]: + """Top-level entry point for the orchestrator. + + Selects the appropriate ruleset, invokes Spectral, and returns a list + of findings conforming to the common findings model. On adapter-level + errors (Spectral not installed, runtime error) a single error finding + is returned instead of raising. + + Args: + repo_path: Root of the repository being validated. + config_dir: Directory containing Spectral ruleset files. + commonalities_release: Version string for ruleset selection. + spec_patterns: Override glob patterns (default: + ``["code/API_definitions/*.yaml"]``). + + Returns: + List of finding dicts conforming to ``findings-schema.yaml``. + """ + if spec_patterns is None: + spec_patterns = [DEFAULT_SPEC_GLOB] + + ruleset = select_ruleset_path(commonalities_release, config_dir) + logger.info("Using Spectral ruleset: %s", ruleset) + + result = run_spectral(ruleset, spec_patterns, cwd=repo_path) + + if not result.success: + logger.error("Spectral engine error: %s", result.error_message) + return [_make_error_finding(result.error_message)] + + logger.info("Spectral produced %d finding(s)", len(result.findings)) + return result.findings diff --git a/validation/tests/test_spectral_adapter.py b/validation/tests/test_spectral_adapter.py new file mode 100644 index 00000000..150c2e2e --- /dev/null +++ b/validation/tests/test_spectral_adapter.py @@ -0,0 +1,383 @@ +"""Unit tests for validation.engines.spectral_adapter.""" + +from __future__ import annotations + +import json +import subprocess +from pathlib import Path +from unittest.mock import patch + +import pytest + +from validation.engines.spectral_adapter import ( + DEFAULT_RULESET, + ENGINE_NAME, + SpectralResult, + derive_api_name, + map_severity, + normalize_finding, + parse_spectral_output, + run_spectral, + run_spectral_engine, + select_ruleset_path, +) + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +# A typical Spectral JSON finding (one element of the --format json array). +SAMPLE_SPECTRAL_FINDING = { + "code": "camara-parameter-casing-convention", + "path": ["paths", "/qualityOnDemand", "post"], + "message": "qualityOnDemand is not kebab-case", + "severity": 0, + "source": "code/API_definitions/quality-on-demand.yaml", + "range": { + "start": {"line": 46, "character": 4}, + "end": {"line": 46, "character": 30}, + }, +} + +SAMPLE_SPECTRAL_WARN = { + "code": "camara-path-param-id", + "path": ["paths", "/sessions/{id}", "get", "parameters", "0"], + "message": "Use 'resource_id' instead of just 'id'", + "severity": 1, + "source": "code/API_definitions/qos-booking.yaml", + "range": { + "start": {"line": 120, "character": 8}, + "end": {"line": 120, "character": 20}, + }, +} + +SAMPLE_SPECTRAL_INFO = { + "code": "camara-operationid-casing-convention", + "path": ["paths", "/sessions", "post", "operationId"], + "message": "Operation Id must be in Camel case", + "severity": 2, + "source": "code/API_definitions/quality-on-demand.yaml", + "range": { + "start": {"line": 50, "character": 18}, + "end": {"line": 50, "character": 40}, + }, +} + + +# --------------------------------------------------------------------------- +# TestMapSeverity +# --------------------------------------------------------------------------- + + +class TestMapSeverity: + def test_error(self): + assert map_severity(0) == "error" + + def test_warn(self): + assert map_severity(1) == "warn" + + def test_info_maps_to_hint(self): + assert map_severity(2) == "hint" + + def test_hint_maps_to_hint(self): + assert map_severity(3) == "hint" + + def test_unknown_severity_raises(self): + with pytest.raises(KeyError): + map_severity(99) + + +# --------------------------------------------------------------------------- +# TestDeriveApiName +# --------------------------------------------------------------------------- + + +class TestDeriveApiName: + def test_standard_api_path(self): + assert ( + derive_api_name("code/API_definitions/quality-on-demand.yaml") + == "quality-on-demand" + ) + + def test_another_api(self): + assert ( + derive_api_name("code/API_definitions/qos-booking.yaml") + == "qos-booking" + ) + + def test_test_definitions_returns_none(self): + assert derive_api_name("code/Test_definitions/foo.feature") is None + + def test_repo_level_file_returns_none(self): + assert derive_api_name("release-plan.yaml") is None + + def test_empty_string_returns_none(self): + assert derive_api_name("") is None + + def test_nested_api_definitions(self): + """Handles unusual nesting (takes first file after API_definitions).""" + assert ( + derive_api_name("some/prefix/API_definitions/my-api.yaml") + == "my-api" + ) + + +# --------------------------------------------------------------------------- +# TestSelectRulesetPath +# --------------------------------------------------------------------------- + + +class TestSelectRulesetPath: + def test_r4_release_selects_r4_ruleset(self, tmp_path): + (tmp_path / ".spectral-r4.yaml").touch() + result = select_ruleset_path("r4.1", tmp_path) + assert result.name == ".spectral-r4.yaml" + + def test_r3_release_selects_r3_ruleset(self, tmp_path): + (tmp_path / ".spectral-r3.4.yaml").touch() + result = select_ruleset_path("r3.4", tmp_path) + assert result.name == ".spectral-r3.4.yaml" + + def test_none_defaults_to_latest(self, tmp_path): + (tmp_path / ".spectral-r4.yaml").touch() + result = select_ruleset_path(None, tmp_path) + assert result.name == ".spectral-r4.yaml" + + def test_unrecognised_version_defaults_to_latest(self, tmp_path): + (tmp_path / ".spectral-r4.yaml").touch() + result = select_ruleset_path("r99.0", tmp_path) + assert result.name == ".spectral-r4.yaml" + + def test_version_specific_missing_falls_back(self, tmp_path): + (tmp_path / ".spectral.yaml").touch() + # r4 version-specific not present — fall back to default. + result = select_ruleset_path("r4.1", tmp_path) + assert result.name == ".spectral.yaml" + + def test_all_missing_returns_fallback_path(self, tmp_path): + """Even if no ruleset file exists, returns the fallback path.""" + result = select_ruleset_path("r4.1", tmp_path) + assert result.name == DEFAULT_RULESET + + +# --------------------------------------------------------------------------- +# TestNormalizeFinding +# --------------------------------------------------------------------------- + + +class TestNormalizeFinding: + def test_standard_finding(self): + finding = normalize_finding(SAMPLE_SPECTRAL_FINDING) + assert finding["engine"] == "spectral" + assert finding["engine_rule"] == "camara-parameter-casing-convention" + assert finding["level"] == "error" + assert finding["message"] == "qualityOnDemand is not kebab-case" + assert finding["path"] == "code/API_definitions/quality-on-demand.yaml" + assert finding["line"] == 47 # 0-indexed 46 -> 1-indexed 47 + assert finding["column"] == 5 # 0-indexed 4 -> 1-indexed 5 + assert finding["api_name"] == "quality-on-demand" + + def test_warn_severity(self): + finding = normalize_finding(SAMPLE_SPECTRAL_WARN) + assert finding["level"] == "warn" + assert finding["api_name"] == "qos-booking" + + def test_info_severity_maps_to_hint(self): + finding = normalize_finding(SAMPLE_SPECTRAL_INFO) + assert finding["level"] == "hint" + + def test_missing_character_omits_column(self): + raw = { + "code": "some-rule", + "message": "msg", + "severity": 1, + "source": "code/API_definitions/api.yaml", + "range": {"start": {"line": 10}}, + } + finding = normalize_finding(raw) + assert finding["line"] == 11 + assert "column" not in finding + + def test_rule_id_and_hint_not_set(self): + """Adapter does not assign rule_id or hint — post-filter does.""" + finding = normalize_finding(SAMPLE_SPECTRAL_FINDING) + assert "rule_id" not in finding + assert "hint" not in finding + + +# --------------------------------------------------------------------------- +# TestParseSpectralOutput +# --------------------------------------------------------------------------- + + +class TestParseSpectralOutput: + def test_valid_json_array(self): + raw = json.dumps([SAMPLE_SPECTRAL_FINDING, SAMPLE_SPECTRAL_WARN]) + findings = parse_spectral_output(raw) + assert len(findings) == 2 + assert findings[0]["engine_rule"] == "camara-parameter-casing-convention" + assert findings[1]["engine_rule"] == "camara-path-param-id" + + def test_empty_array(self): + assert parse_spectral_output("[]") == [] + + def test_empty_string(self): + assert parse_spectral_output("") == [] + + def test_whitespace_only(self): + assert parse_spectral_output(" \n ") == [] + + def test_invalid_json_returns_empty(self): + findings = parse_spectral_output("not json at all") + assert findings == [] + + def test_json_object_instead_of_array(self): + findings = parse_spectral_output('{"error": "oops"}') + assert findings == [] + + def test_mixed_severities(self): + raw = json.dumps([ + SAMPLE_SPECTRAL_FINDING, # error + SAMPLE_SPECTRAL_WARN, # warn + SAMPLE_SPECTRAL_INFO, # info -> hint + ]) + findings = parse_spectral_output(raw) + levels = [f["level"] for f in findings] + assert levels == ["error", "warn", "hint"] + + +# --------------------------------------------------------------------------- +# TestRunSpectral +# --------------------------------------------------------------------------- + + +class TestRunSpectral: + @patch("validation.engines.spectral_adapter.subprocess.run") + def test_exit_0_no_findings(self, mock_run, tmp_path): + mock_run.return_value = subprocess.CompletedProcess( + args=[], returncode=0, stdout="[]", stderr="", + ) + result = run_spectral( + tmp_path / ".spectral.yaml", ["*.yaml"], cwd=tmp_path, + ) + assert result.success is True + assert result.findings == [] + assert result.error_message == "" + + @patch("validation.engines.spectral_adapter.subprocess.run") + def test_exit_1_with_findings(self, mock_run, tmp_path): + mock_run.return_value = subprocess.CompletedProcess( + args=[], + returncode=1, + stdout=json.dumps([SAMPLE_SPECTRAL_FINDING]), + stderr="", + ) + result = run_spectral( + tmp_path / ".spectral.yaml", ["*.yaml"], cwd=tmp_path, + ) + assert result.success is True + assert len(result.findings) == 1 + assert result.findings[0]["engine_rule"] == "camara-parameter-casing-convention" + + @patch("validation.engines.spectral_adapter.subprocess.run") + def test_exit_2_runtime_error(self, mock_run, tmp_path): + mock_run.return_value = subprocess.CompletedProcess( + args=[], returncode=2, stdout="", stderr="Error: invalid ruleset", + ) + result = run_spectral( + tmp_path / ".spectral.yaml", ["*.yaml"], cwd=tmp_path, + ) + assert result.success is False + assert "invalid ruleset" in result.error_message + + @patch("validation.engines.spectral_adapter.subprocess.run") + def test_spectral_not_installed(self, mock_run, tmp_path): + mock_run.side_effect = FileNotFoundError("spectral") + result = run_spectral( + tmp_path / ".spectral.yaml", ["*.yaml"], cwd=tmp_path, + ) + assert result.success is False + assert "not found" in result.error_message + + @patch("validation.engines.spectral_adapter.subprocess.run") + def test_spectral_timeout(self, mock_run, tmp_path): + mock_run.side_effect = subprocess.TimeoutExpired(cmd="spectral", timeout=300) + result = run_spectral( + tmp_path / ".spectral.yaml", ["*.yaml"], cwd=tmp_path, + ) + assert result.success is False + assert "timed out" in result.error_message + + @patch("validation.engines.spectral_adapter.subprocess.run") + def test_command_includes_ruleset_and_patterns(self, mock_run, tmp_path): + mock_run.return_value = subprocess.CompletedProcess( + args=[], returncode=0, stdout="[]", stderr="", + ) + ruleset = tmp_path / ".spectral-r4.yaml" + run_spectral(ruleset, ["code/API_definitions/*.yaml"], cwd=tmp_path) + call_args = mock_run.call_args + cmd = call_args[0][0] + assert "--ruleset" in cmd + assert str(ruleset) in cmd + assert "code/API_definitions/*.yaml" in cmd + assert call_args[1]["cwd"] == str(tmp_path) + + +# --------------------------------------------------------------------------- +# TestRunSpectralEngine +# --------------------------------------------------------------------------- + + +class TestRunSpectralEngine: + @patch("validation.engines.spectral_adapter.run_spectral") + def test_normal_execution(self, mock_run, tmp_path): + findings = [{"engine": "spectral", "engine_rule": "r1", "level": "warn", + "message": "m", "path": "f.yaml", "line": 1}] + mock_run.return_value = SpectralResult(findings=findings, success=True) + (tmp_path / ".spectral.yaml").touch() + + result = run_spectral_engine(tmp_path, tmp_path, commonalities_release="r4.1") + assert result == findings + + @patch("validation.engines.spectral_adapter.run_spectral") + def test_spectral_error_returns_error_finding(self, mock_run, tmp_path): + mock_run.return_value = SpectralResult( + findings=[], success=False, error_message="CLI not found", + ) + (tmp_path / ".spectral.yaml").touch() + + result = run_spectral_engine(tmp_path, tmp_path) + assert len(result) == 1 + assert result[0]["level"] == "error" + assert result[0]["engine_rule"] == "spectral-execution-error" + assert "CLI not found" in result[0]["message"] + + @patch("validation.engines.spectral_adapter.run_spectral") + def test_default_spec_patterns(self, mock_run, tmp_path): + mock_run.return_value = SpectralResult(findings=[], success=True) + (tmp_path / ".spectral.yaml").touch() + + run_spectral_engine(tmp_path, tmp_path) + call_args = mock_run.call_args + assert call_args[0][1] == ["code/API_definitions/*.yaml"] + + @patch("validation.engines.spectral_adapter.run_spectral") + def test_custom_spec_patterns(self, mock_run, tmp_path): + mock_run.return_value = SpectralResult(findings=[], success=True) + (tmp_path / ".spectral.yaml").touch() + + custom = ["bundled/*.yaml"] + run_spectral_engine(tmp_path, tmp_path, spec_patterns=custom) + call_args = mock_run.call_args + assert call_args[0][1] == custom + + @patch("validation.engines.spectral_adapter.run_spectral") + def test_ruleset_selection_uses_commonalities(self, mock_run, tmp_path): + """Verifies that the correct ruleset is selected and passed.""" + mock_run.return_value = SpectralResult(findings=[], success=True) + r4 = tmp_path / ".spectral-r4.yaml" + r4.touch() + + run_spectral_engine(tmp_path, tmp_path, commonalities_release="r4.2") + call_args = mock_run.call_args + assert call_args[0][0] == r4 From c3b78812c471e55b5075e50a9bf0470de35eb419 Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Wed, 25 Mar 2026 22:16:32 +0100 Subject: [PATCH 006/157] feat(validation): add yamllint and gherkin-lint engine adapters yamllint adapter parses --format parsable output, maps warning->warn, and preserves 1-indexed line/column numbers. Errors block downstream Spectral validation (orchestrator decision, not adapter). gherkin-lint adapter parses --format json output, relativizes absolute filePaths, and defaults all findings to warn level (post-filter can override via rule metadata). 51 unit tests covering parsing, severity mapping, and mocked subprocess. --- validation/engines/__init__.py | 4 +- validation/engines/gherkin_adapter.py | 250 +++++++++++++++++++++ validation/engines/yamllint_adapter.py | 252 ++++++++++++++++++++++ validation/tests/test_gherkin_adapter.py | 246 +++++++++++++++++++++ validation/tests/test_yamllint_adapter.py | 246 +++++++++++++++++++++ 5 files changed, 997 insertions(+), 1 deletion(-) create mode 100644 validation/engines/gherkin_adapter.py create mode 100644 validation/engines/yamllint_adapter.py create mode 100644 validation/tests/test_gherkin_adapter.py create mode 100644 validation/tests/test_yamllint_adapter.py diff --git a/validation/engines/__init__.py b/validation/engines/__init__.py index e3aeea2a..9e47ccc8 100644 --- a/validation/engines/__init__.py +++ b/validation/engines/__init__.py @@ -2,6 +2,8 @@ # Each adapter invokes its engine and normalizes output to the common # findings model (see schemas/findings-schema.yaml). +from .gherkin_adapter import run_gherkin_engine from .spectral_adapter import run_spectral_engine +from .yamllint_adapter import run_yamllint_engine -__all__ = ["run_spectral_engine"] +__all__ = ["run_gherkin_engine", "run_spectral_engine", "run_yamllint_engine"] diff --git a/validation/engines/gherkin_adapter.py b/validation/engines/gherkin_adapter.py new file mode 100644 index 00000000..06ea227a --- /dev/null +++ b/validation/engines/gherkin_adapter.py @@ -0,0 +1,250 @@ +"""gherkin-lint engine adapter for the CAMARA validation framework. + +Invokes gherkin-lint on BDD feature files, parses the JSON output, +and normalizes findings into the common findings model. + +Design doc references: + - Section 8.1 step 7: full validation (gherkin-lint invocation) + - Section 2.2: check areas (gherkin-lint coverage) +""" + +from __future__ import annotations + +import json +import logging +import os +import subprocess +from dataclasses import dataclass +from pathlib import Path, PurePosixPath +from typing import List, Optional + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- + +ENGINE_NAME = "gherkin" + +# gherkin-lint has no per-finding severity — all are reported identically. +# Default to "warn" so findings don't block in standard profile; post-filter +# rule metadata can elevate specific rules to "error". +DEFAULT_LEVEL = "warn" + +DEFAULT_TEST_GLOB = "code/Test_definitions/**/*.feature" + +# Sentinel rule name for adapter-level errors. +_EXECUTION_ERROR_RULE = "gherkin-execution-error" + + +# --------------------------------------------------------------------------- +# Pure functions +# --------------------------------------------------------------------------- + + +def derive_api_name(file_path: str) -> Optional[str]: + """Extract the API name from a test file path. + + Expects paths like ``code/Test_definitions/quality-on-demand.feature``. + Returns the file stem, or ``None`` for paths outside + ``Test_definitions``. + """ + if not file_path: + return None + parts = PurePosixPath(file_path).parts + try: + idx = parts.index("Test_definitions") + except ValueError: + return None + if idx + 1 < len(parts): + return PurePosixPath(parts[idx + 1]).stem + return None + + +def normalize_file_errors(file_entry: dict, cwd: str) -> List[dict]: + """Convert one gherkin-lint file entry into normalised findings. + + gherkin-lint JSON format per file:: + + {"filePath": "/absolute/path/to/file.feature", + "errors": [{"message": "...", "rule": "...", "line": N}, ...]} + + ``filePath`` is absolute and must be made relative to *cwd*. + Each error becomes a finding with ``engine="gherkin"`` and + ``level=DEFAULT_LEVEL``. + """ + abs_path = file_entry.get("filePath", "") + + # Relativize: strip cwd prefix to get repo-relative path. + try: + rel_path = os.path.relpath(abs_path, cwd) + except ValueError: + # On Windows, relpath raises ValueError for different drives. + rel_path = abs_path + + errors = file_entry.get("errors", []) + findings = [] + for err in errors: + finding: dict = { + "engine": ENGINE_NAME, + "engine_rule": err.get("rule", "unknown"), + "level": DEFAULT_LEVEL, + "message": err.get("message", ""), + "path": rel_path, + "line": err.get("line", 1), + "api_name": derive_api_name(rel_path), + } + findings.append(finding) + return findings + + +def parse_gherkin_output(raw_json: str, cwd: str) -> List[dict]: + """Parse gherkin-lint ``--format json`` stdout into normalised findings. + + gherkin-lint outputs a JSON array of file entries. Files with no + errors (empty ``errors`` array) are skipped. + + Args: + raw_json: Raw JSON string from gherkin-lint stdout. + cwd: Repo root path for relativizing absolute file paths. + + Returns: + List of findings conforming to the common findings model. + """ + if not raw_json.strip(): + return [] + + try: + data = json.loads(raw_json) + except json.JSONDecodeError as exc: + logger.warning("Failed to parse gherkin-lint JSON output: %s", exc) + return [] + + if not isinstance(data, list): + logger.warning("gherkin-lint output is not a JSON array") + return [] + + findings = [] + for file_entry in data: + if file_entry.get("errors"): + findings.extend(normalize_file_errors(file_entry, cwd)) + return findings + + +# --------------------------------------------------------------------------- +# I/O wrappers +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class GherkinResult: + """Result of a gherkin-lint CLI invocation.""" + + findings: List[dict] + success: bool + error_message: str = "" + + +def run_gherkin_lint( + config_path: Path, + file_patterns: List[str], + cwd: Path, +) -> GherkinResult: + """Invoke gherkin-lint via npx and capture structured output. + + Uses ``--format json`` for machine-readable output. + + Args: + config_path: Path to the ``.gherkin-lintrc`` configuration file. + file_patterns: Glob patterns for input feature files. + cwd: Working directory (repo root). + + Returns: + :class:`GherkinResult` with parsed findings and status. + """ + cmd = [ + "npx", "gherkin-lint", + "--format", "json", + "--config", str(config_path), + *file_patterns, + ] + + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + cwd=str(cwd), + timeout=120, + ) + except FileNotFoundError: + return GherkinResult( + findings=[], + success=False, + error_message="npx/gherkin-lint not found — is Node.js installed?", + ) + except subprocess.TimeoutExpired: + return GherkinResult( + findings=[], + success=False, + error_message="gherkin-lint timed out after 120 seconds", + ) + + # Exit 0 = clean, exit 1 = findings found. Both produce valid JSON. + if result.returncode in (0, 1): + findings = parse_gherkin_output(result.stdout, str(cwd)) + return GherkinResult(findings=findings, success=True) + + # Other exit codes: check for config-not-found or other runtime errors. + stderr = result.stderr.strip() if result.stderr else "" + stdout = result.stdout.strip() if result.stdout else "" + error_detail = stderr or stdout or "unknown error" + return GherkinResult( + findings=[], + success=False, + error_message=f"gherkin-lint exited with code {result.returncode}: {error_detail}", + ) + + +def _make_error_finding(message: str) -> dict: + """Create an error finding for adapter-level failures.""" + return { + "engine": ENGINE_NAME, + "engine_rule": _EXECUTION_ERROR_RULE, + "level": "error", + "message": message, + "path": "", + "line": 1, + "api_name": None, + } + + +def run_gherkin_engine( + repo_path: Path, + config_path: Path, + file_patterns: Optional[List[str]] = None, +) -> List[dict]: + """Top-level entry point for the orchestrator. + + Args: + repo_path: Root of the repository being validated. + config_path: Path to the gherkin-lint configuration file. + file_patterns: Override glob patterns (default: + ``["code/Test_definitions/**/*.feature"]``). + + Returns: + List of finding dicts conforming to ``findings-schema.yaml``. + """ + if file_patterns is None: + file_patterns = [DEFAULT_TEST_GLOB] + + logger.info("Running gherkin-lint with config: %s", config_path) + + result = run_gherkin_lint(config_path, file_patterns, cwd=repo_path) + + if not result.success: + logger.error("gherkin-lint engine error: %s", result.error_message) + return [_make_error_finding(result.error_message)] + + logger.info("gherkin-lint produced %d finding(s)", len(result.findings)) + return result.findings diff --git a/validation/engines/yamllint_adapter.py b/validation/engines/yamllint_adapter.py new file mode 100644 index 00000000..9235b0c4 --- /dev/null +++ b/validation/engines/yamllint_adapter.py @@ -0,0 +1,252 @@ +"""yamllint engine adapter for the CAMARA validation framework. + +Invokes yamllint on YAML spec files, parses the parsable-format output, +and normalizes findings into the common findings model. + +Design doc references: + - Section 8.1 step 5: pre-bundling validation (YAML syntax check) + - Section 2.2: check areas (yamllint coverage) + +yamllint errors block downstream steps (Spectral, bundling). That +blocking decision is made by the orchestrator — this adapter simply +produces findings with the correct severity levels. +""" + +from __future__ import annotations + +import logging +import re +import subprocess +import sys +from dataclasses import dataclass +from pathlib import Path, PurePosixPath +from typing import List, Optional + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- + +ENGINE_NAME = "yamllint" + +# yamllint level -> framework level. +SEVERITY_MAP: dict[str, str] = { + "error": "error", + "warning": "warn", +} + +DEFAULT_SPEC_GLOB = "code/API_definitions/*.yaml" + +# Regex for yamllint parsable-format lines: +# file:line:col: [level] message (rule) +# The rule suffix is optional (syntax errors may omit it). +# The rule is always the last parenthesised group on the line. +_PARSABLE_RE = re.compile( + r"^(.+?):(\d+):(\d+): \[(error|warning)\] (.+?)(?:\s+\(([^)]+)\))?$" +) + +# Sentinel rule name for adapter-level errors. +_EXECUTION_ERROR_RULE = "yamllint-execution-error" + + +# --------------------------------------------------------------------------- +# Pure functions +# --------------------------------------------------------------------------- + + +def map_severity(yamllint_level: str) -> str: + """Map a yamllint level string to a framework level. + + Args: + yamllint_level: ``"error"`` or ``"warning"``. + + Returns: + Framework level: ``"error"`` or ``"warn"``. + + Raises: + KeyError: If *yamllint_level* is not recognised. + """ + return SEVERITY_MAP[yamllint_level] + + +def derive_api_name(file_path: str) -> Optional[str]: + """Extract the API name from a spec file path. + + Expects paths like ``code/API_definitions/quality-on-demand.yaml``. + Returns the file stem, or ``None`` for paths outside + ``API_definitions``. + """ + if not file_path: + return None + parts = PurePosixPath(file_path).parts + try: + idx = parts.index("API_definitions") + except ValueError: + return None + if idx + 1 < len(parts): + return PurePosixPath(parts[idx + 1]).stem + return None + + +def parse_parsable_line(line: str) -> Optional[dict]: + """Parse one yamllint parsable-format line into a finding dict. + + Format: ``file:line:col: [level] message (rule)`` + + yamllint line and column numbers are already 1-indexed. + + Returns: + A finding dict, or ``None`` if the line does not match. + """ + match = _PARSABLE_RE.match(line.strip()) + if not match: + return None + + file_path, line_no, col_no, level, message, rule = match.groups() + + finding: dict = { + "engine": ENGINE_NAME, + "engine_rule": rule or "syntax-error", + "level": map_severity(level), + "message": message.strip(), + "path": file_path, + "line": int(line_no), + "column": int(col_no), + "api_name": derive_api_name(file_path), + } + return finding + + +def parse_yamllint_output(raw: str) -> List[dict]: + """Parse yamllint ``--format parsable`` stdout into normalised findings. + + Blank lines and lines that don't match the parsable format are skipped. + + Returns: + List of findings conforming to the common findings model. + """ + findings = [] + for line in raw.splitlines(): + finding = parse_parsable_line(line) + if finding is not None: + findings.append(finding) + return findings + + +# --------------------------------------------------------------------------- +# I/O wrappers +# --------------------------------------------------------------------------- + + +@dataclass(frozen=True) +class YamllintResult: + """Result of a yamllint CLI invocation.""" + + findings: List[dict] + success: bool + error_message: str = "" + + +def run_yamllint( + config_path: Path, + file_patterns: List[str], + cwd: Path, +) -> YamllintResult: + """Invoke yamllint and capture structured output. + + Uses ``python3 -m yamllint`` for reliable module execution and + ``--format parsable`` for machine-readable output. + + Args: + config_path: Path to the ``.yamllint.yaml`` configuration file. + file_patterns: Glob patterns for input files. + cwd: Working directory (repo root). + + Returns: + :class:`YamllintResult` with parsed findings and status. + """ + cmd = [ + sys.executable, "-m", "yamllint", + "--format", "parsable", + "--config-file", str(config_path), + *file_patterns, + ] + + try: + result = subprocess.run( + cmd, + capture_output=True, + text=True, + cwd=str(cwd), + timeout=120, + ) + except FileNotFoundError: + return YamllintResult( + findings=[], + success=False, + error_message="yamllint not found — is it installed?", + ) + except subprocess.TimeoutExpired: + return YamllintResult( + findings=[], + success=False, + error_message="yamllint timed out after 120 seconds", + ) + + # Exit 0 = clean, exit 1 = findings found. Both are normal. + if result.returncode in (0, 1): + findings = parse_yamllint_output(result.stdout) + return YamllintResult(findings=findings, success=True) + + # Other exit codes indicate a runtime error. + stderr = result.stderr.strip() if result.stderr else "unknown error" + return YamllintResult( + findings=[], + success=False, + error_message=f"yamllint exited with code {result.returncode}: {stderr}", + ) + + +def _make_error_finding(message: str) -> dict: + """Create an error finding for adapter-level failures.""" + return { + "engine": ENGINE_NAME, + "engine_rule": _EXECUTION_ERROR_RULE, + "level": "error", + "message": message, + "path": "", + "line": 1, + "api_name": None, + } + + +def run_yamllint_engine( + repo_path: Path, + config_path: Path, + file_patterns: Optional[List[str]] = None, +) -> List[dict]: + """Top-level entry point for the orchestrator. + + Args: + repo_path: Root of the repository being validated. + config_path: Path to the yamllint configuration file. + file_patterns: Override glob patterns (default: + ``["code/API_definitions/*.yaml"]``). + + Returns: + List of finding dicts conforming to ``findings-schema.yaml``. + """ + if file_patterns is None: + file_patterns = [DEFAULT_SPEC_GLOB] + + logger.info("Running yamllint with config: %s", config_path) + + result = run_yamllint(config_path, file_patterns, cwd=repo_path) + + if not result.success: + logger.error("yamllint engine error: %s", result.error_message) + return [_make_error_finding(result.error_message)] + + logger.info("yamllint produced %d finding(s)", len(result.findings)) + return result.findings diff --git a/validation/tests/test_gherkin_adapter.py b/validation/tests/test_gherkin_adapter.py new file mode 100644 index 00000000..fff51e20 --- /dev/null +++ b/validation/tests/test_gherkin_adapter.py @@ -0,0 +1,246 @@ +"""Unit tests for validation.engines.gherkin_adapter.""" + +from __future__ import annotations + +import json +import subprocess +from unittest.mock import patch + +import pytest + +from validation.engines.gherkin_adapter import ( + DEFAULT_LEVEL, + ENGINE_NAME, + GherkinResult, + derive_api_name, + normalize_file_errors, + parse_gherkin_output, + run_gherkin_engine, + run_gherkin_lint, +) + + +# --------------------------------------------------------------------------- +# Fixtures — sample gherkin-lint JSON entries +# --------------------------------------------------------------------------- + +SAMPLE_FILE_ENTRY = { + "filePath": "/repo/code/Test_definitions/quality-on-demand.feature", + "errors": [ + {"message": "Missing Feature name", "rule": "no-unnamed-features", "line": 1}, + {"message": "Missing Scenario name", "rule": "no-unnamed-scenarios", "line": 5}, + ], +} + +SAMPLE_CLEAN_ENTRY = { + "filePath": "/repo/code/Test_definitions/clean.feature", + "errors": [], +} + + +# --------------------------------------------------------------------------- +# TestDeriveApiName +# --------------------------------------------------------------------------- + + +class TestDeriveApiName: + def test_standard_test_path(self): + assert ( + derive_api_name("code/Test_definitions/quality-on-demand.feature") + == "quality-on-demand" + ) + + def test_non_test_path(self): + assert derive_api_name("code/API_definitions/api.yaml") is None + + def test_empty_string(self): + assert derive_api_name("") is None + + def test_nested_prefix(self): + assert ( + derive_api_name("some/prefix/Test_definitions/my-api.feature") + == "my-api" + ) + + +# --------------------------------------------------------------------------- +# TestNormalizeFileErrors +# --------------------------------------------------------------------------- + + +class TestNormalizeFileErrors: + def test_standard_errors(self): + findings = normalize_file_errors(SAMPLE_FILE_ENTRY, "/repo") + assert len(findings) == 2 + assert findings[0]["engine"] == "gherkin" + assert findings[0]["engine_rule"] == "no-unnamed-features" + assert findings[0]["level"] == DEFAULT_LEVEL + assert findings[0]["message"] == "Missing Feature name" + assert findings[0]["path"] == "code/Test_definitions/quality-on-demand.feature" + assert findings[0]["line"] == 1 + assert findings[0]["api_name"] == "quality-on-demand" + + def test_empty_errors_list(self): + findings = normalize_file_errors(SAMPLE_CLEAN_ENTRY, "/repo") + assert findings == [] + + def test_path_relativization(self): + entry = { + "filePath": "/workspace/project/code/Test_definitions/api.feature", + "errors": [{"message": "m", "rule": "r", "line": 1}], + } + findings = normalize_file_errors(entry, "/workspace/project") + assert findings[0]["path"] == "code/Test_definitions/api.feature" + + def test_missing_fields_use_defaults(self): + entry = { + "filePath": "/repo/test.feature", + "errors": [{}], + } + findings = normalize_file_errors(entry, "/repo") + assert len(findings) == 1 + assert findings[0]["engine_rule"] == "unknown" + assert findings[0]["message"] == "" + assert findings[0]["line"] == 1 + + +# --------------------------------------------------------------------------- +# TestParseGherkinOutput +# --------------------------------------------------------------------------- + + +class TestParseGherkinOutput: + def test_valid_json_with_errors(self): + raw = json.dumps([SAMPLE_FILE_ENTRY]) + findings = parse_gherkin_output(raw, "/repo") + assert len(findings) == 2 + assert findings[0]["engine_rule"] == "no-unnamed-features" + + def test_empty_array(self): + assert parse_gherkin_output("[]", "/repo") == [] + + def test_file_with_no_errors_skipped(self): + raw = json.dumps([SAMPLE_CLEAN_ENTRY]) + findings = parse_gherkin_output(raw, "/repo") + assert findings == [] + + def test_invalid_json_returns_empty(self): + assert parse_gherkin_output("not json", "/repo") == [] + + def test_empty_string(self): + assert parse_gherkin_output("", "/repo") == [] + + def test_multiple_files(self): + raw = json.dumps([SAMPLE_FILE_ENTRY, SAMPLE_CLEAN_ENTRY, SAMPLE_FILE_ENTRY]) + findings = parse_gherkin_output(raw, "/repo") + # Two files with 2 errors each = 4 findings. + assert len(findings) == 4 + + def test_json_object_instead_of_array(self): + findings = parse_gherkin_output('{"error": "oops"}', "/repo") + assert findings == [] + + +# --------------------------------------------------------------------------- +# TestRunGherkinLint +# --------------------------------------------------------------------------- + + +class TestRunGherkinLint: + @patch("validation.engines.gherkin_adapter.subprocess.run") + def test_exit_0_no_findings(self, mock_run, tmp_path): + mock_run.return_value = subprocess.CompletedProcess( + args=[], returncode=0, stdout="[]", stderr="", + ) + result = run_gherkin_lint( + tmp_path / ".gherkin-lintrc", ["*.feature"], cwd=tmp_path, + ) + assert result.success is True + assert result.findings == [] + + @patch("validation.engines.gherkin_adapter.subprocess.run") + def test_exit_1_with_findings(self, mock_run, tmp_path): + mock_run.return_value = subprocess.CompletedProcess( + args=[], + returncode=1, + stdout=json.dumps([{ + "filePath": str(tmp_path / "code/Test_definitions/api.feature"), + "errors": [{"message": "m", "rule": "r", "line": 1}], + }]), + stderr="", + ) + result = run_gherkin_lint( + tmp_path / ".gherkin-lintrc", ["*.feature"], cwd=tmp_path, + ) + assert result.success is True + assert len(result.findings) == 1 + + @patch("validation.engines.gherkin_adapter.subprocess.run") + def test_runtime_error(self, mock_run, tmp_path): + mock_run.return_value = subprocess.CompletedProcess( + args=[], returncode=2, stdout="", stderr="config not found", + ) + result = run_gherkin_lint( + tmp_path / ".gherkin-lintrc", ["*.feature"], cwd=tmp_path, + ) + assert result.success is False + assert "config not found" in result.error_message + + @patch("validation.engines.gherkin_adapter.subprocess.run") + def test_npx_not_found(self, mock_run, tmp_path): + mock_run.side_effect = FileNotFoundError("npx") + result = run_gherkin_lint( + tmp_path / ".gherkin-lintrc", ["*.feature"], cwd=tmp_path, + ) + assert result.success is False + assert "not found" in result.error_message + + @patch("validation.engines.gherkin_adapter.subprocess.run") + def test_timeout(self, mock_run, tmp_path): + mock_run.side_effect = subprocess.TimeoutExpired(cmd="npx", timeout=120) + result = run_gherkin_lint( + tmp_path / ".gherkin-lintrc", ["*.feature"], cwd=tmp_path, + ) + assert result.success is False + assert "timed out" in result.error_message + + +# --------------------------------------------------------------------------- +# TestRunGherkinEngine +# --------------------------------------------------------------------------- + + +class TestRunGherkinEngine: + @patch("validation.engines.gherkin_adapter.run_gherkin_lint") + def test_normal_execution(self, mock_run, tmp_path): + findings = [{"engine": "gherkin", "engine_rule": "r1", "level": "warn", + "message": "m", "path": "f.feature", "line": 1}] + mock_run.return_value = GherkinResult(findings=findings, success=True) + + result = run_gherkin_engine(tmp_path, tmp_path / ".gherkin-lintrc") + assert result == findings + + @patch("validation.engines.gherkin_adapter.run_gherkin_lint") + def test_error_returns_error_finding(self, mock_run, tmp_path): + mock_run.return_value = GherkinResult( + findings=[], success=False, error_message="npx missing", + ) + result = run_gherkin_engine(tmp_path, tmp_path / ".gherkin-lintrc") + assert len(result) == 1 + assert result[0]["level"] == "error" + assert result[0]["engine_rule"] == "gherkin-execution-error" + + @patch("validation.engines.gherkin_adapter.run_gherkin_lint") + def test_default_patterns(self, mock_run, tmp_path): + mock_run.return_value = GherkinResult(findings=[], success=True) + run_gherkin_engine(tmp_path, tmp_path / ".gherkin-lintrc") + call_args = mock_run.call_args + assert call_args[0][1] == ["code/Test_definitions/**/*.feature"] + + @patch("validation.engines.gherkin_adapter.run_gherkin_lint") + def test_custom_patterns(self, mock_run, tmp_path): + mock_run.return_value = GherkinResult(findings=[], success=True) + custom = ["tests/*.feature"] + run_gherkin_engine(tmp_path, tmp_path / ".gherkin-lintrc", file_patterns=custom) + call_args = mock_run.call_args + assert call_args[0][1] == custom diff --git a/validation/tests/test_yamllint_adapter.py b/validation/tests/test_yamllint_adapter.py new file mode 100644 index 00000000..c60cfe71 --- /dev/null +++ b/validation/tests/test_yamllint_adapter.py @@ -0,0 +1,246 @@ +"""Unit tests for validation.engines.yamllint_adapter.""" + +from __future__ import annotations + +import subprocess +from unittest.mock import patch + +import pytest + +from validation.engines.yamllint_adapter import ( + ENGINE_NAME, + YamllintResult, + derive_api_name, + map_severity, + parse_parsable_line, + parse_yamllint_output, + run_yamllint, + run_yamllint_engine, +) + + +# --------------------------------------------------------------------------- +# TestMapSeverity +# --------------------------------------------------------------------------- + + +class TestMapSeverity: + def test_error(self): + assert map_severity("error") == "error" + + def test_warning(self): + assert map_severity("warning") == "warn" + + def test_unknown_raises(self): + with pytest.raises(KeyError): + map_severity("info") + + +# --------------------------------------------------------------------------- +# TestDeriveApiName +# --------------------------------------------------------------------------- + + +class TestDeriveApiName: + def test_standard_api_path(self): + assert ( + derive_api_name("code/API_definitions/quality-on-demand.yaml") + == "quality-on-demand" + ) + + def test_non_api_path(self): + assert derive_api_name("release-plan.yaml") is None + + def test_empty_string(self): + assert derive_api_name("") is None + + def test_nested_prefix(self): + assert ( + derive_api_name("some/prefix/API_definitions/my-api.yaml") + == "my-api" + ) + + +# --------------------------------------------------------------------------- +# TestParseParsableLine +# --------------------------------------------------------------------------- + + +class TestParseParsableLine: + def test_error_with_rule(self): + line = 'code/API_definitions/api.yaml:2:1: [error] duplication of key "key" in mapping (key-duplicates)' + finding = parse_parsable_line(line) + assert finding is not None + assert finding["engine"] == "yamllint" + assert finding["engine_rule"] == "key-duplicates" + assert finding["level"] == "error" + assert finding["message"] == 'duplication of key "key" in mapping' + assert finding["path"] == "code/API_definitions/api.yaml" + assert finding["line"] == 2 + assert finding["column"] == 1 + assert finding["api_name"] == "api" + + def test_warning_with_rule(self): + line = "file.yaml:10:5: [warning] trailing spaces (trailing-spaces)" + finding = parse_parsable_line(line) + assert finding is not None + assert finding["level"] == "warn" + assert finding["engine_rule"] == "trailing-spaces" + + def test_no_rule_suffix(self): + """Syntax errors may not have a rule name in parentheses.""" + line = "file.yaml:3:1: [error] syntax error: mapping values are not allowed here" + finding = parse_parsable_line(line) + assert finding is not None + assert finding["engine_rule"] == "syntax-error" + assert "mapping values" in finding["message"] + + def test_message_with_parentheses(self): + """Rule name is always the last (...) group.""" + line = 'file.yaml:5:1: [error] wrong value (got "yes") (truthy)' + finding = parse_parsable_line(line) + assert finding is not None + assert finding["engine_rule"] == "truthy" + assert 'wrong value (got "yes")' in finding["message"] + + def test_invalid_line_returns_none(self): + assert parse_parsable_line("not a yamllint line") is None + + def test_empty_line_returns_none(self): + assert parse_parsable_line("") is None + + +# --------------------------------------------------------------------------- +# TestParseYamllintOutput +# --------------------------------------------------------------------------- + + +class TestParseYamllintOutput: + def test_multiple_findings(self): + raw = ( + "a.yaml:1:1: [error] dup key (key-duplicates)\n" + "b.yaml:5:3: [warning] trailing spaces (trailing-spaces)\n" + ) + findings = parse_yamllint_output(raw) + assert len(findings) == 2 + assert findings[0]["level"] == "error" + assert findings[1]["level"] == "warn" + + def test_empty_output(self): + assert parse_yamllint_output("") == [] + + def test_blank_lines_skipped(self): + raw = "\na.yaml:1:1: [error] bad (truthy)\n\n" + findings = parse_yamllint_output(raw) + assert len(findings) == 1 + + def test_mixed_levels(self): + raw = ( + "f.yaml:1:1: [error] err (e1)\n" + "f.yaml:2:1: [warning] warn (w1)\n" + "f.yaml:3:1: [error] err2 (e2)\n" + ) + findings = parse_yamllint_output(raw) + levels = [f["level"] for f in findings] + assert levels == ["error", "warn", "error"] + + def test_all_warnings(self): + raw = ( + "f.yaml:1:1: [warning] w1 (r1)\n" + "f.yaml:2:1: [warning] w2 (r2)\n" + ) + findings = parse_yamllint_output(raw) + assert all(f["level"] == "warn" for f in findings) + + +# --------------------------------------------------------------------------- +# TestRunYamllint +# --------------------------------------------------------------------------- + + +class TestRunYamllint: + @patch("validation.engines.yamllint_adapter.subprocess.run") + def test_exit_0_no_findings(self, mock_run, tmp_path): + mock_run.return_value = subprocess.CompletedProcess( + args=[], returncode=0, stdout="", stderr="", + ) + result = run_yamllint(tmp_path / ".yamllint.yaml", ["*.yaml"], cwd=tmp_path) + assert result.success is True + assert result.findings == [] + + @patch("validation.engines.yamllint_adapter.subprocess.run") + def test_exit_1_with_findings(self, mock_run, tmp_path): + mock_run.return_value = subprocess.CompletedProcess( + args=[], + returncode=1, + stdout="f.yaml:1:1: [error] dup (key-duplicates)\n", + stderr="", + ) + result = run_yamllint(tmp_path / ".yamllint.yaml", ["*.yaml"], cwd=tmp_path) + assert result.success is True + assert len(result.findings) == 1 + assert result.findings[0]["engine_rule"] == "key-duplicates" + + @patch("validation.engines.yamllint_adapter.subprocess.run") + def test_runtime_error(self, mock_run, tmp_path): + mock_run.return_value = subprocess.CompletedProcess( + args=[], returncode=2, stdout="", stderr="invalid config", + ) + result = run_yamllint(tmp_path / ".yamllint.yaml", ["*.yaml"], cwd=tmp_path) + assert result.success is False + assert "invalid config" in result.error_message + + @patch("validation.engines.yamllint_adapter.subprocess.run") + def test_not_installed(self, mock_run, tmp_path): + mock_run.side_effect = FileNotFoundError("python3") + result = run_yamllint(tmp_path / ".yamllint.yaml", ["*.yaml"], cwd=tmp_path) + assert result.success is False + assert "not found" in result.error_message + + @patch("validation.engines.yamllint_adapter.subprocess.run") + def test_timeout(self, mock_run, tmp_path): + mock_run.side_effect = subprocess.TimeoutExpired(cmd="yamllint", timeout=120) + result = run_yamllint(tmp_path / ".yamllint.yaml", ["*.yaml"], cwd=tmp_path) + assert result.success is False + assert "timed out" in result.error_message + + +# --------------------------------------------------------------------------- +# TestRunYamllintEngine +# --------------------------------------------------------------------------- + + +class TestRunYamllintEngine: + @patch("validation.engines.yamllint_adapter.run_yamllint") + def test_normal_execution(self, mock_run, tmp_path): + findings = [{"engine": "yamllint", "engine_rule": "r1", "level": "warn", + "message": "m", "path": "f.yaml", "line": 1}] + mock_run.return_value = YamllintResult(findings=findings, success=True) + + result = run_yamllint_engine(tmp_path, tmp_path / ".yamllint.yaml") + assert result == findings + + @patch("validation.engines.yamllint_adapter.run_yamllint") + def test_error_returns_error_finding(self, mock_run, tmp_path): + mock_run.return_value = YamllintResult( + findings=[], success=False, error_message="not found", + ) + result = run_yamllint_engine(tmp_path, tmp_path / ".yamllint.yaml") + assert len(result) == 1 + assert result[0]["level"] == "error" + assert result[0]["engine_rule"] == "yamllint-execution-error" + + @patch("validation.engines.yamllint_adapter.run_yamllint") + def test_default_patterns(self, mock_run, tmp_path): + mock_run.return_value = YamllintResult(findings=[], success=True) + run_yamllint_engine(tmp_path, tmp_path / ".yamllint.yaml") + call_args = mock_run.call_args + assert call_args[0][1] == ["code/API_definitions/*.yaml"] + + @patch("validation.engines.yamllint_adapter.run_yamllint") + def test_custom_patterns(self, mock_run, tmp_path): + mock_run.return_value = YamllintResult(findings=[], success=True) + custom = ["custom/*.yaml"] + run_yamllint_engine(tmp_path, tmp_path / ".yamllint.yaml", file_patterns=custom) + call_args = mock_run.call_args + assert call_args[0][1] == custom From d677b806ca63c17f1b98648ef9607dfd0fc0f46f Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Thu, 26 Mar 2026 09:44:31 +0100 Subject: [PATCH 007/157] feat(validation): add Python check engine adapter and core checks Add the fourth engine adapter for native Python validation checks. Unlike external engine adapters (Spectral, yamllint, gherkin-lint), Python checks run in-process with direct access to ValidationContext. Harness architecture: - python_adapter.py: entry point with per-check error isolation - python_checks/ package: explicit registry, shared types, domain modules - CheckScope (REPO/API): harness iterates per-API checks automatically - make_finding() helper ensures structural consistency Core checks implemented (12 checks across 7 modules): - version_checks: info.version format, server URL version/api-name - filename_checks: kebab-case naming, file existence - test_checks: test directory, test files, version alignment - release_plan_checks: track/type consistency, file existence (ported) - changelog_checks: CHANGELOG existence and format - metadata_checks: license and x-camara-commonalities consistency - release_review_checks: file restriction on snapshot branches Version segment builder implements full CAMARA URL mapping: wip->vwip, 1.0.0->v1, 0.1.0->v0.1, 0.2.0-alpha.2->v0.2alpha2 131 new tests, 293 total passing. --- validation/engines/__init__.py | 8 +- validation/engines/python_adapter.py | 99 +++++ validation/engines/python_checks/__init__.py | 44 +++ validation/engines/python_checks/_types.py | 127 +++++++ .../engines/python_checks/changelog_checks.py | 87 +++++ .../engines/python_checks/filename_checks.py | 79 ++++ .../engines/python_checks/metadata_checks.py | 119 ++++++ .../python_checks/release_plan_checks.py | 276 ++++++++++++++ .../python_checks/release_review_checks.py | 97 +++++ .../engines/python_checks/test_checks.py | 185 ++++++++++ .../engines/python_checks/version_checks.py | 282 ++++++++++++++ validation/tests/test_python_adapter.py | 220 +++++++++++ .../tests/test_python_checks_changelog.py | 124 +++++++ .../tests/test_python_checks_filename.py | 132 +++++++ .../tests/test_python_checks_metadata.py | 139 +++++++ .../tests/test_python_checks_release_plan.py | 254 +++++++++++++ .../test_python_checks_release_review.py | 127 +++++++ validation/tests/test_python_checks_test.py | 187 ++++++++++ .../tests/test_python_checks_version.py | 346 ++++++++++++++++++ 19 files changed, 2931 insertions(+), 1 deletion(-) create mode 100644 validation/engines/python_adapter.py create mode 100644 validation/engines/python_checks/__init__.py create mode 100644 validation/engines/python_checks/_types.py create mode 100644 validation/engines/python_checks/changelog_checks.py create mode 100644 validation/engines/python_checks/filename_checks.py create mode 100644 validation/engines/python_checks/metadata_checks.py create mode 100644 validation/engines/python_checks/release_plan_checks.py create mode 100644 validation/engines/python_checks/release_review_checks.py create mode 100644 validation/engines/python_checks/test_checks.py create mode 100644 validation/engines/python_checks/version_checks.py create mode 100644 validation/tests/test_python_adapter.py create mode 100644 validation/tests/test_python_checks_changelog.py create mode 100644 validation/tests/test_python_checks_filename.py create mode 100644 validation/tests/test_python_checks_metadata.py create mode 100644 validation/tests/test_python_checks_release_plan.py create mode 100644 validation/tests/test_python_checks_release_review.py create mode 100644 validation/tests/test_python_checks_test.py create mode 100644 validation/tests/test_python_checks_version.py diff --git a/validation/engines/__init__.py b/validation/engines/__init__.py index 9e47ccc8..5a602148 100644 --- a/validation/engines/__init__.py +++ b/validation/engines/__init__.py @@ -3,7 +3,13 @@ # findings model (see schemas/findings-schema.yaml). from .gherkin_adapter import run_gherkin_engine +from .python_adapter import run_python_engine from .spectral_adapter import run_spectral_engine from .yamllint_adapter import run_yamllint_engine -__all__ = ["run_gherkin_engine", "run_spectral_engine", "run_yamllint_engine"] +__all__ = [ + "run_gherkin_engine", + "run_python_engine", + "run_spectral_engine", + "run_yamllint_engine", +] diff --git a/validation/engines/python_adapter.py b/validation/engines/python_adapter.py new file mode 100644 index 00000000..ca23564b --- /dev/null +++ b/validation/engines/python_adapter.py @@ -0,0 +1,99 @@ +"""Python check engine adapter for the CAMARA validation framework. + +Runs native Python check functions against the repository, producing +findings conforming to the common findings model. Unlike the other +engine adapters (Spectral, yamllint, gherkin-lint), Python checks run +in-process — no subprocess invocation. + +Design doc references: + - Section 8.1 step 7: full validation (Python checks invocation) + - Section 2.2: check areas (Python check coverage) +""" + +from __future__ import annotations + +import dataclasses +import logging +from pathlib import Path +from typing import List + +from validation.context import ValidationContext + +from .python_checks import CHECKS, CheckScope + +logger = logging.getLogger(__name__) + +ENGINE_NAME = "python" + +# Sentinel rule name for adapter-level errors. +_EXECUTION_ERROR_RULE = "python-execution-error" + + +# --------------------------------------------------------------------------- +# Error finding builder +# --------------------------------------------------------------------------- + + +def _make_error_finding(message: str, check_name: str = "") -> dict: + """Create an error finding for adapter-level or check-level failures.""" + return { + "engine": ENGINE_NAME, + "engine_rule": check_name or _EXECUTION_ERROR_RULE, + "level": "error", + "message": message, + "path": "", + "line": 1, + "api_name": None, + } + + +# --------------------------------------------------------------------------- +# Entry point +# --------------------------------------------------------------------------- + + +def run_python_engine( + repo_path: Path, + context: ValidationContext, +) -> List[dict]: + """Top-level entry point for the orchestrator. + + Executes all registered Python checks and collects their findings. + Each check is isolated: if a check raises an exception, an error + finding is emitted for that check and execution continues with the + next check. + + Args: + repo_path: Root of the repository being validated. + context: Unified validation context. + + Returns: + List of finding dicts conforming to ``findings-schema.yaml``. + """ + all_findings: List[dict] = [] + + for descriptor in CHECKS: + try: + if descriptor.scope == CheckScope.REPO: + findings = descriptor.fn(repo_path, context) + all_findings.extend(findings) + elif descriptor.scope == CheckScope.API: + for api_ctx in context.apis: + single_api_context = dataclasses.replace( + context, apis=(api_ctx,) + ) + findings = descriptor.fn(repo_path, single_api_context) + all_findings.extend(findings) + except Exception as exc: + logger.exception( + "Python check %s raised an exception", descriptor.name + ) + all_findings.append( + _make_error_finding( + f"Check {descriptor.name!r} failed: {exc}", + check_name=descriptor.name, + ) + ) + + logger.info("Python checks produced %d finding(s)", len(all_findings)) + return all_findings diff --git a/validation/engines/python_checks/__init__.py b/validation/engines/python_checks/__init__.py new file mode 100644 index 00000000..e7e757c8 --- /dev/null +++ b/validation/engines/python_checks/__init__.py @@ -0,0 +1,44 @@ +# Python check registry. +# Each check is a CheckDescriptor with name, scope, and function. +# The CHECKS list defines execution order. + +from __future__ import annotations + +from ._types import CheckDescriptor, CheckScope + +from .changelog_checks import check_changelog_format +from .filename_checks import check_filename_kebab_case, check_filename_matches_api_name +from .metadata_checks import check_license_commonalities_consistency +from .release_plan_checks import check_release_plan_semantics +from .release_review_checks import check_release_review_file_restriction +from .test_checks import ( + check_test_directory_exists, + check_test_file_version, + check_test_files_exist, +) +from .version_checks import ( + check_info_version_format, + check_server_url_api_name, + check_server_url_version, +) + +# Ordered registry. Execution order matches this list. +# Adding a new check: import the function and append a CheckDescriptor. +CHECKS: list[CheckDescriptor] = [ + # --- Per-API checks (run once per API in context.apis) --- + CheckDescriptor("check-filename-kebab-case", CheckScope.API, check_filename_kebab_case), + CheckDescriptor("check-filename-matches-api-name", CheckScope.API, check_filename_matches_api_name), + CheckDescriptor("check-info-version-format", CheckScope.API, check_info_version_format), + CheckDescriptor("check-server-url-version", CheckScope.API, check_server_url_version), + CheckDescriptor("check-server-url-api-name", CheckScope.API, check_server_url_api_name), + CheckDescriptor("check-test-files-exist", CheckScope.API, check_test_files_exist), + CheckDescriptor("check-test-file-version", CheckScope.API, check_test_file_version), + # --- Repo-level checks (run once) --- + CheckDescriptor("check-test-directory-exists", CheckScope.REPO, check_test_directory_exists), + CheckDescriptor("check-release-plan-semantics", CheckScope.REPO, check_release_plan_semantics), + CheckDescriptor("check-changelog-format", CheckScope.REPO, check_changelog_format), + CheckDescriptor("check-license-commonalities-consistency", CheckScope.REPO, check_license_commonalities_consistency), + CheckDescriptor("check-release-review-file-restriction", CheckScope.REPO, check_release_review_file_restriction), +] + +__all__ = ["CHECKS", "CheckDescriptor", "CheckScope"] diff --git a/validation/engines/python_checks/_types.py b/validation/engines/python_checks/_types.py new file mode 100644 index 00000000..5a1659a9 --- /dev/null +++ b/validation/engines/python_checks/_types.py @@ -0,0 +1,127 @@ +"""Shared types and helpers for Python check modules. + +Provides the check descriptor, scope enum, finding builder, and common +utilities used across all check modules. +""" + +from __future__ import annotations + +import logging +from dataclasses import dataclass +from enum import Enum +from pathlib import Path, PurePosixPath +from typing import Callable, List, Optional + +import yaml + +logger = logging.getLogger(__name__) + +ENGINE_NAME = "python" + + +# --------------------------------------------------------------------------- +# Check scope and descriptor +# --------------------------------------------------------------------------- + + +class CheckScope(Enum): + """Whether a check runs once per repository or once per API.""" + + REPO = "repo" + API = "api" + + +@dataclass(frozen=True) +class CheckDescriptor: + """Registry entry for one Python check. + + Attributes: + name: Kebab-case identifier used as ``engine_rule`` in findings. + scope: REPO (called once) or API (called per API in context). + fn: The check function — ``(repo_path, context) -> List[dict]``. + """ + + name: str + scope: CheckScope + fn: Callable[..., List[dict]] + + +# --------------------------------------------------------------------------- +# Finding builder +# --------------------------------------------------------------------------- + + +def make_finding( + engine_rule: str, + level: str, + message: str, + path: str = "", + line: int = 1, + api_name: Optional[str] = None, + **extra: object, +) -> dict: + """Build a finding dict conforming to findings-schema.yaml. + + Args: + engine_rule: Kebab-case check name (matches CheckDescriptor.name). + level: ``"error"``, ``"warn"``, or ``"hint"``. + message: Human-readable description of the issue. + path: File path relative to the repository root. + line: 1-indexed line number. + api_name: API this finding belongs to, or ``None`` for repo-level. + **extra: Additional fields (e.g. ``column``). + + Returns: + Dict with all required finding fields. + """ + finding: dict = { + "engine": ENGINE_NAME, + "engine_rule": engine_rule, + "level": level, + "message": message, + "path": path, + "line": line, + "api_name": api_name, + } + if extra: + finding.update(extra) + return finding + + +# --------------------------------------------------------------------------- +# Common utilities +# --------------------------------------------------------------------------- + + +def load_yaml_safe(file_path: Path) -> Optional[dict]: + """Load a YAML file, returning ``None`` on any error. + + Returns ``None`` when the file does not exist, is empty, contains + non-dict content, or has a YAML syntax error. + """ + if not file_path.is_file(): + return None + try: + data = yaml.safe_load(file_path.read_text(encoding="utf-8")) + return data if isinstance(data, dict) else None + except yaml.YAMLError: + return None + + +def derive_api_name(file_path: str) -> Optional[str]: + """Extract the API name from a spec file path. + + Expects paths like ``code/API_definitions/quality-on-demand.yaml``. + Returns the file stem (without extension) or ``None`` for paths + that are not under ``API_definitions``. + """ + if not file_path: + return None + parts = PurePosixPath(file_path).parts + try: + idx = parts.index("API_definitions") + except ValueError: + return None + if idx + 1 < len(parts): + return PurePosixPath(parts[idx + 1]).stem + return None diff --git a/validation/engines/python_checks/changelog_checks.py b/validation/engines/python_checks/changelog_checks.py new file mode 100644 index 00000000..06c855f1 --- /dev/null +++ b/validation/engines/python_checks/changelog_checks.py @@ -0,0 +1,87 @@ +"""CHANGELOG format checks. + +Validates that CHANGELOG.md (or CHANGELOG/ directory) exists when a +release is targeted, and that it contains version heading entries. +""" + +from __future__ import annotations + +import re +from pathlib import Path +from typing import List + +from validation.context import ValidationContext + +from ._types import make_finding + +# Matches a version heading in CHANGELOG.md. +# Patterns: "## v1.0.0", "## 1.0.0", "## 0.2.0-alpha.1", "## v0.3.0-rc.1" +_VERSION_HEADING_RE = re.compile( + r"^##\s+v?(\d+\.\d+\.\d+(?:-[a-zA-Z0-9.]+)?)", re.MULTILINE +) + + +def check_changelog_format( + repo_path: Path, context: ValidationContext +) -> List[dict]: + """Validate CHANGELOG existence and format. + + Repo-level check. Only runs when the repository targets a release + (``target_release_type`` is not ``None`` and not ``"none"``). + """ + if not context.target_release_type or context.target_release_type == "none": + return [] + + findings: List[dict] = [] + + changelog_file = repo_path / "CHANGELOG.md" + changelog_dir = repo_path / "CHANGELOG" + + has_changelog = changelog_file.is_file() or changelog_dir.is_dir() + if not has_changelog: + findings.append( + make_finding( + engine_rule="check-changelog-format", + level="error", + message=( + "CHANGELOG.md or CHANGELOG/ directory is missing — " + "required when targeting a release" + ), + path="CHANGELOG.md", + line=1, + ) + ) + return findings + + # If CHANGELOG is a directory, check for at least one file inside. + if changelog_dir.is_dir(): + md_files = [f for f in changelog_dir.iterdir() if f.suffix == ".md"] + if not md_files: + findings.append( + make_finding( + engine_rule="check-changelog-format", + level="error", + message="CHANGELOG/ directory exists but contains no .md files", + path="CHANGELOG", + line=1, + ) + ) + return findings + + # CHANGELOG.md exists — check for version headings. + content = changelog_file.read_text(encoding="utf-8") + if not _VERSION_HEADING_RE.search(content): + findings.append( + make_finding( + engine_rule="check-changelog-format", + level="error", + message=( + "CHANGELOG.md has no version heading entries " + "(expected '## x.y.z' or '## vx.y.z')" + ), + path="CHANGELOG.md", + line=1, + ) + ) + + return findings diff --git a/validation/engines/python_checks/filename_checks.py b/validation/engines/python_checks/filename_checks.py new file mode 100644 index 00000000..24d00cff --- /dev/null +++ b/validation/engines/python_checks/filename_checks.py @@ -0,0 +1,79 @@ +"""Filename convention checks. + +Validates that API names (from release-plan.yaml) follow kebab-case +naming and that the corresponding spec files exist on disk. +""" + +from __future__ import annotations + +import re +from pathlib import Path +from typing import List + +from validation.context import ValidationContext + +from ._types import make_finding + +# Kebab-case: lowercase letters/digits, separated by single hyphens. +_KEBAB_CASE_RE = re.compile(r"^[a-z][a-z0-9]*(-[a-z0-9]+)*$") + + +def check_filename_kebab_case( + repo_path: Path, context: ValidationContext +) -> List[dict]: + """Validate that the API name uses kebab-case. + + Since the spec filename is derived from ``api_name`` in + release-plan.yaml (``code/API_definitions/{api_name}.yaml``), + this effectively validates that the release-plan entry follows + the CAMARA naming convention. + """ + api = context.apis[0] + if _KEBAB_CASE_RE.match(api.api_name): + return [] + + return [ + make_finding( + engine_rule="check-filename-kebab-case", + level="error", + message=( + f"API name '{api.api_name}' does not follow kebab-case " + f"convention (expected pattern: lowercase-with-hyphens)" + ), + path=api.spec_file, + line=1, + api_name=api.api_name, + ) + ] + + +def check_filename_matches_api_name( + repo_path: Path, context: ValidationContext +) -> List[dict]: + """Verify the spec file exists at the expected path. + + The expected path is ``code/API_definitions/{api_name}.yaml``, + derived from the ``api_name`` in release-plan.yaml. If the file + does not exist, the api_name likely doesn't match the actual + filename on disk. + """ + api = context.apis[0] + spec_path = repo_path / api.spec_file + + if spec_path.is_file(): + return [] + + return [ + make_finding( + engine_rule="check-filename-matches-api-name", + level="error", + message=( + f"Expected spec file '{api.spec_file}' not found — " + f"check that api_name '{api.api_name}' in release-plan.yaml " + f"matches the actual filename" + ), + path=api.spec_file, + line=1, + api_name=api.api_name, + ) + ] diff --git a/validation/engines/python_checks/metadata_checks.py b/validation/engines/python_checks/metadata_checks.py new file mode 100644 index 00000000..f10f1bc1 --- /dev/null +++ b/validation/engines/python_checks/metadata_checks.py @@ -0,0 +1,119 @@ +"""Metadata consistency checks. + +Validates that ``info.license`` and ``info.x-camara-commonalities`` are +present in all API spec files and consistent across them. +""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any, List, Optional + +from validation.context import ValidationContext + +from ._types import load_yaml_safe, make_finding + + +def _extract_metadata(spec: dict) -> tuple[Optional[Any], Optional[Any]]: + """Extract license and x-camara-commonalities from a spec.""" + info = spec.get("info", {}) + license_val = info.get("license") + commonalities_val = info.get("x-camara-commonalities") + return license_val, commonalities_val + + +def check_license_commonalities_consistency( + repo_path: Path, context: ValidationContext +) -> List[dict]: + """Verify license and x-camara-commonalities are present and consistent. + + Repo-level check. Loads all spec files referenced in ``context.apis``, + checks that each has ``info.license`` and ``info.x-camara-commonalities``, + and verifies the values are identical across all API files. + """ + if not context.apis: + return [] + + findings: List[dict] = [] + first_license: Optional[Any] = None + first_commonalities: Optional[Any] = None + first_api: Optional[str] = None + + for api in context.apis: + spec_path = repo_path / api.spec_file + spec = load_yaml_safe(spec_path) + + if spec is None: + # Missing file — filename check reports this. + continue + + license_val, commonalities_val = _extract_metadata(spec) + + # Check presence. + if license_val is None: + findings.append( + make_finding( + engine_rule="check-license-commonalities-consistency", + level="error", + message=f"info.license is missing in {api.spec_file}", + path=api.spec_file, + line=1, + api_name=api.api_name, + ) + ) + if commonalities_val is None: + findings.append( + make_finding( + engine_rule="check-license-commonalities-consistency", + level="error", + message=( + f"info.x-camara-commonalities is missing in " + f"{api.spec_file}" + ), + path=api.spec_file, + line=1, + api_name=api.api_name, + ) + ) + + # Track first values for consistency check. + if first_api is None: + first_api = api.api_name + first_license = license_val + first_commonalities = commonalities_val + continue + + # Consistency: compare against first API's values. + if license_val is not None and first_license is not None: + if license_val != first_license: + findings.append( + make_finding( + engine_rule="check-license-commonalities-consistency", + level="error", + message=( + f"info.license in {api.spec_file} differs from " + f"{first_api}" + ), + path=api.spec_file, + line=1, + api_name=api.api_name, + ) + ) + + if commonalities_val is not None and first_commonalities is not None: + if commonalities_val != first_commonalities: + findings.append( + make_finding( + engine_rule="check-license-commonalities-consistency", + level="error", + message=( + f"info.x-camara-commonalities in {api.spec_file} " + f"differs from {first_api}" + ), + path=api.spec_file, + line=1, + api_name=api.api_name, + ) + ) + + return findings diff --git a/validation/engines/python_checks/release_plan_checks.py b/validation/engines/python_checks/release_plan_checks.py new file mode 100644 index 00000000..b9d40217 --- /dev/null +++ b/validation/engines/python_checks/release_plan_checks.py @@ -0,0 +1,276 @@ +"""Release-plan.yaml semantic checks. + +Validates semantic rules beyond JSON schema: track/meta-release consistency, +release-type/API-status alignment, and API file existence. + +Logic ported from ``validation/scripts/validate-release-plan.py`` as pure +functions producing findings (not print/exit). The original script is NOT +imported or modified. +""" + +from __future__ import annotations + +from pathlib import Path +from typing import List, Optional + +from validation.context import ValidationContext + +from ._types import load_yaml_safe, make_finding + +# Allowed meta-release values. Update as new meta-releases are added. +ALLOWED_META_RELEASES = ["Fall25", "Spring26", "Fall26", "Sync26", "Signal27"] + +_RELEASE_PLAN_PATH = "release-plan.yaml" + + +# --------------------------------------------------------------------------- +# Semantic check functions (ported from validate-release-plan.py) +# --------------------------------------------------------------------------- + + +def _check_track_consistency( + release_plan: dict, +) -> List[dict]: + """Check release_track and meta_release are consistent.""" + repo = release_plan.get("repository", {}) + release_track = repo.get("release_track") + meta_release = repo.get("meta_release") + + findings: List[dict] = [] + + if release_track == "meta-release" and not meta_release: + findings.append( + make_finding( + engine_rule="check-release-plan-semantics", + level="error", + message=( + "release_track is 'meta-release' but meta_release " + "field is missing" + ), + path=_RELEASE_PLAN_PATH, + line=1, + ) + ) + elif release_track == "independent" and meta_release: + findings.append( + make_finding( + engine_rule="check-release-plan-semantics", + level="warn", + message=( + f"release_track is '{release_track}' but meta_release " + f"field is present" + ), + path=_RELEASE_PLAN_PATH, + line=1, + ) + ) + + if meta_release and meta_release not in ALLOWED_META_RELEASES: + findings.append( + make_finding( + engine_rule="check-release-plan-semantics", + level="error", + message=( + f"meta_release '{meta_release}' is not valid. " + f"Allowed values: {', '.join(ALLOWED_META_RELEASES)}" + ), + path=_RELEASE_PLAN_PATH, + line=1, + ) + ) + + return findings + + +def _check_release_type_consistency( + release_plan: dict, +) -> List[dict]: + """Check API statuses align with target_release_type. + + Rules: + - none: no constraints + - pre-release-alpha: all APIs >= alpha (no draft) + - pre-release-rc: all APIs >= rc (no draft or alpha) + - public-release: all APIs must be public + - maintenance-release: all APIs must be public + """ + repo = release_plan.get("repository", {}) + apis = release_plan.get("apis", []) + release_type = repo.get("target_release_type") + + if not release_type or release_type == "none": + return [] + + findings: List[dict] = [] + + if release_type == "pre-release-alpha": + draft_apis = [ + api.get("api_name", "?") + for api in apis + if api.get("target_api_status") == "draft" + ] + if draft_apis: + findings.append( + make_finding( + engine_rule="check-release-plan-semantics", + level="error", + message=( + f"target_release_type is 'pre-release-alpha' but " + f"these APIs are 'draft': {', '.join(draft_apis)}" + ), + path=_RELEASE_PLAN_PATH, + line=1, + ) + ) + + elif release_type == "pre-release-rc": + invalid_apis = [ + api.get("api_name", "?") + for api in apis + if api.get("target_api_status") in ("draft", "alpha") + ] + if invalid_apis: + findings.append( + make_finding( + engine_rule="check-release-plan-semantics", + level="error", + message=( + f"target_release_type is 'pre-release-rc' but " + f"these APIs are not rc/public: " + f"{', '.join(invalid_apis)}" + ), + path=_RELEASE_PLAN_PATH, + line=1, + ) + ) + + elif release_type in ("public-release", "maintenance-release"): + non_public = [ + api.get("api_name", "?") + for api in apis + if api.get("target_api_status") != "public" + ] + if non_public: + findings.append( + make_finding( + engine_rule="check-release-plan-semantics", + level="error", + message=( + f"target_release_type is '{release_type}' but " + f"these APIs are not 'public': " + f"{', '.join(non_public)}" + ), + path=_RELEASE_PLAN_PATH, + line=1, + ) + ) + + return findings + + +def _check_file_existence( + release_plan: dict, repo_path: Path +) -> List[dict]: + """Check API definition files exist. + + Two-tier severity: + - alpha/rc/public: missing file is ERROR + - draft: missing file with orphan files is WARNING + """ + apis = release_plan.get("apis", []) + api_dir = repo_path / "code" / "API_definitions" + + # Collect declared API names. + all_api_names = { + api.get("api_name") + for api in apis + if api.get("api_name") + } + + # Discover existing files. + existing_stems: set[str] = set() + if api_dir.is_dir(): + existing_stems = { + f.stem for f in api_dir.iterdir() + if f.suffix == ".yaml" and f.is_file() + } + + orphan_files = existing_stems - all_api_names + + findings: List[dict] = [] + + for api in apis: + api_name = api.get("api_name") + status = api.get("target_api_status") + + if not api_name: + continue + + api_file = api_dir / f"{api_name}.yaml" + file_exists = api_file.exists() + + if status in ("alpha", "rc", "public"): + if not file_exists: + findings.append( + make_finding( + engine_rule="check-release-plan-semantics", + level="error", + message=( + f"API definition file not found for '{api_name}' " + f"(status: {status}). Expected: " + f"code/API_definitions/{api_name}.yaml" + ), + path=f"code/API_definitions/{api_name}.yaml", + line=1, + api_name=api_name, + ) + ) + elif status == "draft": + if not file_exists and orphan_files: + orphan_list = ", ".join(sorted(orphan_files)) + findings.append( + make_finding( + engine_rule="check-release-plan-semantics", + level="warn", + message=( + f"No API definition file found for draft API " + f"'{api_name}'. Unmatched files in " + f"code/API_definitions/: {orphan_list}. " + f"Check for possible naming mismatch" + ), + path=_RELEASE_PLAN_PATH, + line=1, + api_name=api_name, + ) + ) + + return findings + + +# --------------------------------------------------------------------------- +# Top-level check function +# --------------------------------------------------------------------------- + + +def check_release_plan_semantics( + repo_path: Path, context: ValidationContext +) -> List[dict]: + """Run all release-plan.yaml semantic checks. + + Repo-level check. Reads release-plan.yaml from the repository root + and performs track consistency, release-type consistency, and file + existence checks. + """ + plan_path = repo_path / _RELEASE_PLAN_PATH + release_plan = load_yaml_safe(plan_path) + + if release_plan is None: + # No release-plan.yaml — nothing to validate. + return [] + + findings: List[dict] = [] + findings.extend(_check_track_consistency(release_plan)) + findings.extend(_check_release_type_consistency(release_plan)) + findings.extend(_check_file_existence(release_plan, repo_path)) + + return findings diff --git a/validation/engines/python_checks/release_review_checks.py b/validation/engines/python_checks/release_review_checks.py new file mode 100644 index 00000000..8b2ff687 --- /dev/null +++ b/validation/engines/python_checks/release_review_checks.py @@ -0,0 +1,97 @@ +"""Release review PR checks. + +Validates that release review PRs (targeting release-snapshot branches) +only modify allowed files (CHANGELOG.md, CHANGELOG/, README.md). +""" + +from __future__ import annotations + +import logging +import subprocess +from pathlib import Path +from typing import List + +from validation.context import ValidationContext + +from ._types import make_finding + +logger = logging.getLogger(__name__) + +# Files and directories allowed to change on release review PRs. +_ALLOWED_PATHS = frozenset({"CHANGELOG.md", "README.md"}) +_ALLOWED_PREFIXES = ("CHANGELOG/",) + + +def _get_changed_files(repo_path: Path) -> List[str]: + """Get files changed in the current PR via git diff. + + Compares HEAD against the merge-base with the target branch. + Falls back to diffing HEAD~1 if git operations fail. + """ + try: + # In a PR context, the diff against origin/base shows changed files. + # Use --diff-filter=ACMR to only show added/copied/modified/renamed. + result = subprocess.run( + ["git", "diff", "--name-only", "--diff-filter=ACMR", "HEAD~1"], + capture_output=True, + text=True, + cwd=str(repo_path), + timeout=30, + ) + if result.returncode == 0: + return [ + f.strip() for f in result.stdout.strip().split("\n") + if f.strip() + ] + except (FileNotFoundError, subprocess.TimeoutExpired, OSError): + pass + + return [] + + +def _is_allowed(file_path: str) -> bool: + """Check if a file path is in the allowed set for release review PRs.""" + if file_path in _ALLOWED_PATHS: + return True + for prefix in _ALLOWED_PREFIXES: + if file_path.startswith(prefix): + return True + return False + + +def check_release_review_file_restriction( + repo_path: Path, context: ValidationContext +) -> List[dict]: + """Verify release review PRs only modify allowed files. + + Only runs when ``context.is_release_review_pr`` is ``True``. + Returns empty list otherwise. + + Allowed files: CHANGELOG.md, CHANGELOG/*, README.md. + All other files on the snapshot branch are immutable. + """ + if not context.is_release_review_pr: + return [] + + changed_files = _get_changed_files(repo_path) + if not changed_files: + return [] + + findings: List[dict] = [] + for file_path in changed_files: + if not _is_allowed(file_path): + findings.append( + make_finding( + engine_rule="check-release-review-file-restriction", + level="error", + message=( + f"File '{file_path}' must not be modified on a " + f"release review PR — only CHANGELOG.md, CHANGELOG/, " + f"and README.md are allowed" + ), + path=file_path, + line=1, + ) + ) + + return findings diff --git a/validation/engines/python_checks/test_checks.py b/validation/engines/python_checks/test_checks.py new file mode 100644 index 00000000..2ee106fb --- /dev/null +++ b/validation/engines/python_checks/test_checks.py @@ -0,0 +1,185 @@ +"""Test file checks. + +Validates that test files exist for each API, are located in +``code/Test_definitions/``, and have version-aligned filenames. +""" + +from __future__ import annotations + +from pathlib import Path +from typing import List + +from validation.context import ValidationContext + +from ._types import make_finding +from .version_checks import build_version_segment + +_TEST_DIR = "code/Test_definitions" + + +def _stem_matches_api(stem: str, api_name: str) -> bool: + """Check if a test file stem matches an API name. + + Matches: + - Exact: ``{api-name}`` + - With version: ``{api-name}.{version}`` + - With suffix: ``{api-name}-{suffix}`` + - With suffix + version: ``{api-name}-{suffix}.{version}`` + """ + if stem == api_name: + return True + if stem.startswith(f"{api_name}."): + return True + if stem.startswith(f"{api_name}-"): + return True + return False + + +def check_test_directory_exists( + repo_path: Path, context: ValidationContext +) -> List[dict]: + """Verify ``code/Test_definitions/`` exists when APIs are present. + + Repo-level check — runs once, not per-API. + """ + if not context.apis: + return [] + + test_dir = repo_path / _TEST_DIR + if test_dir.is_dir(): + return [] + + return [ + make_finding( + engine_rule="check-test-directory-exists", + level="error", + message=( + f"Directory '{_TEST_DIR}/' is missing — " + f"test definitions are required when API specs are present" + ), + path=_TEST_DIR, + line=1, + ) + ] + + +def check_test_files_exist( + repo_path: Path, context: ValidationContext +) -> List[dict]: + """Verify at least one ``.feature`` file exists for the API. + + Per-API check. Looks for files matching the api-name prefix in + ``code/Test_definitions/``. + """ + api = context.apis[0] + test_dir = repo_path / _TEST_DIR + + if not test_dir.is_dir(): + # Directory-level check reports this; avoid duplicate findings. + return [] + + # Match files starting with the api-name. + # Patterns: {api-name}.feature, {api-name}.{version}.feature, + # {api-name}-{suffix}.feature, {api-name}-{suffix}.{version}.feature + matching = [ + f for f in test_dir.iterdir() + if f.is_file() + and f.suffix == ".feature" + and _stem_matches_api(f.stem, api.api_name) + ] + + if matching: + return [] + + return [ + make_finding( + engine_rule="check-test-files-exist", + level="error", + message=( + f"No .feature test file found for API '{api.api_name}' " + f"in {_TEST_DIR}/" + ), + path=_TEST_DIR, + line=1, + api_name=api.api_name, + ) + ] + + +def check_test_file_version( + repo_path: Path, context: ValidationContext +) -> List[dict]: + """Validate test file version suffix matches API version. + + Per-API check. Uses CAMARA version-to-URL mapping rules to derive + the expected version suffix. Test files should be named like: + ``{api-name}.{version-suffix}.feature`` or + ``{api-name}-{operationId}.{version-suffix}.feature``. + + Example: ``quality-on-demand.v0.2alpha2.feature`` + """ + api = context.apis[0] + test_dir = repo_path / _TEST_DIR + + if not test_dir.is_dir(): + return [] + + expected_segment = build_version_segment(api.target_api_version) + if expected_segment is None: + return [] + + # Find all .feature files matching this API. + matching = [ + f for f in test_dir.iterdir() + if f.is_file() + and f.suffix == ".feature" + and (f.stem == api.api_name or f.stem.startswith(f"{api.api_name}-") + or f.stem.startswith(f"{api.api_name}.")) + ] + + if not matching: + # No test files found — check_test_files_exist reports this. + return [] + + findings: List[dict] = [] + for test_file in matching: + # Extract version suffix: everything after the first dot in the stem. + # e.g. "quality-on-demand.v1" -> "v1" + # e.g. "quality-on-demand-createSession.v0.3" -> "v0.3" + stem = test_file.stem + dot_idx = stem.find(".") + if dot_idx == -1: + # No version suffix in filename — report as finding. + findings.append( + make_finding( + engine_rule="check-test-file-version", + level="error", + message=( + f"Test file '{test_file.name}' has no version suffix " + f"(expected '.{expected_segment}' before .feature)" + ), + path=f"{_TEST_DIR}/{test_file.name}", + line=1, + api_name=api.api_name, + ) + ) + continue + + actual_suffix = stem[dot_idx + 1:] + if actual_suffix.lower() != expected_segment.lower(): + findings.append( + make_finding( + engine_rule="check-test-file-version", + level="error", + message=( + f"Test file '{test_file.name}' has version suffix " + f"'{actual_suffix}' but expected '{expected_segment}' " + f"(from API version '{api.target_api_version}')" + ), + path=f"{_TEST_DIR}/{test_file.name}", + line=1, + api_name=api.api_name, + ) + ) + + return findings diff --git a/validation/engines/python_checks/version_checks.py b/validation/engines/python_checks/version_checks.py new file mode 100644 index 00000000..1a5f7374 --- /dev/null +++ b/validation/engines/python_checks/version_checks.py @@ -0,0 +1,282 @@ +"""Version-related checks. + +Validates info.version format (wip vs semver by branch type), server URL +version segment construction, and server URL api-name alignment. +""" + +from __future__ import annotations + +import re +from pathlib import Path +from typing import List, Optional + +from validation.context import ValidationContext + +from ._types import load_yaml_safe, make_finding + +# Matches a semantic version (optionally with pre-release label). +# Examples: "1.0.0", "0.2.0-alpha.2", "1.0.0-rc.1" +_SEMVER_RE = re.compile( + r"^(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)" + r"(?:-(?P
[a-zA-Z0-9]+(?:\.[a-zA-Z0-9]+)*))?$"
+)
+
+# Extracts the version segment from a server URL path.
+# Matches the last path component starting with "v".
+# e.g. "https://example.com/qod/v1" -> "v1"
+#      "{apiRoot}/quality-on-demand/v0.2alpha2" -> "v0.2alpha2"
+_URL_VERSION_RE = re.compile(r"/(?Pv[a-z0-9.]+)/?$", re.IGNORECASE)
+
+# Extracts the api-name segment from a server URL (segment before version).
+# e.g. "{apiRoot}/quality-on-demand/v1" -> "quality-on-demand"
+_URL_API_NAME_RE = re.compile(r"/(?P[^/]+)/v[a-z0-9.]+/?$", re.IGNORECASE)
+
+
+# ---------------------------------------------------------------------------
+# Version segment builder
+# ---------------------------------------------------------------------------
+
+
+def build_version_segment(info_version: str) -> Optional[str]:
+    """Build the CAMARA URL version segment from info.version.
+
+    Mapping rules:
+      - ``"wip"`` -> ``"vwip"``
+      - ``"1.0.0"`` -> ``"v1"`` (stable: major only)
+      - ``"0.1.0"`` -> ``"v0.1"`` (initial: major.minor)
+      - ``"0.2.0-alpha.2"`` -> ``"v0.2alpha2"`` (pre-release appended,
+        dots/hyphens stripped)
+      - ``"1.0.0-rc.1"`` -> ``"v1rc1"``
+
+    Returns:
+        The version segment string, or ``None`` if *info_version* is
+        not a recognised format.
+    """
+    if info_version == "wip":
+        return "vwip"
+
+    m = _SEMVER_RE.match(info_version)
+    if not m:
+        return None
+
+    major = int(m.group("major"))
+    minor = int(m.group("minor"))
+    pre = m.group("pre") or ""
+
+    # Strip dots and hyphens from pre-release label.
+    pre_clean = re.sub(r"[.\-]", "", pre)
+
+    if major >= 1:
+        # Stable: v{major}
+        base = f"v{major}"
+    else:
+        # Initial: v{major}.{minor}
+        base = f"v{major}.{minor}"
+
+    return f"{base}{pre_clean}"
+
+
+# ---------------------------------------------------------------------------
+# Checks
+# ---------------------------------------------------------------------------
+
+
+def check_info_version_format(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Validate info.version format based on branch type.
+
+    On main: must be ``"wip"``.
+    On release/maintenance: must be a valid semantic version (not wip).
+    On feature branches: no constraint (skip).
+    """
+    api = context.apis[0]
+    spec_path = repo_path / api.spec_file
+
+    spec = load_yaml_safe(spec_path)
+    if spec is None:
+        # File missing or unparseable — filename check reports this.
+        return []
+
+    info_version = spec.get("info", {}).get("version")
+    if info_version is None:
+        return [
+            make_finding(
+                engine_rule="check-info-version-format",
+                level="error",
+                message="info.version is missing",
+                path=api.spec_file,
+                line=1,
+                api_name=api.api_name,
+            )
+        ]
+
+    info_version = str(info_version).strip()
+
+    if context.branch_type == "main":
+        if info_version != "wip":
+            return [
+                make_finding(
+                    engine_rule="check-info-version-format",
+                    level="error",
+                    message=(
+                        f"info.version must be 'wip' on main branch, "
+                        f"found '{info_version}'"
+                    ),
+                    path=api.spec_file,
+                    line=1,
+                    api_name=api.api_name,
+                )
+            ]
+    elif context.branch_type in ("release", "maintenance"):
+        if info_version == "wip":
+            return [
+                make_finding(
+                    engine_rule="check-info-version-format",
+                    level="error",
+                    message=(
+                        "info.version must not be 'wip' on "
+                        f"{context.branch_type} branch"
+                    ),
+                    path=api.spec_file,
+                    line=1,
+                    api_name=api.api_name,
+                )
+            ]
+        if not _SEMVER_RE.match(info_version):
+            return [
+                make_finding(
+                    engine_rule="check-info-version-format",
+                    level="error",
+                    message=(
+                        f"info.version '{info_version}' is not a valid "
+                        f"semantic version on {context.branch_type} branch"
+                    ),
+                    path=api.spec_file,
+                    line=1,
+                    api_name=api.api_name,
+                )
+            ]
+
+    # Feature branches: no constraint.
+    return []
+
+
+def check_server_url_version(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Validate server URL version segment matches info.version.
+
+    Extracts the version path segment from each server URL and compares
+    it against the expected segment derived from info.version using the
+    CAMARA version mapping rules.
+    """
+    api = context.apis[0]
+    spec_path = repo_path / api.spec_file
+
+    spec = load_yaml_safe(spec_path)
+    if spec is None:
+        return []
+
+    info_version = str(spec.get("info", {}).get("version", "")).strip()
+    if not info_version:
+        return []  # Missing version is caught by check_info_version_format.
+
+    expected_segment = build_version_segment(info_version)
+    if expected_segment is None:
+        return []  # Unrecognised format is caught by check_info_version_format.
+
+    servers = spec.get("servers", [])
+    if not isinstance(servers, list):
+        return []
+
+    findings: List[dict] = []
+    for i, server in enumerate(servers):
+        url = server.get("url", "") if isinstance(server, dict) else ""
+        m = _URL_VERSION_RE.search(url)
+        if m is None:
+            findings.append(
+                make_finding(
+                    engine_rule="check-server-url-version",
+                    level="error",
+                    message=(
+                        f"Server URL '{url}' has no recognisable version "
+                        f"segment (expected '.../{expected_segment}')"
+                    ),
+                    path=api.spec_file,
+                    line=1,
+                    api_name=api.api_name,
+                )
+            )
+            continue
+
+        actual_segment = m.group("version").lower()
+        if actual_segment != expected_segment.lower():
+            findings.append(
+                make_finding(
+                    engine_rule="check-server-url-version",
+                    level="error",
+                    message=(
+                        f"Server URL version segment '{actual_segment}' does "
+                        f"not match expected '{expected_segment}' "
+                        f"(derived from info.version '{info_version}')"
+                    ),
+                    path=api.spec_file,
+                    line=1,
+                    api_name=api.api_name,
+                )
+            )
+
+    return findings
+
+
+def check_server_url_api_name(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Validate server URL api-name segment matches context api_name.
+
+    The api-name in the server URL (path segment before the version)
+    must match the ``api_name`` from release-plan.yaml.
+    """
+    api = context.apis[0]
+    spec_path = repo_path / api.spec_file
+
+    spec = load_yaml_safe(spec_path)
+    if spec is None:
+        return []
+
+    servers = spec.get("servers", [])
+    if not isinstance(servers, list):
+        return []
+
+    findings: List[dict] = []
+    for server in servers:
+        url = server.get("url", "") if isinstance(server, dict) else ""
+        m = _URL_API_NAME_RE.search(url)
+        if m is None:
+            # No api-name segment found — version check already flags bad URLs.
+            continue
+
+        url_api_name = m.group("api_name")
+
+        # Skip hostname-like segments (contain dots) — not an api-name.
+        if "." in url_api_name:
+            continue
+
+        if url_api_name != api.api_name:
+            findings.append(
+                make_finding(
+                    engine_rule="check-server-url-api-name",
+                    level="error",
+                    message=(
+                        f"Server URL api-name segment '{url_api_name}' does "
+                        f"not match api_name '{api.api_name}' from "
+                        f"release-plan.yaml"
+                    ),
+                    path=api.spec_file,
+                    line=1,
+                    api_name=api.api_name,
+                )
+            )
+
+    return findings
diff --git a/validation/tests/test_python_adapter.py b/validation/tests/test_python_adapter.py
new file mode 100644
index 00000000..ee15d24f
--- /dev/null
+++ b/validation/tests/test_python_adapter.py
@@ -0,0 +1,220 @@
+"""Unit tests for validation.engines.python_adapter."""
+
+from __future__ import annotations
+
+from pathlib import Path
+from unittest.mock import patch
+
+import pytest
+
+from validation.context import ApiContext, ValidationContext
+from validation.engines.python_adapter import (
+    ENGINE_NAME,
+    _make_error_finding,
+    run_python_engine,
+)
+from validation.engines.python_checks import CheckDescriptor, CheckScope
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(
+    apis: tuple[ApiContext, ...] = (),
+    branch_type: str = "main",
+) -> ValidationContext:
+    """Build a minimal ValidationContext for testing."""
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type=branch_type,
+        trigger_type="dispatch",
+        profile="advisory",
+        stage="standard",
+        target_release_type=None,
+        commonalities_release=None,
+        icm_release=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=apis,
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+def _make_api(name: str = "quality-on-demand") -> ApiContext:
+    return ApiContext(
+        api_name=name,
+        target_api_version="1.0.0",
+        target_api_status="public",
+        target_api_maturity="stable",
+        api_pattern="request-response",
+        spec_file=f"code/API_definitions/{name}.yaml",
+    )
+
+
+def _good_repo_check(repo_path: Path, context: ValidationContext) -> list[dict]:
+    """A check that always produces one finding."""
+    return [
+        {
+            "engine": "python",
+            "engine_rule": "good-repo-check",
+            "level": "hint",
+            "message": "repo-level finding",
+            "path": "",
+            "line": 1,
+            "api_name": None,
+        }
+    ]
+
+
+def _good_api_check(repo_path: Path, context: ValidationContext) -> list[dict]:
+    """A per-API check that produces one finding per API."""
+    api = context.apis[0]
+    return [
+        {
+            "engine": "python",
+            "engine_rule": "good-api-check",
+            "level": "warn",
+            "message": f"finding for {api.api_name}",
+            "path": api.spec_file,
+            "line": 1,
+            "api_name": api.api_name,
+        }
+    ]
+
+
+def _empty_check(repo_path: Path, context: ValidationContext) -> list[dict]:
+    """A check that produces no findings."""
+    return []
+
+
+def _crashing_check(repo_path: Path, context: ValidationContext) -> list[dict]:
+    """A check that raises."""
+    raise RuntimeError("something went wrong")
+
+
+# ---------------------------------------------------------------------------
+# TestMakeErrorFinding
+# ---------------------------------------------------------------------------
+
+
+class TestMakeErrorFinding:
+    def test_default_rule(self):
+        f = _make_error_finding("oops")
+        assert f["engine"] == ENGINE_NAME
+        assert f["engine_rule"] == "python-execution-error"
+        assert f["level"] == "error"
+        assert f["message"] == "oops"
+        assert f["path"] == ""
+        assert f["line"] == 1
+        assert f["api_name"] is None
+
+    def test_custom_check_name(self):
+        f = _make_error_finding("oops", check_name="my-check")
+        assert f["engine_rule"] == "my-check"
+
+
+# ---------------------------------------------------------------------------
+# TestRunPythonEngine
+# ---------------------------------------------------------------------------
+
+
+class TestRunPythonEngine:
+    def test_empty_registry(self, tmp_path: Path):
+        """No checks registered -> empty findings."""
+        with patch("validation.engines.python_adapter.CHECKS", []):
+            result = run_python_engine(tmp_path, _make_context())
+        assert result == []
+
+    def test_repo_scope_called_once(self, tmp_path: Path):
+        """REPO-scope check is called exactly once."""
+        checks = [CheckDescriptor("repo-check", CheckScope.REPO, _good_repo_check)]
+        with patch("validation.engines.python_adapter.CHECKS", checks):
+            result = run_python_engine(tmp_path, _make_context())
+        assert len(result) == 1
+        assert result[0]["engine_rule"] == "good-repo-check"
+
+    def test_api_scope_called_per_api(self, tmp_path: Path):
+        """API-scope check is called once per API in context.apis."""
+        api_a = _make_api("api-a")
+        api_b = _make_api("api-b")
+        ctx = _make_context(apis=(api_a, api_b))
+        checks = [CheckDescriptor("api-check", CheckScope.API, _good_api_check)]
+        with patch("validation.engines.python_adapter.CHECKS", checks):
+            result = run_python_engine(tmp_path, ctx)
+        assert len(result) == 2
+        assert result[0]["message"] == "finding for api-a"
+        assert result[1]["message"] == "finding for api-b"
+
+    def test_api_scope_no_apis(self, tmp_path: Path):
+        """API-scope check produces nothing when context.apis is empty."""
+        ctx = _make_context(apis=())
+        checks = [CheckDescriptor("api-check", CheckScope.API, _good_api_check)]
+        with patch("validation.engines.python_adapter.CHECKS", checks):
+            result = run_python_engine(tmp_path, ctx)
+        assert result == []
+
+    def test_api_scope_receives_single_api(self, tmp_path: Path):
+        """Each API-scope call receives a context with exactly one API."""
+        received_apis: list[tuple[ApiContext, ...]] = []
+
+        def spy_check(repo_path: Path, ctx: ValidationContext) -> list[dict]:
+            received_apis.append(ctx.apis)
+            return []
+
+        api_a = _make_api("api-a")
+        api_b = _make_api("api-b")
+        ctx = _make_context(apis=(api_a, api_b))
+        checks = [CheckDescriptor("spy-check", CheckScope.API, spy_check)]
+        with patch("validation.engines.python_adapter.CHECKS", checks):
+            run_python_engine(tmp_path, ctx)
+
+        assert len(received_apis) == 2
+        assert received_apis[0] == (api_a,)
+        assert received_apis[1] == (api_b,)
+
+    def test_error_isolation(self, tmp_path: Path):
+        """A crashing check produces an error finding; other checks still run."""
+        checks = [
+            CheckDescriptor("crash", CheckScope.REPO, _crashing_check),
+            CheckDescriptor("ok", CheckScope.REPO, _good_repo_check),
+        ]
+        with patch("validation.engines.python_adapter.CHECKS", checks):
+            result = run_python_engine(tmp_path, _make_context())
+
+        assert len(result) == 2
+        # First: error finding from the crash
+        assert result[0]["engine_rule"] == "crash"
+        assert result[0]["level"] == "error"
+        assert "something went wrong" in result[0]["message"]
+        # Second: normal finding from the good check
+        assert result[1]["engine_rule"] == "good-repo-check"
+
+    def test_empty_check_contributes_nothing(self, tmp_path: Path):
+        """A check returning [] adds no findings."""
+        checks = [
+            CheckDescriptor("empty", CheckScope.REPO, _empty_check),
+            CheckDescriptor("ok", CheckScope.REPO, _good_repo_check),
+        ]
+        with patch("validation.engines.python_adapter.CHECKS", checks):
+            result = run_python_engine(tmp_path, _make_context())
+        assert len(result) == 1
+        assert result[0]["engine_rule"] == "good-repo-check"
+
+    def test_mixed_scopes(self, tmp_path: Path):
+        """REPO and API checks can coexist in the registry."""
+        api = _make_api("my-api")
+        ctx = _make_context(apis=(api,))
+        checks = [
+            CheckDescriptor("repo", CheckScope.REPO, _good_repo_check),
+            CheckDescriptor("api", CheckScope.API, _good_api_check),
+        ]
+        with patch("validation.engines.python_adapter.CHECKS", checks):
+            result = run_python_engine(tmp_path, ctx)
+
+        assert len(result) == 2
+        assert result[0]["engine_rule"] == "good-repo-check"
+        assert result[1]["engine_rule"] == "good-api-check"
diff --git a/validation/tests/test_python_checks_changelog.py b/validation/tests/test_python_checks_changelog.py
new file mode 100644
index 00000000..55dffa4b
--- /dev/null
+++ b/validation/tests/test_python_checks_changelog.py
@@ -0,0 +1,124 @@
+"""Unit tests for validation.engines.python_checks.changelog_checks."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+
+from validation.context import ApiContext, ValidationContext
+from validation.engines.python_checks.changelog_checks import (
+    check_changelog_format,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(
+    target_release_type: str | None = "public-release",
+) -> ValidationContext:
+    api = ApiContext(
+        api_name="qod",
+        target_api_version="1.0.0",
+        target_api_status="public",
+        target_api_maturity="stable",
+        api_pattern="request-response",
+        spec_file="code/API_definitions/qod.yaml",
+    )
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type="main",
+        trigger_type="dispatch",
+        profile="advisory",
+        stage="standard",
+        target_release_type=target_release_type,
+        commonalities_release=None,
+        icm_release=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=(api,),
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+# ---------------------------------------------------------------------------
+# Tests
+# ---------------------------------------------------------------------------
+
+
+class TestCheckChangelogFormat:
+    def test_no_release_type_skip(self, tmp_path: Path):
+        ctx = _make_context(target_release_type=None)
+        assert check_changelog_format(tmp_path, ctx) == []
+
+    def test_none_release_type_skip(self, tmp_path: Path):
+        ctx = _make_context(target_release_type="none")
+        assert check_changelog_format(tmp_path, ctx) == []
+
+    def test_missing_changelog(self, tmp_path: Path):
+        ctx = _make_context()
+        findings = check_changelog_format(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert "missing" in findings[0]["message"]
+
+    def test_changelog_file_with_version(self, tmp_path: Path):
+        (tmp_path / "CHANGELOG.md").write_text(
+            "# Changelog\n\n## 1.0.0\n\n- Initial release\n"
+        )
+        ctx = _make_context()
+        assert check_changelog_format(tmp_path, ctx) == []
+
+    def test_changelog_file_with_v_prefix(self, tmp_path: Path):
+        (tmp_path / "CHANGELOG.md").write_text(
+            "# Changelog\n\n## v1.0.0\n\n- Initial release\n"
+        )
+        ctx = _make_context()
+        assert check_changelog_format(tmp_path, ctx) == []
+
+    def test_changelog_file_with_pre_release(self, tmp_path: Path):
+        (tmp_path / "CHANGELOG.md").write_text(
+            "# Changelog\n\n## 0.2.0-alpha.1\n\n- Alpha release\n"
+        )
+        ctx = _make_context()
+        assert check_changelog_format(tmp_path, ctx) == []
+
+    def test_changelog_file_no_version_heading(self, tmp_path: Path):
+        (tmp_path / "CHANGELOG.md").write_text(
+            "# Changelog\n\nSome text without version headings.\n"
+        )
+        ctx = _make_context()
+        findings = check_changelog_format(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "no version heading" in findings[0]["message"]
+
+    def test_changelog_directory_with_files(self, tmp_path: Path):
+        changelog_dir = tmp_path / "CHANGELOG"
+        changelog_dir.mkdir()
+        (changelog_dir / "r1.0.md").write_text("## 1.0.0\n")
+        ctx = _make_context()
+        assert check_changelog_format(tmp_path, ctx) == []
+
+    def test_changelog_directory_empty(self, tmp_path: Path):
+        changelog_dir = tmp_path / "CHANGELOG"
+        changelog_dir.mkdir()
+        ctx = _make_context()
+        findings = check_changelog_format(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "no .md files" in findings[0]["message"]
+
+    def test_changelog_directory_preferred_over_file(self, tmp_path: Path):
+        """Both exist — directory takes precedence (checked first)."""
+        (tmp_path / "CHANGELOG.md").write_text("no version heading")
+        changelog_dir = tmp_path / "CHANGELOG"
+        changelog_dir.mkdir()
+        (changelog_dir / "r1.0.md").write_text("## 1.0.0\n")
+        ctx = _make_context()
+        # Both exist, has_changelog is True. Since we check file first in
+        # the code and directory is also checked, let's verify behavior.
+        assert check_changelog_format(tmp_path, ctx) == []
diff --git a/validation/tests/test_python_checks_filename.py b/validation/tests/test_python_checks_filename.py
new file mode 100644
index 00000000..8e2a7634
--- /dev/null
+++ b/validation/tests/test_python_checks_filename.py
@@ -0,0 +1,132 @@
+"""Unit tests for validation.engines.python_checks.filename_checks."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+
+from validation.context import ApiContext, ValidationContext
+from validation.engines.python_checks.filename_checks import (
+    check_filename_kebab_case,
+    check_filename_matches_api_name,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(api_name: str) -> ValidationContext:
+    api = ApiContext(
+        api_name=api_name,
+        target_api_version="1.0.0",
+        target_api_status="public",
+        target_api_maturity="stable",
+        api_pattern="request-response",
+        spec_file=f"code/API_definitions/{api_name}.yaml",
+    )
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type="main",
+        trigger_type="dispatch",
+        profile="advisory",
+        stage="standard",
+        target_release_type=None,
+        commonalities_release=None,
+        icm_release=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=(api,),
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+# ---------------------------------------------------------------------------
+# TestCheckFilenameKebabCase
+# ---------------------------------------------------------------------------
+
+
+class TestCheckFilenameKebabCase:
+    def test_valid_kebab_case(self, tmp_path: Path):
+        ctx = _make_context("quality-on-demand")
+        assert check_filename_kebab_case(tmp_path, ctx) == []
+
+    def test_single_word(self, tmp_path: Path):
+        ctx = _make_context("location")
+        assert check_filename_kebab_case(tmp_path, ctx) == []
+
+    def test_with_numbers(self, tmp_path: Path):
+        ctx = _make_context("sim-swap-2g")
+        assert check_filename_kebab_case(tmp_path, ctx) == []
+
+    def test_camel_case_rejected(self, tmp_path: Path):
+        ctx = _make_context("qualityOnDemand")
+        findings = check_filename_kebab_case(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert findings[0]["engine_rule"] == "check-filename-kebab-case"
+        assert "kebab-case" in findings[0]["message"]
+        assert findings[0]["api_name"] == "qualityOnDemand"
+
+    def test_underscore_rejected(self, tmp_path: Path):
+        ctx = _make_context("quality_on_demand")
+        findings = check_filename_kebab_case(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+
+    def test_uppercase_rejected(self, tmp_path: Path):
+        ctx = _make_context("QualityOnDemand")
+        findings = check_filename_kebab_case(tmp_path, ctx)
+        assert len(findings) == 1
+
+    def test_starts_with_number_rejected(self, tmp_path: Path):
+        ctx = _make_context("2g-sim-swap")
+        findings = check_filename_kebab_case(tmp_path, ctx)
+        assert len(findings) == 1
+
+    def test_trailing_hyphen_rejected(self, tmp_path: Path):
+        ctx = _make_context("quality-")
+        findings = check_filename_kebab_case(tmp_path, ctx)
+        assert len(findings) == 1
+
+    def test_double_hyphen_rejected(self, tmp_path: Path):
+        ctx = _make_context("quality--on-demand")
+        findings = check_filename_kebab_case(tmp_path, ctx)
+        assert len(findings) == 1
+
+
+# ---------------------------------------------------------------------------
+# TestCheckFilenameMatchesApiName
+# ---------------------------------------------------------------------------
+
+
+class TestCheckFilenameMatchesApiName:
+    def test_file_exists(self, tmp_path: Path):
+        ctx = _make_context("quality-on-demand")
+        api_dir = tmp_path / "code" / "API_definitions"
+        api_dir.mkdir(parents=True)
+        (api_dir / "quality-on-demand.yaml").write_text("openapi: 3.0.0")
+        assert check_filename_matches_api_name(tmp_path, ctx) == []
+
+    def test_file_missing(self, tmp_path: Path):
+        ctx = _make_context("quality-on-demand")
+        findings = check_filename_matches_api_name(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert findings[0]["engine_rule"] == "check-filename-matches-api-name"
+        assert "not found" in findings[0]["message"]
+        assert "quality-on-demand" in findings[0]["message"]
+
+    def test_wrong_name_on_disk(self, tmp_path: Path):
+        """release-plan says 'qos-booking' but file is 'qos_booking.yaml'."""
+        ctx = _make_context("qos-booking")
+        api_dir = tmp_path / "code" / "API_definitions"
+        api_dir.mkdir(parents=True)
+        (api_dir / "qos_booking.yaml").write_text("openapi: 3.0.0")
+        findings = check_filename_matches_api_name(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
diff --git a/validation/tests/test_python_checks_metadata.py b/validation/tests/test_python_checks_metadata.py
new file mode 100644
index 00000000..61d203b9
--- /dev/null
+++ b/validation/tests/test_python_checks_metadata.py
@@ -0,0 +1,139 @@
+"""Unit tests for validation.engines.python_checks.metadata_checks."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+import yaml
+
+from validation.context import ApiContext, ValidationContext
+from validation.engines.python_checks.metadata_checks import (
+    check_license_commonalities_consistency,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_api(name: str) -> ApiContext:
+    return ApiContext(
+        api_name=name,
+        target_api_version="1.0.0",
+        target_api_status="public",
+        target_api_maturity="stable",
+        api_pattern="request-response",
+        spec_file=f"code/API_definitions/{name}.yaml",
+    )
+
+
+def _make_context(api_names: list[str]) -> ValidationContext:
+    apis = tuple(_make_api(n) for n in api_names)
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type="main",
+        trigger_type="dispatch",
+        profile="advisory",
+        stage="standard",
+        target_release_type=None,
+        commonalities_release=None,
+        icm_release=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=apis,
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+def _write_spec(
+    tmp_path: Path,
+    api_name: str,
+    license_val: dict | None = None,
+    commonalities_val: str | None = None,
+) -> None:
+    spec: dict = {
+        "openapi": "3.0.3",
+        "info": {"title": api_name, "version": "1.0.0"},
+    }
+    if license_val is not None:
+        spec["info"]["license"] = license_val
+    if commonalities_val is not None:
+        spec["info"]["x-camara-commonalities"] = commonalities_val
+    api_dir = tmp_path / "code" / "API_definitions"
+    api_dir.mkdir(parents=True, exist_ok=True)
+    (api_dir / f"{api_name}.yaml").write_text(
+        yaml.dump(spec, default_flow_style=False)
+    )
+
+
+LICENSE_A = {"name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0"}
+LICENSE_B = {"name": "MIT", "url": "https://opensource.org/licenses/MIT"}
+
+
+# ---------------------------------------------------------------------------
+# Tests
+# ---------------------------------------------------------------------------
+
+
+class TestCheckLicenseCommonalitiesConsistency:
+    def test_no_apis(self, tmp_path: Path):
+        ctx = _make_context([])
+        assert check_license_commonalities_consistency(tmp_path, ctx) == []
+
+    def test_single_api_all_present(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", license_val=LICENSE_A, commonalities_val="r4.1")
+        ctx = _make_context(["qod"])
+        assert check_license_commonalities_consistency(tmp_path, ctx) == []
+
+    def test_single_api_missing_license(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", commonalities_val="r4.1")
+        ctx = _make_context(["qod"])
+        findings = check_license_commonalities_consistency(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "license" in findings[0]["message"]
+
+    def test_single_api_missing_commonalities(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", license_val=LICENSE_A)
+        ctx = _make_context(["qod"])
+        findings = check_license_commonalities_consistency(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "x-camara-commonalities" in findings[0]["message"]
+
+    def test_single_api_both_missing(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod")
+        ctx = _make_context(["qod"])
+        findings = check_license_commonalities_consistency(tmp_path, ctx)
+        assert len(findings) == 2
+
+    def test_two_apis_consistent(self, tmp_path: Path):
+        _write_spec(tmp_path, "api-a", license_val=LICENSE_A, commonalities_val="r4.1")
+        _write_spec(tmp_path, "api-b", license_val=LICENSE_A, commonalities_val="r4.1")
+        ctx = _make_context(["api-a", "api-b"])
+        assert check_license_commonalities_consistency(tmp_path, ctx) == []
+
+    def test_two_apis_license_mismatch(self, tmp_path: Path):
+        _write_spec(tmp_path, "api-a", license_val=LICENSE_A, commonalities_val="r4.1")
+        _write_spec(tmp_path, "api-b", license_val=LICENSE_B, commonalities_val="r4.1")
+        ctx = _make_context(["api-a", "api-b"])
+        findings = check_license_commonalities_consistency(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "license" in findings[0]["message"]
+        assert "differs" in findings[0]["message"]
+
+    def test_two_apis_commonalities_mismatch(self, tmp_path: Path):
+        _write_spec(tmp_path, "api-a", license_val=LICENSE_A, commonalities_val="r4.1")
+        _write_spec(tmp_path, "api-b", license_val=LICENSE_A, commonalities_val="r3.4")
+        ctx = _make_context(["api-a", "api-b"])
+        findings = check_license_commonalities_consistency(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "x-camara-commonalities" in findings[0]["message"]
+        assert "differs" in findings[0]["message"]
+
+    def test_missing_spec_file_skipped(self, tmp_path: Path):
+        """Missing spec file is silently skipped (filename check reports)."""
+        ctx = _make_context(["qod"])
+        assert check_license_commonalities_consistency(tmp_path, ctx) == []
diff --git a/validation/tests/test_python_checks_release_plan.py b/validation/tests/test_python_checks_release_plan.py
new file mode 100644
index 00000000..a7055bf8
--- /dev/null
+++ b/validation/tests/test_python_checks_release_plan.py
@@ -0,0 +1,254 @@
+"""Unit tests for validation.engines.python_checks.release_plan_checks."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+import yaml
+
+from validation.context import ApiContext, ValidationContext
+from validation.engines.python_checks.release_plan_checks import (
+    ALLOWED_META_RELEASES,
+    _check_file_existence,
+    _check_release_type_consistency,
+    _check_track_consistency,
+    check_release_plan_semantics,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context() -> ValidationContext:
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type="main",
+        trigger_type="dispatch",
+        profile="advisory",
+        stage="standard",
+        target_release_type=None,
+        commonalities_release=None,
+        icm_release=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=(),
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+def _write_release_plan(tmp_path: Path, plan: dict) -> None:
+    (tmp_path / "release-plan.yaml").write_text(
+        yaml.dump(plan, default_flow_style=False)
+    )
+
+
+def _make_plan(
+    release_track: str = "meta-release",
+    meta_release: str | None = "Spring26",
+    target_release_type: str = "public-release",
+    apis: list[dict] | None = None,
+) -> dict:
+    repo: dict = {
+        "release_track": release_track,
+        "target_release_type": target_release_type,
+    }
+    if meta_release is not None:
+        repo["meta_release"] = meta_release
+    if apis is None:
+        apis = [{"api_name": "qod", "target_api_status": "public", "target_api_version": "1.0.0"}]
+    return {"repository": repo, "apis": apis}
+
+
+# ---------------------------------------------------------------------------
+# TestCheckTrackConsistency
+# ---------------------------------------------------------------------------
+
+
+class TestCheckTrackConsistency:
+    def test_meta_release_with_value(self):
+        plan = _make_plan(release_track="meta-release", meta_release="Spring26")
+        assert _check_track_consistency(plan) == []
+
+    def test_meta_release_missing_value(self):
+        plan = _make_plan(release_track="meta-release", meta_release=None)
+        findings = _check_track_consistency(plan)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert "meta_release field is missing" in findings[0]["message"]
+
+    def test_independent_with_meta_release(self):
+        plan = _make_plan(release_track="independent", meta_release="Spring26")
+        findings = _check_track_consistency(plan)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "warn"
+
+    def test_independent_without_meta_release(self):
+        plan = _make_plan(release_track="independent", meta_release=None)
+        assert _check_track_consistency(plan) == []
+
+    def test_invalid_meta_release_value(self):
+        plan = _make_plan(meta_release="Winter99")
+        findings = _check_track_consistency(plan)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert "Winter99" in findings[0]["message"]
+
+    def test_valid_meta_release_values(self):
+        for value in ALLOWED_META_RELEASES:
+            plan = _make_plan(meta_release=value)
+            assert _check_track_consistency(plan) == [], f"Failed for {value}"
+
+
+# ---------------------------------------------------------------------------
+# TestCheckReleaseTypeConsistency
+# ---------------------------------------------------------------------------
+
+
+class TestCheckReleaseTypeConsistency:
+    def test_none_no_constraints(self):
+        plan = _make_plan(
+            target_release_type="none",
+            apis=[{"api_name": "qod", "target_api_status": "draft"}],
+        )
+        assert _check_release_type_consistency(plan) == []
+
+    def test_alpha_with_draft_error(self):
+        plan = _make_plan(
+            target_release_type="pre-release-alpha",
+            apis=[{"api_name": "qod", "target_api_status": "draft"}],
+        )
+        findings = _check_release_type_consistency(plan)
+        assert len(findings) == 1
+        assert "draft" in findings[0]["message"]
+
+    def test_alpha_with_alpha_ok(self):
+        plan = _make_plan(
+            target_release_type="pre-release-alpha",
+            apis=[{"api_name": "qod", "target_api_status": "alpha"}],
+        )
+        assert _check_release_type_consistency(plan) == []
+
+    def test_rc_with_alpha_error(self):
+        plan = _make_plan(
+            target_release_type="pre-release-rc",
+            apis=[{"api_name": "qod", "target_api_status": "alpha"}],
+        )
+        findings = _check_release_type_consistency(plan)
+        assert len(findings) == 1
+
+    def test_rc_with_rc_ok(self):
+        plan = _make_plan(
+            target_release_type="pre-release-rc",
+            apis=[{"api_name": "qod", "target_api_status": "rc"}],
+        )
+        assert _check_release_type_consistency(plan) == []
+
+    def test_public_with_rc_error(self):
+        plan = _make_plan(
+            target_release_type="public-release",
+            apis=[{"api_name": "qod", "target_api_status": "rc"}],
+        )
+        findings = _check_release_type_consistency(plan)
+        assert len(findings) == 1
+
+    def test_public_with_public_ok(self):
+        plan = _make_plan(
+            target_release_type="public-release",
+            apis=[{"api_name": "qod", "target_api_status": "public"}],
+        )
+        assert _check_release_type_consistency(plan) == []
+
+    def test_maintenance_with_non_public_error(self):
+        plan = _make_plan(
+            target_release_type="maintenance-release",
+            apis=[{"api_name": "qod", "target_api_status": "alpha"}],
+        )
+        findings = _check_release_type_consistency(plan)
+        assert len(findings) == 1
+
+    def test_multiple_apis_some_invalid(self):
+        plan = _make_plan(
+            target_release_type="public-release",
+            apis=[
+                {"api_name": "good", "target_api_status": "public"},
+                {"api_name": "bad", "target_api_status": "rc"},
+            ],
+        )
+        findings = _check_release_type_consistency(plan)
+        assert len(findings) == 1
+        assert "bad" in findings[0]["message"]
+        assert "good" not in findings[0]["message"]
+
+
+# ---------------------------------------------------------------------------
+# TestCheckFileExistence
+# ---------------------------------------------------------------------------
+
+
+class TestCheckFileExistence:
+    def test_file_exists(self, tmp_path: Path):
+        api_dir = tmp_path / "code" / "API_definitions"
+        api_dir.mkdir(parents=True)
+        (api_dir / "qod.yaml").touch()
+        plan = _make_plan(apis=[{"api_name": "qod", "target_api_status": "public"}])
+        assert _check_file_existence(plan, tmp_path) == []
+
+    def test_public_missing_file_error(self, tmp_path: Path):
+        (tmp_path / "code" / "API_definitions").mkdir(parents=True)
+        plan = _make_plan(apis=[{"api_name": "qod", "target_api_status": "public"}])
+        findings = _check_file_existence(plan, tmp_path)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+
+    def test_draft_missing_with_orphans_warn(self, tmp_path: Path):
+        api_dir = tmp_path / "code" / "API_definitions"
+        api_dir.mkdir(parents=True)
+        (api_dir / "quality-on-demand.yaml").touch()  # orphan
+        plan = _make_plan(apis=[{"api_name": "qod", "target_api_status": "draft"}])
+        findings = _check_file_existence(plan, tmp_path)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "warn"
+        assert "quality-on-demand" in findings[0]["message"]
+
+    def test_draft_missing_without_orphans(self, tmp_path: Path):
+        (tmp_path / "code" / "API_definitions").mkdir(parents=True)
+        plan = _make_plan(apis=[{"api_name": "qod", "target_api_status": "draft"}])
+        assert _check_file_existence(plan, tmp_path) == []
+
+
+# ---------------------------------------------------------------------------
+# TestCheckReleasePlanSemantics (integration)
+# ---------------------------------------------------------------------------
+
+
+class TestCheckReleasePlanSemantics:
+    def test_no_release_plan(self, tmp_path: Path):
+        ctx = _make_context()
+        assert check_release_plan_semantics(tmp_path, ctx) == []
+
+    def test_valid_plan(self, tmp_path: Path):
+        api_dir = tmp_path / "code" / "API_definitions"
+        api_dir.mkdir(parents=True)
+        (api_dir / "qod.yaml").touch()
+        _write_release_plan(tmp_path, _make_plan())
+        ctx = _make_context()
+        assert check_release_plan_semantics(tmp_path, ctx) == []
+
+    def test_collects_all_findings(self, tmp_path: Path):
+        """Multiple issues are collected from all sub-checks."""
+        plan = _make_plan(
+            release_track="meta-release",
+            meta_release=None,
+            target_release_type="public-release",
+            apis=[{"api_name": "qod", "target_api_status": "draft"}],
+        )
+        _write_release_plan(tmp_path, plan)
+        ctx = _make_context()
+        findings = check_release_plan_semantics(tmp_path, ctx)
+        # meta_release missing (track) + draft in public-release (type) = 2
+        assert len(findings) >= 2
diff --git a/validation/tests/test_python_checks_release_review.py b/validation/tests/test_python_checks_release_review.py
new file mode 100644
index 00000000..70f808c0
--- /dev/null
+++ b/validation/tests/test_python_checks_release_review.py
@@ -0,0 +1,127 @@
+"""Unit tests for validation.engines.python_checks.release_review_checks."""
+
+from __future__ import annotations
+
+from pathlib import Path
+from unittest.mock import patch
+
+import pytest
+
+from validation.context import ApiContext, ValidationContext
+from validation.engines.python_checks.release_review_checks import (
+    _is_allowed,
+    check_release_review_file_restriction,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(is_release_review: bool = True) -> ValidationContext:
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type="release",
+        trigger_type="pr",
+        profile="strict",
+        stage="standard",
+        target_release_type="public-release",
+        commonalities_release=None,
+        icm_release=None,
+        is_release_review_pr=is_release_review,
+        release_plan_changed=None,
+        pr_number=42,
+        apis=(),
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+# ---------------------------------------------------------------------------
+# TestIsAllowed
+# ---------------------------------------------------------------------------
+
+
+class TestIsAllowed:
+    def test_changelog_md(self):
+        assert _is_allowed("CHANGELOG.md") is True
+
+    def test_readme_md(self):
+        assert _is_allowed("README.md") is True
+
+    def test_changelog_dir_file(self):
+        assert _is_allowed("CHANGELOG/r1.0.md") is True
+
+    def test_api_spec_rejected(self):
+        assert _is_allowed("code/API_definitions/qod.yaml") is False
+
+    def test_release_plan_rejected(self):
+        assert _is_allowed("release-plan.yaml") is False
+
+    def test_workflow_rejected(self):
+        assert _is_allowed(".github/workflows/pr_validation.yml") is False
+
+
+# ---------------------------------------------------------------------------
+# TestCheckReleaseReviewFileRestriction
+# ---------------------------------------------------------------------------
+
+
+class TestCheckReleaseReviewFileRestriction:
+    def test_not_release_review_skip(self, tmp_path: Path):
+        ctx = _make_context(is_release_review=False)
+        assert check_release_review_file_restriction(tmp_path, ctx) == []
+
+    @patch(
+        "validation.engines.python_checks.release_review_checks._get_changed_files"
+    )
+    def test_allowed_files_only(self, mock_changed, tmp_path: Path):
+        mock_changed.return_value = ["CHANGELOG.md", "README.md"]
+        ctx = _make_context()
+        assert check_release_review_file_restriction(tmp_path, ctx) == []
+
+    @patch(
+        "validation.engines.python_checks.release_review_checks._get_changed_files"
+    )
+    def test_disallowed_file(self, mock_changed, tmp_path: Path):
+        mock_changed.return_value = [
+            "CHANGELOG.md",
+            "code/API_definitions/qod.yaml",
+        ]
+        ctx = _make_context()
+        findings = check_release_review_file_restriction(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert "qod.yaml" in findings[0]["message"]
+
+    @patch(
+        "validation.engines.python_checks.release_review_checks._get_changed_files"
+    )
+    def test_changelog_directory_allowed(self, mock_changed, tmp_path: Path):
+        mock_changed.return_value = [
+            "CHANGELOG/r1.0.md",
+            "CHANGELOG/r1.1.md",
+        ]
+        ctx = _make_context()
+        assert check_release_review_file_restriction(tmp_path, ctx) == []
+
+    @patch(
+        "validation.engines.python_checks.release_review_checks._get_changed_files"
+    )
+    def test_multiple_disallowed(self, mock_changed, tmp_path: Path):
+        mock_changed.return_value = [
+            "release-plan.yaml",
+            "code/API_definitions/qod.yaml",
+        ]
+        ctx = _make_context()
+        findings = check_release_review_file_restriction(tmp_path, ctx)
+        assert len(findings) == 2
+
+    @patch(
+        "validation.engines.python_checks.release_review_checks._get_changed_files"
+    )
+    def test_no_changed_files(self, mock_changed, tmp_path: Path):
+        mock_changed.return_value = []
+        ctx = _make_context()
+        assert check_release_review_file_restriction(tmp_path, ctx) == []
diff --git a/validation/tests/test_python_checks_test.py b/validation/tests/test_python_checks_test.py
new file mode 100644
index 00000000..35de0174
--- /dev/null
+++ b/validation/tests/test_python_checks_test.py
@@ -0,0 +1,187 @@
+"""Unit tests for validation.engines.python_checks.test_checks."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+
+from validation.context import ApiContext, ValidationContext
+from validation.engines.python_checks.test_checks import (
+    check_test_directory_exists,
+    check_test_file_version,
+    check_test_files_exist,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(
+    api_name: str = "quality-on-demand",
+    version: str = "1.0.0",
+    apis: tuple[ApiContext, ...] | None = None,
+) -> ValidationContext:
+    if apis is None:
+        api = ApiContext(
+            api_name=api_name,
+            target_api_version=version,
+            target_api_status="public",
+            target_api_maturity="stable",
+            api_pattern="request-response",
+            spec_file=f"code/API_definitions/{api_name}.yaml",
+        )
+        apis = (api,)
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type="main",
+        trigger_type="dispatch",
+        profile="advisory",
+        stage="standard",
+        target_release_type=None,
+        commonalities_release=None,
+        icm_release=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=apis,
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+def _make_test_dir(tmp_path: Path) -> Path:
+    test_dir = tmp_path / "code" / "Test_definitions"
+    test_dir.mkdir(parents=True)
+    return test_dir
+
+
+# ---------------------------------------------------------------------------
+# TestCheckTestDirectoryExists
+# ---------------------------------------------------------------------------
+
+
+class TestCheckTestDirectoryExists:
+    def test_directory_present(self, tmp_path: Path):
+        _make_test_dir(tmp_path)
+        ctx = _make_context()
+        assert check_test_directory_exists(tmp_path, ctx) == []
+
+    def test_directory_absent(self, tmp_path: Path):
+        ctx = _make_context()
+        findings = check_test_directory_exists(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert findings[0]["engine_rule"] == "check-test-directory-exists"
+
+    def test_no_apis_skip(self, tmp_path: Path):
+        ctx = _make_context(apis=())
+        assert check_test_directory_exists(tmp_path, ctx) == []
+
+
+# ---------------------------------------------------------------------------
+# TestCheckTestFilesExist
+# ---------------------------------------------------------------------------
+
+
+class TestCheckTestFilesExist:
+    def test_exact_match(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        (test_dir / "quality-on-demand.v1.feature").touch()
+        ctx = _make_context("quality-on-demand")
+        assert check_test_files_exist(tmp_path, ctx) == []
+
+    def test_prefix_match(self, tmp_path: Path):
+        """operation-specific test file: api-name-operationId.feature"""
+        test_dir = _make_test_dir(tmp_path)
+        (test_dir / "quality-on-demand-createSession.v1.feature").touch()
+        ctx = _make_context("quality-on-demand")
+        assert check_test_files_exist(tmp_path, ctx) == []
+
+    def test_no_matching_file(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        (test_dir / "other-api.v1.feature").touch()
+        ctx = _make_context("quality-on-demand")
+        findings = check_test_files_exist(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert "quality-on-demand" in findings[0]["message"]
+
+    def test_no_test_directory(self, tmp_path: Path):
+        """No test dir => skip (directory check reports it)."""
+        ctx = _make_context("quality-on-demand")
+        assert check_test_files_exist(tmp_path, ctx) == []
+
+    def test_non_feature_file_ignored(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        (test_dir / "quality-on-demand.yaml").touch()
+        ctx = _make_context("quality-on-demand")
+        findings = check_test_files_exist(tmp_path, ctx)
+        assert len(findings) == 1
+
+
+# ---------------------------------------------------------------------------
+# TestCheckTestFileVersion
+# ---------------------------------------------------------------------------
+
+
+class TestCheckTestFileVersion:
+    def test_matching_version(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        (test_dir / "qod.v1.feature").touch()
+        ctx = _make_context("qod", version="1.0.0")
+        assert check_test_file_version(tmp_path, ctx) == []
+
+    def test_matching_initial_version(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        (test_dir / "qod.v0.3.feature").touch()
+        ctx = _make_context("qod", version="0.3.0")
+        assert check_test_file_version(tmp_path, ctx) == []
+
+    def test_matching_wip_version(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        (test_dir / "qod.vwip.feature").touch()
+        ctx = _make_context("qod", version="wip")
+        assert check_test_file_version(tmp_path, ctx) == []
+
+    def test_matching_alpha_version(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        (test_dir / "qod.v0.2alpha2.feature").touch()
+        ctx = _make_context("qod", version="0.2.0-alpha.2")
+        assert check_test_file_version(tmp_path, ctx) == []
+
+    def test_mismatched_version(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        (test_dir / "qod.v2.feature").touch()
+        ctx = _make_context("qod", version="1.0.0")
+        findings = check_test_file_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "v2" in findings[0]["message"]
+        assert "v1" in findings[0]["message"]
+
+    def test_no_version_suffix(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        (test_dir / "qod.feature").touch()
+        ctx = _make_context("qod", version="1.0.0")
+        findings = check_test_file_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "no version suffix" in findings[0]["message"]
+
+    def test_no_test_dir(self, tmp_path: Path):
+        ctx = _make_context("qod")
+        assert check_test_file_version(tmp_path, ctx) == []
+
+    def test_no_matching_files(self, tmp_path: Path):
+        """No test files for this API => skip (other check reports it)."""
+        test_dir = _make_test_dir(tmp_path)
+        (test_dir / "other-api.v1.feature").touch()
+        ctx = _make_context("qod")
+        assert check_test_file_version(tmp_path, ctx) == []
+
+    def test_operation_specific_file(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        (test_dir / "qod-createSession.v1.feature").touch()
+        ctx = _make_context("qod", version="1.0.0")
+        assert check_test_file_version(tmp_path, ctx) == []
diff --git a/validation/tests/test_python_checks_version.py b/validation/tests/test_python_checks_version.py
new file mode 100644
index 00000000..3ff7bacd
--- /dev/null
+++ b/validation/tests/test_python_checks_version.py
@@ -0,0 +1,346 @@
+"""Unit tests for validation.engines.python_checks.version_checks."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+import yaml
+
+from validation.context import ApiContext, ValidationContext
+from validation.engines.python_checks.version_checks import (
+    build_version_segment,
+    check_info_version_format,
+    check_server_url_api_name,
+    check_server_url_version,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(
+    api_name: str = "quality-on-demand",
+    branch_type: str = "main",
+    version: str = "1.0.0",
+) -> ValidationContext:
+    api = ApiContext(
+        api_name=api_name,
+        target_api_version=version,
+        target_api_status="public",
+        target_api_maturity="stable",
+        api_pattern="request-response",
+        spec_file=f"code/API_definitions/{api_name}.yaml",
+    )
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type=branch_type,
+        trigger_type="dispatch",
+        profile="advisory",
+        stage="standard",
+        target_release_type=None,
+        commonalities_release=None,
+        icm_release=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=(api,),
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+def _write_spec(
+    tmp_path: Path,
+    api_name: str,
+    info_version: str,
+    server_urls: list[str] | None = None,
+) -> None:
+    """Write a minimal OpenAPI spec file."""
+    spec: dict = {
+        "openapi": "3.0.3",
+        "info": {"title": api_name, "version": info_version},
+    }
+    if server_urls is not None:
+        spec["servers"] = [{"url": url} for url in server_urls]
+    api_dir = tmp_path / "code" / "API_definitions"
+    api_dir.mkdir(parents=True, exist_ok=True)
+    (api_dir / f"{api_name}.yaml").write_text(
+        yaml.dump(spec, default_flow_style=False)
+    )
+
+
+# ---------------------------------------------------------------------------
+# TestBuildVersionSegment
+# ---------------------------------------------------------------------------
+
+
+class TestBuildVersionSegment:
+    def test_wip(self):
+        assert build_version_segment("wip") == "vwip"
+
+    def test_stable_major(self):
+        assert build_version_segment("1.0.0") == "v1"
+
+    def test_stable_major_higher(self):
+        assert build_version_segment("2.1.0") == "v2"
+
+    def test_initial_minor(self):
+        assert build_version_segment("0.1.0") == "v0.1"
+
+    def test_initial_higher_minor(self):
+        assert build_version_segment("0.3.0") == "v0.3"
+
+    def test_alpha_pre_release(self):
+        assert build_version_segment("0.2.0-alpha.2") == "v0.2alpha2"
+
+    def test_rc_pre_release_initial(self):
+        assert build_version_segment("0.5.0-rc.1") == "v0.5rc1"
+
+    def test_rc_pre_release_stable(self):
+        assert build_version_segment("1.0.0-rc.1") == "v1rc1"
+
+    def test_alpha_stable(self):
+        assert build_version_segment("2.0.0-alpha.1") == "v2alpha1"
+
+    def test_invalid_returns_none(self):
+        assert build_version_segment("not-a-version") is None
+
+    def test_empty_returns_none(self):
+        assert build_version_segment("") is None
+
+
+# ---------------------------------------------------------------------------
+# TestCheckInfoVersionFormat
+# ---------------------------------------------------------------------------
+
+
+class TestCheckInfoVersionFormat:
+    def test_wip_on_main_ok(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "wip")
+        ctx = _make_context("qod", branch_type="main")
+        assert check_info_version_format(tmp_path, ctx) == []
+
+    def test_semver_on_main_error(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "1.0.0")
+        ctx = _make_context("qod", branch_type="main")
+        findings = check_info_version_format(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert "wip" in findings[0]["message"]
+
+    def test_semver_on_release_ok(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "1.0.0")
+        ctx = _make_context("qod", branch_type="release")
+        assert check_info_version_format(tmp_path, ctx) == []
+
+    def test_wip_on_release_error(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "wip")
+        ctx = _make_context("qod", branch_type="release")
+        findings = check_info_version_format(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert "must not be 'wip'" in findings[0]["message"]
+
+    def test_invalid_version_on_release_error(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "not-semver")
+        ctx = _make_context("qod", branch_type="release")
+        findings = check_info_version_format(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "not a valid semantic version" in findings[0]["message"]
+
+    def test_wip_on_maintenance_error(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "wip")
+        ctx = _make_context("qod", branch_type="maintenance")
+        findings = check_info_version_format(tmp_path, ctx)
+        assert len(findings) == 1
+
+    def test_feature_branch_no_constraint(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "anything-goes")
+        ctx = _make_context("qod", branch_type="feature")
+        assert check_info_version_format(tmp_path, ctx) == []
+
+    def test_missing_spec_file(self, tmp_path: Path):
+        """Missing spec file => empty (filename check reports this)."""
+        ctx = _make_context("qod", branch_type="main")
+        assert check_info_version_format(tmp_path, ctx) == []
+
+    def test_missing_info_version(self, tmp_path: Path):
+        api_dir = tmp_path / "code" / "API_definitions"
+        api_dir.mkdir(parents=True)
+        (api_dir / "qod.yaml").write_text(
+            yaml.dump({"openapi": "3.0.3", "info": {"title": "test"}})
+        )
+        ctx = _make_context("qod", branch_type="main")
+        findings = check_info_version_format(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "missing" in findings[0]["message"]
+
+    def test_pre_release_on_release_ok(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "0.2.0-alpha.2")
+        ctx = _make_context("qod", branch_type="release")
+        assert check_info_version_format(tmp_path, ctx) == []
+
+
+# ---------------------------------------------------------------------------
+# TestCheckServerUrlVersion
+# ---------------------------------------------------------------------------
+
+
+class TestCheckServerUrlVersion:
+    def test_matching_stable(self, tmp_path: Path):
+        _write_spec(
+            tmp_path, "qod", "1.0.0",
+            server_urls=["{apiRoot}/qod/v1"],
+        )
+        ctx = _make_context("qod")
+        assert check_server_url_version(tmp_path, ctx) == []
+
+    def test_matching_initial(self, tmp_path: Path):
+        _write_spec(
+            tmp_path, "qod", "0.3.0",
+            server_urls=["{apiRoot}/qod/v0.3"],
+        )
+        ctx = _make_context("qod")
+        assert check_server_url_version(tmp_path, ctx) == []
+
+    def test_matching_wip(self, tmp_path: Path):
+        _write_spec(
+            tmp_path, "qod", "wip",
+            server_urls=["{apiRoot}/qod/vwip"],
+        )
+        ctx = _make_context("qod")
+        assert check_server_url_version(tmp_path, ctx) == []
+
+    def test_matching_alpha(self, tmp_path: Path):
+        _write_spec(
+            tmp_path, "qod", "0.2.0-alpha.2",
+            server_urls=["{apiRoot}/qod/v0.2alpha2"],
+        )
+        ctx = _make_context("qod")
+        assert check_server_url_version(tmp_path, ctx) == []
+
+    def test_matching_rc(self, tmp_path: Path):
+        _write_spec(
+            tmp_path, "qod", "1.0.0-rc.1",
+            server_urls=["{apiRoot}/qod/v1rc1"],
+        )
+        ctx = _make_context("qod")
+        assert check_server_url_version(tmp_path, ctx) == []
+
+    def test_mismatch(self, tmp_path: Path):
+        _write_spec(
+            tmp_path, "qod", "1.0.0",
+            server_urls=["{apiRoot}/qod/v2"],
+        )
+        ctx = _make_context("qod")
+        findings = check_server_url_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert "v2" in findings[0]["message"]
+        assert "v1" in findings[0]["message"]
+
+    def test_no_version_in_url(self, tmp_path: Path):
+        _write_spec(
+            tmp_path, "qod", "1.0.0",
+            server_urls=["https://example.com/qod"],
+        )
+        ctx = _make_context("qod")
+        findings = check_server_url_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "no recognisable version" in findings[0]["message"]
+
+    def test_no_servers(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "1.0.0")
+        ctx = _make_context("qod")
+        assert check_server_url_version(tmp_path, ctx) == []
+
+    def test_multiple_servers_one_bad(self, tmp_path: Path):
+        _write_spec(
+            tmp_path, "qod", "1.0.0",
+            server_urls=["{apiRoot}/qod/v1", "{apiRoot}/qod/v2"],
+        )
+        ctx = _make_context("qod")
+        findings = check_server_url_version(tmp_path, ctx)
+        assert len(findings) == 1  # Only the v2 mismatch
+
+    def test_missing_spec(self, tmp_path: Path):
+        ctx = _make_context("qod")
+        assert check_server_url_version(tmp_path, ctx) == []
+
+    def test_case_insensitive_match(self, tmp_path: Path):
+        _write_spec(
+            tmp_path, "qod", "1.0.0",
+            server_urls=["{apiRoot}/qod/V1"],
+        )
+        ctx = _make_context("qod")
+        assert check_server_url_version(tmp_path, ctx) == []
+
+    def test_trailing_slash(self, tmp_path: Path):
+        _write_spec(
+            tmp_path, "qod", "1.0.0",
+            server_urls=["{apiRoot}/qod/v1/"],
+        )
+        ctx = _make_context("qod")
+        assert check_server_url_version(tmp_path, ctx) == []
+
+
+# ---------------------------------------------------------------------------
+# TestCheckServerUrlApiName
+# ---------------------------------------------------------------------------
+
+
+class TestCheckServerUrlApiName:
+    def test_matching(self, tmp_path: Path):
+        _write_spec(
+            tmp_path, "quality-on-demand", "1.0.0",
+            server_urls=["{apiRoot}/quality-on-demand/v1"],
+        )
+        ctx = _make_context("quality-on-demand")
+        assert check_server_url_api_name(tmp_path, ctx) == []
+
+    def test_mismatch(self, tmp_path: Path):
+        _write_spec(
+            tmp_path, "quality-on-demand", "1.0.0",
+            server_urls=["{apiRoot}/qod/v1"],
+        )
+        ctx = _make_context("quality-on-demand")
+        findings = check_server_url_api_name(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert "qod" in findings[0]["message"]
+        assert "quality-on-demand" in findings[0]["message"]
+
+    def test_no_api_name_segment(self, tmp_path: Path):
+        """URL without recognisable api-name segment is silently skipped."""
+        _write_spec(
+            tmp_path, "qod", "1.0.0",
+            server_urls=["https://example.com/v1"],
+        )
+        ctx = _make_context("qod")
+        # No api-name segment to extract — version check handles this.
+        assert check_server_url_api_name(tmp_path, ctx) == []
+
+    def test_missing_spec(self, tmp_path: Path):
+        ctx = _make_context("qod")
+        assert check_server_url_api_name(tmp_path, ctx) == []
+
+    def test_multiple_servers_all_matching(self, tmp_path: Path):
+        _write_spec(
+            tmp_path, "qod", "1.0.0",
+            server_urls=["{apiRoot}/qod/v1", "https://sandbox.example.com/qod/v1"],
+        )
+        ctx = _make_context("qod")
+        assert check_server_url_api_name(tmp_path, ctx) == []
+
+    def test_multiple_servers_one_mismatch(self, tmp_path: Path):
+        _write_spec(
+            tmp_path, "qod", "1.0.0",
+            server_urls=["{apiRoot}/qod/v1", "{apiRoot}/wrong-name/v1"],
+        )
+        ctx = _make_context("qod")
+        findings = check_server_url_api_name(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "wrong-name" in findings[0]["message"]

From 9a0f6469d8f826620cb0c427845aba55199c18cd Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Thu, 26 Mar 2026 10:42:58 +0100
Subject: [PATCH 008/157] feat(validation): add post-filter engine for rule
 metadata and profile blocking

Implement the post-filter pipeline that sits between engine adapters
and the output stage. Processes raw findings through rule metadata
lookup, applicability evaluation, conditional severity resolution,
and profile-based blocking to produce pass/fail/error verdicts.

Modules:
- metadata_loader: YAML loading, frozen dataclasses, (engine, engine_rule) index
- condition_evaluator: AND/OR condition logic, version range comparison
- level_resolver: first-match-wins overrides, advisory/standard/strict profiles
- engine: orchestration entry point (run_post_filter -> PostFilterResult)

Handles empty rules directory gracefully (all findings pass through).
114 new tests, 407 total passing.
---
 validation/postfilter/__init__.py             |  12 +-
 validation/postfilter/condition_evaluator.py  | 193 +++++++
 validation/postfilter/engine.py               | 246 +++++++++
 validation/postfilter/level_resolver.py       |  82 +++
 validation/postfilter/metadata_loader.py      | 231 +++++++++
 .../tests/test_postfilter_conditions.py       | 297 +++++++++++
 validation/tests/test_postfilter_engine.py    | 473 ++++++++++++++++++
 validation/tests/test_postfilter_levels.py    | 211 ++++++++
 validation/tests/test_postfilter_metadata.py  | 270 ++++++++++
 9 files changed, 2012 insertions(+), 3 deletions(-)
 create mode 100644 validation/postfilter/condition_evaluator.py
 create mode 100644 validation/postfilter/engine.py
 create mode 100644 validation/postfilter/level_resolver.py
 create mode 100644 validation/postfilter/metadata_loader.py
 create mode 100644 validation/tests/test_postfilter_conditions.py
 create mode 100644 validation/tests/test_postfilter_engine.py
 create mode 100644 validation/tests/test_postfilter_levels.py
 create mode 100644 validation/tests/test_postfilter_metadata.py

diff --git a/validation/postfilter/__init__.py b/validation/postfilter/__init__.py
index 0cba252a..889458dc 100644
--- a/validation/postfilter/__init__.py
+++ b/validation/postfilter/__init__.py
@@ -1,3 +1,9 @@
-# Post-filter pipeline.
-# Applies rule metadata lookup, applicability evaluation, conditional
-# severity resolution, and profile-based blocking decisions.
+"""Post-filter pipeline.
+
+Applies rule metadata lookup, applicability evaluation, conditional
+severity resolution, and profile-based blocking decisions.
+"""
+
+from .engine import PostFilterResult, run_post_filter
+
+__all__ = ["PostFilterResult", "run_post_filter"]
diff --git a/validation/postfilter/condition_evaluator.py b/validation/postfilter/condition_evaluator.py
new file mode 100644
index 00000000..d96386a9
--- /dev/null
+++ b/validation/postfilter/condition_evaluator.py
@@ -0,0 +1,193 @@
+"""Condition evaluation for applicability and conditional-level overrides.
+
+Pure functions that evaluate condition dicts against ``ValidationContext``
+and optional ``ApiContext``.  No I/O, no external dependencies.
+
+Design doc references:
+  - Section 8.4: applicability evaluation (AND across fields, OR within arrays)
+  - Section 8.4.1: condition field vocabulary
+"""
+
+from __future__ import annotations
+
+import logging
+import re
+from typing import Optional, Tuple
+
+from validation.context import ApiContext, ValidationContext
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Version range parsing
+# ---------------------------------------------------------------------------
+
+_RANGE_RE = re.compile(r"^(>=|<=|!=|==|>|<)\s*(.+)$")
+
+# Fields resolved from ApiContext rather than ValidationContext
+_API_CONTEXT_FIELDS = frozenset(
+    {"target_api_status", "target_api_maturity", "api_pattern"}
+)
+
+
+def parse_version_tuple(version_str: str) -> Tuple[int, ...]:
+    """Parse a Commonalities version string into a comparable tuple.
+
+    Strips a leading ``r`` or ``R`` prefix and splits on ``.``.
+
+    Examples:
+        >>> parse_version_tuple("r3.4")
+        (3, 4)
+        >>> parse_version_tuple("r4.1")
+        (4, 1)
+        >>> parse_version_tuple("4.1")
+        (4, 1)
+
+    Returns ``(0,)`` on parse failure.
+    """
+    s = version_str.lstrip("rR")
+    try:
+        return tuple(int(part) for part in s.split("."))
+    except (ValueError, AttributeError):
+        return (0,)
+
+
+def evaluate_version_range(
+    range_expr: str,
+    actual_version: Optional[str],
+) -> bool:
+    """Evaluate a range expression against a concrete version string.
+
+    Supports operators ``>=``, ``>``, ``<=``, ``<``, ``==``, ``!=``.
+
+    Args:
+        range_expr: Expression like ``">=r3.4"`` or ``"=":
+        return actual >= expected
+    if operator == ">":
+        return actual > expected
+    if operator == "<=":
+        return actual <= expected
+    if operator == "<":
+        return actual < expected
+    if operator == "==":
+        return actual == expected
+    if operator == "!=":
+        return actual != expected
+
+    return False  # pragma: no cover
+
+
+# ---------------------------------------------------------------------------
+# Condition evaluation
+# ---------------------------------------------------------------------------
+
+
+def evaluate_condition(
+    condition: dict,
+    context: ValidationContext,
+    api_context: Optional[ApiContext],
+) -> bool:
+    """Evaluate a condition dict against context.
+
+    All present fields must match (AND logic).  Array fields use OR
+    logic (the context value must be contained in the array).
+
+    Per-API fields (``target_api_status``, ``target_api_maturity``,
+    ``api_pattern``) are resolved from *api_context*.  When *api_context*
+    is ``None`` (repo-level finding), these fields are treated as
+    unconstrained (always match).
+
+    Args:
+        condition: Dict of field → value from rule metadata.
+        context: Repository-level validation context.
+        api_context: Per-API context, or ``None`` for repo-level findings.
+
+    Returns:
+        ``True`` if all conditions match, ``False`` otherwise.
+    """
+    for field, expected in condition.items():
+        if not _evaluate_single_field(field, expected, context, api_context):
+            return False
+    return True
+
+
+def _evaluate_single_field(
+    field: str,
+    expected: object,
+    context: ValidationContext,
+    api_context: Optional[ApiContext],
+) -> bool:
+    """Evaluate one condition field against the context."""
+
+    # --- Per-API array fields ---
+    if field in _API_CONTEXT_FIELDS:
+        if api_context is None:
+            # Repo-level finding — per-API conditions are unconstrained
+            return True
+        actual = getattr(api_context, field, None)
+        if not isinstance(expected, list):
+            return actual == expected
+        return actual in expected
+
+    # --- commonalities_release: range expression ---
+    if field == "commonalities_release":
+        return evaluate_version_range(str(expected), context.commonalities_release)
+
+    # --- Boolean fields ---
+    if field in ("is_release_review_pr", "release_plan_changed"):
+        actual = getattr(context, field, None)
+        return actual == expected
+
+    # --- Repository-level array fields ---
+    # branch_types, trigger_types, target_release_type
+    _FIELD_TO_ATTR = {
+        "branch_types": "branch_type",
+        "trigger_types": "trigger_type",
+        "target_release_type": "target_release_type",
+    }
+    attr_name = _FIELD_TO_ATTR.get(field)
+    if attr_name is not None:
+        actual = getattr(context, attr_name, None)
+        if not isinstance(expected, list):
+            return actual == expected
+        return actual in expected
+
+    # Unknown field — treat as non-matching to be safe
+    logger.warning("Unknown condition field: %r", field)
+    return False
+
+
+def is_applicable(
+    applicability: dict,
+    context: ValidationContext,
+    api_context: Optional[ApiContext],
+) -> bool:
+    """Check whether a rule is applicable in the current context.
+
+    Returns ``True`` if *applicability* is empty (unconstrained) or all
+    conditions match.
+    """
+    if not applicability:
+        return True
+    return evaluate_condition(applicability, context, api_context)
diff --git a/validation/postfilter/engine.py b/validation/postfilter/engine.py
new file mode 100644
index 00000000..3a054d46
--- /dev/null
+++ b/validation/postfilter/engine.py
@@ -0,0 +1,246 @@
+"""Post-filter engine — main orchestration entry point.
+
+Processes raw engine findings through rule metadata lookup, applicability
+evaluation, conditional severity resolution, and profile-based blocking
+to produce a structured result with an overall pass/fail/error verdict.
+
+Design doc references:
+  - Section 8.4: post-filter pipeline
+  - Section 8.1 step 8: post-filter in end-to-end flow
+  - Section 2.1: overall result computation
+"""
+
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass
+from pathlib import Path
+from typing import List, Optional
+
+from validation.context import ApiContext, ValidationContext
+
+from .condition_evaluator import is_applicable
+from .level_resolver import apply_profile_blocking, resolve_level
+from .metadata_loader import RuleMetadata, build_lookup_index, load_all_rules
+
+logger = logging.getLogger(__name__)
+
+
+# ---------------------------------------------------------------------------
+# Result type
+# ---------------------------------------------------------------------------
+
+
+@dataclass(frozen=True)
+class PostFilterResult:
+    """Result of post-filter processing.
+
+    Attributes:
+        findings: Processed findings with resolved level, hint, blocks.
+        result: Overall verdict — ``"pass"``, ``"fail"``, or ``"error"``.
+        summary: Human-readable one-line summary.
+    """
+
+    findings: List[dict]
+    result: str
+    summary: str
+
+
+# ---------------------------------------------------------------------------
+# Internal helpers
+# ---------------------------------------------------------------------------
+
+
+def _is_engine_error_finding(finding: dict) -> bool:
+    """Check if a finding represents an engine execution error.
+
+    Engine adapters emit these with ``engine_rule`` ending in
+    ``-execution-error`` (e.g. ``spectral-execution-error``).
+    """
+    return finding.get("engine_rule", "").endswith("-execution-error")
+
+
+def _resolve_api_context(
+    finding: dict,
+    context: ValidationContext,
+) -> Optional[ApiContext]:
+    """Look up the ``ApiContext`` for a finding by matching ``api_name``.
+
+    Returns ``None`` for repo-level findings or when no matching API
+    exists in the context.
+    """
+    api_name = finding.get("api_name")
+    if not api_name:
+        return None
+    for api in context.apis:
+        if api.api_name == api_name:
+            return api
+    return None
+
+
+def _enrich_finding(
+    finding: dict,
+    rule: RuleMetadata,
+    resolved_level: str,
+) -> dict:
+    """Create an enriched copy of a finding with metadata applied."""
+    enriched = dict(finding)
+    enriched["rule_id"] = rule.id
+    enriched["level"] = resolved_level
+    enriched["hint"] = rule.hint
+    return enriched
+
+
+def _passthrough_finding(finding: dict) -> dict:
+    """Create a pass-through copy: keep engine level, hint = message."""
+    enriched = dict(finding)
+    if "hint" not in enriched or enriched.get("hint") is None:
+        enriched["hint"] = finding.get("message", "")
+    return enriched
+
+
+def compute_overall_result(
+    findings: List[dict],
+    had_engine_error: bool,
+) -> str:
+    """Compute the overall result from processed findings.
+
+    Priority: ``"error"`` > ``"fail"`` > ``"pass"``.
+
+    Args:
+        findings: Post-filtered findings with ``blocks`` field set.
+        had_engine_error: Whether any engine execution error occurred.
+
+    Returns:
+        ``"error"`` if evaluation was incomplete (engine failure),
+        ``"fail"`` if any finding has ``blocks=True``,
+        ``"pass"`` otherwise.
+    """
+    if had_engine_error:
+        return "error"
+    if any(f.get("blocks") for f in findings):
+        return "fail"
+    return "pass"
+
+
+def _build_summary(result: str, findings: List[dict]) -> str:
+    """Build a human-readable one-line summary."""
+    total = len(findings)
+    blocking = sum(1 for f in findings if f.get("blocks"))
+    errors = sum(1 for f in findings if f.get("level") == "error")
+    warnings = sum(1 for f in findings if f.get("level") == "warn")
+    hints = sum(1 for f in findings if f.get("level") == "hint")
+
+    if result == "error":
+        return (
+            f"Incomplete evaluation: {total} findings "
+            f"({errors} errors, {warnings} warnings, {hints} hints)"
+        )
+    if result == "fail":
+        return (
+            f"Failed: {blocking} blocking out of {total} findings "
+            f"({errors} errors, {warnings} warnings, {hints} hints)"
+        )
+    if total == 0:
+        return "Passed: no findings"
+    return (
+        f"Passed: {total} findings "
+        f"({errors} errors, {warnings} warnings, {hints} hints)"
+    )
+
+
+# ---------------------------------------------------------------------------
+# Main entry point
+# ---------------------------------------------------------------------------
+
+
+def run_post_filter(
+    findings: List[dict],
+    context: ValidationContext,
+    rules_dir: Path,
+) -> PostFilterResult:
+    """Process all findings through the post-filter pipeline.
+
+    Algorithm per finding:
+
+    1. Engine execution errors pass through unchanged, flag
+       ``had_engine_error``.
+    2. Look up ``(engine, engine_rule)`` in the metadata index.
+    3. **Mapped rule**: evaluate applicability (remove if not applicable),
+       resolve conditional level (remove if ``"off"``), enrich with
+       ``rule_id``, ``hint``, and adjusted ``level``.
+    4. **Unmapped rule** (pass-through): keep engine severity, set
+       ``hint = message``.
+    5. Apply profile blocking to all surviving findings.
+    6. Compute overall result.
+
+    Args:
+        findings: Raw findings from all engine adapters.
+        context: Unified validation context.
+        rules_dir: Path to the ``validation/rules/`` directory.
+
+    Returns:
+        :class:`PostFilterResult` with processed findings and verdict.
+    """
+    # Load rule metadata and build lookup index
+    all_rules = load_all_rules(rules_dir)
+    index = build_lookup_index(all_rules)
+
+    logger.info(
+        "Post-filter: %d findings, %d rules loaded, profile=%s",
+        len(findings),
+        len(all_rules),
+        context.profile,
+    )
+
+    processed: list[dict] = []
+    had_engine_error = False
+
+    for finding in findings:
+        # Step 1: Engine execution errors pass through
+        if _is_engine_error_finding(finding):
+            had_engine_error = True
+            enriched = _passthrough_finding(finding)
+            enriched["blocks"] = True
+            processed.append(enriched)
+            continue
+
+        # Step 2: Metadata lookup
+        key = (finding.get("engine", ""), finding.get("engine_rule", ""))
+        rule = index.get(key)
+
+        if rule is not None:
+            # Step 3: Mapped rule
+            api_ctx = _resolve_api_context(finding, context)
+
+            # Applicability check — remove if not applicable
+            if not is_applicable(rule.applicability, context, api_ctx):
+                continue
+
+            # Conditional level resolution
+            resolved_level = resolve_level(rule, context, api_ctx)
+            if resolved_level == "off":
+                continue
+
+            enriched = _enrich_finding(finding, rule, resolved_level)
+        else:
+            # Step 4: Unmapped rule — pass-through
+            enriched = _passthrough_finding(finding)
+
+        # Step 5: Profile blocking
+        enriched["blocks"] = apply_profile_blocking(
+            enriched["level"], context.profile
+        )
+        processed.append(enriched)
+
+    # Step 6: Overall result
+    result = compute_overall_result(processed, had_engine_error)
+    summary = _build_summary(result, processed)
+
+    logger.info("Post-filter result: %s — %s", result, summary)
+
+    return PostFilterResult(
+        findings=processed,
+        result=result,
+        summary=summary,
+    )
diff --git a/validation/postfilter/level_resolver.py b/validation/postfilter/level_resolver.py
new file mode 100644
index 00000000..947a1676
--- /dev/null
+++ b/validation/postfilter/level_resolver.py
@@ -0,0 +1,82 @@
+"""Conditional level resolution and profile-based blocking.
+
+Pure functions that resolve the effective severity for a finding and
+determine whether it blocks under the active profile.
+
+Design doc references:
+  - Section 8.4: conditional level overrides (first match wins)
+  - Section 2.1: validation profiles (advisory / standard / strict)
+"""
+
+from __future__ import annotations
+
+from typing import Optional
+
+from validation.context import ApiContext, ValidationContext
+from validation.context.context_builder import (
+    PROFILE_ADVISORY,
+    PROFILE_STANDARD,
+    PROFILE_STRICT,
+)
+
+from .condition_evaluator import evaluate_condition
+from .metadata_loader import RuleMetadata
+
+
+# ---------------------------------------------------------------------------
+# Level resolution
+# ---------------------------------------------------------------------------
+
+
+def resolve_level(
+    rule: RuleMetadata,
+    context: ValidationContext,
+    api_context: Optional[ApiContext],
+) -> str:
+    """Resolve the effective severity level for a finding.
+
+    Walks ``conditional_level.overrides`` in declaration order.  The
+    first override whose condition matches the context wins.  If no
+    override matches, the default level is returned.
+
+    Args:
+        rule: Rule metadata containing the conditional level spec.
+        context: Repository-level validation context.
+        api_context: Per-API context, or ``None`` for repo-level findings.
+
+    Returns:
+        Resolved level: ``"error"``, ``"warn"``, ``"hint"``, or ``"off"``.
+    """
+    for override in rule.conditional_level.overrides:
+        if evaluate_condition(override.condition, context, api_context):
+            return override.level
+    return rule.conditional_level.default
+
+
+# ---------------------------------------------------------------------------
+# Profile blocking
+# ---------------------------------------------------------------------------
+
+
+def apply_profile_blocking(level: str, profile: str) -> bool:
+    """Determine whether a finding at *level* blocks under *profile*.
+
+    Profile semantics:
+        - **advisory**: nothing blocks (always ``False``)
+        - **standard**: errors block
+        - **strict**: errors and warnings block
+
+    Args:
+        level: Resolved finding level (``"error"``, ``"warn"``, ``"hint"``).
+        profile: Active validation profile.
+
+    Returns:
+        ``True`` if the finding should block.
+    """
+    if profile == PROFILE_ADVISORY:
+        return False
+    if profile == PROFILE_STANDARD:
+        return level == "error"
+    if profile == PROFILE_STRICT:
+        return level in ("error", "warn")
+    return False
diff --git a/validation/postfilter/metadata_loader.py b/validation/postfilter/metadata_loader.py
new file mode 100644
index 00000000..64fa2ec8
--- /dev/null
+++ b/validation/postfilter/metadata_loader.py
@@ -0,0 +1,231 @@
+"""Rule metadata loading and lookup index.
+
+Loads rule metadata YAML files from the ``validation/rules/`` directory,
+parses them into frozen dataclasses, and builds a lookup index keyed by
+``(engine, engine_rule)`` for O(1) finding-to-metadata resolution.
+
+Design doc references:
+  - Section 1.1: rule metadata model
+  - Section 8.4.1: rule metadata lookup
+"""
+
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple
+
+import yaml
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Required fields in a rule metadata entry
+# ---------------------------------------------------------------------------
+
+_REQUIRED_FIELDS = ("id", "name", "engine", "engine_rule", "hint", "conditional_level")
+
+
+# ---------------------------------------------------------------------------
+# Dataclasses
+# ---------------------------------------------------------------------------
+
+
+@dataclass(frozen=True)
+class ConditionalOverride:
+    """One condition/level pair within ``conditional_level.overrides``.
+
+    Attributes:
+        condition: Condition dict using the same field vocabulary as
+            applicability (AND across fields, OR within arrays).
+        level: Severity when this override matches — ``error``, ``warn``,
+            ``hint``, or ``off``.
+    """
+
+    condition: dict
+    level: str
+
+
+@dataclass(frozen=True)
+class ConditionalLevel:
+    """Conditional severity specification for a rule.
+
+    Attributes:
+        default: Base severity when no override matches.
+        overrides: Ordered list of overrides — first match wins.
+    """
+
+    default: str
+    overrides: Tuple[ConditionalOverride, ...]
+
+
+@dataclass(frozen=True)
+class RuleMetadata:
+    """Framework-level metadata for a single validation rule.
+
+    Attributes:
+        id: Stable ID with engine prefix (e.g. ``"S-042"``).
+        name: Human-readable kebab-case name.
+        engine: Engine responsible for producing the finding.
+        engine_rule: Native rule identifier within the engine.
+        hint: Actionable fix guidance shown to developers.
+        applicability: Condition dict — omitted fields are unconstrained.
+        conditional_level: Severity specification with optional overrides.
+    """
+
+    id: str
+    name: str
+    engine: str
+    engine_rule: str
+    hint: str
+    applicability: dict
+    conditional_level: ConditionalLevel
+
+
+# ---------------------------------------------------------------------------
+# Parsing
+# ---------------------------------------------------------------------------
+
+
+def _parse_conditional_level(raw: object) -> ConditionalLevel:
+    """Parse the ``conditional_level`` block from raw YAML data.
+
+    Raises:
+        ValueError: If ``default`` is missing or data is malformed.
+    """
+    if not isinstance(raw, dict):
+        raise ValueError("conditional_level must be a mapping")
+    if "default" not in raw:
+        raise ValueError("conditional_level.default is required")
+
+    overrides: list[ConditionalOverride] = []
+    for entry in raw.get("overrides", []):
+        if not isinstance(entry, dict):
+            continue
+        overrides.append(
+            ConditionalOverride(
+                condition=entry.get("condition", {}),
+                level=entry["level"],
+            )
+        )
+
+    return ConditionalLevel(
+        default=raw["default"],
+        overrides=tuple(overrides),
+    )
+
+
+def parse_rule_metadata(raw: dict) -> RuleMetadata:
+    """Parse a single rule metadata dict into a :class:`RuleMetadata`.
+
+    Args:
+        raw: Dict from YAML with keys matching ``rule-metadata-schema.yaml``.
+
+    Returns:
+        Parsed rule metadata.
+
+    Raises:
+        ValueError: If required fields are missing or malformed.
+    """
+    missing = [f for f in _REQUIRED_FIELDS if f not in raw]
+    if missing:
+        raise ValueError(f"Missing required fields: {', '.join(missing)}")
+
+    return RuleMetadata(
+        id=raw["id"],
+        name=raw["name"],
+        engine=raw["engine"],
+        engine_rule=raw["engine_rule"],
+        hint=raw["hint"],
+        applicability=raw.get("applicability", {}),
+        conditional_level=_parse_conditional_level(raw["conditional_level"]),
+    )
+
+
+# ---------------------------------------------------------------------------
+# File loading
+# ---------------------------------------------------------------------------
+
+
+def load_rules_from_file(file_path: Path) -> List[RuleMetadata]:
+    """Load a YAML file containing an array of rule metadata objects.
+
+    Returns an empty list if the file does not exist, is empty, or
+    contains malformed data.  Individual malformed entries are skipped
+    with a warning.
+    """
+    if not file_path.is_file():
+        return []
+
+    try:
+        data = yaml.safe_load(file_path.read_text(encoding="utf-8"))
+    except yaml.YAMLError as exc:
+        logger.warning("Failed to parse %s: %s", file_path, exc)
+        return []
+
+    if not isinstance(data, list):
+        logger.warning("Expected array in %s, got %s", file_path, type(data).__name__)
+        return []
+
+    rules: list[RuleMetadata] = []
+    for i, entry in enumerate(data):
+        if not isinstance(entry, dict):
+            logger.warning("Skipping non-dict entry at index %d in %s", i, file_path)
+            continue
+        try:
+            rules.append(parse_rule_metadata(entry))
+        except (ValueError, KeyError) as exc:
+            logger.warning(
+                "Skipping malformed rule at index %d in %s: %s", i, file_path, exc
+            )
+    return rules
+
+
+def load_all_rules(rules_dir: Path) -> List[RuleMetadata]:
+    """Load all ``*-rules.yaml`` files from *rules_dir*.
+
+    Returns an empty list if the directory does not exist or contains
+    no rule files.
+    """
+    if not rules_dir.is_dir():
+        logger.info("Rules directory does not exist: %s", rules_dir)
+        return []
+
+    files = sorted(rules_dir.glob("*-rules.yaml"))
+    if not files:
+        logger.info("No rule metadata files found in %s", rules_dir)
+        return []
+
+    all_rules: list[RuleMetadata] = []
+    for f in files:
+        all_rules.extend(load_rules_from_file(f))
+    return all_rules
+
+
+# ---------------------------------------------------------------------------
+# Lookup index
+# ---------------------------------------------------------------------------
+
+
+def build_lookup_index(
+    rules: List[RuleMetadata],
+) -> Dict[Tuple[str, str], RuleMetadata]:
+    """Build a dict keyed by ``(engine, engine_rule)`` for O(1) lookup.
+
+    On duplicate keys the first entry wins and a warning is logged.
+    """
+    index: dict[tuple[str, str], RuleMetadata] = {}
+    for rule in rules:
+        key = (rule.engine, rule.engine_rule)
+        if key in index:
+            logger.warning(
+                "Duplicate rule metadata for (%s, %s): keeping %s, ignoring %s",
+                rule.engine,
+                rule.engine_rule,
+                index[key].id,
+                rule.id,
+            )
+            continue
+        index[key] = rule
+    return index
diff --git a/validation/tests/test_postfilter_conditions.py b/validation/tests/test_postfilter_conditions.py
new file mode 100644
index 00000000..613dffc2
--- /dev/null
+++ b/validation/tests/test_postfilter_conditions.py
@@ -0,0 +1,297 @@
+"""Unit tests for validation.postfilter.condition_evaluator."""
+
+from __future__ import annotations
+
+import pytest
+
+from validation.context import ApiContext, ValidationContext
+from validation.postfilter.condition_evaluator import (
+    evaluate_condition,
+    evaluate_version_range,
+    is_applicable,
+    parse_version_tuple,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(
+    branch_type: str = "main",
+    trigger_type: str = "pr",
+    profile: str = "standard",
+    target_release_type: str | None = "public-release",
+    commonalities_release: str | None = "r4.1",
+    is_release_review_pr: bool = False,
+    release_plan_changed: bool | None = None,
+    apis: tuple[ApiContext, ...] = (),
+) -> ValidationContext:
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type=branch_type,
+        trigger_type=trigger_type,
+        profile=profile,
+        stage="standard",
+        target_release_type=target_release_type,
+        commonalities_release=commonalities_release,
+        icm_release=None,
+        is_release_review_pr=is_release_review_pr,
+        release_plan_changed=release_plan_changed,
+        pr_number=None,
+        apis=apis,
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+def _make_api(
+    api_name: str = "quality-on-demand",
+    target_api_status: str = "public",
+    target_api_maturity: str = "stable",
+    api_pattern: str = "request-response",
+) -> ApiContext:
+    return ApiContext(
+        api_name=api_name,
+        target_api_version="1.0.0",
+        target_api_status=target_api_status,
+        target_api_maturity=target_api_maturity,
+        api_pattern=api_pattern,
+        spec_file=f"code/API_definitions/{api_name}.yaml",
+    )
+
+
+# ---------------------------------------------------------------------------
+# TestParseVersionTuple
+# ---------------------------------------------------------------------------
+
+
+class TestParseVersionTuple:
+    def test_r_prefix(self):
+        assert parse_version_tuple("r3.4") == (3, 4)
+
+    def test_capital_r_prefix(self):
+        assert parse_version_tuple("R4.1") == (4, 1)
+
+    def test_no_prefix(self):
+        assert parse_version_tuple("4.1") == (4, 1)
+
+    def test_single_digit(self):
+        assert parse_version_tuple("r5") == (5,)
+
+    def test_three_parts(self):
+        assert parse_version_tuple("r4.1.2") == (4, 1, 2)
+
+    def test_malformed(self):
+        assert parse_version_tuple("abc") == (0,)
+
+    def test_empty(self):
+        assert parse_version_tuple("") == (0,)
+
+
+# ---------------------------------------------------------------------------
+# TestEvaluateVersionRange
+# ---------------------------------------------------------------------------
+
+
+class TestEvaluateVersionRange:
+    def test_gte_match(self):
+        assert evaluate_version_range(">=r3.4", "r4.1") is True
+
+    def test_gte_exact(self):
+        assert evaluate_version_range(">=r3.4", "r3.4") is True
+
+    def test_gte_below(self):
+        assert evaluate_version_range(">=r4.0", "r3.4") is False
+
+    def test_gt(self):
+        assert evaluate_version_range(">r3.4", "r3.5") is True
+        assert evaluate_version_range(">r3.4", "r3.4") is False
+
+    def test_lte(self):
+        assert evaluate_version_range("<=r4.0", "r3.4") is True
+        assert evaluate_version_range("<=r4.0", "r4.0") is True
+        assert evaluate_version_range("<=r4.0", "r4.1") is False
+
+    def test_lt(self):
+        assert evaluate_version_range("=r3.4", None) is False
+
+    def test_malformed_expression(self):
+        assert evaluate_version_range("r3.4", "r4.1") is False
+
+    def test_whitespace(self):
+        assert evaluate_version_range(">= r3.4", "r4.1") is True
+
+
+# ---------------------------------------------------------------------------
+# TestEvaluateCondition
+# ---------------------------------------------------------------------------
+
+
+class TestEvaluateCondition:
+    """Test evaluate_condition with individual and combined fields."""
+
+    # --- Array fields (repo-level) ---
+
+    def test_branch_types_match(self):
+        ctx = _make_context(branch_type="main")
+        assert evaluate_condition({"branch_types": ["main", "release"]}, ctx, None)
+
+    def test_branch_types_no_match(self):
+        ctx = _make_context(branch_type="feature")
+        assert not evaluate_condition({"branch_types": ["main", "release"]}, ctx, None)
+
+    def test_trigger_types_match(self):
+        ctx = _make_context(trigger_type="pr")
+        assert evaluate_condition({"trigger_types": ["pr", "dispatch"]}, ctx, None)
+
+    def test_trigger_types_no_match(self):
+        ctx = _make_context(trigger_type="local")
+        assert not evaluate_condition({"trigger_types": ["pr"]}, ctx, None)
+
+    def test_target_release_type_match(self):
+        ctx = _make_context(target_release_type="public-release")
+        assert evaluate_condition(
+            {"target_release_type": ["public-release", "pre-release-rc"]}, ctx, None
+        )
+
+    def test_target_release_type_none(self):
+        ctx = _make_context(target_release_type=None)
+        assert not evaluate_condition(
+            {"target_release_type": ["public-release"]}, ctx, None
+        )
+
+    # --- Per-API array fields ---
+
+    def test_target_api_status_match(self):
+        ctx = _make_context()
+        api = _make_api(target_api_status="public")
+        assert evaluate_condition(
+            {"target_api_status": ["public", "rc"]}, ctx, api
+        )
+
+    def test_target_api_status_no_match(self):
+        ctx = _make_context()
+        api = _make_api(target_api_status="draft")
+        assert not evaluate_condition(
+            {"target_api_status": ["public"]}, ctx, api
+        )
+
+    def test_target_api_maturity_match(self):
+        ctx = _make_context()
+        api = _make_api(target_api_maturity="stable")
+        assert evaluate_condition(
+            {"target_api_maturity": ["stable"]}, ctx, api
+        )
+
+    def test_api_pattern_match(self):
+        ctx = _make_context()
+        api = _make_api(api_pattern="implicit-subscription")
+        assert evaluate_condition(
+            {"api_pattern": ["implicit-subscription", "explicit-subscription"]},
+            ctx,
+            api,
+        )
+
+    def test_api_field_with_none_api_context(self):
+        """Per-API conditions are unconstrained when api_context is None."""
+        ctx = _make_context()
+        assert evaluate_condition(
+            {"target_api_status": ["public"]}, ctx, None
+        )
+
+    # --- Range field ---
+
+    def test_commonalities_release_match(self):
+        ctx = _make_context(commonalities_release="r4.1")
+        assert evaluate_condition(
+            {"commonalities_release": ">=r3.4"}, ctx, None
+        )
+
+    def test_commonalities_release_no_match(self):
+        ctx = _make_context(commonalities_release="r3.3")
+        assert not evaluate_condition(
+            {"commonalities_release": ">=r3.4"}, ctx, None
+        )
+
+    # --- Boolean fields ---
+
+    def test_is_release_review_pr_true(self):
+        ctx = _make_context(is_release_review_pr=True)
+        assert evaluate_condition({"is_release_review_pr": True}, ctx, None)
+
+    def test_is_release_review_pr_false(self):
+        ctx = _make_context(is_release_review_pr=False)
+        assert not evaluate_condition({"is_release_review_pr": True}, ctx, None)
+
+    def test_release_plan_changed_true(self):
+        ctx = _make_context(release_plan_changed=True)
+        assert evaluate_condition({"release_plan_changed": True}, ctx, None)
+
+    def test_release_plan_changed_none(self):
+        ctx = _make_context(release_plan_changed=None)
+        assert not evaluate_condition({"release_plan_changed": True}, ctx, None)
+
+    # --- AND across fields ---
+
+    def test_and_logic_all_match(self):
+        ctx = _make_context(branch_type="release", trigger_type="pr")
+        api = _make_api(target_api_maturity="stable")
+        condition = {
+            "branch_types": ["release"],
+            "trigger_types": ["pr"],
+            "target_api_maturity": ["stable"],
+        }
+        assert evaluate_condition(condition, ctx, api)
+
+    def test_and_logic_one_fails(self):
+        ctx = _make_context(branch_type="main", trigger_type="pr")
+        condition = {
+            "branch_types": ["release"],
+            "trigger_types": ["pr"],
+        }
+        assert not evaluate_condition(condition, ctx, None)
+
+    # --- Empty condition ---
+
+    def test_empty_condition_matches(self):
+        ctx = _make_context()
+        assert evaluate_condition({}, ctx, None)
+
+    # --- Unknown field ---
+
+    def test_unknown_field_does_not_match(self):
+        ctx = _make_context()
+        assert not evaluate_condition({"unknown_field": "value"}, ctx, None)
+
+
+# ---------------------------------------------------------------------------
+# TestIsApplicable
+# ---------------------------------------------------------------------------
+
+
+class TestIsApplicable:
+    def test_empty_applicability(self):
+        ctx = _make_context()
+        assert is_applicable({}, ctx, None)
+
+    def test_matching_applicability(self):
+        ctx = _make_context(branch_type="main")
+        assert is_applicable({"branch_types": ["main"]}, ctx, None)
+
+    def test_non_matching_applicability(self):
+        ctx = _make_context(branch_type="feature")
+        assert not is_applicable({"branch_types": ["main"]}, ctx, None)
diff --git a/validation/tests/test_postfilter_engine.py b/validation/tests/test_postfilter_engine.py
new file mode 100644
index 00000000..cc68dcc2
--- /dev/null
+++ b/validation/tests/test_postfilter_engine.py
@@ -0,0 +1,473 @@
+"""Unit tests for validation.postfilter.engine (integration)."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+import yaml
+
+from validation.context import ApiContext, ValidationContext
+from validation.postfilter.engine import (
+    PostFilterResult,
+    _is_engine_error_finding,
+    _resolve_api_context,
+    compute_overall_result,
+    run_post_filter,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(
+    branch_type: str = "main",
+    trigger_type: str = "pr",
+    profile: str = "standard",
+    target_release_type: str | None = "public-release",
+    commonalities_release: str | None = "r4.1",
+    is_release_review_pr: bool = False,
+    apis: tuple[ApiContext, ...] = (),
+) -> ValidationContext:
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type=branch_type,
+        trigger_type=trigger_type,
+        profile=profile,
+        stage="standard",
+        target_release_type=target_release_type,
+        commonalities_release=commonalities_release,
+        icm_release=None,
+        is_release_review_pr=is_release_review_pr,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=apis,
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+def _make_api(
+    api_name: str = "quality-on-demand",
+    target_api_status: str = "public",
+    target_api_maturity: str = "stable",
+) -> ApiContext:
+    return ApiContext(
+        api_name=api_name,
+        target_api_version="1.0.0",
+        target_api_status=target_api_status,
+        target_api_maturity=target_api_maturity,
+        api_pattern="request-response",
+        spec_file=f"code/API_definitions/{api_name}.yaml",
+    )
+
+
+def _make_finding(
+    engine: str = "spectral",
+    engine_rule: str = "some-rule",
+    level: str = "warn",
+    message: str = "Something is wrong",
+    path: str = "code/API_definitions/quality-on-demand.yaml",
+    line: int = 10,
+    api_name: str | None = "quality-on-demand",
+) -> dict:
+    return {
+        "engine": engine,
+        "engine_rule": engine_rule,
+        "level": level,
+        "message": message,
+        "path": path,
+        "line": line,
+        "api_name": api_name,
+    }
+
+
+def _write_rules(tmp_path: Path, rules: list[dict], filename: str = "spectral-rules.yaml") -> None:
+    (tmp_path / filename).write_text(
+        yaml.dump(rules, default_flow_style=False), encoding="utf-8"
+    )
+
+
+def _minimal_rule(
+    id: str = "S-001",
+    engine: str = "spectral",
+    engine_rule: str = "some-rule",
+    hint: str = "Fix this.",
+    default_level: str = "warn",
+    applicability: dict | None = None,
+    overrides: list[dict] | None = None,
+) -> dict:
+    rule: dict = {
+        "id": id,
+        "name": "test-rule",
+        "engine": engine,
+        "engine_rule": engine_rule,
+        "hint": hint,
+        "conditional_level": {"default": default_level},
+    }
+    if applicability:
+        rule["applicability"] = applicability
+    if overrides:
+        rule["conditional_level"]["overrides"] = overrides
+    return rule
+
+
+# ---------------------------------------------------------------------------
+# TestIsEngineErrorFinding
+# ---------------------------------------------------------------------------
+
+
+class TestIsEngineErrorFinding:
+    def test_spectral_error(self):
+        f = _make_finding(engine_rule="spectral-execution-error")
+        assert _is_engine_error_finding(f) is True
+
+    def test_yamllint_error(self):
+        f = _make_finding(engine_rule="yamllint-execution-error")
+        assert _is_engine_error_finding(f) is True
+
+    def test_normal_rule(self):
+        f = _make_finding(engine_rule="camara-path-kebab-case")
+        assert _is_engine_error_finding(f) is False
+
+    def test_missing_engine_rule(self):
+        assert _is_engine_error_finding({}) is False
+
+
+# ---------------------------------------------------------------------------
+# TestResolveApiContext
+# ---------------------------------------------------------------------------
+
+
+class TestResolveApiContext:
+    def test_matching_api(self):
+        api = _make_api(api_name="quality-on-demand")
+        ctx = _make_context(apis=(api,))
+        f = _make_finding(api_name="quality-on-demand")
+        result = _resolve_api_context(f, ctx)
+        assert result is not None
+        assert result.api_name == "quality-on-demand"
+
+    def test_no_matching_api(self):
+        api = _make_api(api_name="qos-booking")
+        ctx = _make_context(apis=(api,))
+        f = _make_finding(api_name="quality-on-demand")
+        assert _resolve_api_context(f, ctx) is None
+
+    def test_repo_level_finding(self):
+        ctx = _make_context()
+        f = _make_finding(api_name=None)
+        assert _resolve_api_context(f, ctx) is None
+
+    def test_empty_api_name(self):
+        ctx = _make_context()
+        f = _make_finding()
+        f["api_name"] = ""
+        assert _resolve_api_context(f, ctx) is None
+
+
+# ---------------------------------------------------------------------------
+# TestComputeOverallResult
+# ---------------------------------------------------------------------------
+
+
+class TestComputeOverallResult:
+    def test_engine_error_trumps_all(self):
+        findings = [{"blocks": True}, {"blocks": False}]
+        assert compute_overall_result(findings, had_engine_error=True) == "error"
+
+    def test_blocking_finding_fails(self):
+        findings = [{"blocks": True}, {"blocks": False}]
+        assert compute_overall_result(findings, had_engine_error=False) == "fail"
+
+    def test_no_blocking_passes(self):
+        findings = [{"blocks": False}, {"blocks": False}]
+        assert compute_overall_result(findings, had_engine_error=False) == "pass"
+
+    def test_empty_findings_passes(self):
+        assert compute_overall_result([], had_engine_error=False) == "pass"
+
+    def test_empty_with_engine_error(self):
+        assert compute_overall_result([], had_engine_error=True) == "error"
+
+
+# ---------------------------------------------------------------------------
+# TestRunPostFilter — integration tests
+# ---------------------------------------------------------------------------
+
+
+class TestRunPostFilter:
+    """End-to-end tests for the full post-filter pipeline."""
+
+    def test_empty_findings(self, tmp_path: Path):
+        ctx = _make_context()
+        result = run_post_filter([], ctx, tmp_path)
+        assert result.result == "pass"
+        assert result.findings == []
+
+    def test_unmapped_rule_passthrough(self, tmp_path: Path):
+        """Findings without metadata pass through with identity mapping."""
+        ctx = _make_context(profile="standard")
+        findings = [_make_finding(level="warn", message="Use kebab-case")]
+        result = run_post_filter(findings, ctx, tmp_path)
+
+        assert result.result == "pass"  # warn doesn't block in standard
+        assert len(result.findings) == 1
+        f = result.findings[0]
+        assert f["level"] == "warn"
+        assert f["hint"] == "Use kebab-case"
+        assert f["blocks"] is False
+        assert "rule_id" not in f  # unmapped → no rule_id
+
+    def test_unmapped_error_blocks_in_standard(self, tmp_path: Path):
+        ctx = _make_context(profile="standard")
+        findings = [_make_finding(level="error")]
+        result = run_post_filter(findings, ctx, tmp_path)
+        assert result.result == "fail"
+        assert result.findings[0]["blocks"] is True
+
+    def test_mapped_rule_enrichment(self, tmp_path: Path):
+        """Mapped rules get rule_id, hint, and resolved level."""
+        _write_rules(tmp_path, [
+            _minimal_rule(
+                id="S-001",
+                engine_rule="some-rule",
+                hint="Do this instead.",
+                default_level="error",
+            )
+        ])
+        ctx = _make_context(profile="standard")
+        findings = [_make_finding(level="warn")]  # engine reports warn
+        result = run_post_filter(findings, ctx, tmp_path)
+
+        assert len(result.findings) == 1
+        f = result.findings[0]
+        assert f["rule_id"] == "S-001"
+        assert f["hint"] == "Do this instead."
+        assert f["level"] == "error"  # remapped from warn to error by metadata
+        assert f["blocks"] is True
+
+    def test_applicability_filters_finding(self, tmp_path: Path):
+        """Non-applicable findings are silently removed."""
+        _write_rules(tmp_path, [
+            _minimal_rule(
+                engine_rule="some-rule",
+                applicability={"branch_types": ["release"]},
+            )
+        ])
+        ctx = _make_context(branch_type="main")
+        findings = [_make_finding()]
+        result = run_post_filter(findings, ctx, tmp_path)
+        assert result.findings == []
+        assert result.result == "pass"
+
+    def test_level_off_removes_finding(self, tmp_path: Path):
+        """Level resolved to 'off' removes the finding."""
+        _write_rules(tmp_path, [
+            _minimal_rule(
+                engine_rule="some-rule",
+                default_level="off",
+            )
+        ])
+        ctx = _make_context()
+        findings = [_make_finding()]
+        result = run_post_filter(findings, ctx, tmp_path)
+        assert result.findings == []
+
+    def test_conditional_override(self, tmp_path: Path):
+        """Override changes level based on context."""
+        _write_rules(tmp_path, [
+            _minimal_rule(
+                engine_rule="some-rule",
+                default_level="hint",
+                overrides=[
+                    {
+                        "condition": {"branch_types": ["release"]},
+                        "level": "error",
+                    }
+                ],
+            )
+        ])
+        # On main → default hint
+        ctx_main = _make_context(branch_type="main", profile="standard")
+        result_main = run_post_filter([_make_finding()], ctx_main, tmp_path)
+        assert result_main.findings[0]["level"] == "hint"
+        assert result_main.findings[0]["blocks"] is False
+
+        # On release → override to error
+        ctx_release = _make_context(branch_type="release", profile="standard")
+        result_release = run_post_filter([_make_finding()], ctx_release, tmp_path)
+        assert result_release.findings[0]["level"] == "error"
+        assert result_release.findings[0]["blocks"] is True
+
+    def test_engine_error_finding(self, tmp_path: Path):
+        """Engine execution errors pass through and set result to 'error'."""
+        ctx = _make_context()
+        findings = [
+            _make_finding(engine_rule="spectral-execution-error", level="error"),
+        ]
+        result = run_post_filter(findings, ctx, tmp_path)
+        assert result.result == "error"
+        assert len(result.findings) == 1
+        assert result.findings[0]["blocks"] is True
+
+    def test_advisory_profile_nothing_blocks(self, tmp_path: Path):
+        ctx = _make_context(profile="advisory")
+        findings = [_make_finding(level="error")]
+        result = run_post_filter(findings, ctx, tmp_path)
+        assert result.result == "pass"
+        assert result.findings[0]["blocks"] is False
+
+    def test_strict_profile_warns_block(self, tmp_path: Path):
+        ctx = _make_context(profile="strict")
+        findings = [_make_finding(level="warn")]
+        result = run_post_filter(findings, ctx, tmp_path)
+        assert result.result == "fail"
+        assert result.findings[0]["blocks"] is True
+
+    def test_empty_rules_dir(self, tmp_path: Path):
+        """Empty rules directory → all findings pass through."""
+        ctx = _make_context(profile="standard")
+        findings = [
+            _make_finding(level="warn"),
+            _make_finding(engine_rule="other-rule", level="hint"),
+        ]
+        result = run_post_filter(findings, ctx, tmp_path)
+        assert len(result.findings) == 2
+        assert result.result == "pass"  # warn and hint don't block in standard
+
+    def test_mixed_findings(self, tmp_path: Path):
+        """Mix of mapped, unmapped, and filtered findings."""
+        _write_rules(tmp_path, [
+            _minimal_rule(
+                id="S-001",
+                engine_rule="mapped-rule",
+                default_level="error",
+            ),
+            _minimal_rule(
+                id="S-002",
+                engine_rule="filtered-rule",
+                applicability={"branch_types": ["release"]},
+                default_level="error",
+            ),
+        ])
+        ctx = _make_context(branch_type="main", profile="standard")
+        findings = [
+            _make_finding(engine_rule="mapped-rule"),
+            _make_finding(engine_rule="filtered-rule"),
+            _make_finding(engine_rule="unmapped-rule", level="hint"),
+        ]
+        result = run_post_filter(findings, ctx, tmp_path)
+
+        # mapped-rule: enriched, error, blocks
+        # filtered-rule: removed (applicability: release only)
+        # unmapped-rule: pass-through, hint, doesn't block
+        assert len(result.findings) == 2
+        assert result.result == "fail"
+
+        mapped = [f for f in result.findings if f.get("rule_id") == "S-001"]
+        assert len(mapped) == 1
+        assert mapped[0]["blocks"] is True
+
+        unmapped = [f for f in result.findings if f["engine_rule"] == "unmapped-rule"]
+        assert len(unmapped) == 1
+        assert unmapped[0]["blocks"] is False
+
+    def test_python_finding_lookup(self, tmp_path: Path):
+        """Python findings go through metadata lookup like other engines."""
+        _write_rules(tmp_path, [
+            _minimal_rule(
+                id="P-001",
+                engine="python",
+                engine_rule="check-info-version-format",
+                hint="Use wip on main.",
+                default_level="error",
+            )
+        ], filename="python-rules.yaml")
+
+        ctx = _make_context(profile="standard")
+        findings = [
+            _make_finding(
+                engine="python",
+                engine_rule="check-info-version-format",
+                level="warn",
+            )
+        ]
+        result = run_post_filter(findings, ctx, tmp_path)
+        assert len(result.findings) == 1
+        f = result.findings[0]
+        assert f["rule_id"] == "P-001"
+        assert f["level"] == "error"  # remapped by metadata
+        assert f["hint"] == "Use wip on main."
+
+    def test_per_api_conditional_level(self, tmp_path: Path):
+        """Different APIs get different levels based on their context."""
+        _write_rules(tmp_path, [
+            _minimal_rule(
+                engine_rule="some-rule",
+                default_level="hint",
+                overrides=[
+                    {
+                        "condition": {"target_api_maturity": ["stable"]},
+                        "level": "error",
+                    }
+                ],
+            )
+        ])
+
+        stable_api = _make_api(api_name="stable-api", target_api_maturity="stable")
+        initial_api = _make_api(api_name="initial-api", target_api_maturity="initial")
+        ctx = _make_context(profile="standard", apis=(stable_api, initial_api))
+
+        findings = [
+            _make_finding(api_name="stable-api"),
+            _make_finding(api_name="initial-api"),
+        ]
+        result = run_post_filter(findings, ctx, tmp_path)
+
+        stable_f = [f for f in result.findings if f["api_name"] == "stable-api"][0]
+        initial_f = [f for f in result.findings if f["api_name"] == "initial-api"][0]
+
+        assert stable_f["level"] == "error"
+        assert stable_f["blocks"] is True
+        assert initial_f["level"] == "hint"
+        assert initial_f["blocks"] is False
+
+    def test_finding_with_unknown_api_name(self, tmp_path: Path):
+        """Finding for an API not in context falls back to no api_context."""
+        _write_rules(tmp_path, [
+            _minimal_rule(
+                engine_rule="some-rule",
+                default_level="warn",
+                applicability={"target_api_status": ["public"]},
+            )
+        ])
+        ctx = _make_context()  # no APIs in context
+        findings = [_make_finding(api_name="unknown-api")]
+        result = run_post_filter(findings, ctx, tmp_path)
+        # Per-API conditions with None api_context are unconstrained → applicable
+        assert len(result.findings) == 1
+
+    def test_passthrough_preserves_existing_hint(self, tmp_path: Path):
+        """If a finding already has a hint, pass-through preserves it."""
+        ctx = _make_context()
+        finding = _make_finding()
+        finding["hint"] = "Pre-existing hint"
+        result = run_post_filter([finding], ctx, tmp_path)
+        assert result.findings[0]["hint"] == "Pre-existing hint"
+
+    def test_result_summary_content(self, tmp_path: Path):
+        """Summary string contains useful information."""
+        ctx = _make_context(profile="standard")
+        findings = [
+            _make_finding(level="error"),
+            _make_finding(engine_rule="r2", level="warn"),
+            _make_finding(engine_rule="r3", level="hint"),
+        ]
+        result = run_post_filter(findings, ctx, tmp_path)
+        assert result.result == "fail"
+        assert "1 errors" in result.summary
+        assert "1 warnings" in result.summary
+        assert "1 hints" in result.summary
diff --git a/validation/tests/test_postfilter_levels.py b/validation/tests/test_postfilter_levels.py
new file mode 100644
index 00000000..7db30b04
--- /dev/null
+++ b/validation/tests/test_postfilter_levels.py
@@ -0,0 +1,211 @@
+"""Unit tests for validation.postfilter.level_resolver."""
+
+from __future__ import annotations
+
+import pytest
+
+from validation.context import ApiContext, ValidationContext
+from validation.postfilter.level_resolver import (
+    apply_profile_blocking,
+    resolve_level,
+)
+from validation.postfilter.metadata_loader import (
+    ConditionalLevel,
+    ConditionalOverride,
+    RuleMetadata,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(
+    branch_type: str = "main",
+    trigger_type: str = "pr",
+    profile: str = "standard",
+    target_release_type: str | None = "public-release",
+    commonalities_release: str | None = "r4.1",
+) -> ValidationContext:
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type=branch_type,
+        trigger_type=trigger_type,
+        profile=profile,
+        stage="standard",
+        target_release_type=target_release_type,
+        commonalities_release=commonalities_release,
+        icm_release=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=(),
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+def _make_api(
+    target_api_maturity: str = "stable",
+    target_api_status: str = "public",
+) -> ApiContext:
+    return ApiContext(
+        api_name="quality-on-demand",
+        target_api_version="1.0.0",
+        target_api_status=target_api_status,
+        target_api_maturity=target_api_maturity,
+        api_pattern="request-response",
+        spec_file="code/API_definitions/quality-on-demand.yaml",
+    )
+
+
+def _make_rule(
+    default: str = "warn",
+    overrides: list[tuple[dict, str]] | None = None,
+) -> RuleMetadata:
+    """Build a RuleMetadata with given conditional level."""
+    override_objs = tuple(
+        ConditionalOverride(condition=cond, level=lvl)
+        for cond, lvl in (overrides or [])
+    )
+    return RuleMetadata(
+        id="S-001",
+        name="test-rule",
+        engine="spectral",
+        engine_rule="test-rule",
+        hint="Fix it.",
+        applicability={},
+        conditional_level=ConditionalLevel(
+            default=default,
+            overrides=override_objs,
+        ),
+    )
+
+
+# ---------------------------------------------------------------------------
+# TestResolveLevel
+# ---------------------------------------------------------------------------
+
+
+class TestResolveLevel:
+    def test_default_only(self):
+        rule = _make_rule(default="warn")
+        ctx = _make_context()
+        assert resolve_level(rule, ctx, None) == "warn"
+
+    def test_override_matches(self):
+        rule = _make_rule(
+            default="hint",
+            overrides=[
+                ({"branch_types": ["main"]}, "warn"),
+            ],
+        )
+        ctx = _make_context(branch_type="main")
+        assert resolve_level(rule, ctx, None) == "warn"
+
+    def test_override_does_not_match(self):
+        rule = _make_rule(
+            default="hint",
+            overrides=[
+                ({"branch_types": ["release"]}, "error"),
+            ],
+        )
+        ctx = _make_context(branch_type="main")
+        assert resolve_level(rule, ctx, None) == "hint"
+
+    def test_first_match_wins(self):
+        rule = _make_rule(
+            default="hint",
+            overrides=[
+                ({"branch_types": ["main"]}, "warn"),
+                ({"branch_types": ["main"]}, "error"),
+            ],
+        )
+        ctx = _make_context(branch_type="main")
+        assert resolve_level(rule, ctx, None) == "warn"
+
+    def test_second_override_matches(self):
+        rule = _make_rule(
+            default="hint",
+            overrides=[
+                ({"branch_types": ["release"]}, "error"),
+                ({"branch_types": ["main"]}, "warn"),
+            ],
+        )
+        ctx = _make_context(branch_type="main")
+        assert resolve_level(rule, ctx, None) == "warn"
+
+    def test_override_resolves_to_off(self):
+        rule = _make_rule(
+            default="warn",
+            overrides=[
+                ({"target_api_status": ["draft"]}, "off"),
+            ],
+        )
+        ctx = _make_context()
+        api = _make_api(target_api_status="draft")
+        assert resolve_level(rule, ctx, api) == "off"
+
+    def test_api_context_used_in_override(self):
+        rule = _make_rule(
+            default="hint",
+            overrides=[
+                ({"target_api_maturity": ["stable"]}, "warn"),
+            ],
+        )
+        ctx = _make_context()
+        api = _make_api(target_api_maturity="stable")
+        assert resolve_level(rule, ctx, api) == "warn"
+
+    def test_api_context_initial_no_match(self):
+        rule = _make_rule(
+            default="hint",
+            overrides=[
+                ({"target_api_maturity": ["stable"]}, "warn"),
+            ],
+        )
+        ctx = _make_context()
+        api = _make_api(target_api_maturity="initial")
+        assert resolve_level(rule, ctx, api) == "hint"
+
+
+# ---------------------------------------------------------------------------
+# TestApplyProfileBlocking
+# ---------------------------------------------------------------------------
+
+
+class TestApplyProfileBlocking:
+    # Advisory — nothing blocks
+    def test_advisory_error(self):
+        assert apply_profile_blocking("error", "advisory") is False
+
+    def test_advisory_warn(self):
+        assert apply_profile_blocking("warn", "advisory") is False
+
+    def test_advisory_hint(self):
+        assert apply_profile_blocking("hint", "advisory") is False
+
+    # Standard — only errors block
+    def test_standard_error(self):
+        assert apply_profile_blocking("error", "standard") is True
+
+    def test_standard_warn(self):
+        assert apply_profile_blocking("warn", "standard") is False
+
+    def test_standard_hint(self):
+        assert apply_profile_blocking("hint", "standard") is False
+
+    # Strict — errors and warnings block
+    def test_strict_error(self):
+        assert apply_profile_blocking("error", "strict") is True
+
+    def test_strict_warn(self):
+        assert apply_profile_blocking("warn", "strict") is True
+
+    def test_strict_hint(self):
+        assert apply_profile_blocking("hint", "strict") is False
+
+    # Unknown profile — safe default
+    def test_unknown_profile(self):
+        assert apply_profile_blocking("error", "unknown") is False
diff --git a/validation/tests/test_postfilter_metadata.py b/validation/tests/test_postfilter_metadata.py
new file mode 100644
index 00000000..39b52fa8
--- /dev/null
+++ b/validation/tests/test_postfilter_metadata.py
@@ -0,0 +1,270 @@
+"""Unit tests for validation.postfilter.metadata_loader."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+import yaml
+
+from validation.postfilter.metadata_loader import (
+    ConditionalLevel,
+    ConditionalOverride,
+    RuleMetadata,
+    build_lookup_index,
+    load_all_rules,
+    load_rules_from_file,
+    parse_rule_metadata,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _minimal_rule_dict(**overrides: object) -> dict:
+    """Build a minimal valid rule metadata dict."""
+    base = {
+        "id": "S-001",
+        "name": "test-rule",
+        "engine": "spectral",
+        "engine_rule": "camara-test-rule",
+        "hint": "Fix this issue.",
+        "conditional_level": {"default": "warn"},
+    }
+    base.update(overrides)
+    return base
+
+
+def _write_yaml(path: Path, data: object) -> None:
+    path.write_text(yaml.dump(data, default_flow_style=False), encoding="utf-8")
+
+
+# ---------------------------------------------------------------------------
+# TestParseRuleMetadata
+# ---------------------------------------------------------------------------
+
+
+class TestParseRuleMetadata:
+    def test_minimal_valid(self):
+        raw = _minimal_rule_dict()
+        rule = parse_rule_metadata(raw)
+        assert rule.id == "S-001"
+        assert rule.name == "test-rule"
+        assert rule.engine == "spectral"
+        assert rule.engine_rule == "camara-test-rule"
+        assert rule.hint == "Fix this issue."
+        assert rule.applicability == {}
+        assert rule.conditional_level.default == "warn"
+        assert rule.conditional_level.overrides == ()
+
+    def test_with_applicability(self):
+        raw = _minimal_rule_dict(
+            applicability={"branch_types": ["main", "release"]}
+        )
+        rule = parse_rule_metadata(raw)
+        assert rule.applicability == {"branch_types": ["main", "release"]}
+
+    def test_with_overrides(self):
+        raw = _minimal_rule_dict(
+            conditional_level={
+                "default": "hint",
+                "overrides": [
+                    {
+                        "condition": {"target_api_maturity": ["stable"]},
+                        "level": "warn",
+                    },
+                    {
+                        "condition": {"branch_types": ["release"]},
+                        "level": "error",
+                    },
+                ],
+            }
+        )
+        rule = parse_rule_metadata(raw)
+        assert rule.conditional_level.default == "hint"
+        assert len(rule.conditional_level.overrides) == 2
+        assert rule.conditional_level.overrides[0].level == "warn"
+        assert rule.conditional_level.overrides[1].level == "error"
+        assert rule.conditional_level.overrides[0].condition == {
+            "target_api_maturity": ["stable"]
+        }
+
+    def test_missing_required_field(self):
+        raw = _minimal_rule_dict()
+        del raw["hint"]
+        with pytest.raises(ValueError, match="hint"):
+            parse_rule_metadata(raw)
+
+    def test_missing_multiple_fields(self):
+        with pytest.raises(ValueError, match="id.*name|name.*id"):
+            parse_rule_metadata({"engine": "spectral"})
+
+    def test_conditional_level_missing_default(self):
+        raw = _minimal_rule_dict(conditional_level={"overrides": []})
+        with pytest.raises(ValueError, match="default"):
+            parse_rule_metadata(raw)
+
+    def test_conditional_level_not_a_dict(self):
+        raw = _minimal_rule_dict(conditional_level="error")
+        with pytest.raises(ValueError, match="mapping"):
+            parse_rule_metadata(raw)
+
+    def test_override_with_empty_condition(self):
+        raw = _minimal_rule_dict(
+            conditional_level={
+                "default": "warn",
+                "overrides": [{"condition": {}, "level": "error"}],
+            }
+        )
+        rule = parse_rule_metadata(raw)
+        assert rule.conditional_level.overrides[0].condition == {}
+
+    def test_non_dict_override_entries_skipped(self):
+        raw = _minimal_rule_dict(
+            conditional_level={
+                "default": "warn",
+                "overrides": ["invalid", {"condition": {}, "level": "hint"}],
+            }
+        )
+        rule = parse_rule_metadata(raw)
+        assert len(rule.conditional_level.overrides) == 1
+
+
+# ---------------------------------------------------------------------------
+# TestLoadRulesFromFile
+# ---------------------------------------------------------------------------
+
+
+class TestLoadRulesFromFile:
+    def test_valid_file(self, tmp_path: Path):
+        f = tmp_path / "spectral-rules.yaml"
+        _write_yaml(f, [_minimal_rule_dict()])
+        rules = load_rules_from_file(f)
+        assert len(rules) == 1
+        assert rules[0].id == "S-001"
+
+    def test_multiple_rules(self, tmp_path: Path):
+        f = tmp_path / "spectral-rules.yaml"
+        _write_yaml(
+            f,
+            [
+                _minimal_rule_dict(id="S-001", engine_rule="rule-a"),
+                _minimal_rule_dict(id="S-002", engine_rule="rule-b"),
+            ],
+        )
+        rules = load_rules_from_file(f)
+        assert len(rules) == 2
+
+    def test_missing_file(self, tmp_path: Path):
+        f = tmp_path / "does-not-exist.yaml"
+        assert load_rules_from_file(f) == []
+
+    def test_malformed_yaml(self, tmp_path: Path):
+        f = tmp_path / "bad.yaml"
+        f.write_text(": :\n  : - [\n", encoding="utf-8")
+        assert load_rules_from_file(f) == []
+
+    def test_empty_file(self, tmp_path: Path):
+        f = tmp_path / "empty.yaml"
+        f.write_text("", encoding="utf-8")
+        # yaml.safe_load returns None for empty → not a list → []
+        assert load_rules_from_file(f) == []
+
+    def test_non_array_yaml(self, tmp_path: Path):
+        f = tmp_path / "dict.yaml"
+        _write_yaml(f, {"not": "a list"})
+        assert load_rules_from_file(f) == []
+
+    def test_malformed_entry_skipped(self, tmp_path: Path):
+        f = tmp_path / "mixed.yaml"
+        _write_yaml(
+            f,
+            [
+                _minimal_rule_dict(id="S-001"),
+                {"id": "S-002"},  # missing required fields
+                _minimal_rule_dict(id="S-003"),
+            ],
+        )
+        rules = load_rules_from_file(f)
+        assert len(rules) == 2
+        assert rules[0].id == "S-001"
+        assert rules[1].id == "S-003"
+
+    def test_non_dict_entry_skipped(self, tmp_path: Path):
+        f = tmp_path / "mixed.yaml"
+        _write_yaml(f, [_minimal_rule_dict(id="S-001"), "not a dict"])
+        rules = load_rules_from_file(f)
+        assert len(rules) == 1
+
+
+# ---------------------------------------------------------------------------
+# TestLoadAllRules
+# ---------------------------------------------------------------------------
+
+
+class TestLoadAllRules:
+    def test_multiple_files(self, tmp_path: Path):
+        _write_yaml(
+            tmp_path / "spectral-rules.yaml",
+            [_minimal_rule_dict(id="S-001", engine="spectral")],
+        )
+        _write_yaml(
+            tmp_path / "python-rules.yaml",
+            [_minimal_rule_dict(id="P-001", engine="python", engine_rule="check-x")],
+        )
+        rules = load_all_rules(tmp_path)
+        assert len(rules) == 2
+        ids = {r.id for r in rules}
+        assert ids == {"S-001", "P-001"}
+
+    def test_empty_directory(self, tmp_path: Path):
+        assert load_all_rules(tmp_path) == []
+
+    def test_nonexistent_directory(self, tmp_path: Path):
+        assert load_all_rules(tmp_path / "nope") == []
+
+    def test_ignores_non_matching_files(self, tmp_path: Path):
+        _write_yaml(tmp_path / "spectral-rules.yaml", [_minimal_rule_dict()])
+        _write_yaml(tmp_path / "README.yaml", [_minimal_rule_dict(id="X-001")])
+        rules = load_all_rules(tmp_path)
+        assert len(rules) == 1
+        assert rules[0].id == "S-001"
+
+
+# ---------------------------------------------------------------------------
+# TestBuildLookupIndex
+# ---------------------------------------------------------------------------
+
+
+class TestBuildLookupIndex:
+    def test_normal_case(self):
+        rules = [
+            parse_rule_metadata(
+                _minimal_rule_dict(id="S-001", engine="spectral", engine_rule="rule-a")
+            ),
+            parse_rule_metadata(
+                _minimal_rule_dict(id="P-001", engine="python", engine_rule="check-x")
+            ),
+        ]
+        index = build_lookup_index(rules)
+        assert ("spectral", "rule-a") in index
+        assert ("python", "check-x") in index
+        assert index[("spectral", "rule-a")].id == "S-001"
+
+    def test_duplicate_key_first_wins(self):
+        rules = [
+            parse_rule_metadata(
+                _minimal_rule_dict(id="S-001", engine="spectral", engine_rule="rule-a")
+            ),
+            parse_rule_metadata(
+                _minimal_rule_dict(id="S-099", engine="spectral", engine_rule="rule-a")
+            ),
+        ]
+        index = build_lookup_index(rules)
+        assert index[("spectral", "rule-a")].id == "S-001"
+        assert len(index) == 1
+
+    def test_empty_list(self):
+        assert build_lookup_index([]) == {}

From 6e8becfeeafcd608b36ccb87f8210e642da9cff1 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Thu, 26 Mar 2026 12:47:05 +0100
Subject: [PATCH 009/157] feat(validation): add rule metadata for all 96 engine
 rules (WP-06.14)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Simplify rule metadata schema: only id, engine, engine_rule required.
Name, hint, and conditional_level are optional — identity-only entries
need just 3 fields to assign a stable rule_id.

Rule metadata files (96 rules total):
- python-rules.yaml: 12 checks with conditional levels and applicability
- spectral-rules.yaml: 17 CAMARA custom + 29 built-in OAS (S-001–S-017,
  S-200–S-228, gap reserved for future CAMARA rules)
- gherkin-rules.yaml: 25 enabled rules
- yamllint-rules.yaml: 13 enabled rules

P-007 (check-test-file-version) suppressed pending fix: checks filename
version suffix but should check version inside .feature file content.

Integration test validates structural integrity, engine coverage
(every enabled engine rule has metadata), and metadata quality.
Coverage tracker documents 21 gap rules and 1 fix needed.

428 tests passing.
---
 validation/postfilter/engine.py               |  29 +-
 validation/postfilter/metadata_loader.py      |  21 +-
 validation/rules/gherkin-rules.yaml           | 103 +++++++
 validation/rules/python-rules.yaml            | 109 +++++++
 validation/rules/rule-inventory.yaml          | 208 +++++++++++++
 validation/rules/spectral-rules.yaml          | 193 ++++++++++++
 validation/rules/yamllint-rules.yaml          |  55 ++++
 validation/schemas/rule-metadata-schema.yaml  |  14 +-
 validation/tests/test_postfilter_engine.py    |  57 +++-
 validation/tests/test_postfilter_metadata.py  |  85 ++++--
 .../tests/test_rule_metadata_integrity.py     | 287 ++++++++++++++++++
 11 files changed, 1113 insertions(+), 48 deletions(-)
 create mode 100644 validation/rules/gherkin-rules.yaml
 create mode 100644 validation/rules/python-rules.yaml
 create mode 100644 validation/rules/rule-inventory.yaml
 create mode 100644 validation/rules/spectral-rules.yaml
 create mode 100644 validation/rules/yamllint-rules.yaml
 create mode 100644 validation/tests/test_rule_metadata_integrity.py

diff --git a/validation/postfilter/engine.py b/validation/postfilter/engine.py
index 3a054d46..f62f9ead 100644
--- a/validation/postfilter/engine.py
+++ b/validation/postfilter/engine.py
@@ -81,13 +81,19 @@ def _resolve_api_context(
 def _enrich_finding(
     finding: dict,
     rule: RuleMetadata,
-    resolved_level: str,
+    resolved_level: Optional[str] = None,
 ) -> dict:
-    """Create an enriched copy of a finding with metadata applied."""
+    """Create an enriched copy of a finding with metadata applied.
+
+    When *resolved_level* is ``None`` (identity-only entry without
+    ``conditional_level``), the engine's original level is preserved.
+    When *rule.hint* is empty, the engine's message is used instead.
+    """
     enriched = dict(finding)
     enriched["rule_id"] = rule.id
-    enriched["level"] = resolved_level
-    enriched["hint"] = rule.hint
+    if resolved_level is not None:
+        enriched["level"] = resolved_level
+    enriched["hint"] = rule.hint or finding.get("message", "")
     return enriched
 
 
@@ -217,12 +223,15 @@ def run_post_filter(
             if not is_applicable(rule.applicability, context, api_ctx):
                 continue
 
-            # Conditional level resolution
-            resolved_level = resolve_level(rule, context, api_ctx)
-            if resolved_level == "off":
-                continue
-
-            enriched = _enrich_finding(finding, rule, resolved_level)
+            # Conditional level resolution (skip for identity-only entries)
+            if rule.conditional_level is not None:
+                resolved_level = resolve_level(rule, context, api_ctx)
+                if resolved_level == "off":
+                    continue
+                enriched = _enrich_finding(finding, rule, resolved_level)
+            else:
+                # Identity-only: assign rule_id, keep engine level
+                enriched = _enrich_finding(finding, rule)
         else:
             # Step 4: Unmapped rule — pass-through
             enriched = _passthrough_finding(finding)
diff --git a/validation/postfilter/metadata_loader.py b/validation/postfilter/metadata_loader.py
index 64fa2ec8..18ab8ee2 100644
--- a/validation/postfilter/metadata_loader.py
+++ b/validation/postfilter/metadata_loader.py
@@ -24,7 +24,7 @@
 # Required fields in a rule metadata entry
 # ---------------------------------------------------------------------------
 
-_REQUIRED_FIELDS = ("id", "name", "engine", "engine_rule", "hint", "conditional_level")
+_REQUIRED_FIELDS = ("id", "engine", "engine_rule")
 
 
 # ---------------------------------------------------------------------------
@@ -66,12 +66,13 @@ class RuleMetadata:
 
     Attributes:
         id: Stable ID with engine prefix (e.g. ``"S-042"``).
-        name: Human-readable kebab-case name.
+        name: Human-readable kebab-case name.  Defaults to ``engine_rule``.
         engine: Engine responsible for producing the finding.
         engine_rule: Native rule identifier within the engine.
-        hint: Actionable fix guidance shown to developers.
+        hint: Actionable fix guidance.  Empty string means "use engine message".
         applicability: Condition dict — omitted fields are unconstrained.
-        conditional_level: Severity specification with optional overrides.
+        conditional_level: Severity specification, or ``None`` to preserve
+            engine-reported severity (identity mapping).
     """
 
     id: str
@@ -80,7 +81,7 @@ class RuleMetadata:
     engine_rule: str
     hint: str
     applicability: dict
-    conditional_level: ConditionalLevel
+    conditional_level: Optional[ConditionalLevel]
 
 
 # ---------------------------------------------------------------------------
@@ -132,14 +133,18 @@ def parse_rule_metadata(raw: dict) -> RuleMetadata:
     if missing:
         raise ValueError(f"Missing required fields: {', '.join(missing)}")
 
+    # Optional conditional_level — None means identity mapping
+    raw_cl = raw.get("conditional_level")
+    conditional_level = _parse_conditional_level(raw_cl) if raw_cl is not None else None
+
     return RuleMetadata(
         id=raw["id"],
-        name=raw["name"],
+        name=raw.get("name", raw["engine_rule"]),
         engine=raw["engine"],
         engine_rule=raw["engine_rule"],
-        hint=raw["hint"],
+        hint=raw.get("hint", ""),
         applicability=raw.get("applicability", {}),
-        conditional_level=_parse_conditional_level(raw["conditional_level"]),
+        conditional_level=conditional_level,
     )
 
 
diff --git a/validation/rules/gherkin-rules.yaml b/validation/rules/gherkin-rules.yaml
new file mode 100644
index 00000000..f0fd3e26
--- /dev/null
+++ b/validation/rules/gherkin-rules.yaml
@@ -0,0 +1,103 @@
+# Gherkin-lint rule metadata.
+# All entries are identity-only initially, can be extended if needed.
+# All rules default to warn (gherkin-lint has no per-finding severity).
+
+- id: G-001
+  engine: gherkin
+  engine_rule: allowed-tags
+
+- id: G-002
+  engine: gherkin
+  engine_rule: indentation
+
+- id: G-003
+  engine: gherkin
+  engine_rule: keywords-in-logical-order
+
+- id: G-004
+  engine: gherkin
+  engine_rule: max-scenarios-per-file
+
+- id: G-005
+  engine: gherkin
+  engine_rule: name-length
+
+- id: G-006
+  engine: gherkin
+  engine_rule: no-background-only-scenario
+
+- id: G-007
+  engine: gherkin
+  engine_rule: no-dupe-feature-names
+
+- id: G-008
+  engine: gherkin
+  engine_rule: no-dupe-scenario-names
+
+- id: G-009
+  engine: gherkin
+  engine_rule: no-duplicate-tags
+
+- id: G-010
+  engine: gherkin
+  engine_rule: no-empty-background
+
+- id: G-011
+  engine: gherkin
+  engine_rule: no-empty-file
+
+- id: G-012
+  engine: gherkin
+  engine_rule: no-files-without-scenarios
+
+- id: G-013
+  engine: gherkin
+  engine_rule: no-homogenous-tags
+
+- id: G-014
+  engine: gherkin
+  engine_rule: no-multiple-empty-lines
+
+- id: G-015
+  engine: gherkin
+  engine_rule: no-partially-commented-tag-lines
+
+- id: G-016
+  engine: gherkin
+  engine_rule: no-restricted-tags
+
+- id: G-017
+  engine: gherkin
+  engine_rule: no-scenario-outlines-without-examples
+
+- id: G-018
+  engine: gherkin
+  engine_rule: no-superfluous-tags
+
+- id: G-019
+  engine: gherkin
+  engine_rule: no-trailing-spaces
+
+- id: G-020
+  engine: gherkin
+  engine_rule: no-unnamed-features
+
+- id: G-021
+  engine: gherkin
+  engine_rule: no-unnamed-scenarios
+
+- id: G-022
+  engine: gherkin
+  engine_rule: no-unused-variables
+
+- id: G-023
+  engine: gherkin
+  engine_rule: one-space-between-tags
+
+- id: G-024
+  engine: gherkin
+  engine_rule: required-tags
+
+- id: G-025
+  engine: gherkin
+  engine_rule: use-and
diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
new file mode 100644
index 00000000..2451df7f
--- /dev/null
+++ b/validation/rules/python-rules.yaml
@@ -0,0 +1,109 @@
+# Python check rule metadata.
+# These checks have context-dependent behavior: conditional levels based on
+# branch type, API maturity, and release context; applicability conditions
+# for release-review and release-plan-changed contexts.
+# Engine messages from make_finding() serve as hints (design doc section 8.4.1).
+
+# P-001: check-filename-kebab-case
+- id: P-001
+  engine: python
+  engine_rule: check-filename-kebab-case
+  conditional_level:
+    default: error
+
+# P-002: check-filename-matches-api-name
+- id: P-002
+  engine: python
+  engine_rule: check-filename-matches-api-name
+  conditional_level:
+    default: error
+
+# P-003: check-info-version-format
+- id: P-003
+  engine: python
+  engine_rule: check-info-version-format
+  conditional_level:
+    default: error
+
+# P-004: check-server-url-version
+- id: P-004
+  engine: python
+  engine_rule: check-server-url-version
+  conditional_level:
+    default: error
+    overrides:
+      - condition:
+          branch_types: [main, feature]
+        level: warn
+
+# P-005: check-server-url-api-name
+- id: P-005
+  engine: python
+  engine_rule: check-server-url-api-name
+  conditional_level:
+    default: error
+
+# P-006: check-test-files-exist
+- id: P-006
+  engine: python
+  engine_rule: check-test-files-exist
+  conditional_level:
+    default: hint
+    overrides:
+      - condition:
+          target_api_maturity: [stable]
+          target_release_type: [pre-release-rc, public-release]
+        level: warn
+
+# P-007: check-test-file-version — SUPPRESSED pending fix
+# Current check validates filename version suffix, but the version belongs
+# inside the .feature file (first line), not the filename. Needs rewrite
+# to parse file content (same approach as P-003 for spec info.version).
+- id: P-007
+  engine: python
+  engine_rule: check-test-file-version
+  conditional_level:
+    default: off
+
+# P-008: check-test-directory-exists
+- id: P-008
+  engine: python
+  engine_rule: check-test-directory-exists
+  conditional_level:
+    default: hint
+    overrides:
+      - condition:
+          target_release_type: [pre-release-rc, public-release]
+        level: warn
+
+# P-009: check-release-plan-semantics
+- id: P-009
+  engine: python
+  engine_rule: check-release-plan-semantics
+  applicability:
+    release_plan_changed: true
+  conditional_level:
+    default: error
+
+# P-010: check-changelog-format
+- id: P-010
+  engine: python
+  engine_rule: check-changelog-format
+  conditional_level:
+    default: warn
+
+# P-011: check-license-commonalities-consistency
+- id: P-011
+  engine: python
+  engine_rule: check-license-commonalities-consistency
+  conditional_level:
+    default: warn
+
+# P-012: check-release-review-file-restriction
+- id: P-012
+  engine: python
+  engine_rule: check-release-review-file-restriction
+  applicability:
+    is_release_review_pr: true
+  conditional_level:
+    default: error
diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
new file mode 100644
index 00000000..c4bdb9e6
--- /dev/null
+++ b/validation/rules/rule-inventory.yaml
@@ -0,0 +1,208 @@
+# CAMARA Validation Framework — Rule Inventory
+#
+# Complete inventory of all known validation rules across all sources.
+# Core set rules have metadata entries in *-rules.yaml files.
+# Gap and manual rules are tracked here for WS07 implementation.
+#
+# Status values:
+#   implemented — metadata in *-rules.yaml, engine check active
+#   gap         — documented in audit, no engine implementation yet
+#   manual      — requires human judgment, not machine-checkable
+#   pending     — in open PRs, not yet merged
+
+version: 1
+generated: 2026-03-26
+
+summary:
+  total_implemented: 96
+  total_gap: 21
+  total_manual: 25
+  total_pending: 17
+  by_engine:
+    spectral: 46
+    gherkin: 25
+    python: 12
+    yamllint: 13
+
+# ---------------------------------------------------------------------------
+# Implemented rules — metadata in *-rules.yaml
+# ---------------------------------------------------------------------------
+# Not duplicated here — the *-rules.yaml files ARE the authoritative registry.
+# See: python-rules.yaml, spectral-rules.yaml, gherkin-rules.yaml,
+#      yamllint-rules.yaml
+
+# ---------------------------------------------------------------------------
+# Gap rules — documented but not implemented (WS07 scope)
+# ---------------------------------------------------------------------------
+# Source: private-dev-docs/validation-framework/reviews/commonalities-design-guide-audit.md
+
+gap_rules:
+  # Spectral gaps (new rules needed)
+  - audit_id: DG-003
+    description: date-time description RFC 3339 format
+    target_engine: spectral
+    priority: low
+
+  - audit_id: DG-004
+    description: duration description RFC 3339 format
+    target_engine: spectral
+    priority: low
+
+  - audit_id: DG-008
+    description: "Object: required properties MUST exist in properties"
+    target_engine: spectral
+    priority: medium
+
+  - audit_id: DG-013
+    description: Error code MUST NOT be numeric
+    target_engine: spectral
+    priority: medium
+
+  - audit_id: DG-014
+    description: Error code MUST be SCREAMING_SNAKE_CASE (r4.x)
+    target_engine: spectral
+    priority: medium
+    notes: commonalities_release >=r4.0
+
+  - audit_id: DG-015
+    description: "API-specific error: API_NAME.SPECIFIC_CODE format"
+    target_engine: spectral
+    priority: medium
+
+  - audit_id: DG-017
+    description: All APIs MUST document 403 response
+    target_engine: spectral
+    priority: medium
+
+  - audit_id: DG-032
+    description: info.contact MUST be absent
+    target_engine: spectral
+    priority: low
+
+  - audit_id: DG-041
+    description: Tag names Title Case convention
+    target_engine: spectral
+    priority: low
+
+  - audit_id: DG-058
+    description: Array items MUST have description (r4.x)
+    target_engine: spectral
+    priority: medium
+    notes: commonalities_release >=r4.0
+
+  - audit_id: DG-087
+    description: "specversion MUST be '1.0' (subscription)"
+    target_engine: spectral
+    priority: low
+    notes: api_pattern subscription only
+
+  - audit_id: DG-090
+    description: "protocol MUST be 'HTTP' (subscription)"
+    target_engine: spectral
+    priority: low
+    notes: api_pattern subscription only
+
+  - audit_id: DG-091
+    description: sink MUST use HTTPS (subscription)
+    target_engine: spectral
+    priority: low
+    notes: api_pattern subscription only
+
+  - audit_id: DG-094
+    description: Notification content-type cloudevents+json (subscription)
+    target_engine: spectral
+    priority: low
+    notes: api_pattern subscription only
+
+  # Python gaps (new checks needed)
+  - audit_id: DG-011
+    description: contextCode SCREAMING_SNAKE_CASE format (r4.x)
+    target_engine: python
+    priority: low
+
+  - audit_id: DG-018
+    description: CONFLICT error code deprecated (r4.x)
+    target_engine: python
+    priority: low
+
+  - audit_id: DG-086
+    description: Event type format org.camaraproject validation (subscription)
+    target_engine: python
+    priority: medium
+
+  - audit_id: DG-088
+    description: Subscription API filename convention
+    target_engine: python
+    priority: medium
+
+  - audit_id: DG-092
+    description: sinkCredential not in responses
+    target_engine: python
+    priority: medium
+
+  - audit_id: DG-095
+    description: Event version independence from API version
+    target_engine: python
+    priority: low
+
+  # New rules (not from audit — identified during implementation)
+  - audit_id: NEW-001
+    description: README.md placeholder must be removed from API_definitions/ when spec files are present
+    target_engine: python
+    priority: medium
+
+# ---------------------------------------------------------------------------
+# Fixes needed — implemented rules with incorrect behavior
+# ---------------------------------------------------------------------------
+
+fixes:
+  - rule_id: P-007
+    engine_rule: check-test-file-version
+    status: suppressed
+    issue: Checks version suffix in filename, but version belongs inside .feature file (first line)
+    fix: Rewrite to parse file content, same approach as P-003 for spec info.version
+    conditional_level_after_fix:
+      default: hint
+      overrides:
+        - condition:
+            target_api_maturity: [stable]
+          level: warn
+
+# ---------------------------------------------------------------------------
+# Pending rules — in open PRs
+# ---------------------------------------------------------------------------
+# Source: tooling#95 (OWASP Spectral rules)
+
+pending_rules:
+  - source: tooling#95
+    description: OWASP API security rules (17 rules)
+    target_engine: spectral
+    notes: Parked per DEC-013. Introduce with v1 + bundling.
+    estimated_count: 17
+
+# ---------------------------------------------------------------------------
+# Manual rules — require human judgment
+# ---------------------------------------------------------------------------
+# Source: private-dev-docs/validation-framework/reviews/testing-guidelines-audit.md
+
+manual_rules:
+  count: 25
+  categories:
+    - name: Feature context sections
+      audit_ids: [TG-015, TG-016, TG-017]
+      description: Feature file should describe API context
+    - name: Background setup patterns
+      audit_ids: [TG-020, TG-021, TG-022, TG-023]
+      description: Background section setup conventions
+    - name: Standardized step syntax
+      audit_ids: [TG-042, TG-043, TG-044, TG-045, TG-046, TG-047, TG-048]
+      description: Given/When/Then step phrasing conventions
+    - name: Test coverage adequacy
+      audit_ids: [TG-054, TG-055]
+      description: Test plan completeness for RC and public releases
+    - name: Device identifier rules
+      audit_ids: [TG-057, TG-058]
+      description: Device identifier handling in test scenarios
+    - name: Other semantic rules
+      audit_ids: [TG-024, TG-025, TG-026, TG-027, TG-028, TG-029, TG-056, TG-059, TG-060]
+      description: Various semantic test quality rules
diff --git a/validation/rules/spectral-rules.yaml b/validation/rules/spectral-rules.yaml
new file mode 100644
index 00000000..a7b7c612
--- /dev/null
+++ b/validation/rules/spectral-rules.yaml
@@ -0,0 +1,193 @@
+# Spectral rule metadata.
+# S-001 through S-199: reserved for CAMARA custom rules
+# S-200 through S-xxx: Built-in OAS rules
+# All entries are identity-only initially, can be extended if needed.
+# Hints default to the Spectral engine message; can be overridden in WS07.
+
+# ===== CAMARA custom rules (S-001+) =====
+
+- id: S-001
+  engine: spectral
+  engine_rule: camara-discriminator-use
+
+- id: S-002
+  engine: spectral
+  engine_rule: camara-get-no-request-body
+
+- id: S-003
+  engine: spectral
+  engine_rule: camara-http-methods
+
+- id: S-004
+  engine: spectral
+  engine_rule: camara-language-avoid-telco
+
+- id: S-005
+  engine: spectral
+  engine_rule: camara-oas-version
+
+- id: S-006
+  engine: spectral
+  engine_rule: camara-operation-summary
+
+- id: S-007
+  engine: spectral
+  engine_rule: camara-operationid-casing-convention
+
+- id: S-008
+  engine: spectral
+  engine_rule: camara-parameter-casing-convention
+
+- id: S-009
+  engine: spectral
+  engine_rule: camara-parameters-descriptions
+
+- id: S-010
+  engine: spectral
+  engine_rule: camara-path-param-id
+
+- id: S-011
+  engine: spectral
+  engine_rule: camara-properties-descriptions
+
+- id: S-012
+  engine: spectral
+  engine_rule: camara-reserved-words
+
+- id: S-013
+  engine: spectral
+  engine_rule: camara-response-descriptions
+
+- id: S-014
+  engine: spectral
+  engine_rule: camara-routes-description
+
+- id: S-015
+  engine: spectral
+  engine_rule: camara-schema-casing-convention
+
+- id: S-016
+  engine: spectral
+  engine_rule: camara-schema-type-check
+
+- id: S-017
+  engine: spectral
+  engine_rule: camara-security-no-secrets-in-path-or-query-parameters
+
+# ===== Built-in OAS rules (S-200+) =====
+
+- id: S-200
+  engine: spectral
+  engine_rule: duplicated-entry-in-enum
+
+- id: S-201
+  engine: spectral
+  engine_rule: info-description
+
+- id: S-202
+  engine: spectral
+  engine_rule: info-license
+
+- id: S-203
+  engine: spectral
+  engine_rule: license-url
+
+- id: S-204
+  engine: spectral
+  engine_rule: no-$ref-siblings
+
+- id: S-205
+  engine: spectral
+  engine_rule: no-eval-in-markdown
+
+- id: S-206
+  engine: spectral
+  engine_rule: no-script-tags-in-markdown
+
+- id: S-207
+  engine: spectral
+  engine_rule: oas3-api-servers
+
+- id: S-208
+  engine: spectral
+  engine_rule: oas3-examples-value-or-externalValue
+
+- id: S-209
+  engine: spectral
+  engine_rule: oas3-schema
+
+- id: S-210
+  engine: spectral
+  engine_rule: oas3-server-trailing-slash
+
+- id: S-211
+  engine: spectral
+  engine_rule: oas3-unused-component
+
+- id: S-212
+  engine: spectral
+  engine_rule: oas3-valid-media-example
+
+- id: S-213
+  engine: spectral
+  engine_rule: oas3-valid-schema-example
+
+- id: S-214
+  engine: spectral
+  engine_rule: openapi-tags-uniqueness
+
+- id: S-215
+  engine: spectral
+  engine_rule: operation-description
+
+- id: S-216
+  engine: spectral
+  engine_rule: operation-operationId
+
+- id: S-217
+  engine: spectral
+  engine_rule: operation-operationId-unique
+
+- id: S-218
+  engine: spectral
+  engine_rule: operation-operationId-valid-in-url
+
+- id: S-219
+  engine: spectral
+  engine_rule: operation-parameters
+
+- id: S-220
+  engine: spectral
+  engine_rule: operation-singular-tag
+
+- id: S-221
+  engine: spectral
+  engine_rule: operation-success-response
+
+- id: S-222
+  engine: spectral
+  engine_rule: operation-tag-defined
+
+- id: S-223
+  engine: spectral
+  engine_rule: operation-tags
+
+- id: S-224
+  engine: spectral
+  engine_rule: path-declarations-must-exist
+
+- id: S-225
+  engine: spectral
+  engine_rule: path-keys-no-trailing-slash
+
+- id: S-226
+  engine: spectral
+  engine_rule: path-not-include-query
+
+- id: S-227
+  engine: spectral
+  engine_rule: path-params
+
+- id: S-228
+  engine: spectral
+  engine_rule: typed-enum
diff --git a/validation/rules/yamllint-rules.yaml b/validation/rules/yamllint-rules.yaml
new file mode 100644
index 00000000..a02a6676
--- /dev/null
+++ b/validation/rules/yamllint-rules.yaml
@@ -0,0 +1,55 @@
+# yamllint rule metadata.
+# All entries are identity-only initially, can be extended if needed.
+# Pre-bundling YAML syntax and formatting checks.
+
+- id: Y-001
+  engine: yamllint
+  engine_rule: braces
+
+- id: Y-002
+  engine: yamllint
+  engine_rule: brackets
+
+- id: Y-003
+  engine: yamllint
+  engine_rule: colons
+
+- id: Y-004
+  engine: yamllint
+  engine_rule: commas
+
+- id: Y-005
+  engine: yamllint
+  engine_rule: comments
+
+- id: Y-006
+  engine: yamllint
+  engine_rule: comments-indentation
+
+- id: Y-007
+  engine: yamllint
+  engine_rule: empty-lines
+
+- id: Y-008
+  engine: yamllint
+  engine_rule: hyphens
+
+- id: Y-009
+  engine: yamllint
+  engine_rule: indentation
+
+- id: Y-010
+  engine: yamllint
+  engine_rule: key-duplicates
+
+- id: Y-011
+  engine: yamllint
+  engine_rule: new-line-at-end-of-file
+
+- id: Y-012
+  engine: yamllint
+  engine_rule: trailing-spaces
+
+- id: Y-013
+  engine: yamllint
+  engine_rule: truthy
diff --git a/validation/schemas/rule-metadata-schema.yaml b/validation/schemas/rule-metadata-schema.yaml
index d10ffeb3..afb58f0d 100644
--- a/validation/schemas/rule-metadata-schema.yaml
+++ b/validation/schemas/rule-metadata-schema.yaml
@@ -11,11 +11,8 @@ description: >
 type: object
 required:
   - id
-  - name
   - engine
   - engine_rule
-  - hint
-  - conditional_level
 
 properties:
   id:
@@ -28,7 +25,9 @@ properties:
   name:
     type: string
     pattern: "^[a-z][a-z0-9-]+$"
-    description: Human-readable kebab-case name.
+    description: >
+      Human-readable kebab-case name.  Optional — defaults to engine_rule
+      when omitted.
 
   engine:
     type: string
@@ -45,7 +44,9 @@ properties:
 
   hint:
     type: string
-    description: Actionable fix guidance shown to the developer.
+    description: >
+      Actionable fix guidance shown to the developer.  Optional — when
+      omitted, the engine's finding message is used instead.
 
   applicability:
     type: object
@@ -103,6 +104,9 @@ properties:
 
   conditional_level:
     type: object
+    description: >
+      Severity specification.  Optional — when omitted, the engine's
+      reported severity is preserved (identity mapping).
     required: [default]
     properties:
       default:
diff --git a/validation/tests/test_postfilter_engine.py b/validation/tests/test_postfilter_engine.py
index cc68dcc2..b7363c9c 100644
--- a/validation/tests/test_postfilter_engine.py
+++ b/validation/tests/test_postfilter_engine.py
@@ -90,6 +90,15 @@ def _write_rules(tmp_path: Path, rules: list[dict], filename: str = "spectral-ru
     )
 
 
+def _identity_rule(
+    id: str = "S-001",
+    engine: str = "spectral",
+    engine_rule: str = "some-rule",
+) -> dict:
+    """Build an identity-only rule: just id, engine, engine_rule."""
+    return {"id": id, "engine": engine, "engine_rule": engine_rule}
+
+
 def _minimal_rule(
     id: str = "S-001",
     engine: str = "spectral",
@@ -99,9 +108,9 @@ def _minimal_rule(
     applicability: dict | None = None,
     overrides: list[dict] | None = None,
 ) -> dict:
+    """Build a rule with conditional_level (full behavior)."""
     rule: dict = {
         "id": id,
-        "name": "test-rule",
         "engine": engine,
         "engine_rule": engine_rule,
         "hint": hint,
@@ -471,3 +480,49 @@ def test_result_summary_content(self, tmp_path: Path):
         assert "1 errors" in result.summary
         assert "1 warnings" in result.summary
         assert "1 hints" in result.summary
+
+    def test_identity_only_entry(self, tmp_path: Path):
+        """Identity-only entry assigns rule_id but keeps engine level."""
+        _write_rules(tmp_path, [_identity_rule(id="S-018")])
+        ctx = _make_context(profile="standard")
+        findings = [_make_finding(level="warn", message="Engine message")]
+        result = run_post_filter(findings, ctx, tmp_path)
+
+        assert len(result.findings) == 1
+        f = result.findings[0]
+        assert f["rule_id"] == "S-018"
+        assert f["level"] == "warn"  # engine level preserved
+        assert f["hint"] == "Engine message"  # falls back to message
+        assert f["blocks"] is False  # warn doesn't block in standard
+
+    def test_identity_entry_with_explicit_hint(self, tmp_path: Path):
+        """Identity entry with explicit hint uses the hint, not message."""
+        _write_rules(tmp_path, [{
+            "id": "S-018",
+            "engine": "spectral",
+            "engine_rule": "some-rule",
+            "hint": "Custom guidance.",
+        }])
+        ctx = _make_context()
+        findings = [_make_finding(message="Engine message")]
+        result = run_post_filter(findings, ctx, tmp_path)
+
+        assert result.findings[0]["hint"] == "Custom guidance."
+        assert result.findings[0]["rule_id"] == "S-018"
+
+    def test_mapped_rule_without_hint_falls_back(self, tmp_path: Path):
+        """Rule with conditional_level but no hint uses engine message."""
+        _write_rules(tmp_path, [{
+            "id": "S-001",
+            "engine": "spectral",
+            "engine_rule": "some-rule",
+            "conditional_level": {"default": "error"},
+        }])
+        ctx = _make_context()
+        findings = [_make_finding(message="Engine says fix this")]
+        result = run_post_filter(findings, ctx, tmp_path)
+
+        f = result.findings[0]
+        assert f["rule_id"] == "S-001"
+        assert f["level"] == "error"  # from conditional_level
+        assert f["hint"] == "Engine says fix this"  # fallback from message
diff --git a/validation/tests/test_postfilter_metadata.py b/validation/tests/test_postfilter_metadata.py
index 39b52fa8..481584df 100644
--- a/validation/tests/test_postfilter_metadata.py
+++ b/validation/tests/test_postfilter_metadata.py
@@ -24,8 +24,19 @@
 
 
 def _minimal_rule_dict(**overrides: object) -> dict:
-    """Build a minimal valid rule metadata dict."""
-    base = {
+    """Build a minimal valid rule metadata dict (only required fields)."""
+    base: dict = {
+        "id": "S-001",
+        "engine": "spectral",
+        "engine_rule": "camara-test-rule",
+    }
+    base.update(overrides)
+    return base
+
+
+def _full_rule_dict(**overrides: object) -> dict:
+    """Build a rule metadata dict with all optional fields populated."""
+    base: dict = {
         "id": "S-001",
         "name": "test-rule",
         "engine": "spectral",
@@ -47,18 +58,48 @@ def _write_yaml(path: Path, data: object) -> None:
 
 
 class TestParseRuleMetadata:
-    def test_minimal_valid(self):
+    def test_identity_only(self):
+        """Minimal entry: just id, engine, engine_rule."""
         raw = _minimal_rule_dict()
         rule = parse_rule_metadata(raw)
         assert rule.id == "S-001"
-        assert rule.name == "test-rule"
+        assert rule.name == "camara-test-rule"  # defaults to engine_rule
         assert rule.engine == "spectral"
         assert rule.engine_rule == "camara-test-rule"
-        assert rule.hint == "Fix this issue."
+        assert rule.hint == ""
         assert rule.applicability == {}
+        assert rule.conditional_level is None
+
+    def test_full_entry(self):
+        raw = _full_rule_dict()
+        rule = parse_rule_metadata(raw)
+        assert rule.id == "S-001"
+        assert rule.name == "test-rule"
+        assert rule.hint == "Fix this issue."
+        assert rule.conditional_level is not None
         assert rule.conditional_level.default == "warn"
         assert rule.conditional_level.overrides == ()
 
+    def test_name_defaults_to_engine_rule(self):
+        raw = _minimal_rule_dict()
+        rule = parse_rule_metadata(raw)
+        assert rule.name == "camara-test-rule"
+
+    def test_explicit_name_overrides_default(self):
+        raw = _minimal_rule_dict(name="custom-name")
+        rule = parse_rule_metadata(raw)
+        assert rule.name == "custom-name"
+
+    def test_hint_defaults_to_empty(self):
+        raw = _minimal_rule_dict()
+        rule = parse_rule_metadata(raw)
+        assert rule.hint == ""
+
+    def test_explicit_hint(self):
+        raw = _minimal_rule_dict(hint="Do this instead.")
+        rule = parse_rule_metadata(raw)
+        assert rule.hint == "Do this instead."
+
     def test_with_applicability(self):
         raw = _minimal_rule_dict(
             applicability={"branch_types": ["main", "release"]}
@@ -67,7 +108,7 @@ def test_with_applicability(self):
         assert rule.applicability == {"branch_types": ["main", "release"]}
 
     def test_with_overrides(self):
-        raw = _minimal_rule_dict(
+        raw = _full_rule_dict(
             conditional_level={
                 "default": "hint",
                 "overrides": [
@@ -92,13 +133,11 @@ def test_with_overrides(self):
         }
 
     def test_missing_required_field(self):
-        raw = _minimal_rule_dict()
-        del raw["hint"]
-        with pytest.raises(ValueError, match="hint"):
-            parse_rule_metadata(raw)
+        with pytest.raises(ValueError, match="engine_rule"):
+            parse_rule_metadata({"id": "S-001", "engine": "spectral"})
 
     def test_missing_multiple_fields(self):
-        with pytest.raises(ValueError, match="id.*name|name.*id"):
+        with pytest.raises(ValueError, match="id"):
             parse_rule_metadata({"engine": "spectral"})
 
     def test_conditional_level_missing_default(self):
@@ -112,7 +151,7 @@ def test_conditional_level_not_a_dict(self):
             parse_rule_metadata(raw)
 
     def test_override_with_empty_condition(self):
-        raw = _minimal_rule_dict(
+        raw = _full_rule_dict(
             conditional_level={
                 "default": "warn",
                 "overrides": [{"condition": {}, "level": "error"}],
@@ -122,7 +161,7 @@ def test_override_with_empty_condition(self):
         assert rule.conditional_level.overrides[0].condition == {}
 
     def test_non_dict_override_entries_skipped(self):
-        raw = _minimal_rule_dict(
+        raw = _full_rule_dict(
             conditional_level={
                 "default": "warn",
                 "overrides": ["invalid", {"condition": {}, "level": "hint"}],
@@ -183,8 +222,8 @@ def test_malformed_entry_skipped(self, tmp_path: Path):
             f,
             [
                 _minimal_rule_dict(id="S-001"),
-                {"id": "S-002"},  # missing required fields
-                _minimal_rule_dict(id="S-003"),
+                {"name": "no-id"},  # missing required fields
+                _minimal_rule_dict(id="S-003", engine_rule="rule-b"),
             ],
         )
         rules = load_rules_from_file(f)
@@ -227,7 +266,10 @@ def test_nonexistent_directory(self, tmp_path: Path):
 
     def test_ignores_non_matching_files(self, tmp_path: Path):
         _write_yaml(tmp_path / "spectral-rules.yaml", [_minimal_rule_dict()])
-        _write_yaml(tmp_path / "README.yaml", [_minimal_rule_dict(id="X-001")])
+        _write_yaml(
+            tmp_path / "README.yaml",
+            [_minimal_rule_dict(id="X-001", engine_rule="other")],
+        )
         rules = load_all_rules(tmp_path)
         assert len(rules) == 1
         assert rules[0].id == "S-001"
@@ -254,14 +296,9 @@ def test_normal_case(self):
         assert index[("spectral", "rule-a")].id == "S-001"
 
     def test_duplicate_key_first_wins(self):
-        rules = [
-            parse_rule_metadata(
-                _minimal_rule_dict(id="S-001", engine="spectral", engine_rule="rule-a")
-            ),
-            parse_rule_metadata(
-                _minimal_rule_dict(id="S-099", engine="spectral", engine_rule="rule-a")
-            ),
-        ]
+        r1 = _minimal_rule_dict(id="S-001", engine="spectral", engine_rule="rule-a")
+        r2 = _minimal_rule_dict(id="S-099", engine="spectral", engine_rule="rule-a")
+        rules = [parse_rule_metadata(r1), parse_rule_metadata(r2)]
         index = build_lookup_index(rules)
         assert index[("spectral", "rule-a")].id == "S-001"
         assert len(index) == 1
diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py
new file mode 100644
index 00000000..cababc41
--- /dev/null
+++ b/validation/tests/test_rule_metadata_integrity.py
@@ -0,0 +1,287 @@
+"""Integration tests for rule metadata files.
+
+Loads the real rule metadata YAML files from ``validation/rules/`` and
+verifies structural integrity, completeness, and consistency with the
+engine configurations.
+"""
+
+from __future__ import annotations
+
+import json
+import re
+from pathlib import Path
+
+import pytest
+import yaml
+
+from validation.postfilter.metadata_loader import (
+    build_lookup_index,
+    load_all_rules,
+    parse_rule_metadata,
+)
+
+# ---------------------------------------------------------------------------
+# Paths
+# ---------------------------------------------------------------------------
+
+_REPO_ROOT = Path(__file__).resolve().parent.parent.parent
+_RULES_DIR = _REPO_ROOT / "validation" / "rules"
+_LINTING_DIR = _REPO_ROOT / "linting" / "config"
+
+_SPECTRAL_CONFIG = _LINTING_DIR / ".spectral.yaml"
+_GHERKIN_CONFIG = _LINTING_DIR / ".gherkin-lintrc"
+_YAMLLINT_CONFIG = _LINTING_DIR / ".yamllint.yaml"
+
+# Rule ID pattern from schema
+_ID_PATTERN = re.compile(r"^[A-Z]-[0-9]{3}$")
+
+# Engine prefix mapping
+_ENGINE_PREFIX = {
+    "spectral": "S",
+    "gherkin": "G",
+    "python": "P",
+    "yamllint": "Y",
+    "manual": "M",
+}
+
+
+# ---------------------------------------------------------------------------
+# Fixture: load all rules once
+# ---------------------------------------------------------------------------
+
+
+@pytest.fixture(scope="module")
+def all_rules():
+    return load_all_rules(_RULES_DIR)
+
+
+@pytest.fixture(scope="module")
+def rule_index(all_rules):
+    return build_lookup_index(all_rules)
+
+
+# ---------------------------------------------------------------------------
+# Structural integrity
+# ---------------------------------------------------------------------------
+
+
+class TestStructuralIntegrity:
+    """Verify all rule files load without error and have valid structure."""
+
+    def test_rules_load_successfully(self, all_rules):
+        assert len(all_rules) > 0, "No rules loaded from validation/rules/"
+
+    def test_expected_rule_counts(self, all_rules):
+        counts = {}
+        for r in all_rules:
+            counts[r.engine] = counts.get(r.engine, 0) + 1
+        assert counts["python"] == 12
+        assert counts["spectral"] == 46
+        assert counts["gherkin"] == 25
+        assert counts["yamllint"] == 13
+
+    def test_no_duplicate_keys(self, all_rules):
+        """No duplicate (engine, engine_rule) pairs."""
+        seen: set[tuple[str, str]] = set()
+        duplicates = []
+        for r in all_rules:
+            key = (r.engine, r.engine_rule)
+            if key in seen:
+                duplicates.append(f"{r.id}: ({r.engine}, {r.engine_rule})")
+            seen.add(key)
+        assert not duplicates, f"Duplicate keys: {duplicates}"
+
+    def test_no_duplicate_ids(self, all_rules):
+        seen: set[str] = set()
+        duplicates = []
+        for r in all_rules:
+            if r.id in seen:
+                duplicates.append(r.id)
+            seen.add(r.id)
+        assert not duplicates, f"Duplicate IDs: {duplicates}"
+
+    def test_ids_match_pattern(self, all_rules):
+        bad = [r.id for r in all_rules if not _ID_PATTERN.match(r.id)]
+        assert not bad, f"IDs not matching ^[A-Z]-[0-9]{{3}}$: {bad}"
+
+    def test_ids_use_correct_engine_prefix(self, all_rules):
+        bad = []
+        for r in all_rules:
+            expected_prefix = _ENGINE_PREFIX.get(r.engine)
+            if expected_prefix and not r.id.startswith(expected_prefix + "-"):
+                bad.append(f"{r.id} (engine={r.engine}, expected {expected_prefix}-)")
+            seen_prefix = r.id.split("-")[0]
+            if seen_prefix not in _ENGINE_PREFIX.values():
+                bad.append(f"{r.id} (unknown prefix {seen_prefix})")
+        assert not bad, f"ID/engine prefix mismatches: {bad}"
+
+    def test_ids_sequential_within_ranges(self, all_rules):
+        """IDs should be sequential within contiguous ranges.
+
+        Spectral uses S-001–S-017 (CAMARA custom) and S-100+ (built-in OAS)
+        with a reserved gap between them.  Other engines are contiguous.
+        """
+        by_prefix: dict[str, list[int]] = {}
+        for r in all_rules:
+            prefix, num_str = r.id.split("-")
+            by_prefix.setdefault(prefix, []).append(int(num_str))
+
+        for prefix, nums in by_prefix.items():
+            nums_sorted = sorted(nums)
+            # Split into contiguous ranges
+            ranges: list[list[int]] = [[nums_sorted[0]]]
+            for n in nums_sorted[1:]:
+                if n == ranges[-1][-1] + 1:
+                    ranges[-1].append(n)
+                else:
+                    ranges.append([n])
+            # Each range must be contiguous (already guaranteed by construction)
+            # but verify no gaps within a range
+            for rng in ranges:
+                expected = list(range(rng[0], rng[0] + len(rng)))
+                assert rng == expected, (
+                    f"Gap in {prefix}- range starting at {rng[0]}: "
+                    f"got {rng}, expected {expected}"
+                )
+
+
+# ---------------------------------------------------------------------------
+# Engine coverage
+# ---------------------------------------------------------------------------
+
+
+class TestEngineCoverage:
+    """Verify rule metadata covers all enabled engine rules."""
+
+    def _get_spectral_enabled_rules(self) -> set[str]:
+        """Extract enabled rules from .spectral.yaml."""
+        if not _SPECTRAL_CONFIG.is_file():
+            pytest.skip("Spectral config not found")
+        data = yaml.safe_load(_SPECTRAL_CONFIG.read_text(encoding="utf-8"))
+        rules = data.get("rules", {})
+        enabled = set()
+        for name, value in rules.items():
+            # false = disabled, true/severity/dict = enabled
+            if value is False:
+                continue
+            # Custom rules with recommended: false are still defined
+            if isinstance(value, dict) and value.get("recommended") is False:
+                # Still a defined rule — include it
+                pass
+            enabled.add(name)
+        return enabled
+
+    def _get_gherkin_enabled_rules(self) -> set[str]:
+        """Extract enabled rules from .gherkin-lintrc."""
+        if not _GHERKIN_CONFIG.is_file():
+            pytest.skip("Gherkin config not found")
+        # gherkin-lintrc may have comments (non-standard JSON)
+        text = _GHERKIN_CONFIG.read_text(encoding="utf-8")
+        # Strip // comments for JSON parsing
+        lines = []
+        for line in text.splitlines():
+            stripped = line.lstrip()
+            if stripped.startswith("//"):
+                continue
+            # Remove inline // comments (but not inside strings)
+            if "//" in line:
+                # Simple heuristic: remove from // to end of line
+                idx = line.index("//")
+                line = line[:idx]
+            lines.append(line)
+        data = json.loads("\n".join(lines))
+        enabled = set()
+        for name, value in data.items():
+            if value == "off":
+                continue
+            if isinstance(value, list) and value[0] == "off":
+                continue
+            enabled.add(name)
+        return enabled
+
+    def _get_yamllint_enabled_rules(self) -> set[str]:
+        """Extract enabled rules from .yamllint.yaml."""
+        if not _YAMLLINT_CONFIG.is_file():
+            pytest.skip("yamllint config not found")
+        data = yaml.safe_load(_YAMLLINT_CONFIG.read_text(encoding="utf-8"))
+        rules = data.get("rules", {})
+        enabled = set()
+        for name, value in rules.items():
+            if value == "disable" or value is False:
+                continue
+            enabled.add(name)
+        return enabled
+
+    def _get_python_check_names(self) -> set[str]:
+        """Get all registered Python check names."""
+        from validation.engines.python_checks import CHECKS
+
+        return {c.name for c in CHECKS}
+
+    def test_spectral_coverage(self, rule_index):
+        """Every enabled Spectral rule has a metadata entry."""
+        enabled = self._get_spectral_enabled_rules()
+        indexed = {er for (eng, er) in rule_index if eng == "spectral"}
+        missing = enabled - indexed
+        assert not missing, (
+            f"Spectral rules without metadata: {sorted(missing)}"
+        )
+
+    def test_gherkin_coverage(self, rule_index):
+        """Every enabled gherkin-lint rule has a metadata entry."""
+        enabled = self._get_gherkin_enabled_rules()
+        indexed = {er for (eng, er) in rule_index if eng == "gherkin"}
+        missing = enabled - indexed
+        assert not missing, (
+            f"Gherkin rules without metadata: {sorted(missing)}"
+        )
+
+    def test_yamllint_coverage(self, rule_index):
+        """Every enabled yamllint rule has a metadata entry."""
+        enabled = self._get_yamllint_enabled_rules()
+        indexed = {er for (eng, er) in rule_index if eng == "yamllint"}
+        missing = enabled - indexed
+        assert not missing, (
+            f"yamllint rules without metadata: {sorted(missing)}"
+        )
+
+    def test_python_coverage(self, rule_index):
+        """Every registered Python check has a metadata entry."""
+        checks = self._get_python_check_names()
+        indexed = {er for (eng, er) in rule_index if eng == "python"}
+        missing = checks - indexed
+        assert not missing, (
+            f"Python checks without metadata: {sorted(missing)}"
+        )
+
+
+# ---------------------------------------------------------------------------
+# Metadata quality
+# ---------------------------------------------------------------------------
+
+
+class TestMetadataQuality:
+    """Verify metadata entries that SHOULD have certain fields do."""
+
+    def test_python_rules_have_conditional_level(self, all_rules):
+        """Python checks have context-dependent behavior; all need levels."""
+        missing = [
+            r.id
+            for r in all_rules
+            if r.engine == "python" and r.conditional_level is None
+        ]
+        assert not missing, f"Python rules without conditional_level: {missing}"
+
+    def test_hints_are_exception_not_norm(self, all_rules):
+        """Hints are rare overrides — engine messages serve as default guidance.
+
+        Engine messages are the primary fix guidance (design doc 8.4.1).
+        Explicit hints should only exist when the engine message is
+        insufficient.  This test documents the current state; update
+        the count when hints are added in WS07.
+        """
+        with_hints = [r.id for r in all_rules if r.hint]
+        assert len(with_hints) == 0, (
+            f"Expected 0 explicit hints (update test if adding hints): "
+            f"{with_hints}"
+        )

From abf269e03bf4669a12fae64396a27da35763ca58 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Thu, 26 Mar 2026 16:57:07 +0100
Subject: [PATCH 010/157] refactor(validation): split hint into
 message_override and hint (DEC-018)

Separate two use cases previously conflated in a single hint field:
- message_override: replaces engine message entirely (rare)
- hint: additional fix guidance alongside engine message (common)

Both fields are optional; when neither is set the engine message is
preserved and no hint is added.  Zero rules currently use either field,
so this is a non-breaking schema evolution.
---
 validation/postfilter/engine.py               | 26 ++++----
 validation/postfilter/metadata_loader.py      | 11 +++-
 validation/rules/python-rules.yaml            |  3 +-
 validation/rules/spectral-rules.yaml          |  2 +-
 validation/schemas/findings-schema.yaml       |  4 +-
 validation/schemas/rule-metadata-schema.yaml  | 12 +++-
 validation/tests/test_postfilter_engine.py    | 62 ++++++++++++++++---
 validation/tests/test_postfilter_levels.py    |  1 +
 validation/tests/test_postfilter_metadata.py  | 23 ++++++-
 .../tests/test_rule_metadata_integrity.py     | 14 +++--
 10 files changed, 121 insertions(+), 37 deletions(-)

diff --git a/validation/postfilter/engine.py b/validation/postfilter/engine.py
index f62f9ead..66dc0339 100644
--- a/validation/postfilter/engine.py
+++ b/validation/postfilter/engine.py
@@ -36,7 +36,7 @@ class PostFilterResult:
     """Result of post-filter processing.
 
     Attributes:
-        findings: Processed findings with resolved level, hint, blocks.
+        findings: Processed findings with resolved level, optional hint, blocks.
         result: Overall verdict — ``"pass"``, ``"fail"``, or ``"error"``.
         summary: Human-readable one-line summary.
     """
@@ -87,22 +87,25 @@ def _enrich_finding(
 
     When *resolved_level* is ``None`` (identity-only entry without
     ``conditional_level``), the engine's original level is preserved.
-    When *rule.hint* is empty, the engine's message is used instead.
+    When *message_override* is set, the finding's message is replaced.
+    When *hint* is set, it is added to the finding as additional guidance.
+    When neither is set, the engine's original message is preserved and
+    no hint is added.
     """
     enriched = dict(finding)
     enriched["rule_id"] = rule.id
     if resolved_level is not None:
         enriched["level"] = resolved_level
-    enriched["hint"] = rule.hint or finding.get("message", "")
+    if rule.message_override is not None:
+        enriched["message"] = rule.message_override
+    if rule.hint is not None:
+        enriched["hint"] = rule.hint
     return enriched
 
 
 def _passthrough_finding(finding: dict) -> dict:
-    """Create a pass-through copy: keep engine level, hint = message."""
-    enriched = dict(finding)
-    if "hint" not in enriched or enriched.get("hint") is None:
-        enriched["hint"] = finding.get("message", "")
-    return enriched
+    """Create a pass-through copy: keep engine level, no hint added."""
+    return dict(finding)
 
 
 def compute_overall_result(
@@ -174,9 +177,10 @@ def run_post_filter(
     2. Look up ``(engine, engine_rule)`` in the metadata index.
     3. **Mapped rule**: evaluate applicability (remove if not applicable),
        resolve conditional level (remove if ``"off"``), enrich with
-       ``rule_id``, ``hint``, and adjusted ``level``.
-    4. **Unmapped rule** (pass-through): keep engine severity, set
-       ``hint = message``.
+       ``rule_id``, optional ``message_override``/``hint``, and
+       adjusted ``level``.
+    4. **Unmapped rule** (pass-through): keep engine severity and
+       message, no hint added.
     5. Apply profile blocking to all surviving findings.
     6. Compute overall result.
 
diff --git a/validation/postfilter/metadata_loader.py b/validation/postfilter/metadata_loader.py
index 18ab8ee2..297718fa 100644
--- a/validation/postfilter/metadata_loader.py
+++ b/validation/postfilter/metadata_loader.py
@@ -69,7 +69,10 @@ class RuleMetadata:
         name: Human-readable kebab-case name.  Defaults to ``engine_rule``.
         engine: Engine responsible for producing the finding.
         engine_rule: Native rule identifier within the engine.
-        hint: Actionable fix guidance.  Empty string means "use engine message".
+        message_override: Replaces the engine's finding message.  ``None``
+            means keep the engine message.
+        hint: Additional fix guidance shown alongside the message.  ``None``
+            means no extra hint.
         applicability: Condition dict — omitted fields are unconstrained.
         conditional_level: Severity specification, or ``None`` to preserve
             engine-reported severity (identity mapping).
@@ -79,7 +82,8 @@ class RuleMetadata:
     name: str
     engine: str
     engine_rule: str
-    hint: str
+    message_override: Optional[str]
+    hint: Optional[str]
     applicability: dict
     conditional_level: Optional[ConditionalLevel]
 
@@ -142,7 +146,8 @@ def parse_rule_metadata(raw: dict) -> RuleMetadata:
         name=raw.get("name", raw["engine_rule"]),
         engine=raw["engine"],
         engine_rule=raw["engine_rule"],
-        hint=raw.get("hint", ""),
+        message_override=raw.get("message_override"),
+        hint=raw.get("hint"),
         applicability=raw.get("applicability", {}),
         conditional_level=conditional_level,
     )
diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
index 2451df7f..b87b698c 100644
--- a/validation/rules/python-rules.yaml
+++ b/validation/rules/python-rules.yaml
@@ -2,7 +2,8 @@
 # These checks have context-dependent behavior: conditional levels based on
 # branch type, API maturity, and release context; applicability conditions
 # for release-review and release-plan-changed contexts.
-# Engine messages from make_finding() serve as hints (design doc section 8.4.1).
+# Engine messages from make_finding() are preserved by default (design doc 8.4.1).
+# message_override and hint can be added per DEC-018 when engine messages are insufficient.
 
 # P-001: check-filename-kebab-case
 - id: P-001
diff --git a/validation/rules/spectral-rules.yaml b/validation/rules/spectral-rules.yaml
index a7b7c612..d029dae6 100644
--- a/validation/rules/spectral-rules.yaml
+++ b/validation/rules/spectral-rules.yaml
@@ -2,7 +2,7 @@
 # S-001 through S-199: reserved for CAMARA custom rules
 # S-200 through S-xxx: Built-in OAS rules
 # All entries are identity-only initially, can be extended if needed.
-# Hints default to the Spectral engine message; can be overridden in WS07.
+# Engine messages are preserved by default; message_override and hint can be added per DEC-018.
 
 # ===== CAMARA custom rules (S-001+) =====
 
diff --git a/validation/schemas/findings-schema.yaml b/validation/schemas/findings-schema.yaml
index 74201f63..c6352b1c 100644
--- a/validation/schemas/findings-schema.yaml
+++ b/validation/schemas/findings-schema.yaml
@@ -77,5 +77,5 @@ properties:
   hint:
     type: string
     description: >
-      Actionable fix guidance.  From rule metadata if available, otherwise
-      falls back to the engine message.
+      Actionable fix guidance from rule metadata.  Only present when
+      the rule metadata explicitly provides a hint.
diff --git a/validation/schemas/rule-metadata-schema.yaml b/validation/schemas/rule-metadata-schema.yaml
index afb58f0d..ae8c79c1 100644
--- a/validation/schemas/rule-metadata-schema.yaml
+++ b/validation/schemas/rule-metadata-schema.yaml
@@ -42,11 +42,19 @@ properties:
       Native rule identifier within the engine.  For Spectral this is the
       rule name in .spectral.yaml; for Python checks the function name.
 
+  message_override:
+    type: string
+    description: >
+      Replaces the engine's finding message entirely.  Use when the
+      engine message is misleading or unhelpful.  Optional — when
+      omitted, the engine message is preserved.
+
   hint:
     type: string
     description: >
-      Actionable fix guidance shown to the developer.  Optional — when
-      omitted, the engine's finding message is used instead.
+      Additional fix guidance rendered alongside the finding message.
+      Optional — only set when there is "how to fix" guidance beyond
+      what the message already says.
 
   applicability:
     type: object
diff --git a/validation/tests/test_postfilter_engine.py b/validation/tests/test_postfilter_engine.py
index b7363c9c..98978513 100644
--- a/validation/tests/test_postfilter_engine.py
+++ b/validation/tests/test_postfilter_engine.py
@@ -103,7 +103,8 @@ def _minimal_rule(
     id: str = "S-001",
     engine: str = "spectral",
     engine_rule: str = "some-rule",
-    hint: str = "Fix this.",
+    message_override: str | None = None,
+    hint: str | None = None,
     default_level: str = "warn",
     applicability: dict | None = None,
     overrides: list[dict] | None = None,
@@ -113,9 +114,12 @@ def _minimal_rule(
         "id": id,
         "engine": engine,
         "engine_rule": engine_rule,
-        "hint": hint,
         "conditional_level": {"default": default_level},
     }
+    if message_override is not None:
+        rule["message_override"] = message_override
+    if hint is not None:
+        rule["hint"] = hint
     if applicability:
         rule["applicability"] = applicability
     if overrides:
@@ -226,7 +230,8 @@ def test_unmapped_rule_passthrough(self, tmp_path: Path):
         assert len(result.findings) == 1
         f = result.findings[0]
         assert f["level"] == "warn"
-        assert f["hint"] == "Use kebab-case"
+        assert f["message"] == "Use kebab-case"
+        assert "hint" not in f
         assert f["blocks"] is False
         assert "rule_id" not in f  # unmapped → no rule_id
 
@@ -238,7 +243,7 @@ def test_unmapped_error_blocks_in_standard(self, tmp_path: Path):
         assert result.findings[0]["blocks"] is True
 
     def test_mapped_rule_enrichment(self, tmp_path: Path):
-        """Mapped rules get rule_id, hint, and resolved level."""
+        """Mapped rules get rule_id, optional hint, and resolved level."""
         _write_rules(tmp_path, [
             _minimal_rule(
                 id="S-001",
@@ -248,12 +253,13 @@ def test_mapped_rule_enrichment(self, tmp_path: Path):
             )
         ])
         ctx = _make_context(profile="standard")
-        findings = [_make_finding(level="warn")]  # engine reports warn
+        findings = [_make_finding(level="warn", message="Original msg")]
         result = run_post_filter(findings, ctx, tmp_path)
 
         assert len(result.findings) == 1
         f = result.findings[0]
         assert f["rule_id"] == "S-001"
+        assert f["message"] == "Original msg"  # engine message preserved
         assert f["hint"] == "Do this instead."
         assert f["level"] == "error"  # remapped from warn to error by metadata
         assert f["blocks"] is True
@@ -492,7 +498,8 @@ def test_identity_only_entry(self, tmp_path: Path):
         f = result.findings[0]
         assert f["rule_id"] == "S-018"
         assert f["level"] == "warn"  # engine level preserved
-        assert f["hint"] == "Engine message"  # falls back to message
+        assert f["message"] == "Engine message"
+        assert "hint" not in f  # no hint in metadata → no hint in output
         assert f["blocks"] is False  # warn doesn't block in standard
 
     def test_identity_entry_with_explicit_hint(self, tmp_path: Path):
@@ -510,8 +517,8 @@ def test_identity_entry_with_explicit_hint(self, tmp_path: Path):
         assert result.findings[0]["hint"] == "Custom guidance."
         assert result.findings[0]["rule_id"] == "S-018"
 
-    def test_mapped_rule_without_hint_falls_back(self, tmp_path: Path):
-        """Rule with conditional_level but no hint uses engine message."""
+    def test_mapped_rule_without_hint_preserves_message(self, tmp_path: Path):
+        """Rule without hint/message_override preserves engine message."""
         _write_rules(tmp_path, [{
             "id": "S-001",
             "engine": "spectral",
@@ -525,4 +532,41 @@ def test_mapped_rule_without_hint_falls_back(self, tmp_path: Path):
         f = result.findings[0]
         assert f["rule_id"] == "S-001"
         assert f["level"] == "error"  # from conditional_level
-        assert f["hint"] == "Engine says fix this"  # fallback from message
+        assert f["message"] == "Engine says fix this"  # preserved
+        assert "hint" not in f  # no hint in metadata → no hint in output
+
+    def test_message_override_replaces_message(self, tmp_path: Path):
+        """message_override replaces the engine message entirely."""
+        _write_rules(tmp_path, [{
+            "id": "S-001",
+            "engine": "spectral",
+            "engine_rule": "some-rule",
+            "message_override": "Better description.",
+            "conditional_level": {"default": "error"},
+        }])
+        ctx = _make_context()
+        findings = [_make_finding(message="Original engine message")]
+        result = run_post_filter(findings, ctx, tmp_path)
+
+        f = result.findings[0]
+        assert f["rule_id"] == "S-001"
+        assert f["message"] == "Better description."
+        assert "hint" not in f
+
+    def test_message_override_with_hint(self, tmp_path: Path):
+        """Both message_override and hint can be set together."""
+        _write_rules(tmp_path, [{
+            "id": "S-001",
+            "engine": "spectral",
+            "engine_rule": "some-rule",
+            "message_override": "Better description.",
+            "hint": "Fix by doing X.",
+            "conditional_level": {"default": "error"},
+        }])
+        ctx = _make_context()
+        findings = [_make_finding(message="Original engine message")]
+        result = run_post_filter(findings, ctx, tmp_path)
+
+        f = result.findings[0]
+        assert f["message"] == "Better description."
+        assert f["hint"] == "Fix by doing X."
diff --git a/validation/tests/test_postfilter_levels.py b/validation/tests/test_postfilter_levels.py
index 7db30b04..8fb923a1 100644
--- a/validation/tests/test_postfilter_levels.py
+++ b/validation/tests/test_postfilter_levels.py
@@ -74,6 +74,7 @@ def _make_rule(
         name="test-rule",
         engine="spectral",
         engine_rule="test-rule",
+        message_override=None,
         hint="Fix it.",
         applicability={},
         conditional_level=ConditionalLevel(
diff --git a/validation/tests/test_postfilter_metadata.py b/validation/tests/test_postfilter_metadata.py
index 481584df..96a2799d 100644
--- a/validation/tests/test_postfilter_metadata.py
+++ b/validation/tests/test_postfilter_metadata.py
@@ -41,6 +41,7 @@ def _full_rule_dict(**overrides: object) -> dict:
         "name": "test-rule",
         "engine": "spectral",
         "engine_rule": "camara-test-rule",
+        "message_override": "Overridden message.",
         "hint": "Fix this issue.",
         "conditional_level": {"default": "warn"},
     }
@@ -66,7 +67,8 @@ def test_identity_only(self):
         assert rule.name == "camara-test-rule"  # defaults to engine_rule
         assert rule.engine == "spectral"
         assert rule.engine_rule == "camara-test-rule"
-        assert rule.hint == ""
+        assert rule.message_override is None
+        assert rule.hint is None
         assert rule.applicability == {}
         assert rule.conditional_level is None
 
@@ -75,6 +77,7 @@ def test_full_entry(self):
         rule = parse_rule_metadata(raw)
         assert rule.id == "S-001"
         assert rule.name == "test-rule"
+        assert rule.message_override == "Overridden message."
         assert rule.hint == "Fix this issue."
         assert rule.conditional_level is not None
         assert rule.conditional_level.default == "warn"
@@ -90,16 +93,30 @@ def test_explicit_name_overrides_default(self):
         rule = parse_rule_metadata(raw)
         assert rule.name == "custom-name"
 
-    def test_hint_defaults_to_empty(self):
+    def test_optional_fields_default_to_none(self):
         raw = _minimal_rule_dict()
         rule = parse_rule_metadata(raw)
-        assert rule.hint == ""
+        assert rule.message_override is None
+        assert rule.hint is None
+
+    def test_explicit_message_override(self):
+        raw = _minimal_rule_dict(message_override="Better message.")
+        rule = parse_rule_metadata(raw)
+        assert rule.message_override == "Better message."
 
     def test_explicit_hint(self):
         raw = _minimal_rule_dict(hint="Do this instead.")
         rule = parse_rule_metadata(raw)
         assert rule.hint == "Do this instead."
 
+    def test_both_message_override_and_hint(self):
+        raw = _minimal_rule_dict(
+            message_override="Overridden.", hint="Fix guidance."
+        )
+        rule = parse_rule_metadata(raw)
+        assert rule.message_override == "Overridden."
+        assert rule.hint == "Fix guidance."
+
     def test_with_applicability(self):
         raw = _minimal_rule_dict(
             applicability={"branch_types": ["main", "release"]}
diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py
index cababc41..9e76b174 100644
--- a/validation/tests/test_rule_metadata_integrity.py
+++ b/validation/tests/test_rule_metadata_integrity.py
@@ -273,15 +273,19 @@ def test_python_rules_have_conditional_level(self, all_rules):
         assert not missing, f"Python rules without conditional_level: {missing}"
 
     def test_hints_are_exception_not_norm(self, all_rules):
-        """Hints are rare overrides — engine messages serve as default guidance.
+        """Hints and message overrides are rare — engine messages are primary.
 
         Engine messages are the primary fix guidance (design doc 8.4.1).
-        Explicit hints should only exist when the engine message is
-        insufficient.  This test documents the current state; update
-        the count when hints are added in WS07.
+        Explicit hints and message overrides should only exist when the
+        engine message is insufficient.  Update counts when adding in WS07.
         """
-        with_hints = [r.id for r in all_rules if r.hint]
+        with_hints = [r.id for r in all_rules if r.hint is not None]
+        with_overrides = [r.id for r in all_rules if r.message_override is not None]
         assert len(with_hints) == 0, (
             f"Expected 0 explicit hints (update test if adding hints): "
             f"{with_hints}"
         )
+        assert len(with_overrides) == 0, (
+            f"Expected 0 message overrides (update test if adding overrides): "
+            f"{with_overrides}"
+        )

From 478825f03472bd27d0abc5848808541eeeb313f5 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Thu, 26 Mar 2026 20:44:25 +0100
Subject: [PATCH 011/157] feat(validation): add output pipeline for all
 validation surfaces
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Implements the output pipeline that formats post-filter findings for:
- Workflow summary ($GITHUB_STEP_SUMMARY) with 900KB truncation
- Check annotations (::error/::warning/::notice workflow commands, 50 limit)
- PR comment (concise, marker-based create-or-update)
- Commit status payload (CAMARA Validation context)
- Diagnostic artifacts (full findings JSON, context, summary, engine reports)

All modules are pure Python generators — no GitHub API calls. The workflow
step (caller workflow, future WP) handles posting via actions/github-script.

6 modules + 6 test files, 94 new tests (526 total, all passing).
---
 validation/output/__init__.py                 |  11 +
 validation/output/annotations.py              | 142 ++++++++
 validation/output/commit_status.py            |  86 +++++
 validation/output/diagnostics.py              | 102 ++++++
 validation/output/formatting.py               | 133 +++++++
 validation/output/pr_comment.py               |  73 ++++
 validation/output/workflow_summary.py         | 308 ++++++++++++++++
 validation/tests/test_output_annotations.py   | 194 ++++++++++
 validation/tests/test_output_commit_status.py | 112 ++++++
 validation/tests/test_output_diagnostics.py   | 143 ++++++++
 validation/tests/test_output_formatting.py    | 200 +++++++++++
 validation/tests/test_output_pr_comment.py    | 121 +++++++
 .../tests/test_output_workflow_summary.py     | 337 ++++++++++++++++++
 13 files changed, 1962 insertions(+)
 create mode 100644 validation/output/annotations.py
 create mode 100644 validation/output/commit_status.py
 create mode 100644 validation/output/diagnostics.py
 create mode 100644 validation/output/formatting.py
 create mode 100644 validation/output/pr_comment.py
 create mode 100644 validation/output/workflow_summary.py
 create mode 100644 validation/tests/test_output_annotations.py
 create mode 100644 validation/tests/test_output_commit_status.py
 create mode 100644 validation/tests/test_output_diagnostics.py
 create mode 100644 validation/tests/test_output_formatting.py
 create mode 100644 validation/tests/test_output_pr_comment.py
 create mode 100644 validation/tests/test_output_workflow_summary.py

diff --git a/validation/output/__init__.py b/validation/output/__init__.py
index 2068a35f..9340b496 100644
--- a/validation/output/__init__.py
+++ b/validation/output/__init__.py
@@ -1,3 +1,14 @@
 # Output pipeline.
 # Formats findings for workflow summary, check annotations, PR comments,
 # commit status, and diagnostic artifacts.
+
+from .annotations import AnnotationResult, generate_annotations  # noqa: F401
+from .commit_status import (  # noqa: F401
+    CommitStatusPayload,
+    generate_commit_status,
+)
+from .diagnostics import write_diagnostics  # noqa: F401
+from .formatting import FindingCounts, count_findings, sort_findings_by_priority  # noqa: F401
+from .pr_comment import MARKER as PR_COMMENT_MARKER  # noqa: F401
+from .pr_comment import generate_pr_comment  # noqa: F401
+from .workflow_summary import SummaryResult, generate_workflow_summary  # noqa: F401
diff --git a/validation/output/annotations.py b/validation/output/annotations.py
new file mode 100644
index 00000000..8a558279
--- /dev/null
+++ b/validation/output/annotations.py
@@ -0,0 +1,142 @@
+"""Check annotation generation via GitHub Actions workflow commands.
+
+Produces ``::error``, ``::warning``, and ``::notice`` command strings
+that GitHub Actions renders as file-pinned annotations in the PR diff.
+
+Design doc references:
+  - Section 9.3: check run annotations (50 per step limit, priority ordering)
+"""
+
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass
+from typing import List
+
+from validation.postfilter.engine import PostFilterResult
+
+from .formatting import format_rule_label, sort_findings_by_priority
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Constants
+# ---------------------------------------------------------------------------
+
+ANNOTATION_LIMIT = 50
+
+_LEVEL_TO_COMMAND = {
+    "error": "error",
+    "warn": "warning",
+    "hint": "notice",
+}
+
+# ---------------------------------------------------------------------------
+# Result type
+# ---------------------------------------------------------------------------
+
+
+@dataclass(frozen=True)
+class AnnotationResult:
+    """Result of annotation generation.
+
+    Attributes:
+        commands: Workflow command strings ready to print to stdout.
+        total_findings: Total number of findings before truncation.
+        annotations_emitted: Number of annotations actually emitted.
+        truncated: Whether findings were truncated to the limit.
+    """
+
+    commands: List[str]
+    total_findings: int
+    annotations_emitted: int
+    truncated: bool
+
+
+# ---------------------------------------------------------------------------
+# Internal helpers
+# ---------------------------------------------------------------------------
+
+
+def _sanitize_message(text: str) -> str:
+    """Sanitize a message for use in a workflow command.
+
+    Workflow commands use ``::`` as delimiters and newlines as
+    terminators.  Both must be escaped in the message body.
+    """
+    text = text.replace("\r\n", " ").replace("\r", " ").replace("\n", " ")
+    # Percent-encode the characters that GitHub Actions interprets specially
+    # in workflow command data: %, \r, \n, and :
+    # Using the documented encoding: https://github.com/actions/toolkit
+    text = text.replace("%", "%25")
+    text = text.replace(":", "%3A")
+    return text
+
+
+def _build_command(finding: dict) -> str:
+    """Build a single ``::error``/``::warning``/``::notice`` command."""
+    level = finding.get("level", "hint")
+    command = _LEVEL_TO_COMMAND.get(level, "notice")
+
+    # Location parameters
+    path = finding.get("path", "")
+    line = finding.get("line", 1)
+    col = finding.get("column")
+
+    title = format_rule_label(finding)
+
+    # Message: main message + optional hint
+    message = finding.get("message", "")
+    hint = finding.get("hint")
+    if hint:
+        message = f"{message} | Hint: {hint}"
+
+    # Build parameter string
+    params = f"file={path},line={line}"
+    if col is not None:
+        params += f",col={col}"
+    params += f",title={_sanitize_message(title)}"
+
+    return f"::{command} {params}::{_sanitize_message(message)}"
+
+
+# ---------------------------------------------------------------------------
+# Public API
+# ---------------------------------------------------------------------------
+
+
+def generate_annotations(
+    post_filter_result: PostFilterResult,
+) -> AnnotationResult:
+    """Generate workflow command annotation strings from findings.
+
+    Findings are sorted by priority (errors first, then warnings, then
+    hints) and truncated to :data:`ANNOTATION_LIMIT`.
+
+    Args:
+        post_filter_result: Output of the post-filter engine.
+
+    Returns:
+        :class:`AnnotationResult` with workflow command strings.
+    """
+    sorted_findings = sort_findings_by_priority(post_filter_result.findings)
+    total = len(sorted_findings)
+
+    selected = sorted_findings[:ANNOTATION_LIMIT]
+    commands = [_build_command(f) for f in selected]
+    emitted = len(commands)
+    truncated = total > ANNOTATION_LIMIT
+
+    if truncated:
+        logger.info(
+            "Annotation limit reached: showing %d of %d findings",
+            emitted,
+            total,
+        )
+
+    return AnnotationResult(
+        commands=commands,
+        total_findings=total,
+        annotations_emitted=emitted,
+        truncated=truncated,
+    )
diff --git a/validation/output/commit_status.py b/validation/output/commit_status.py
new file mode 100644
index 00000000..916a94ce
--- /dev/null
+++ b/validation/output/commit_status.py
@@ -0,0 +1,86 @@
+"""Commit status payload generation.
+
+Produces a ``CommitStatusPayload`` that the workflow step sends via
+``github.rest.repos.createCommitStatus()``.  The Python code does not
+call the GitHub API — it only prepares the payload.
+
+Design doc references:
+  - Section 9.3: commit status (context, state mapping)
+"""
+
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass
+
+from validation.context import ValidationContext
+from validation.postfilter.engine import PostFilterResult
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Constants
+# ---------------------------------------------------------------------------
+
+STATUS_CONTEXT = "CAMARA Validation"
+
+_DESCRIPTION_MAX_LEN = 140
+
+_RESULT_TO_STATE = {
+    "pass": "success",
+    "fail": "failure",
+    "error": "error",
+}
+
+# ---------------------------------------------------------------------------
+# Result type
+# ---------------------------------------------------------------------------
+
+
+@dataclass(frozen=True)
+class CommitStatusPayload:
+    """Payload for ``createCommitStatus`` GitHub API call.
+
+    Attributes:
+        state: One of ``"success"``, ``"failure"``, ``"error"``.
+        description: Short summary (max 140 characters).
+        context: Check context identifier.
+        target_url: Link to the full workflow run.
+    """
+
+    state: str
+    description: str
+    context: str
+    target_url: str
+
+
+# ---------------------------------------------------------------------------
+# Public API
+# ---------------------------------------------------------------------------
+
+
+def generate_commit_status(
+    post_filter_result: PostFilterResult,
+    context: ValidationContext,
+) -> CommitStatusPayload:
+    """Generate the commit status payload.
+
+    Args:
+        post_filter_result: Output of the post-filter engine.
+        context: Unified validation context.
+
+    Returns:
+        :class:`CommitStatusPayload` ready for the workflow step to post.
+    """
+    state = _RESULT_TO_STATE.get(post_filter_result.result, "error")
+
+    description = post_filter_result.summary
+    if len(description) > _DESCRIPTION_MAX_LEN:
+        description = description[: _DESCRIPTION_MAX_LEN - 1] + "\u2026"
+
+    return CommitStatusPayload(
+        state=state,
+        description=description,
+        context=STATUS_CONTEXT,
+        target_url=context.workflow_run_url,
+    )
diff --git a/validation/output/diagnostics.py b/validation/output/diagnostics.py
new file mode 100644
index 00000000..4a6e3897
--- /dev/null
+++ b/validation/output/diagnostics.py
@@ -0,0 +1,102 @@
+"""Diagnostic artifact writing.
+
+Writes the full (untruncated) findings list, validation context, summary
+metadata, and optional engine reports to JSON files in a specified
+directory.  The workflow step uploads this directory via
+``actions/upload-artifact``.
+
+Design doc references:
+  - Section 9.5: diagnostic artifacts (always available regardless of token)
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from validation.context import ValidationContext
+from validation.postfilter.engine import PostFilterResult
+
+from .formatting import count_findings
+
+logger = logging.getLogger(__name__)
+
+
+def write_diagnostics(
+    post_filter_result: PostFilterResult,
+    context: ValidationContext,
+    output_dir: Path,
+    engine_reports: Optional[Dict[str, Any]] = None,
+) -> List[Path]:
+    """Write diagnostic artifact files to *output_dir*.
+
+    Creates the directory if it does not exist.
+
+    Files written:
+      - ``findings.json`` — full findings list (no truncation)
+      - ``context.json`` — serialised validation context
+      - ``summary.json`` — result, summary string, and aggregate counts
+      - ``engine-reports.json`` — raw engine reports (only when provided)
+
+    Args:
+        post_filter_result: Output of the post-filter engine.
+        context: Unified validation context.
+        output_dir: Target directory for artifact files.
+        engine_reports: Optional raw engine output to include.
+
+    Returns:
+        List of paths to the files that were written.
+    """
+    output_dir.mkdir(parents=True, exist_ok=True)
+
+    written: List[Path] = []
+
+    # findings.json
+    findings_path = output_dir / "findings.json"
+    findings_path.write_text(
+        json.dumps(post_filter_result.findings, indent=2, ensure_ascii=False),
+        encoding="utf-8",
+    )
+    written.append(findings_path)
+
+    # context.json
+    context_path = output_dir / "context.json"
+    context_path.write_text(
+        json.dumps(context.to_dict(), indent=2, ensure_ascii=False),
+        encoding="utf-8",
+    )
+    written.append(context_path)
+
+    # summary.json
+    counts = count_findings(post_filter_result.findings)
+    summary_data = {
+        "result": post_filter_result.result,
+        "summary": post_filter_result.summary,
+        "counts": {
+            "errors": counts.errors,
+            "warnings": counts.warnings,
+            "hints": counts.hints,
+            "total": counts.total,
+            "blocking": counts.blocking,
+        },
+    }
+    summary_path = output_dir / "summary.json"
+    summary_path.write_text(
+        json.dumps(summary_data, indent=2, ensure_ascii=False),
+        encoding="utf-8",
+    )
+    written.append(summary_path)
+
+    # engine-reports.json (optional)
+    if engine_reports is not None:
+        reports_path = output_dir / "engine-reports.json"
+        reports_path.write_text(
+            json.dumps(engine_reports, indent=2, ensure_ascii=False),
+            encoding="utf-8",
+        )
+        written.append(reports_path)
+
+    logger.info("Wrote %d diagnostic files to %s", len(written), output_dir)
+    return written
diff --git a/validation/output/formatting.py b/validation/output/formatting.py
new file mode 100644
index 00000000..30b7a6a7
--- /dev/null
+++ b/validation/output/formatting.py
@@ -0,0 +1,133 @@
+"""Shared formatting utilities for the output pipeline.
+
+Pure functions for counting, sorting, and labelling findings.  Used by
+all output surface modules (workflow summary, annotations, PR comment,
+commit status, diagnostics).
+
+Design doc references:
+  - Section 9.2: finding grouping and priority ordering
+  - Section 9.3: per-API summary table
+"""
+
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass
+from typing import Dict, List
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Priority ordering for levels
+# ---------------------------------------------------------------------------
+
+_LEVEL_PRIORITY = {"error": 0, "warn": 1, "hint": 2}
+
+# Sentinel label for repo-level findings (api_name is None)
+REPO_LEVEL_LABEL = "(repository)"
+
+# ---------------------------------------------------------------------------
+# Result types
+# ---------------------------------------------------------------------------
+
+
+@dataclass(frozen=True)
+class FindingCounts:
+    """Aggregate counts for a set of findings."""
+
+    errors: int
+    warnings: int
+    hints: int
+    total: int
+    blocking: int
+
+
+# ---------------------------------------------------------------------------
+# Counting
+# ---------------------------------------------------------------------------
+
+
+def count_findings(findings: List[dict]) -> FindingCounts:
+    """Count findings by level and blocking status."""
+    errors = 0
+    warnings = 0
+    hints = 0
+    blocking = 0
+    for f in findings:
+        level = f.get("level", "")
+        if level == "error":
+            errors += 1
+        elif level == "warn":
+            warnings += 1
+        elif level == "hint":
+            hints += 1
+        if f.get("blocks"):
+            blocking += 1
+    return FindingCounts(
+        errors=errors,
+        warnings=warnings,
+        hints=hints,
+        total=len(findings),
+        blocking=blocking,
+    )
+
+
+def count_findings_by_api(
+    findings: List[dict],
+) -> Dict[str, FindingCounts]:
+    """Group findings by ``api_name`` and count each group.
+
+    Findings with ``api_name`` of ``None`` are grouped under
+    :data:`REPO_LEVEL_LABEL`.  Keys are returned in insertion order
+    (first-seen API name).
+    """
+    groups: Dict[str, List[dict]] = {}
+    for f in findings:
+        key = f.get("api_name") or REPO_LEVEL_LABEL
+        groups.setdefault(key, []).append(f)
+    return {api: count_findings(fs) for api, fs in groups.items()}
+
+
+# ---------------------------------------------------------------------------
+# Sorting
+# ---------------------------------------------------------------------------
+
+
+def sort_findings_by_priority(findings: List[dict]) -> List[dict]:
+    """Sort findings: errors first, then warnings, then hints.
+
+    Within the same level, sort by file path then line number.
+    The sort is stable — equal items preserve their original order.
+    """
+    return sorted(
+        findings,
+        key=lambda f: (
+            _LEVEL_PRIORITY.get(f.get("level", ""), 99),
+            f.get("path", ""),
+            f.get("line", 0),
+        ),
+    )
+
+
+# ---------------------------------------------------------------------------
+# Label formatting
+# ---------------------------------------------------------------------------
+
+
+def format_rule_label(finding: dict) -> str:
+    """Return the best short label for a finding's rule.
+
+    Uses ``rule_id`` when present (e.g. ``"S-042"``), otherwise falls
+    back to ``engine_rule``.
+    """
+    return finding.get("rule_id") or finding.get("engine_rule", "unknown")
+
+
+def format_finding_location(finding: dict) -> str:
+    """Format a finding's location as ``path:line`` or ``path:line:column``."""
+    path = finding.get("path", "")
+    line = finding.get("line", 0)
+    column = finding.get("column")
+    if column is not None:
+        return f"{path}:{line}:{column}"
+    return f"{path}:{line}"
diff --git a/validation/output/pr_comment.py b/validation/output/pr_comment.py
new file mode 100644
index 00000000..c7497855
--- /dev/null
+++ b/validation/output/pr_comment.py
@@ -0,0 +1,73 @@
+"""PR comment markdown generation.
+
+Produces a concise summary comment for the pull request with a
+create-or-update marker.  The actual posting is handled by the
+workflow step (``actions/github-script``).
+
+Design doc references:
+  - Section 9.3: PR comment (concise, marker-based create-or-update)
+"""
+
+from __future__ import annotations
+
+import logging
+
+from validation.context import ValidationContext
+from validation.postfilter.engine import PostFilterResult
+
+from .formatting import count_findings
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Constants
+# ---------------------------------------------------------------------------
+
+MARKER = ""
+
+_RESULT_LABEL = {"pass": "PASS", "fail": "FAIL", "error": "ERROR"}
+
+
+# ---------------------------------------------------------------------------
+# Public API
+# ---------------------------------------------------------------------------
+
+
+def generate_pr_comment(
+    post_filter_result: PostFilterResult,
+    context: ValidationContext,
+) -> str:
+    """Generate the PR comment markdown string.
+
+    The returned string includes the :data:`MARKER` for idempotent
+    create-or-update by the workflow step.
+
+    Args:
+        post_filter_result: Output of the post-filter engine.
+        context: Unified validation context.
+
+    Returns:
+        Complete Markdown string ready to post as a PR comment.
+    """
+    result_label = _RESULT_LABEL.get(
+        post_filter_result.result,
+        post_filter_result.result.upper(),
+    )
+    counts = count_findings(post_filter_result.findings)
+
+    lines = [
+        MARKER,
+        f"### CAMARA Validation — {result_label}",
+        "",
+        (
+            f"{counts.errors} errors, {counts.warnings} warnings, "
+            f"{counts.hints} hints | Profile: {context.profile}"
+        ),
+        "",
+    ]
+    if context.workflow_run_url:
+        lines.append(f"[View full results]({context.workflow_run_url})")
+    else:
+        lines.append("See workflow summary for full results.")
+
+    return "\n".join(lines)
diff --git a/validation/output/workflow_summary.py b/validation/output/workflow_summary.py
new file mode 100644
index 00000000..964fa5fe
--- /dev/null
+++ b/validation/output/workflow_summary.py
@@ -0,0 +1,308 @@
+"""Workflow summary generation for ``$GITHUB_STEP_SUMMARY``.
+
+Produces a Markdown string with header, per-API summary table, findings
+tables grouped by severity level, engine status table, and footer.
+Implements 900 KB truncation with priority ordering (errors are never
+truncated).
+
+Design doc references:
+  - Section 9.3: workflow summary structure and truncation strategy
+  - Section 9.4: 1 MB GitHub limit (900 KB safety margin)
+"""
+
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass
+from typing import Dict, List, Optional
+
+from validation.context import ValidationContext
+from validation.postfilter.engine import PostFilterResult
+
+from .formatting import (
+    REPO_LEVEL_LABEL,
+    count_findings,
+    count_findings_by_api,
+    format_rule_label,
+    sort_findings_by_priority,
+)
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Constants
+# ---------------------------------------------------------------------------
+
+SUMMARY_SIZE_LIMIT = 900 * 1024  # 900 KB (GitHub limit is 1 MB)
+
+_RESULT_LABEL = {"pass": "PASS", "fail": "FAIL", "error": "ERROR"}
+
+# ---------------------------------------------------------------------------
+# Result type
+# ---------------------------------------------------------------------------
+
+
+@dataclass(frozen=True)
+class SummaryResult:
+    """Result of workflow summary generation.
+
+    Attributes:
+        markdown: Complete Markdown string for ``$GITHUB_STEP_SUMMARY``.
+        truncated: Whether any findings sections were truncated.
+        truncation_note: Human-readable note about what was truncated,
+            or empty string if nothing was truncated.
+    """
+
+    markdown: str
+    truncated: bool
+    truncation_note: str
+
+
+# ---------------------------------------------------------------------------
+# Internal section renderers
+# ---------------------------------------------------------------------------
+
+
+def _render_header(
+    result: str,
+    context: ValidationContext,
+) -> str:
+    """Render the summary header with result and metadata."""
+    label = _RESULT_LABEL.get(result, result.upper())
+    return (
+        f"## CAMARA Validation — {label}\n\n"
+        f"**Profile**: {context.profile} | "
+        f"**Branch**: {context.branch_type} | "
+        f"**Trigger**: {context.trigger_type}\n"
+    )
+
+
+def _render_api_table(findings: List[dict]) -> str:
+    """Render the per-API summary table."""
+    by_api = count_findings_by_api(findings)
+    if not by_api:
+        return ""
+
+    lines = [
+        "\n### Summary\n",
+        "| API | Errors | Warnings | Hints |",
+        "|-----|--------|----------|-------|",
+    ]
+    for api_name, counts in by_api.items():
+        lines.append(
+            f"| {api_name} | {counts.errors} | {counts.warnings} | {counts.hints} |"
+        )
+    lines.append("")
+    return "\n".join(lines)
+
+
+def _render_findings_table(
+    findings: List[dict],
+    level_label: str,
+) -> str:
+    """Render a findings table for a single severity level.
+
+    Returns an empty string if there are no findings at this level.
+    """
+    if not findings:
+        return ""
+
+    lines = [
+        f"\n### {level_label}\n",
+        "| Rule | File | Line | Message | Hint |",
+        "|------|------|------|---------|------|",
+    ]
+    for f in findings:
+        rule = format_rule_label(f)
+        path = f.get("path", "")
+        line = f.get("line", 0)
+        message = f.get("message", "").replace("|", "\\|")
+        hint = (f.get("hint") or "").replace("|", "\\|")
+        lines.append(f"| {rule} | {path} | {line} | {message} | {hint} |")
+    lines.append("")
+    return "\n".join(lines)
+
+
+def _render_engine_table(
+    engine_statuses: Optional[Dict[str, str]],
+) -> str:
+    """Render the engine status table."""
+    if not engine_statuses:
+        return ""
+
+    lines = [
+        "\n### Engine Status\n",
+        "| Engine | Status |",
+        "|--------|--------|",
+    ]
+    for engine, status in engine_statuses.items():
+        lines.append(f"| {engine} | {status} |")
+    lines.append("")
+    return "\n".join(lines)
+
+
+def _render_footer(
+    context: ValidationContext,
+    commit_sha: str,
+) -> str:
+    """Render the footer with commit info and workflow link."""
+    parts = []
+    if commit_sha:
+        parts.append(f"Commit: {commit_sha[:7]}")
+    if context.tooling_ref:
+        parts.append(f"Tooling: {context.tooling_ref[:7]}")
+    if context.workflow_run_url:
+        parts.append(f"[Full workflow run]({context.workflow_run_url})")
+    if not parts:
+        return ""
+    return "\n---\n" + " | ".join(parts) + "\n"
+
+
+# ---------------------------------------------------------------------------
+# Truncation
+# ---------------------------------------------------------------------------
+
+
+def _byte_size(text: str) -> int:
+    """Return the UTF-8 byte size of *text*."""
+    return len(text.encode("utf-8"))
+
+
+def _truncation_notice(shown: int, total: int, level_label: str) -> str:
+    """Build a truncation notice for a findings section."""
+    return (
+        f"> Showing {shown} of {total} {level_label.lower()} findings. "
+        f"Full results available in workflow artifacts.\n"
+    )
+
+
+# ---------------------------------------------------------------------------
+# Public API
+# ---------------------------------------------------------------------------
+
+
+def generate_workflow_summary(
+    post_filter_result: PostFilterResult,
+    context: ValidationContext,
+    engine_statuses: Optional[Dict[str, str]] = None,
+    commit_sha: str = "",
+) -> SummaryResult:
+    """Generate the full workflow summary Markdown.
+
+    Implements truncation: errors are never truncated; warnings and then
+    hints are truncated if the cumulative size exceeds
+    :data:`SUMMARY_SIZE_LIMIT`.
+
+    Args:
+        post_filter_result: Output of the post-filter engine.
+        context: Unified validation context.
+        engine_statuses: Optional mapping of engine name to status string.
+        commit_sha: Full commit SHA (first 7 chars shown in footer).
+
+    Returns:
+        :class:`SummaryResult` with the complete Markdown and truncation info.
+    """
+    findings = post_filter_result.findings
+    sorted_all = sort_findings_by_priority(findings)
+
+    # Partition by level
+    errors = [f for f in sorted_all if f.get("level") == "error"]
+    warnings = [f for f in sorted_all if f.get("level") == "warn"]
+    hints = [f for f in sorted_all if f.get("level") == "hint"]
+
+    # Fixed sections (always rendered)
+    header = _render_header(post_filter_result.result, context)
+    api_table = _render_api_table(findings)
+    engine_table = _render_engine_table(engine_statuses)
+    footer = _render_footer(context, commit_sha)
+
+    fixed_size = sum(
+        _byte_size(s) for s in (header, api_table, engine_table, footer)
+    )
+
+    # Budget for findings sections
+    budget = SUMMARY_SIZE_LIMIT - fixed_size
+    truncated = False
+    truncation_note = ""
+
+    # Errors section — never truncated
+    errors_section = _render_findings_table(errors, "Errors")
+    budget -= _byte_size(errors_section)
+
+    # Warnings section — truncated if over budget
+    warnings_section = _render_findings_table(warnings, "Warnings")
+    if budget - _byte_size(warnings_section) < 0 and warnings:
+        # Find how many warnings fit
+        shown = _fit_count(warnings, "Warnings", budget)
+        if shown > 0:
+            warnings_section = _render_findings_table(warnings[:shown], "Warnings")
+            warnings_section += _truncation_notice(
+                shown, len(warnings), "Warnings"
+            )
+        else:
+            warnings_section = _truncation_notice(0, len(warnings), "Warnings")
+        truncated = True
+        truncation_note = f"{len(warnings) - shown} warning(s) truncated"
+    budget -= _byte_size(warnings_section)
+
+    # Hints section — truncated if over budget
+    hints_section = _render_findings_table(hints, "Hints")
+    if budget - _byte_size(hints_section) < 0 and hints:
+        shown = _fit_count(hints, "Hints", budget)
+        if shown > 0:
+            hints_section = _render_findings_table(hints[:shown], "Hints")
+            hints_section += _truncation_notice(shown, len(hints), "Hints")
+        else:
+            hints_section = _truncation_notice(0, len(hints), "Hints")
+        truncated = True
+        note = f"{len(hints) - shown} hint(s) truncated"
+        truncation_note = (
+            f"{truncation_note}; {note}" if truncation_note else note
+        )
+
+    # Assemble
+    markdown = (
+        header
+        + api_table
+        + errors_section
+        + warnings_section
+        + hints_section
+        + engine_table
+        + footer
+    )
+
+    return SummaryResult(
+        markdown=markdown,
+        truncated=truncated,
+        truncation_note=truncation_note,
+    )
+
+
+def _fit_count(
+    findings: List[dict],
+    level_label: str,
+    budget: int,
+) -> int:
+    """Binary-search for how many findings fit within *budget* bytes.
+
+    Accounts for the truncation notice size that will be appended.
+    """
+    if budget <= 0:
+        return 0
+
+    # Quick check: does the full section fit?
+    full = _render_findings_table(findings, level_label)
+    if _byte_size(full) <= budget:
+        return len(findings)
+
+    # Binary search
+    lo, hi = 0, len(findings)
+    while lo < hi:
+        mid = (lo + hi + 1) // 2
+        section = _render_findings_table(findings[:mid], level_label)
+        notice = _truncation_notice(mid, len(findings), level_label)
+        if _byte_size(section) + _byte_size(notice) <= budget:
+            lo = mid
+        else:
+            hi = mid - 1
+    return lo
diff --git a/validation/tests/test_output_annotations.py b/validation/tests/test_output_annotations.py
new file mode 100644
index 00000000..651df71c
--- /dev/null
+++ b/validation/tests/test_output_annotations.py
@@ -0,0 +1,194 @@
+"""Unit tests for validation.output.annotations."""
+
+from __future__ import annotations
+
+from validation.output.annotations import (
+    ANNOTATION_LIMIT,
+    AnnotationResult,
+    _build_command,
+    _sanitize_message,
+    generate_annotations,
+)
+from validation.postfilter.engine import PostFilterResult
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_finding(
+    level: str = "warn",
+    path: str = "code/API_definitions/quality-on-demand.yaml",
+    line: int = 10,
+    column: int | None = None,
+    message: str = "Something is wrong",
+    hint: str | None = None,
+    rule_id: str | None = None,
+    engine_rule: str = "some-rule",
+    api_name: str | None = "quality-on-demand",
+    blocks: bool = False,
+) -> dict:
+    f: dict = {
+        "engine": "spectral",
+        "engine_rule": engine_rule,
+        "level": level,
+        "message": message,
+        "path": path,
+        "line": line,
+        "api_name": api_name,
+        "blocks": blocks,
+    }
+    if column is not None:
+        f["column"] = column
+    if rule_id is not None:
+        f["rule_id"] = rule_id
+    if hint is not None:
+        f["hint"] = hint
+    return f
+
+
+def _make_result(findings: list[dict]) -> PostFilterResult:
+    return PostFilterResult(findings=findings, result="fail", summary="test")
+
+
+# ---------------------------------------------------------------------------
+# _sanitize_message
+# ---------------------------------------------------------------------------
+
+
+class TestSanitizeMessage:
+    def test_newlines_replaced(self):
+        assert " " in _sanitize_message("line1\nline2")
+        assert "\n" not in _sanitize_message("line1\nline2")
+
+    def test_carriage_return_replaced(self):
+        assert "\r" not in _sanitize_message("a\rb")
+
+    def test_crlf_replaced(self):
+        assert "\r\n" not in _sanitize_message("a\r\nb")
+
+    def test_colons_encoded(self):
+        result = _sanitize_message("key::value")
+        assert "::" not in result
+        assert "%3A" in result
+
+    def test_plain_text_unchanged(self):
+        assert _sanitize_message("hello world") == "hello world"
+
+
+# ---------------------------------------------------------------------------
+# _build_command
+# ---------------------------------------------------------------------------
+
+
+class TestBuildCommand:
+    def test_error_level(self):
+        f = _make_finding(level="error", path="a.yaml", line=5)
+        cmd = _build_command(f)
+        assert cmd.startswith("::error ")
+
+    def test_warn_level(self):
+        f = _make_finding(level="warn")
+        cmd = _build_command(f)
+        assert cmd.startswith("::warning ")
+
+    def test_hint_level(self):
+        f = _make_finding(level="hint")
+        cmd = _build_command(f)
+        assert cmd.startswith("::notice ")
+
+    def test_file_and_line(self):
+        f = _make_finding(path="spec.yaml", line=42)
+        cmd = _build_command(f)
+        assert "file=spec.yaml" in cmd
+        assert "line=42" in cmd
+
+    def test_column_included(self):
+        f = _make_finding(column=8)
+        cmd = _build_command(f)
+        assert "col=8" in cmd
+
+    def test_column_omitted_when_none(self):
+        f = _make_finding(column=None)
+        cmd = _build_command(f)
+        assert "col=" not in cmd
+
+    def test_title_uses_rule_id(self):
+        f = _make_finding(rule_id="S-042", engine_rule="some-spectral-rule")
+        cmd = _build_command(f)
+        assert "title=S-042" in cmd
+
+    def test_title_falls_back_to_engine_rule(self):
+        f = _make_finding(engine_rule="camara-path-casing")
+        cmd = _build_command(f)
+        assert "title=camara-path-casing" in cmd
+
+    def test_hint_appended(self):
+        f = _make_finding(message="Bad path", hint="Use kebab-case")
+        cmd = _build_command(f)
+        assert "Bad path | Hint%3A Use kebab-case" in cmd
+
+    def test_no_hint(self):
+        f = _make_finding(message="Bad path")
+        cmd = _build_command(f)
+        assert "Hint" not in cmd
+
+
+# ---------------------------------------------------------------------------
+# generate_annotations
+# ---------------------------------------------------------------------------
+
+
+class TestGenerateAnnotations:
+    def test_empty_findings(self):
+        result = generate_annotations(_make_result([]))
+        assert result == AnnotationResult(
+            commands=[], total_findings=0, annotations_emitted=0, truncated=False
+        )
+
+    def test_single_finding(self):
+        findings = [_make_finding(level="error")]
+        result = generate_annotations(_make_result(findings))
+        assert result.total_findings == 1
+        assert result.annotations_emitted == 1
+        assert not result.truncated
+        assert result.commands[0].startswith("::error ")
+
+    def test_priority_ordering(self):
+        findings = [
+            _make_finding(level="hint", path="a.yaml", line=1),
+            _make_finding(level="error", path="a.yaml", line=1),
+            _make_finding(level="warn", path="a.yaml", line=1),
+        ]
+        result = generate_annotations(_make_result(findings))
+        assert result.commands[0].startswith("::error ")
+        assert result.commands[1].startswith("::warning ")
+        assert result.commands[2].startswith("::notice ")
+
+    def test_limit_enforced(self):
+        findings = [
+            _make_finding(level="error", line=i) for i in range(60)
+        ]
+        result = generate_annotations(_make_result(findings))
+        assert result.total_findings == 60
+        assert result.annotations_emitted == ANNOTATION_LIMIT
+        assert result.truncated
+
+    def test_limit_prioritises_errors(self):
+        errors = [_make_finding(level="error", line=i) for i in range(30)]
+        warnings = [_make_finding(level="warn", line=i) for i in range(30)]
+        findings = warnings + errors  # Interleave — warnings first in input
+        result = generate_annotations(_make_result(findings))
+        # All 30 errors should be in the first 30 commands
+        error_cmds = [c for c in result.commands if c.startswith("::error ")]
+        assert len(error_cmds) == 30
+        # Remaining 20 are warnings
+        warn_cmds = [c for c in result.commands if c.startswith("::warning ")]
+        assert len(warn_cmds) == 20
+
+    def test_exactly_at_limit_not_truncated(self):
+        findings = [_make_finding(line=i) for i in range(ANNOTATION_LIMIT)]
+        result = generate_annotations(_make_result(findings))
+        assert result.annotations_emitted == ANNOTATION_LIMIT
+        assert not result.truncated
diff --git a/validation/tests/test_output_commit_status.py b/validation/tests/test_output_commit_status.py
new file mode 100644
index 00000000..482a5def
--- /dev/null
+++ b/validation/tests/test_output_commit_status.py
@@ -0,0 +1,112 @@
+"""Unit tests for validation.output.commit_status."""
+
+from __future__ import annotations
+
+from validation.context import ValidationContext
+from validation.output.commit_status import (
+    STATUS_CONTEXT,
+    CommitStatusPayload,
+    generate_commit_status,
+)
+from validation.postfilter.engine import PostFilterResult
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(
+    workflow_run_url: str = "https://github.com/test/run/1",
+) -> ValidationContext:
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type="main",
+        trigger_type="pr",
+        profile="standard",
+        stage="standard",
+        target_release_type=None,
+        commonalities_release=None,
+        icm_release=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=(),
+        workflow_run_url=workflow_run_url,
+        tooling_ref="abc1234",
+    )
+
+
+def _make_result(
+    result: str = "pass",
+    summary: str = "Passed: no findings",
+) -> PostFilterResult:
+    return PostFilterResult(findings=[], result=result, summary=summary)
+
+
+# ---------------------------------------------------------------------------
+# Tests
+# ---------------------------------------------------------------------------
+
+
+class TestGenerateCommitStatus:
+    def test_pass_maps_to_success(self):
+        payload = generate_commit_status(_make_result("pass"), _make_context())
+        assert payload.state == "success"
+
+    def test_fail_maps_to_failure(self):
+        payload = generate_commit_status(_make_result("fail"), _make_context())
+        assert payload.state == "failure"
+
+    def test_error_maps_to_error(self):
+        payload = generate_commit_status(_make_result("error"), _make_context())
+        assert payload.state == "error"
+
+    def test_unknown_result_maps_to_error(self):
+        payload = generate_commit_status(_make_result("unknown"), _make_context())
+        assert payload.state == "error"
+
+    def test_context_is_camara_validation(self):
+        payload = generate_commit_status(_make_result(), _make_context())
+        assert payload.context == STATUS_CONTEXT
+        assert payload.context == "CAMARA Validation"
+
+    def test_target_url(self):
+        url = "https://github.com/test/run/42"
+        payload = generate_commit_status(
+            _make_result(), _make_context(workflow_run_url=url)
+        )
+        assert payload.target_url == url
+
+    def test_description_from_summary(self):
+        payload = generate_commit_status(
+            _make_result(summary="Passed: no findings"),
+            _make_context(),
+        )
+        assert payload.description == "Passed: no findings"
+
+    def test_description_truncated_at_140(self):
+        long_summary = "x" * 200
+        payload = generate_commit_status(
+            _make_result(summary=long_summary),
+            _make_context(),
+        )
+        assert len(payload.description) <= 140
+        assert payload.description.endswith("\u2026")
+
+    def test_description_exactly_140_not_truncated(self):
+        summary = "x" * 140
+        payload = generate_commit_status(
+            _make_result(summary=summary),
+            _make_context(),
+        )
+        assert payload.description == summary
+        assert "\u2026" not in payload.description
+
+    def test_frozen(self):
+        payload = generate_commit_status(_make_result(), _make_context())
+        try:
+            payload.state = "failure"  # type: ignore[misc]
+            assert False, "Should not be able to mutate frozen dataclass"
+        except AttributeError:
+            pass
diff --git a/validation/tests/test_output_diagnostics.py b/validation/tests/test_output_diagnostics.py
new file mode 100644
index 00000000..77d6f7d8
--- /dev/null
+++ b/validation/tests/test_output_diagnostics.py
@@ -0,0 +1,143 @@
+"""Unit tests for validation.output.diagnostics."""
+
+from __future__ import annotations
+
+import json
+from pathlib import Path
+
+from validation.context import ValidationContext
+from validation.output.diagnostics import write_diagnostics
+from validation.postfilter.engine import PostFilterResult
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context() -> ValidationContext:
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type="main",
+        trigger_type="pr",
+        profile="standard",
+        stage="standard",
+        target_release_type=None,
+        commonalities_release=None,
+        icm_release=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=(),
+        workflow_run_url="https://example.com/run/1",
+        tooling_ref="abc1234",
+    )
+
+
+def _make_finding(
+    level: str = "warn",
+    message: str = "Something is wrong",
+) -> dict:
+    return {
+        "engine": "spectral",
+        "engine_rule": "some-rule",
+        "level": level,
+        "message": message,
+        "path": "spec.yaml",
+        "line": 10,
+        "api_name": "quality-on-demand",
+        "blocks": False,
+    }
+
+
+def _make_result(
+    findings: list[dict] | None = None,
+    result: str = "pass",
+) -> PostFilterResult:
+    return PostFilterResult(
+        findings=findings or [],
+        result=result,
+        summary="Passed: no findings",
+    )
+
+
+# ---------------------------------------------------------------------------
+# Tests
+# ---------------------------------------------------------------------------
+
+
+class TestWriteDiagnostics:
+    def test_creates_expected_files(self, tmp_path: Path):
+        out = tmp_path / "output"
+        paths = write_diagnostics(_make_result(), _make_context(), out)
+        names = {p.name for p in paths}
+        assert names == {"findings.json", "context.json", "summary.json"}
+
+    def test_findings_json_content(self, tmp_path: Path):
+        findings = [_make_finding(level="error"), _make_finding(level="warn")]
+        out = tmp_path / "output"
+        write_diagnostics(_make_result(findings), _make_context(), out)
+        data = json.loads((out / "findings.json").read_text())
+        assert len(data) == 2
+        assert data[0]["level"] == "error"
+        assert data[1]["level"] == "warn"
+
+    def test_context_json_parseable(self, tmp_path: Path):
+        out = tmp_path / "output"
+        write_diagnostics(_make_result(), _make_context(), out)
+        data = json.loads((out / "context.json").read_text())
+        assert data["repository"] == "TestRepo"
+        assert data["profile"] == "standard"
+
+    def test_summary_json_content(self, tmp_path: Path):
+        findings = [
+            _make_finding(level="error"),
+            _make_finding(level="warn"),
+        ]
+        out = tmp_path / "output"
+        write_diagnostics(
+            _make_result(findings, result="fail"),
+            _make_context(),
+            out,
+        )
+        data = json.loads((out / "summary.json").read_text())
+        assert data["result"] == "fail"
+        assert data["counts"]["errors"] == 1
+        assert data["counts"]["warnings"] == 1
+        assert data["counts"]["total"] == 2
+
+    def test_engine_reports_written_when_provided(self, tmp_path: Path):
+        out = tmp_path / "output"
+        reports = {"spectral": {"raw": "data"}}
+        paths = write_diagnostics(
+            _make_result(), _make_context(), out, engine_reports=reports
+        )
+        names = {p.name for p in paths}
+        assert "engine-reports.json" in names
+        data = json.loads((out / "engine-reports.json").read_text())
+        assert data["spectral"]["raw"] == "data"
+
+    def test_engine_reports_omitted_when_none(self, tmp_path: Path):
+        out = tmp_path / "output"
+        paths = write_diagnostics(_make_result(), _make_context(), out)
+        assert not (out / "engine-reports.json").exists()
+        assert len(paths) == 3
+
+    def test_empty_findings(self, tmp_path: Path):
+        out = tmp_path / "output"
+        write_diagnostics(_make_result([]), _make_context(), out)
+        data = json.loads((out / "findings.json").read_text())
+        assert data == []
+
+    def test_creates_output_dir(self, tmp_path: Path):
+        out = tmp_path / "nested" / "deep" / "output"
+        assert not out.exists()
+        write_diagnostics(_make_result(), _make_context(), out)
+        assert out.exists()
+        assert (out / "findings.json").exists()
+
+    def test_returns_written_paths(self, tmp_path: Path):
+        out = tmp_path / "output"
+        paths = write_diagnostics(_make_result(), _make_context(), out)
+        assert all(isinstance(p, Path) for p in paths)
+        assert all(p.exists() for p in paths)
diff --git a/validation/tests/test_output_formatting.py b/validation/tests/test_output_formatting.py
new file mode 100644
index 00000000..4da4e2ad
--- /dev/null
+++ b/validation/tests/test_output_formatting.py
@@ -0,0 +1,200 @@
+"""Unit tests for validation.output.formatting."""
+
+from __future__ import annotations
+
+from validation.output.formatting import (
+    REPO_LEVEL_LABEL,
+    FindingCounts,
+    count_findings,
+    count_findings_by_api,
+    format_finding_location,
+    format_rule_label,
+    sort_findings_by_priority,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_finding(
+    level: str = "warn",
+    path: str = "code/API_definitions/quality-on-demand.yaml",
+    line: int = 10,
+    api_name: str | None = "quality-on-demand",
+    blocks: bool = False,
+    rule_id: str | None = None,
+    engine_rule: str = "some-rule",
+    column: int | None = None,
+) -> dict:
+    f: dict = {
+        "engine": "spectral",
+        "engine_rule": engine_rule,
+        "level": level,
+        "message": "Something is wrong",
+        "path": path,
+        "line": line,
+        "api_name": api_name,
+        "blocks": blocks,
+    }
+    if rule_id is not None:
+        f["rule_id"] = rule_id
+    if column is not None:
+        f["column"] = column
+    return f
+
+
+# ---------------------------------------------------------------------------
+# count_findings
+# ---------------------------------------------------------------------------
+
+
+class TestCountFindings:
+    def test_empty(self):
+        result = count_findings([])
+        assert result == FindingCounts(
+            errors=0, warnings=0, hints=0, total=0, blocking=0
+        )
+
+    def test_mixed_levels(self):
+        findings = [
+            _make_finding(level="error", blocks=True),
+            _make_finding(level="warn"),
+            _make_finding(level="warn", blocks=True),
+            _make_finding(level="hint"),
+        ]
+        result = count_findings(findings)
+        assert result.errors == 1
+        assert result.warnings == 2
+        assert result.hints == 1
+        assert result.total == 4
+        assert result.blocking == 2
+
+    def test_all_same_level(self):
+        findings = [_make_finding(level="error") for _ in range(3)]
+        result = count_findings(findings)
+        assert result.errors == 3
+        assert result.warnings == 0
+        assert result.hints == 0
+        assert result.total == 3
+
+
+# ---------------------------------------------------------------------------
+# count_findings_by_api
+# ---------------------------------------------------------------------------
+
+
+class TestCountFindingsByApi:
+    def test_multi_api(self):
+        findings = [
+            _make_finding(api_name="api-a", level="error"),
+            _make_finding(api_name="api-a", level="warn"),
+            _make_finding(api_name="api-b", level="hint"),
+        ]
+        result = count_findings_by_api(findings)
+        assert set(result.keys()) == {"api-a", "api-b"}
+        assert result["api-a"].errors == 1
+        assert result["api-a"].warnings == 1
+        assert result["api-b"].hints == 1
+
+    def test_repo_level_findings(self):
+        findings = [
+            _make_finding(api_name=None, level="error"),
+            _make_finding(api_name=None, level="warn"),
+        ]
+        result = count_findings_by_api(findings)
+        assert REPO_LEVEL_LABEL in result
+        assert result[REPO_LEVEL_LABEL].total == 2
+
+    def test_empty(self):
+        result = count_findings_by_api([])
+        assert result == {}
+
+    def test_mixed_api_and_repo(self):
+        findings = [
+            _make_finding(api_name="api-a", level="error"),
+            _make_finding(api_name=None, level="warn"),
+        ]
+        result = count_findings_by_api(findings)
+        assert set(result.keys()) == {"api-a", REPO_LEVEL_LABEL}
+
+
+# ---------------------------------------------------------------------------
+# sort_findings_by_priority
+# ---------------------------------------------------------------------------
+
+
+class TestSortFindingsByPriority:
+    def test_level_ordering(self):
+        findings = [
+            _make_finding(level="hint", path="a.yaml", line=1),
+            _make_finding(level="error", path="a.yaml", line=1),
+            _make_finding(level="warn", path="a.yaml", line=1),
+        ]
+        sorted_f = sort_findings_by_priority(findings)
+        levels = [f["level"] for f in sorted_f]
+        assert levels == ["error", "warn", "hint"]
+
+    def test_secondary_sort_by_path_then_line(self):
+        findings = [
+            _make_finding(level="error", path="z.yaml", line=5),
+            _make_finding(level="error", path="a.yaml", line=20),
+            _make_finding(level="error", path="a.yaml", line=3),
+        ]
+        sorted_f = sort_findings_by_priority(findings)
+        locs = [(f["path"], f["line"]) for f in sorted_f]
+        assert locs == [("a.yaml", 3), ("a.yaml", 20), ("z.yaml", 5)]
+
+    def test_empty(self):
+        assert sort_findings_by_priority([]) == []
+
+    def test_single_item(self):
+        f = _make_finding()
+        assert sort_findings_by_priority([f]) == [f]
+
+
+# ---------------------------------------------------------------------------
+# format_rule_label
+# ---------------------------------------------------------------------------
+
+
+class TestFormatRuleLabel:
+    def test_with_rule_id(self):
+        f = _make_finding(rule_id="S-042", engine_rule="some-spectral-rule")
+        assert format_rule_label(f) == "S-042"
+
+    def test_without_rule_id(self):
+        f = _make_finding(engine_rule="camara-path-casing")
+        assert format_rule_label(f) == "camara-path-casing"
+
+    def test_empty_rule_id_falls_back(self):
+        f = _make_finding(engine_rule="my-rule")
+        f["rule_id"] = ""
+        assert format_rule_label(f) == "my-rule"
+
+    def test_missing_both(self):
+        assert format_rule_label({}) == "unknown"
+
+
+# ---------------------------------------------------------------------------
+# format_finding_location
+# ---------------------------------------------------------------------------
+
+
+class TestFormatFindingLocation:
+    def test_with_column(self):
+        f = _make_finding(path="spec.yaml", line=42, column=8)
+        assert format_finding_location(f) == "spec.yaml:42:8"
+
+    def test_without_column(self):
+        f = _make_finding(path="spec.yaml", line=42)
+        assert format_finding_location(f) == "spec.yaml:42"
+
+    def test_column_none_explicit(self):
+        f = _make_finding(path="spec.yaml", line=42)
+        f["column"] = None
+        assert format_finding_location(f) == "spec.yaml:42"
+
+    def test_empty_finding(self):
+        assert format_finding_location({}) == ":0"
diff --git a/validation/tests/test_output_pr_comment.py b/validation/tests/test_output_pr_comment.py
new file mode 100644
index 00000000..fa1b0ddb
--- /dev/null
+++ b/validation/tests/test_output_pr_comment.py
@@ -0,0 +1,121 @@
+"""Unit tests for validation.output.pr_comment."""
+
+from __future__ import annotations
+
+from validation.context import ValidationContext
+from validation.output.pr_comment import MARKER, generate_pr_comment
+from validation.postfilter.engine import PostFilterResult
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(
+    profile: str = "standard",
+    workflow_run_url: str = "https://github.com/test/run/1",
+) -> ValidationContext:
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type="main",
+        trigger_type="pr",
+        profile=profile,
+        stage="standard",
+        target_release_type=None,
+        commonalities_release=None,
+        icm_release=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=(),
+        workflow_run_url=workflow_run_url,
+        tooling_ref="abc1234",
+    )
+
+
+def _make_finding(level: str = "warn", blocks: bool = False) -> dict:
+    return {
+        "engine": "spectral",
+        "engine_rule": "some-rule",
+        "level": level,
+        "message": "Something is wrong",
+        "path": "spec.yaml",
+        "line": 10,
+        "api_name": "quality-on-demand",
+        "blocks": blocks,
+    }
+
+
+def _make_result(
+    findings: list[dict] | None = None,
+    result: str = "pass",
+) -> PostFilterResult:
+    return PostFilterResult(
+        findings=findings or [],
+        result=result,
+        summary="test summary",
+    )
+
+
+# ---------------------------------------------------------------------------
+# Tests
+# ---------------------------------------------------------------------------
+
+
+class TestGeneratePrComment:
+    def test_contains_marker(self):
+        comment = generate_pr_comment(_make_result(), _make_context())
+        assert MARKER in comment
+
+    def test_marker_is_first_line(self):
+        comment = generate_pr_comment(_make_result(), _make_context())
+        assert comment.startswith(MARKER)
+
+    def test_pass_result(self):
+        comment = generate_pr_comment(_make_result(result="pass"), _make_context())
+        assert "PASS" in comment
+
+    def test_fail_result(self):
+        comment = generate_pr_comment(_make_result(result="fail"), _make_context())
+        assert "FAIL" in comment
+
+    def test_error_result(self):
+        comment = generate_pr_comment(_make_result(result="error"), _make_context())
+        assert "ERROR" in comment
+
+    def test_counts_displayed(self):
+        findings = [
+            _make_finding(level="error"),
+            _make_finding(level="warn"),
+            _make_finding(level="warn"),
+            _make_finding(level="hint"),
+        ]
+        comment = generate_pr_comment(_make_result(findings), _make_context())
+        assert "1 errors" in comment
+        assert "2 warnings" in comment
+        assert "1 hints" in comment
+
+    def test_profile_displayed(self):
+        comment = generate_pr_comment(
+            _make_result(), _make_context(profile="strict")
+        )
+        assert "Profile: strict" in comment
+
+    def test_workflow_url_linked(self):
+        url = "https://github.com/test/run/42"
+        comment = generate_pr_comment(
+            _make_result(), _make_context(workflow_run_url=url)
+        )
+        assert f"[View full results]({url})" in comment
+
+    def test_no_url_fallback(self):
+        comment = generate_pr_comment(
+            _make_result(), _make_context(workflow_run_url="")
+        )
+        assert "View full results" not in comment
+        assert "See workflow summary" in comment
+
+    def test_empty_findings(self):
+        comment = generate_pr_comment(_make_result([]), _make_context())
+        assert "0 errors, 0 warnings, 0 hints" in comment
diff --git a/validation/tests/test_output_workflow_summary.py b/validation/tests/test_output_workflow_summary.py
new file mode 100644
index 00000000..05e3d701
--- /dev/null
+++ b/validation/tests/test_output_workflow_summary.py
@@ -0,0 +1,337 @@
+"""Unit tests for validation.output.workflow_summary."""
+
+from __future__ import annotations
+
+from validation.context import ApiContext, ValidationContext
+from validation.output.workflow_summary import (
+    SUMMARY_SIZE_LIMIT,
+    SummaryResult,
+    generate_workflow_summary,
+)
+from validation.postfilter.engine import PostFilterResult
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(
+    profile: str = "standard",
+    branch_type: str = "main",
+    trigger_type: str = "pr",
+    workflow_run_url: str = "https://github.com/test/run/1",
+    tooling_ref: str = "abc1234def5678",
+) -> ValidationContext:
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type=branch_type,
+        trigger_type=trigger_type,
+        profile=profile,
+        stage="standard",
+        target_release_type=None,
+        commonalities_release=None,
+        icm_release=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=(),
+        workflow_run_url=workflow_run_url,
+        tooling_ref=tooling_ref,
+    )
+
+
+def _make_finding(
+    level: str = "warn",
+    path: str = "code/API_definitions/quality-on-demand.yaml",
+    line: int = 10,
+    message: str = "Something is wrong",
+    api_name: str | None = "quality-on-demand",
+    blocks: bool = False,
+    rule_id: str | None = None,
+    engine_rule: str = "some-rule",
+    hint: str | None = None,
+) -> dict:
+    f: dict = {
+        "engine": "spectral",
+        "engine_rule": engine_rule,
+        "level": level,
+        "message": message,
+        "path": path,
+        "line": line,
+        "api_name": api_name,
+        "blocks": blocks,
+    }
+    if rule_id is not None:
+        f["rule_id"] = rule_id
+    if hint is not None:
+        f["hint"] = hint
+    return f
+
+
+def _make_result(
+    findings: list[dict] | None = None,
+    result: str = "pass",
+    summary: str = "Passed: no findings",
+) -> PostFilterResult:
+    return PostFilterResult(
+        findings=findings or [],
+        result=result,
+        summary=summary,
+    )
+
+
+# ---------------------------------------------------------------------------
+# Header
+# ---------------------------------------------------------------------------
+
+
+class TestHeader:
+    def test_pass_result(self):
+        ctx = _make_context()
+        sr = generate_workflow_summary(_make_result(), ctx)
+        assert "## CAMARA Validation — PASS" in sr.markdown
+
+    def test_fail_result(self):
+        findings = [_make_finding(level="error", blocks=True)]
+        ctx = _make_context()
+        sr = generate_workflow_summary(
+            _make_result(findings, result="fail"), ctx
+        )
+        assert "## CAMARA Validation — FAIL" in sr.markdown
+
+    def test_error_result(self):
+        ctx = _make_context()
+        sr = generate_workflow_summary(
+            _make_result(result="error"), ctx
+        )
+        assert "## CAMARA Validation — ERROR" in sr.markdown
+
+    def test_metadata_in_header(self):
+        ctx = _make_context(profile="strict", branch_type="release", trigger_type="dispatch")
+        sr = generate_workflow_summary(_make_result(), ctx)
+        assert "strict" in sr.markdown
+        assert "release" in sr.markdown
+        assert "dispatch" in sr.markdown
+
+
+# ---------------------------------------------------------------------------
+# API summary table
+# ---------------------------------------------------------------------------
+
+
+class TestApiTable:
+    def test_multi_api(self):
+        findings = [
+            _make_finding(level="error", api_name="api-a"),
+            _make_finding(level="warn", api_name="api-a"),
+            _make_finding(level="hint", api_name="api-b"),
+        ]
+        ctx = _make_context()
+        sr = generate_workflow_summary(_make_result(findings), ctx)
+        assert "| api-a |" in sr.markdown
+        assert "| api-b |" in sr.markdown
+
+    def test_empty_findings_no_table(self):
+        sr = generate_workflow_summary(_make_result(), _make_context())
+        assert "### Summary" not in sr.markdown
+
+    def test_repo_level_findings(self):
+        findings = [_make_finding(api_name=None, level="warn")]
+        sr = generate_workflow_summary(_make_result(findings), _make_context())
+        assert "(repository)" in sr.markdown
+
+
+# ---------------------------------------------------------------------------
+# Findings tables
+# ---------------------------------------------------------------------------
+
+
+class TestFindingsTables:
+    def test_errors_section(self):
+        findings = [_make_finding(level="error", rule_id="S-001")]
+        sr = generate_workflow_summary(_make_result(findings), _make_context())
+        assert "### Errors" in sr.markdown
+        assert "S-001" in sr.markdown
+
+    def test_warnings_section(self):
+        findings = [_make_finding(level="warn")]
+        sr = generate_workflow_summary(_make_result(findings), _make_context())
+        assert "### Warnings" in sr.markdown
+
+    def test_hints_section(self):
+        findings = [_make_finding(level="hint")]
+        sr = generate_workflow_summary(_make_result(findings), _make_context())
+        assert "### Hints" in sr.markdown
+
+    def test_table_columns(self):
+        findings = [
+            _make_finding(
+                level="error",
+                rule_id="S-042",
+                path="spec.yaml",
+                line=47,
+                message="Bad path",
+                hint="Use kebab-case",
+            )
+        ]
+        sr = generate_workflow_summary(_make_result(findings), _make_context())
+        assert "| Rule | File | Line | Message | Hint |" in sr.markdown
+        assert "S-042" in sr.markdown
+        assert "spec.yaml" in sr.markdown
+        assert "47" in sr.markdown
+        assert "Bad path" in sr.markdown
+        assert "Use kebab-case" in sr.markdown
+
+    def test_pipe_in_message_escaped(self):
+        findings = [_make_finding(message="a|b")]
+        sr = generate_workflow_summary(_make_result(findings), _make_context())
+        assert "a\\|b" in sr.markdown
+
+    def test_absent_levels_not_rendered(self):
+        findings = [_make_finding(level="warn")]
+        sr = generate_workflow_summary(_make_result(findings), _make_context())
+        assert "### Errors" not in sr.markdown
+        assert "### Hints" not in sr.markdown
+
+
+# ---------------------------------------------------------------------------
+# Engine status table
+# ---------------------------------------------------------------------------
+
+
+class TestEngineTable:
+    def test_with_statuses(self):
+        sr = generate_workflow_summary(
+            _make_result(),
+            _make_context(),
+            engine_statuses={"Spectral": "completed", "yamllint": "error"},
+        )
+        assert "### Engine Status" in sr.markdown
+        assert "| Spectral | completed |" in sr.markdown
+        assert "| yamllint | error |" in sr.markdown
+
+    def test_none_statuses(self):
+        sr = generate_workflow_summary(_make_result(), _make_context())
+        assert "### Engine Status" not in sr.markdown
+
+
+# ---------------------------------------------------------------------------
+# Footer
+# ---------------------------------------------------------------------------
+
+
+class TestFooter:
+    def test_commit_sha_truncated(self):
+        sr = generate_workflow_summary(
+            _make_result(),
+            _make_context(),
+            commit_sha="abcdef1234567890",
+        )
+        assert "Commit: abcdef1" in sr.markdown
+
+    def test_tooling_ref(self):
+        ctx = _make_context(tooling_ref="1234567890abcdef")
+        sr = generate_workflow_summary(_make_result(), ctx)
+        assert "Tooling: 1234567" in sr.markdown
+
+    def test_workflow_run_url(self):
+        ctx = _make_context(workflow_run_url="https://github.com/test/run/1")
+        sr = generate_workflow_summary(_make_result(), ctx)
+        assert "[Full workflow run](https://github.com/test/run/1)" in sr.markdown
+
+    def test_empty_footer_fields(self):
+        ctx = _make_context(workflow_run_url="", tooling_ref="")
+        sr = generate_workflow_summary(_make_result(), ctx, commit_sha="")
+        # No footer separator when all fields empty
+        assert "---" not in sr.markdown
+
+
+# ---------------------------------------------------------------------------
+# Truncation
+# ---------------------------------------------------------------------------
+
+
+class TestTruncation:
+    def test_small_summary_not_truncated(self):
+        findings = [_make_finding() for _ in range(5)]
+        sr = generate_workflow_summary(_make_result(findings), _make_context())
+        assert not sr.truncated
+        assert sr.truncation_note == ""
+
+    def test_errors_never_truncated(self):
+        # Create enough error findings to fill a large portion of budget
+        # but they should still all appear
+        errors = [
+            _make_finding(
+                level="error",
+                message="x" * 200,
+                line=i,
+            )
+            for i in range(50)
+        ]
+        sr = generate_workflow_summary(_make_result(errors), _make_context())
+        # All 50 errors should appear
+        assert sr.markdown.count("| some-rule |") == 50
+
+    def test_hints_truncated_first(self):
+        # Generate content that exceeds the budget.
+        # With a very long message per finding and many findings,
+        # we force truncation.
+        long_msg = "x" * 5000
+        errors = [_make_finding(level="error", message=long_msg, line=i) for i in range(10)]
+        warnings = [_make_finding(level="warn", message=long_msg, line=i) for i in range(10)]
+        hints = [_make_finding(level="hint", message=long_msg, line=i) for i in range(200)]
+        findings = errors + warnings + hints
+
+        sr = generate_workflow_summary(
+            _make_result(findings),
+            _make_context(),
+        )
+        # All errors must be present
+        assert "### Errors" in sr.markdown
+        # If truncation occurred, it should hit hints first
+        if sr.truncated:
+            assert "hint" in sr.truncation_note.lower()
+
+    def test_truncation_note_set(self):
+        # Force truncation with massive findings
+        long_msg = "x" * 10000
+        hints = [
+            _make_finding(level="hint", message=long_msg, line=i)
+            for i in range(200)
+        ]
+        sr = generate_workflow_summary(
+            _make_result(hints),
+            _make_context(),
+        )
+        if sr.truncated:
+            assert sr.truncation_note != ""
+            assert "truncated" in sr.truncation_note
+
+    def test_under_limit_returns_all(self):
+        findings = [
+            _make_finding(level="error", line=1),
+            _make_finding(level="warn", line=2),
+            _make_finding(level="hint", line=3),
+        ]
+        sr = generate_workflow_summary(_make_result(findings), _make_context())
+        assert not sr.truncated
+        assert "### Errors" in sr.markdown
+        assert "### Warnings" in sr.markdown
+        assert "### Hints" in sr.markdown
+
+
+# ---------------------------------------------------------------------------
+# SummaryResult type
+# ---------------------------------------------------------------------------
+
+
+class TestSummaryResult:
+    def test_is_frozen(self):
+        sr = SummaryResult(markdown="x", truncated=False, truncation_note="")
+        try:
+            sr.markdown = "y"  # type: ignore[misc]
+            assert False, "Should not be able to mutate frozen dataclass"
+        except AttributeError:
+            pass

From b08a408a27c4fefbdaa82b756a79f04158b9dc9a Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Thu, 26 Mar 2026 22:33:18 +0100
Subject: [PATCH 012/157] feat(validation): add caller workflow, reusable
 workflow, and orchestrator

Pipeline orchestrator (validation/orchestrator.py) chains all existing
modules: config gate -> context builder -> engines -> post-filter -> output.
Writes output files for the workflow to post to GitHub surfaces.

Reusable workflow (.github/workflows/validation.yml) implements:
- 3-tier ref resolution (override -> OIDC -> fallback tag v1-rc)
- Single-job sequential pipeline with 16 steps
- Annotations, workflow summary, PR comment, commit status, diagnostics
- Graceful degradation for fork PRs (continue-on-error)

Also includes:
- Caller template (validation/workflows/validation-caller.yml)
- npm dependencies (Spectral, gherkin-lint) with lock file
- 29 unit tests for orchestrator (555 total, zero regressions)
---
 .github/workflows/validation.yml           |  298 +-
 .gitignore                                 |    3 +
 validation/orchestrator.py                 |  494 +++
 validation/package-lock.json               | 3474 ++++++++++++++++++++
 validation/package.json                    |    9 +
 validation/tests/test_orchestrator.py      |  666 ++++
 validation/workflows/validation-caller.yml |   35 +
 7 files changed, 4974 insertions(+), 5 deletions(-)
 create mode 100644 validation/orchestrator.py
 create mode 100644 validation/package-lock.json
 create mode 100644 validation/package.json
 create mode 100644 validation/tests/test_orchestrator.py
 create mode 100644 validation/workflows/validation-caller.yml

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index c47751c9..22c50ea6 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -1,15 +1,303 @@
-# CAMARA Validation Framework — Stub
+# CAMARA Validation Framework — Reusable Workflow (v1)
 #
-# Minimal stub on main so GitHub recognizes the workflow file on feature
-# branches.  The full implementation lives on the validation-framework branch.
+# Called by the identical per-repo caller workflow.  All validation logic,
+# configuration, and output surfacing are handled here.
+#
+# Architecture: single-job, sequential steps.  The Python orchestrator
+# (validation.orchestrator) chains all modules and writes output files;
+# the workflow reads those files and posts them to GitHub surfaces.
 
 name: CAMARA Validation
 
 on:
   workflow_call:
+    inputs:
+      tooling_ref_override:
+        description: >-
+          40-character SHA to override the automatic tooling checkout ref.
+          Pilot / break-glass only.
+        type: string
+        required: false
+        default: ""
+      profile:
+        description: >-
+          Validation profile override (advisory, standard, strict).
+          Default: framework auto-selects based on context.
+        type: string
+        required: false
+        default: ""
+      mode:
+        description: >-
+          Execution mode.  Set to "pre-snapshot" by release automation
+          for pre-snapshot validation gate.
+        type: string
+        required: false
+        default: ""
+
+permissions:
+  checks: write
+  pull-requests: write
+  issues: write
+  contents: read
+  statuses: write
+  id-token: write
 
 jobs:
-  stub:
+  validate:
+    name: Validate
     runs-on: ubuntu-latest
+
     steps:
-      - run: 'echo "Stub — use the validation-framework branch for the full workflow"'
+      # ── Step 1: Checkout repository content ────────────────────────
+      - name: Checkout repository
+        uses: actions/checkout@v6
+        with:
+          fetch-depth: 0
+
+      # ── Step 2: Resolve tooling ref (OIDC → override → fallback) ──
+      - name: Resolve tooling ref
+        id: resolve-ref
+        uses: actions/github-script@v8
+        env:
+          TOOLING_REF_OVERRIDE: ${{ inputs.tooling_ref_override }}
+        with:
+          script: |
+            const overrideRef = (process.env.TOOLING_REF_OVERRIDE || '').trim();
+            const shaPattern = /^[0-9a-f]{40}$/i;
+            const workflowRefPattern =
+              /^([^/]+\/[^/]+)\/\.github\/workflows\/validation\.yml@.+$/;
+
+            function decodeJwtPayload(jwt) {
+              const parts = jwt.split('.');
+              if (parts.length !== 3) {
+                throw new Error('OIDC token is not a valid JWT');
+              }
+              const payload = parts[1]
+                .replace(/-/g, '+')
+                .replace(/_/g, '/');
+              const padLength = (4 - (payload.length % 4)) % 4;
+              const padded = payload + '='.repeat(padLength);
+              return JSON.parse(Buffer.from(padded, 'base64').toString('utf8'));
+            }
+
+            // Tier 1: Explicit override (highest priority)
+            if (overrideRef) {
+              if (!shaPattern.test(overrideRef)) {
+                core.setFailed('tooling_ref_override must be a full 40-character SHA');
+                return;
+              }
+              core.setOutput('tooling_checkout_repo', 'camaraproject/tooling');
+              core.setOutput('tooling_checkout_ref', overrideRef.toLowerCase());
+              core.setOutput('tooling_ref_source', 'override');
+              core.info(`Tooling ref: override ${overrideRef.substring(0, 7)}`);
+              return;
+            }
+
+            // Tier 2: OIDC job_workflow_sha
+            try {
+              const oidcToken = await core.getIDToken('camara-validation-tooling-ref');
+              const claims = decodeJwtPayload(oidcToken);
+              const workflowRef = claims.job_workflow_ref || '';
+              const workflowSha = claims.job_workflow_sha || '';
+              const match = workflowRef.match(workflowRefPattern);
+
+              if (match && shaPattern.test(workflowSha)) {
+                const repo = match[1];
+                const ref = workflowSha.toLowerCase();
+                core.setOutput('tooling_checkout_repo', repo);
+                core.setOutput('tooling_checkout_ref', ref);
+                core.setOutput('tooling_ref_source', 'oidc');
+                core.info(`Tooling ref: OIDC ${ref.substring(0, 7)} from ${repo}`);
+                core.info(`job_workflow_ref: ${workflowRef}`);
+                return;
+              }
+              core.info(`OIDC claims unusable: ref=${workflowRef} sha=${workflowSha}`);
+            } catch (e) {
+              core.info(`OIDC token unavailable: ${e.message}`);
+            }
+
+            // Tier 3: Hardcoded fallback tag
+            core.setOutput('tooling_checkout_repo', 'camaraproject/tooling');
+            core.setOutput('tooling_checkout_ref', 'v1-rc');
+            core.setOutput('tooling_ref_source', 'fallback_tag');
+            core.info('Tooling ref: fallback tag v1-rc');
+
+      # ── Step 3: Checkout tooling (sparse) ──────────────────────────
+      - name: Checkout tooling
+        uses: actions/checkout@v6
+        with:
+          repository: ${{ steps.resolve-ref.outputs.tooling_checkout_repo }}
+          ref: ${{ steps.resolve-ref.outputs.tooling_checkout_ref }}
+          sparse-checkout: |
+            linting/config
+            validation
+            shared-actions
+          path: .tooling
+
+      # ── Step 4: Setup Python ───────────────────────────────────────
+      - name: Setup Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
+
+      # ── Step 5: Setup Node ─────────────────────────────────────────
+      - name: Setup Node
+        uses: actions/setup-node@v4
+        with:
+          node-version: "20"
+
+      # ── Step 6: Install Python dependencies ────────────────────────
+      - name: Install Python dependencies
+        run: pip install --quiet pyyaml jsonschema
+
+      # ── Step 7: Install Node dependencies ──────────────────────────
+      - name: Install Node dependencies
+        run: npm ci --ignore-scripts
+        working-directory: .tooling/validation
+
+      # ── Step 8: Detect release-plan changes (PR only) ──────────────
+      - name: Detect release-plan changes
+        id: detect-changes
+        if: github.event_name == 'pull_request'
+        run: |
+          if git diff --name-only "origin/${{ github.base_ref }}...HEAD" -- release-plan.yaml | grep -q .; then
+            echo "release_plan_changed=true" >> "$GITHUB_OUTPUT"
+          else
+            echo "release_plan_changed=false" >> "$GITHUB_OUTPUT"
+          fi
+
+      # ── Step 9: Run validation orchestrator ────────────────────────
+      - name: Run validation
+        id: orchestrator
+        env:
+          PYTHONPATH: ${{ github.workspace }}/.tooling
+          PATH_NODE_MODULES: ${{ github.workspace }}/.tooling/validation/node_modules/.bin
+          VALIDATION_REPO_PATH: ${{ github.workspace }}
+          VALIDATION_TOOLING_PATH: ${{ github.workspace }}/.tooling
+          VALIDATION_OUTPUT_DIR: ${{ github.workspace }}/validation-output
+          VALIDATION_REPO_NAME: ${{ github.repository }}
+          VALIDATION_REPO_OWNER: ${{ github.repository_owner }}
+          VALIDATION_EVENT_NAME: ${{ github.event_name }}
+          VALIDATION_REF_NAME: ${{ github.ref_name }}
+          VALIDATION_BASE_REF: ${{ github.base_ref }}
+          VALIDATION_MODE: ${{ inputs.mode }}
+          VALIDATION_PROFILE: ${{ inputs.profile }}
+          VALIDATION_PR_NUMBER: ${{ github.event.pull_request.number }}
+          VALIDATION_RELEASE_PLAN_CHANGED: ${{ steps.detect-changes.outputs.release_plan_changed }}
+          VALIDATION_WORKFLOW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
+          VALIDATION_TOOLING_REF: ${{ steps.resolve-ref.outputs.tooling_checkout_ref }}
+          VALIDATION_COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
+        run: |
+          export PATH="${PATH_NODE_MODULES}:${PATH}"
+          python -m validation.orchestrator
+
+      # ── Step 10: Emit annotations ──────────────────────────────────
+      - name: Emit annotations
+        if: always() && steps.orchestrator.outcome == 'success'
+        run: |
+          if [ -f validation-output/annotations.txt ]; then
+            cat validation-output/annotations.txt
+          fi
+
+      # ── Step 11: Write workflow summary ────────────────────────────
+      - name: Write workflow summary
+        if: always() && steps.orchestrator.outcome == 'success'
+        run: |
+          if [ -f validation-output/summary.md ]; then
+            cat validation-output/summary.md >> "$GITHUB_STEP_SUMMARY"
+          fi
+
+      # ── Step 12: Post PR comment ───────────────────────────────────
+      - name: Post PR comment
+        if: always() && steps.orchestrator.outcome == 'success' && github.event_name == 'pull_request'
+        continue-on-error: true
+        uses: actions/github-script@v8
+        with:
+          script: |
+            const fs = require('fs');
+            const commentPath = 'validation-output/pr-comment.md';
+            if (!fs.existsSync(commentPath)) return;
+            const body = fs.readFileSync(commentPath, 'utf8');
+            const marker = '';
+            const prNumber = context.payload.pull_request.number;
+
+            // Find existing comment with marker
+            const comments = await github.paginate(
+              github.rest.issues.listComments,
+              { owner: context.repo.owner, repo: context.repo.repo, issue_number: prNumber }
+            );
+            const existing = comments.find(c => c.body && c.body.includes(marker));
+
+            if (existing) {
+              await github.rest.issues.updateComment({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                comment_id: existing.id,
+                body: body,
+              });
+              core.info(`Updated existing comment ${existing.id}`);
+            } else {
+              const created = await github.rest.issues.createComment({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                issue_number: prNumber,
+                body: body,
+              });
+              core.info(`Created comment ${created.data.id}`);
+            }
+
+      # ── Step 13: Set commit status ─────────────────────────────────
+      - name: Set commit status
+        if: always() && steps.orchestrator.outcome == 'success'
+        continue-on-error: true
+        uses: actions/github-script@v8
+        with:
+          script: |
+            const fs = require('fs');
+            const statusPath = 'validation-output/commit-status.json';
+            if (!fs.existsSync(statusPath)) return;
+            const payload = JSON.parse(fs.readFileSync(statusPath, 'utf8'));
+            const sha = '${{ github.event.pull_request.head.sha || github.sha }}';
+
+            await github.rest.repos.createCommitStatus({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              sha: sha,
+              state: payload.state,
+              description: payload.description,
+              context: payload.context,
+              target_url: payload.target_url,
+            });
+            core.info(`Commit status: ${payload.state}`);
+
+      # ── Step 14: Upload diagnostics ────────────────────────────────
+      - name: Upload diagnostics
+        if: always() && steps.orchestrator.outcome == 'success'
+        uses: actions/upload-artifact@v4
+        with:
+          name: validation-diagnostics
+          path: validation-output/diagnostics/
+          if-no-files-found: ignore
+          retention-days: 30
+
+      # ── Step 15: Check result ──────────────────────────────────────
+      - name: Check result
+        if: always() && steps.orchestrator.outcome == 'success'
+        run: |
+          if [ -f validation-output/result.json ]; then
+            SHOULD_FAIL=$(python3 -c "import json; r=json.load(open('validation-output/result.json')); print(r.get('should_fail', False))")
+            if [ "$SHOULD_FAIL" = "True" ]; then
+              echo "::error::Validation failed — see summary and annotations above"
+              exit 1
+            fi
+          fi
+
+      # ── Fallback: orchestrator infrastructure error ────────────────
+      - name: Report orchestrator failure
+        if: always() && steps.orchestrator.outcome == 'failure'
+        run: |
+          echo "## CAMARA Validation" >> "$GITHUB_STEP_SUMMARY"
+          echo "" >> "$GITHUB_STEP_SUMMARY"
+          echo "**Error**: The validation orchestrator failed with an infrastructure error." >> "$GITHUB_STEP_SUMMARY"
+          echo "Check the workflow logs for details." >> "$GITHUB_STEP_SUMMARY"
+          exit 1
diff --git a/.gitignore b/.gitignore
index e2992cc9..138e4887 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,6 +11,9 @@ __pycache__/
 dist/
 build/
 
+# Node
+node_modules/
+
 # Virtual environments
 venv/
 .venv/
diff --git a/validation/orchestrator.py b/validation/orchestrator.py
new file mode 100644
index 00000000..bd3c714d
--- /dev/null
+++ b/validation/orchestrator.py
@@ -0,0 +1,494 @@
+"""CAMARA validation framework orchestrator.
+
+Chains the full validation pipeline:
+
+    config gate -> context builder -> engines -> post-filter -> output
+
+Invoked once from the reusable workflow as ``python -m validation.orchestrator``.
+All inputs arrive via ``VALIDATION_*`` environment variables set by the workflow.
+Output files are written to ``$VALIDATION_OUTPUT_DIR`` for the workflow to read
+and post to GitHub surfaces (annotations, PR comments, commit status, artifacts).
+
+No GitHub API calls are made from Python — this keeps the orchestrator
+independently testable.
+"""
+
+from __future__ import annotations
+
+import dataclasses
+import json
+import logging
+import os
+import sys
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple
+
+from validation.config.config_gate import StageGateResult, resolve_stage_from_files
+from validation.context import ValidationContext, build_validation_context
+from validation.engines import (
+    run_gherkin_engine,
+    run_python_engine,
+    run_spectral_engine,
+    run_yamllint_engine,
+)
+from validation.output import (
+    generate_annotations,
+    generate_commit_status,
+    generate_pr_comment,
+    generate_workflow_summary,
+    write_diagnostics,
+)
+from validation.postfilter.engine import PostFilterResult, run_post_filter
+
+# ---------------------------------------------------------------------------
+# Logging — structured output so workflow logs are readable
+# ---------------------------------------------------------------------------
+
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(levelname)s %(name)s: %(message)s",
+)
+logger = logging.getLogger("validation.orchestrator")
+
+# ---------------------------------------------------------------------------
+# Exit codes
+# ---------------------------------------------------------------------------
+
+EXIT_OK = 0
+EXIT_INFRA_ERROR = 2
+
+
+# ---------------------------------------------------------------------------
+# Environment parsing
+# ---------------------------------------------------------------------------
+
+
+@dataclasses.dataclass
+class OrchestratorArgs:
+    """Parsed orchestrator inputs."""
+
+    repo_path: Path
+    tooling_path: Path
+    output_dir: Path
+
+    repo_name: str  # e.g. "camaraproject/QualityOnDemand"
+    repo_owner: str  # e.g. "camaraproject"
+    event_name: str  # e.g. "pull_request", "workflow_dispatch"
+    ref_name: str  # checked-out branch
+    base_ref: str  # PR target branch (empty for non-PR)
+
+    mode: str  # "" or "pre-snapshot"
+    profile: str  # "" or "advisory"/"standard"/"strict"
+    pr_number: Optional[int]
+    release_plan_changed: Optional[bool]
+
+    workflow_run_url: str
+    tooling_ref: str
+    commit_sha: str
+
+
+def _env(name: str, default: str = "") -> str:
+    """Read a VALIDATION_* environment variable."""
+    return os.environ.get(f"VALIDATION_{name}", default)
+
+
+def _env_optional_int(name: str) -> Optional[int]:
+    """Read an env var as optional int."""
+    raw = _env(name)
+    if not raw:
+        return None
+    try:
+        return int(raw)
+    except ValueError:
+        return None
+
+
+def _env_optional_bool(name: str) -> Optional[bool]:
+    """Read an env var as optional bool."""
+    raw = _env(name).lower()
+    if raw in ("true", "1", "yes"):
+        return True
+    if raw in ("false", "0", "no"):
+        return False
+    return None
+
+
+def parse_args() -> OrchestratorArgs:
+    """Parse all inputs from VALIDATION_* environment variables."""
+    return OrchestratorArgs(
+        repo_path=Path(_env("REPO_PATH", ".")),
+        tooling_path=Path(_env("TOOLING_PATH", ".tooling")),
+        output_dir=Path(_env("OUTPUT_DIR", "validation-output")),
+        repo_name=_env("REPO_NAME"),
+        repo_owner=_env("REPO_OWNER"),
+        event_name=_env("EVENT_NAME"),
+        ref_name=_env("REF_NAME"),
+        base_ref=_env("BASE_REF"),
+        mode=_env("MODE"),
+        profile=_env("PROFILE"),
+        pr_number=_env_optional_int("PR_NUMBER"),
+        release_plan_changed=_env_optional_bool("RELEASE_PLAN_CHANGED"),
+        workflow_run_url=_env("WORKFLOW_RUN_URL"),
+        tooling_ref=_env("TOOLING_REF"),
+        commit_sha=_env("COMMIT_SHA"),
+    )
+
+
+# ---------------------------------------------------------------------------
+# Path resolution
+# ---------------------------------------------------------------------------
+
+
+@dataclasses.dataclass
+class ToolingPaths:
+    """Resolved paths within the tooling checkout."""
+
+    config_file: Path
+    config_schema: Path
+    release_plan_schema: Path
+    linting_config_dir: Path
+    rules_dir: Path
+
+
+def resolve_tooling_paths(tooling_path: Path) -> ToolingPaths:
+    """Build all paths relative to the tooling checkout."""
+    return ToolingPaths(
+        config_file=tooling_path / "validation" / "config" / "validation-config.yaml",
+        config_schema=tooling_path / "validation" / "schemas" / "validation-config-schema.yaml",
+        release_plan_schema=tooling_path / "validation" / "schemas" / "release-plan-schema.yaml",
+        linting_config_dir=tooling_path / "linting" / "config",
+        rules_dir=tooling_path / "validation" / "rules",
+    )
+
+
+# ---------------------------------------------------------------------------
+# File discovery
+# ---------------------------------------------------------------------------
+
+
+def discover_spec_files(repo_path: Path) -> List[Path]:
+    """Find OpenAPI spec files under ``code/API_definitions/``."""
+    return sorted(repo_path.glob("code/API_definitions/*.yaml"))
+
+
+def discover_test_files(repo_path: Path) -> List[Path]:
+    """Find Gherkin test files under ``code/Test_definitions/``."""
+    return sorted(repo_path.glob("code/Test_definitions/**/*.feature"))
+
+
+# ---------------------------------------------------------------------------
+# Engine orchestration
+# ---------------------------------------------------------------------------
+
+
+def run_engines(
+    repo_path: Path,
+    paths: ToolingPaths,
+    context: Any,  # ValidationContext
+    test_files: List[Path],
+) -> Tuple[List[dict], Dict[str, str]]:
+    """Run all validation engines and collect findings.
+
+    Returns:
+        Tuple of (all_findings, engine_statuses).
+    """
+    all_findings: List[dict] = []
+    engine_statuses: Dict[str, str] = {}
+    is_release_review = getattr(context, "is_release_review_pr", False)
+
+    # --- yamllint ---
+    if is_release_review:
+        engine_statuses["yamllint"] = "skipped (release review PR)"
+        logger.info("yamllint: skipped (release review PR)")
+    else:
+        try:
+            yamllint_config = paths.linting_config_dir / ".yamllint.yaml"
+            findings = run_yamllint_engine(
+                repo_path=repo_path,
+                config_path=yamllint_config,
+            )
+            all_findings.extend(findings)
+            engine_statuses["yamllint"] = f"{len(findings)} finding(s)"
+            logger.info("yamllint: %d finding(s)", len(findings))
+        except Exception as exc:
+            engine_statuses["yamllint"] = f"error: {exc}"
+            logger.error("yamllint failed: %s", exc)
+
+    # --- Spectral ---
+    if is_release_review:
+        engine_statuses["spectral"] = "skipped (release review PR)"
+        logger.info("Spectral: skipped (release review PR)")
+    else:
+        try:
+            commonalities_release = getattr(context, "commonalities_release", None)
+            findings = run_spectral_engine(
+                repo_path=repo_path,
+                config_dir=paths.linting_config_dir,
+                commonalities_release=commonalities_release,
+            )
+            all_findings.extend(findings)
+            engine_statuses["spectral"] = f"{len(findings)} finding(s)"
+            logger.info("Spectral: %d finding(s)", len(findings))
+        except Exception as exc:
+            engine_statuses["spectral"] = f"error: {exc}"
+            logger.error("Spectral failed: %s", exc)
+
+    # --- Python checks ---
+    try:
+        findings = run_python_engine(
+            repo_path=repo_path,
+            context=context,
+        )
+        all_findings.extend(findings)
+        engine_statuses["python"] = f"{len(findings)} finding(s)"
+        logger.info("Python checks: %d finding(s)", len(findings))
+    except Exception as exc:
+        engine_statuses["python"] = f"error: {exc}"
+        logger.error("Python checks failed: %s", exc)
+
+    # --- gherkin-lint ---
+    if not test_files:
+        engine_statuses["gherkin"] = "skipped (no test files)"
+        logger.info("gherkin-lint: skipped (no test files)")
+    else:
+        try:
+            gherkin_config = paths.linting_config_dir / ".gherkin-lintrc"
+            findings = run_gherkin_engine(
+                repo_path=repo_path,
+                config_path=gherkin_config,
+            )
+            all_findings.extend(findings)
+            engine_statuses["gherkin"] = f"{len(findings)} finding(s)"
+            logger.info("gherkin-lint: %d finding(s)", len(findings))
+        except Exception as exc:
+            engine_statuses["gherkin"] = f"error: {exc}"
+            logger.error("gherkin-lint failed: %s", exc)
+
+    # --- Bundling (placeholder for WP-06.08) ---
+    engine_statuses["bundling"] = "not yet implemented"
+
+    return all_findings, engine_statuses
+
+
+# ---------------------------------------------------------------------------
+# Output writing
+# ---------------------------------------------------------------------------
+
+
+def write_result_json(output_dir: Path, result: str, summary: str) -> None:
+    """Write result.json with overall verdict and should_fail flag."""
+    payload = {
+        "result": result,
+        "summary": summary,
+        "should_fail": result in ("fail", "error"),
+    }
+    (output_dir / "result.json").write_text(
+        json.dumps(payload, indent=2) + "\n"
+    )
+
+
+def write_skip_output(output_dir: Path, reason: str) -> None:
+    """Write minimal output files for a skipped run."""
+    output_dir.mkdir(parents=True, exist_ok=True)
+    # Summary
+    (output_dir / "summary.md").write_text(
+        f"## CAMARA Validation\n\n{reason}\n"
+    )
+    # Result
+    write_result_json(output_dir, "skipped", reason)
+    logger.info("Skipped: %s", reason)
+
+
+def write_outputs(
+    post_filter_result: Any,  # PostFilterResult
+    context: Any,  # ValidationContext
+    output_dir: Path,
+    engine_statuses: Dict[str, str],
+    commit_sha: str,
+) -> None:
+    """Write all output files for the workflow to consume."""
+    output_dir.mkdir(parents=True, exist_ok=True)
+
+    # --- Annotations ---
+    annotation_result = generate_annotations(post_filter_result)
+    if annotation_result.commands:
+        (output_dir / "annotations.txt").write_text(
+            "\n".join(annotation_result.commands) + "\n"
+        )
+    logger.info(
+        "Annotations: %d emitted (of %d total, truncated=%s)",
+        annotation_result.annotations_emitted,
+        annotation_result.total_findings,
+        annotation_result.truncated,
+    )
+
+    # --- Workflow summary ---
+    summary_result = generate_workflow_summary(
+        post_filter_result,
+        context,
+        engine_statuses=engine_statuses,
+        commit_sha=commit_sha,
+    )
+    (output_dir / "summary.md").write_text(summary_result.markdown)
+    if summary_result.truncated:
+        logger.info("Summary truncated: %s", summary_result.truncation_note)
+
+    # --- PR comment ---
+    pr_comment = generate_pr_comment(post_filter_result, context)
+    (output_dir / "pr-comment.md").write_text(pr_comment)
+
+    # --- Commit status ---
+    status_payload = generate_commit_status(post_filter_result, context)
+    (output_dir / "commit-status.json").write_text(
+        json.dumps(dataclasses.asdict(status_payload), indent=2) + "\n"
+    )
+
+    # --- Diagnostics ---
+    diagnostics_dir = output_dir / "diagnostics"
+    write_diagnostics(
+        post_filter_result,
+        context,
+        diagnostics_dir,
+        engine_reports=engine_statuses,
+    )
+
+    # --- Result ---
+    write_result_json(
+        output_dir,
+        post_filter_result.result,
+        post_filter_result.summary,
+    )
+
+
+# ---------------------------------------------------------------------------
+# Main pipeline
+# ---------------------------------------------------------------------------
+
+
+def main() -> int:
+    """Run the full validation pipeline.
+
+    Returns:
+        Exit code: 0 on success (even when validation fails), 2 on
+        infrastructure error.
+    """
+    args = parse_args()
+    logger.info(
+        "Starting validation: repo=%s event=%s ref=%s",
+        args.repo_name,
+        args.event_name,
+        args.ref_name,
+    )
+
+    # Resolve tooling paths
+    paths = resolve_tooling_paths(args.tooling_path)
+
+    # ------------------------------------------------------------------
+    # Step 1: Config gate
+    # ------------------------------------------------------------------
+    stage_result = resolve_stage_from_files(
+        config_path=paths.config_file,
+        schema_path=paths.config_schema,
+        repo_full_name=args.repo_name,
+        repo_owner=args.repo_owner,
+        trigger_type=args.event_name,
+    )
+    logger.info(
+        "Config gate: stage=%s continue=%s fork=%s override=%s",
+        stage_result.stage,
+        stage_result.should_continue,
+        stage_result.is_fork,
+        stage_result.fork_override_applied,
+    )
+    if not stage_result.should_continue:
+        write_skip_output(args.output_dir, stage_result.reason)
+        return EXIT_OK
+
+    # ------------------------------------------------------------------
+    # Step 2: Build validation context
+    # ------------------------------------------------------------------
+    context = build_validation_context(
+        repo_name=args.repo_name,
+        event_name=args.event_name,
+        ref_name=args.ref_name,
+        base_ref=args.base_ref,
+        mode=args.mode,
+        profile_override=args.profile,
+        stage=stage_result.stage,
+        pr_number=args.pr_number,
+        release_plan_changed=args.release_plan_changed,
+        repo_path=args.repo_path,
+        release_plan_schema_path=paths.release_plan_schema,
+        workflow_run_url=args.workflow_run_url,
+        tooling_ref=args.tooling_ref,
+    )
+    logger.info(
+        "Context: branch=%s trigger=%s profile=%s release_review=%s apis=%d",
+        context.branch_type,
+        context.trigger_type,
+        context.profile,
+        context.is_release_review_pr,
+        len(context.apis),
+    )
+
+    # ------------------------------------------------------------------
+    # Step 3: Discover files
+    # ------------------------------------------------------------------
+    spec_files = discover_spec_files(args.repo_path)
+    test_files = discover_test_files(args.repo_path)
+    logger.info(
+        "Files: %d spec(s), %d test(s)",
+        len(spec_files),
+        len(test_files),
+    )
+
+    # ------------------------------------------------------------------
+    # Step 4: Run engines
+    # ------------------------------------------------------------------
+    all_findings, engine_statuses = run_engines(
+        repo_path=args.repo_path,
+        paths=paths,
+        context=context,
+        test_files=test_files,
+    )
+    logger.info("Total raw findings: %d", len(all_findings))
+
+    # ------------------------------------------------------------------
+    # Step 5: Post-filter
+    # ------------------------------------------------------------------
+    post_filter_result = run_post_filter(
+        findings=all_findings,
+        context=context,
+        rules_dir=paths.rules_dir,
+    )
+    logger.info(
+        "Post-filter: result=%s, %d finding(s) after filter",
+        post_filter_result.result,
+        len(post_filter_result.findings),
+    )
+
+    # ------------------------------------------------------------------
+    # Step 6: Write outputs
+    # ------------------------------------------------------------------
+    write_outputs(
+        post_filter_result=post_filter_result,
+        context=context,
+        output_dir=args.output_dir,
+        engine_statuses=engine_statuses,
+        commit_sha=args.commit_sha,
+    )
+    logger.info("Output written to %s", args.output_dir)
+
+    return EXIT_OK
+
+
+# ---------------------------------------------------------------------------
+# Entry point
+# ---------------------------------------------------------------------------
+
+if __name__ == "__main__":
+    try:
+        sys.exit(main())
+    except Exception:
+        logger.exception("Orchestrator infrastructure error")
+        sys.exit(EXIT_INFRA_ERROR)
diff --git a/validation/package-lock.json b/validation/package-lock.json
new file mode 100644
index 00000000..b598bf36
--- /dev/null
+++ b/validation/package-lock.json
@@ -0,0 +1,3474 @@
+{
+  "name": "camara-validation-tools",
+  "lockfileVersion": 3,
+  "requires": true,
+  "packages": {
+    "": {
+      "name": "camara-validation-tools",
+      "dependencies": {
+        "@stoplight/spectral-cli": "^6.14.0",
+        "gherkin-lint": "^4.2.4"
+      }
+    },
+    "node_modules/@asyncapi/specs": {
+      "version": "6.11.1",
+      "resolved": "https://registry.npmjs.org/@asyncapi/specs/-/specs-6.11.1.tgz",
+      "integrity": "sha512-A3WBLqAKGoJ2+6FWFtpjBlCQ1oFCcs4GxF7zsIGvNqp/klGUHjlA3aAcZ9XMMpLGE8zPeYDz2x9FmO6DSuKraQ==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@types/json-schema": "^7.0.11"
+      }
+    },
+    "node_modules/@jsep-plugin/assignment": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/@jsep-plugin/assignment/-/assignment-1.3.0.tgz",
+      "integrity": "sha512-VVgV+CXrhbMI3aSusQyclHkenWSAm95WaiKrMxRFam3JSUiIaQjoMIw2sEs/OX4XifnqeQUN4DYbJjlA8EfktQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 10.16.0"
+      },
+      "peerDependencies": {
+        "jsep": "^0.4.0||^1.0.0"
+      }
+    },
+    "node_modules/@jsep-plugin/regex": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/@jsep-plugin/regex/-/regex-1.0.4.tgz",
+      "integrity": "sha512-q7qL4Mgjs1vByCaTnDFcBnV9HS7GVPJX5vyVoCgZHNSC9rjwIlmbXG5sUuorR5ndfHAIlJ8pVStxvjXHbNvtUg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 10.16.0"
+      },
+      "peerDependencies": {
+        "jsep": "^0.4.0||^1.0.0"
+      }
+    },
+    "node_modules/@jsep-plugin/ternary": {
+      "version": "1.1.4",
+      "resolved": "https://registry.npmjs.org/@jsep-plugin/ternary/-/ternary-1.1.4.tgz",
+      "integrity": "sha512-ck5wiqIbqdMX6WRQztBL7ASDty9YLgJ3sSAK5ZpBzXeySvFGCzIvM6UiAI4hTZ22fEcYQVV/zhUbNscggW+Ukg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 10.16.0"
+      },
+      "peerDependencies": {
+        "jsep": "^0.4.0||^1.0.0"
+      }
+    },
+    "node_modules/@nodelib/fs.scandir": {
+      "version": "2.1.5",
+      "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
+      "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
+      "license": "MIT",
+      "dependencies": {
+        "@nodelib/fs.stat": "2.0.5",
+        "run-parallel": "^1.1.9"
+      },
+      "engines": {
+        "node": ">= 8"
+      }
+    },
+    "node_modules/@nodelib/fs.stat": {
+      "version": "2.0.5",
+      "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
+      "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 8"
+      }
+    },
+    "node_modules/@nodelib/fs.walk": {
+      "version": "1.2.8",
+      "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
+      "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
+      "license": "MIT",
+      "dependencies": {
+        "@nodelib/fs.scandir": "2.1.5",
+        "fastq": "^1.6.0"
+      },
+      "engines": {
+        "node": ">= 8"
+      }
+    },
+    "node_modules/@protobufjs/aspromise": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
+      "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/base64": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
+      "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/codegen": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
+      "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/eventemitter": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
+      "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/fetch": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
+      "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==",
+      "license": "BSD-3-Clause",
+      "dependencies": {
+        "@protobufjs/aspromise": "^1.1.1",
+        "@protobufjs/inquire": "^1.1.0"
+      }
+    },
+    "node_modules/@protobufjs/float": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
+      "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/inquire": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
+      "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/path": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
+      "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/pool": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
+      "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/utf8": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
+      "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@rollup/plugin-commonjs": {
+      "version": "22.0.2",
+      "resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-22.0.2.tgz",
+      "integrity": "sha512-//NdP6iIwPbMTcazYsiBMbJW7gfmpHom33u1beiIoHDEM0Q9clvtQB1T0efvMqHeKsGohiHo97BCPCkBXdscwg==",
+      "license": "MIT",
+      "dependencies": {
+        "@rollup/pluginutils": "^3.1.0",
+        "commondir": "^1.0.1",
+        "estree-walker": "^2.0.1",
+        "glob": "^7.1.6",
+        "is-reference": "^1.2.1",
+        "magic-string": "^0.25.7",
+        "resolve": "^1.17.0"
+      },
+      "engines": {
+        "node": ">= 12.0.0"
+      },
+      "peerDependencies": {
+        "rollup": "^2.68.0"
+      }
+    },
+    "node_modules/@rollup/pluginutils": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-3.1.0.tgz",
+      "integrity": "sha512-GksZ6pr6TpIjHm8h9lSQ8pi8BE9VeubNT0OMJ3B5uZJ8pz73NPiqOtCog/x2/QzM1ENChPKxMDhiQuRHsqc+lg==",
+      "license": "MIT",
+      "dependencies": {
+        "@types/estree": "0.0.39",
+        "estree-walker": "^1.0.1",
+        "picomatch": "^2.2.2"
+      },
+      "engines": {
+        "node": ">= 8.0.0"
+      },
+      "peerDependencies": {
+        "rollup": "^1.20.0||^2.0.0"
+      }
+    },
+    "node_modules/@rollup/pluginutils/node_modules/estree-walker": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-1.0.1.tgz",
+      "integrity": "sha512-1fMXF3YP4pZZVozF8j/ZLfvnR8NSIljt56UhbZ5PeeDmmGHpgpdwQt7ITlGvYaQukCvuBRMLEiKiYC+oeIg4cg==",
+      "license": "MIT"
+    },
+    "node_modules/@stoplight/better-ajv-errors": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/@stoplight/better-ajv-errors/-/better-ajv-errors-1.0.3.tgz",
+      "integrity": "sha512-0p9uXkuB22qGdNfy3VeEhxkU5uwvp/KrBTAbrLBURv6ilxIVwanKwjMc41lQfIVgPGcOkmLbTolfFrSsueu7zA==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "jsonpointer": "^5.0.0",
+        "leven": "^3.1.0"
+      },
+      "engines": {
+        "node": "^12.20 || >= 14.13"
+      },
+      "peerDependencies": {
+        "ajv": ">=8"
+      }
+    },
+    "node_modules/@stoplight/json": {
+      "version": "3.21.7",
+      "resolved": "https://registry.npmjs.org/@stoplight/json/-/json-3.21.7.tgz",
+      "integrity": "sha512-xcJXgKFqv/uCEgtGlPxy3tPA+4I+ZI4vAuMJ885+ThkTHFVkC+0Fm58lA9NlsyjnkpxFh4YiQWpH+KefHdbA0A==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@stoplight/ordered-object-literal": "^1.0.3",
+        "@stoplight/path": "^1.3.2",
+        "@stoplight/types": "^13.6.0",
+        "jsonc-parser": "~2.2.1",
+        "lodash": "^4.17.21",
+        "safe-stable-stringify": "^1.1"
+      },
+      "engines": {
+        "node": ">=8.3.0"
+      }
+    },
+    "node_modules/@stoplight/json-ref-readers": {
+      "version": "1.2.2",
+      "resolved": "https://registry.npmjs.org/@stoplight/json-ref-readers/-/json-ref-readers-1.2.2.tgz",
+      "integrity": "sha512-nty0tHUq2f1IKuFYsLM4CXLZGHdMn+X/IwEUIpeSOXt0QjMUbL0Em57iJUDzz+2MkWG83smIigNZ3fauGjqgdQ==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "node-fetch": "^2.6.0",
+        "tslib": "^1.14.1"
+      },
+      "engines": {
+        "node": ">=8.3.0"
+      }
+    },
+    "node_modules/@stoplight/json-ref-readers/node_modules/tslib": {
+      "version": "1.14.1",
+      "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz",
+      "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==",
+      "license": "0BSD"
+    },
+    "node_modules/@stoplight/json-ref-resolver": {
+      "version": "3.1.6",
+      "resolved": "https://registry.npmjs.org/@stoplight/json-ref-resolver/-/json-ref-resolver-3.1.6.tgz",
+      "integrity": "sha512-YNcWv3R3n3U6iQYBsFOiWSuRGE5su1tJSiX6pAPRVk7dP0L7lqCteXGzuVRQ0gMZqUl8v1P0+fAKxF6PLo9B5A==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@stoplight/json": "^3.21.0",
+        "@stoplight/path": "^1.3.2",
+        "@stoplight/types": "^12.3.0 || ^13.0.0",
+        "@types/urijs": "^1.19.19",
+        "dependency-graph": "~0.11.0",
+        "fast-memoize": "^2.5.2",
+        "immer": "^9.0.6",
+        "lodash": "^4.17.21",
+        "tslib": "^2.6.0",
+        "urijs": "^1.19.11"
+      },
+      "engines": {
+        "node": ">=8.3.0"
+      }
+    },
+    "node_modules/@stoplight/ordered-object-literal": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/@stoplight/ordered-object-literal/-/ordered-object-literal-1.0.5.tgz",
+      "integrity": "sha512-COTiuCU5bgMUtbIFBuyyh2/yVVzlr5Om0v5utQDgBCuQUOPgU1DwoffkTfg4UBQOvByi5foF4w4T+H9CoRe5wg==",
+      "license": "Apache-2.0",
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/@stoplight/path": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/@stoplight/path/-/path-1.3.2.tgz",
+      "integrity": "sha512-lyIc6JUlUA8Ve5ELywPC8I2Sdnh1zc1zmbYgVarhXIp9YeAB0ReeqmGEOWNtlHkbP2DAA1AL65Wfn2ncjK/jtQ==",
+      "license": "Apache-2.0",
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/@stoplight/spectral-cli": {
+      "version": "6.15.0",
+      "resolved": "https://registry.npmjs.org/@stoplight/spectral-cli/-/spectral-cli-6.15.0.tgz",
+      "integrity": "sha512-FVeQIuqQQnnLfa8vy+oatTKUve7uU+3SaaAfdjpX/B+uB1NcfkKRJYhKT9wMEehDRaMPL5AKIRYMCFerdEbIpw==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@stoplight/json": "~3.21.0",
+        "@stoplight/path": "1.3.2",
+        "@stoplight/spectral-core": "^1.19.5",
+        "@stoplight/spectral-formatters": "^1.4.1",
+        "@stoplight/spectral-parsers": "^1.0.4",
+        "@stoplight/spectral-ref-resolver": "^1.0.4",
+        "@stoplight/spectral-ruleset-bundler": "^1.6.0",
+        "@stoplight/spectral-ruleset-migrator": "^1.11.0",
+        "@stoplight/spectral-rulesets": ">=1",
+        "@stoplight/spectral-runtime": "^1.1.2",
+        "@stoplight/types": "^13.6.0",
+        "chalk": "4.1.2",
+        "fast-glob": "~3.2.12",
+        "hpagent": "~1.2.0",
+        "lodash": "~4.17.21",
+        "pony-cause": "^1.1.1",
+        "stacktracey": "^2.1.8",
+        "tslib": "^2.8.1",
+        "yargs": "~17.7.2"
+      },
+      "bin": {
+        "spectral": "dist/index.js"
+      },
+      "engines": {
+        "node": "^16.20 || ^18.18 || >= 20.17"
+      }
+    },
+    "node_modules/@stoplight/spectral-core": {
+      "version": "1.21.0",
+      "resolved": "https://registry.npmjs.org/@stoplight/spectral-core/-/spectral-core-1.21.0.tgz",
+      "integrity": "sha512-oj4e/FrDLUhBRocIW+lRMKlJ/q/rDZw61HkLbTFsdMd+f/FTkli2xHNB1YC6n1mrMKjjvy7XlUuFkC7XxtgbWw==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@stoplight/better-ajv-errors": "1.0.3",
+        "@stoplight/json": "~3.21.0",
+        "@stoplight/path": "1.3.2",
+        "@stoplight/spectral-parsers": "^1.0.0",
+        "@stoplight/spectral-ref-resolver": "^1.0.4",
+        "@stoplight/spectral-runtime": "^1.1.2",
+        "@stoplight/types": "~13.6.0",
+        "@types/es-aggregate-error": "^1.0.2",
+        "@types/json-schema": "^7.0.11",
+        "ajv": "^8.17.1",
+        "ajv-errors": "~3.0.0",
+        "ajv-formats": "~2.1.1",
+        "es-aggregate-error": "^1.0.7",
+        "jsonpath-plus": "^10.3.0",
+        "lodash": "~4.17.23",
+        "lodash.topath": "^4.5.2",
+        "minimatch": "3.1.2",
+        "nimma": "0.2.3",
+        "pony-cause": "^1.1.1",
+        "simple-eval": "1.0.1",
+        "tslib": "^2.8.1"
+      },
+      "engines": {
+        "node": "^16.20 || ^18.18 || >= 20.17"
+      }
+    },
+    "node_modules/@stoplight/spectral-core/node_modules/@stoplight/types": {
+      "version": "13.6.0",
+      "resolved": "https://registry.npmjs.org/@stoplight/types/-/types-13.6.0.tgz",
+      "integrity": "sha512-dzyuzvUjv3m1wmhPfq82lCVYGcXG0xUYgqnWfCq3PCVR4BKFhjdkHrnJ+jIDoMKvXb05AZP/ObQF6+NpDo29IQ==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@types/json-schema": "^7.0.4",
+        "utility-types": "^3.10.0"
+      },
+      "engines": {
+        "node": "^12.20 || >=14.13"
+      }
+    },
+    "node_modules/@stoplight/spectral-formats": {
+      "version": "1.8.2",
+      "resolved": "https://registry.npmjs.org/@stoplight/spectral-formats/-/spectral-formats-1.8.2.tgz",
+      "integrity": "sha512-c06HB+rOKfe7tuxg0IdKDEA5XnjL2vrn/m/OVIIxtINtBzphZrOgtRn7epQ5bQF5SWp84Ue7UJWaGgDwVngMFw==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@stoplight/json": "^3.17.0",
+        "@stoplight/spectral-core": "^1.19.2",
+        "@types/json-schema": "^7.0.7",
+        "tslib": "^2.8.1"
+      },
+      "engines": {
+        "node": "^16.20 || ^18.18 || >= 20.17"
+      }
+    },
+    "node_modules/@stoplight/spectral-formatters": {
+      "version": "1.5.0",
+      "resolved": "https://registry.npmjs.org/@stoplight/spectral-formatters/-/spectral-formatters-1.5.0.tgz",
+      "integrity": "sha512-lR7s41Z00Mf8TdXBBZQ3oi2uR8wqAtR6NO0KA8Ltk4FSpmAy0i6CKUmJG9hZQjanTnGmwpQkT/WP66p1GY3iXA==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@stoplight/path": "^1.3.2",
+        "@stoplight/spectral-core": "^1.19.4",
+        "@stoplight/spectral-runtime": "^1.1.2",
+        "@stoplight/types": "^13.15.0",
+        "@types/markdown-escape": "^1.1.3",
+        "chalk": "4.1.2",
+        "cliui": "7.0.4",
+        "lodash": "^4.17.21",
+        "markdown-escape": "^2.0.0",
+        "node-sarif-builder": "^2.0.3",
+        "strip-ansi": "6.0",
+        "text-table": "^0.2.0",
+        "tslib": "^2.8.1"
+      },
+      "engines": {
+        "node": "^16.20 || ^18.18 || >= 20.17"
+      }
+    },
+    "node_modules/@stoplight/spectral-functions": {
+      "version": "1.10.1",
+      "resolved": "https://registry.npmjs.org/@stoplight/spectral-functions/-/spectral-functions-1.10.1.tgz",
+      "integrity": "sha512-obu8ZfoHxELOapfGsCJixKZXZcffjg+lSoNuttpmUFuDzVLT3VmH8QkPXfOGOL5Pz80BR35ClNAToDkdnYIURg==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@stoplight/better-ajv-errors": "1.0.3",
+        "@stoplight/json": "^3.17.1",
+        "@stoplight/spectral-core": "^1.19.4",
+        "@stoplight/spectral-formats": "^1.8.1",
+        "@stoplight/spectral-runtime": "^1.1.2",
+        "ajv": "^8.17.1",
+        "ajv-draft-04": "~1.0.0",
+        "ajv-errors": "~3.0.0",
+        "ajv-formats": "~2.1.1",
+        "lodash": "~4.17.21",
+        "tslib": "^2.8.1"
+      },
+      "engines": {
+        "node": "^16.20 || ^18.18 || >= 20.17"
+      }
+    },
+    "node_modules/@stoplight/spectral-parsers": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/@stoplight/spectral-parsers/-/spectral-parsers-1.0.5.tgz",
+      "integrity": "sha512-ANDTp2IHWGvsQDAY85/jQi9ZrF4mRrA5bciNHX+PUxPr4DwS6iv4h+FVWJMVwcEYdpyoIdyL+SRmHdJfQEPmwQ==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@stoplight/json": "~3.21.0",
+        "@stoplight/types": "^14.1.1",
+        "@stoplight/yaml": "~4.3.0",
+        "tslib": "^2.8.1"
+      },
+      "engines": {
+        "node": "^16.20 || ^18.18 || >= 20.17"
+      }
+    },
+    "node_modules/@stoplight/spectral-parsers/node_modules/@stoplight/types": {
+      "version": "14.1.1",
+      "resolved": "https://registry.npmjs.org/@stoplight/types/-/types-14.1.1.tgz",
+      "integrity": "sha512-/kjtr+0t0tjKr+heVfviO9FrU/uGLc+QNX3fHJc19xsCNYqU7lVhaXxDmEID9BZTjG+/r9pK9xP/xU02XGg65g==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@types/json-schema": "^7.0.4",
+        "utility-types": "^3.10.0"
+      },
+      "engines": {
+        "node": "^12.20 || >=14.13"
+      }
+    },
+    "node_modules/@stoplight/spectral-ref-resolver": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/@stoplight/spectral-ref-resolver/-/spectral-ref-resolver-1.0.5.tgz",
+      "integrity": "sha512-gj3TieX5a9zMW29z3mBlAtDOCgN3GEc1VgZnCVlr5irmR4Qi5LuECuFItAq4pTn5Zu+sW5bqutsCH7D4PkpyAA==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@stoplight/json-ref-readers": "1.2.2",
+        "@stoplight/json-ref-resolver": "~3.1.6",
+        "@stoplight/spectral-runtime": "^1.1.2",
+        "dependency-graph": "0.11.0",
+        "tslib": "^2.8.1"
+      },
+      "engines": {
+        "node": "^16.20 || ^18.18 || >= 20.17"
+      }
+    },
+    "node_modules/@stoplight/spectral-ruleset-bundler": {
+      "version": "1.6.3",
+      "resolved": "https://registry.npmjs.org/@stoplight/spectral-ruleset-bundler/-/spectral-ruleset-bundler-1.6.3.tgz",
+      "integrity": "sha512-AQFRO6OCKg8SZJUupnr3+OzI1LrMieDTEUHsYgmaRpNiDRPvzImE3bzM1KyQg99q58kTQyZ8kpr7sG8Lp94RRA==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@rollup/plugin-commonjs": "~22.0.2",
+        "@stoplight/path": "1.3.2",
+        "@stoplight/spectral-core": ">=1",
+        "@stoplight/spectral-formats": "^1.8.1",
+        "@stoplight/spectral-functions": ">=1",
+        "@stoplight/spectral-parsers": ">=1",
+        "@stoplight/spectral-ref-resolver": "^1.0.4",
+        "@stoplight/spectral-ruleset-migrator": "^1.9.6",
+        "@stoplight/spectral-rulesets": ">=1",
+        "@stoplight/spectral-runtime": "^1.1.2",
+        "@stoplight/types": "^13.6.0",
+        "@types/node": "*",
+        "pony-cause": "1.1.1",
+        "rollup": "~2.79.2",
+        "tslib": "^2.8.1",
+        "validate-npm-package-name": "3.0.0"
+      },
+      "engines": {
+        "node": "^16.20 || ^18.18 || >= 20.17"
+      }
+    },
+    "node_modules/@stoplight/spectral-ruleset-migrator": {
+      "version": "1.11.3",
+      "resolved": "https://registry.npmjs.org/@stoplight/spectral-ruleset-migrator/-/spectral-ruleset-migrator-1.11.3.tgz",
+      "integrity": "sha512-+9Y1zFxYmSsneT5FPkgS1IlRQs0VgtdMT77f5xf6vzje9ezyhfs7oXwbZOCSZjEJew8iVZBKQtiOFndcBrdtqg==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@stoplight/json": "~3.21.0",
+        "@stoplight/ordered-object-literal": "~1.0.4",
+        "@stoplight/path": "1.3.2",
+        "@stoplight/spectral-functions": "^1.9.1",
+        "@stoplight/spectral-runtime": "^1.1.2",
+        "@stoplight/types": "^13.6.0",
+        "@stoplight/yaml": "~4.2.3",
+        "@types/node": "*",
+        "ajv": "^8.17.1",
+        "ast-types": "0.14.2",
+        "astring": "^1.9.0",
+        "reserved": "0.1.2",
+        "tslib": "^2.8.1",
+        "validate-npm-package-name": "3.0.0"
+      },
+      "engines": {
+        "node": "^16.20 || ^18.18 || >= 20.17"
+      }
+    },
+    "node_modules/@stoplight/spectral-ruleset-migrator/node_modules/@stoplight/yaml": {
+      "version": "4.2.3",
+      "resolved": "https://registry.npmjs.org/@stoplight/yaml/-/yaml-4.2.3.tgz",
+      "integrity": "sha512-Mx01wjRAR9C7yLMUyYFTfbUf5DimEpHMkRDQ1PKLe9dfNILbgdxyrncsOXM3vCpsQ1Hfj4bPiGl+u4u6e9Akqw==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@stoplight/ordered-object-literal": "^1.0.1",
+        "@stoplight/types": "^13.0.0",
+        "@stoplight/yaml-ast-parser": "0.0.48",
+        "tslib": "^2.2.0"
+      },
+      "engines": {
+        "node": ">=10.8"
+      }
+    },
+    "node_modules/@stoplight/spectral-ruleset-migrator/node_modules/@stoplight/yaml-ast-parser": {
+      "version": "0.0.48",
+      "resolved": "https://registry.npmjs.org/@stoplight/yaml-ast-parser/-/yaml-ast-parser-0.0.48.tgz",
+      "integrity": "sha512-sV+51I7WYnLJnKPn2EMWgS4EUfoP4iWEbrWwbXsj0MZCB/xOK8j6+C9fntIdOM50kpx45ZLC3s6kwKivWuqvyg==",
+      "license": "Apache-2.0"
+    },
+    "node_modules/@stoplight/spectral-rulesets": {
+      "version": "1.22.0",
+      "resolved": "https://registry.npmjs.org/@stoplight/spectral-rulesets/-/spectral-rulesets-1.22.0.tgz",
+      "integrity": "sha512-l2EY2jiKKLsvnPfGy+pXC0LeGsbJzcQP5G/AojHgf+cwN//VYxW1Wvv4WKFx/CLmLxc42mJYF2juwWofjWYNIQ==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@asyncapi/specs": "^6.8.0",
+        "@stoplight/better-ajv-errors": "1.0.3",
+        "@stoplight/json": "^3.17.0",
+        "@stoplight/spectral-core": "^1.19.4",
+        "@stoplight/spectral-formats": "^1.8.1",
+        "@stoplight/spectral-functions": "^1.9.1",
+        "@stoplight/spectral-runtime": "^1.1.2",
+        "@stoplight/types": "^13.6.0",
+        "@types/json-schema": "^7.0.7",
+        "ajv": "^8.17.1",
+        "ajv-formats": "~2.1.1",
+        "json-schema-traverse": "^1.0.0",
+        "leven": "3.1.0",
+        "lodash": "~4.17.21",
+        "tslib": "^2.8.1"
+      },
+      "engines": {
+        "node": "^16.20 || ^18.18 || >= 20.17"
+      }
+    },
+    "node_modules/@stoplight/spectral-runtime": {
+      "version": "1.1.4",
+      "resolved": "https://registry.npmjs.org/@stoplight/spectral-runtime/-/spectral-runtime-1.1.4.tgz",
+      "integrity": "sha512-YHbhX3dqW0do6DhiPSgSGQzr6yQLlWybhKwWx0cqxjMwxej3TqLv3BXMfIUYFKKUqIwH4Q2mV8rrMM8qD2N0rQ==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@stoplight/json": "^3.20.1",
+        "@stoplight/path": "^1.3.2",
+        "@stoplight/types": "^13.6.0",
+        "abort-controller": "^3.0.0",
+        "lodash": "^4.17.21",
+        "node-fetch": "^2.7.0",
+        "tslib": "^2.8.1"
+      },
+      "engines": {
+        "node": "^16.20 || ^18.18 || >= 20.17"
+      }
+    },
+    "node_modules/@stoplight/types": {
+      "version": "13.20.0",
+      "resolved": "https://registry.npmjs.org/@stoplight/types/-/types-13.20.0.tgz",
+      "integrity": "sha512-2FNTv05If7ib79VPDA/r9eUet76jewXFH2y2K5vuge6SXbRHtWBhcaRmu+6QpF4/WRNoJj5XYRSwLGXDxysBGA==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@types/json-schema": "^7.0.4",
+        "utility-types": "^3.10.0"
+      },
+      "engines": {
+        "node": "^12.20 || >=14.13"
+      }
+    },
+    "node_modules/@stoplight/yaml": {
+      "version": "4.3.0",
+      "resolved": "https://registry.npmjs.org/@stoplight/yaml/-/yaml-4.3.0.tgz",
+      "integrity": "sha512-JZlVFE6/dYpP9tQmV0/ADfn32L9uFarHWxfcRhReKUnljz1ZiUM5zpX+PH8h5CJs6lao3TuFqnPm9IJJCEkE2w==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@stoplight/ordered-object-literal": "^1.0.5",
+        "@stoplight/types": "^14.1.1",
+        "@stoplight/yaml-ast-parser": "0.0.50",
+        "tslib": "^2.2.0"
+      },
+      "engines": {
+        "node": ">=10.8"
+      }
+    },
+    "node_modules/@stoplight/yaml-ast-parser": {
+      "version": "0.0.50",
+      "resolved": "https://registry.npmjs.org/@stoplight/yaml-ast-parser/-/yaml-ast-parser-0.0.50.tgz",
+      "integrity": "sha512-Pb6M8TDO9DtSVla9yXSTAxmo9GVEouq5P40DWXdOie69bXogZTkgvopCq+yEvTMA0F6PEvdJmbtTV3ccIp11VQ==",
+      "license": "Apache-2.0"
+    },
+    "node_modules/@stoplight/yaml/node_modules/@stoplight/types": {
+      "version": "14.1.1",
+      "resolved": "https://registry.npmjs.org/@stoplight/types/-/types-14.1.1.tgz",
+      "integrity": "sha512-/kjtr+0t0tjKr+heVfviO9FrU/uGLc+QNX3fHJc19xsCNYqU7lVhaXxDmEID9BZTjG+/r9pK9xP/xU02XGg65g==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@types/json-schema": "^7.0.4",
+        "utility-types": "^3.10.0"
+      },
+      "engines": {
+        "node": "^12.20 || >=14.13"
+      }
+    },
+    "node_modules/@types/es-aggregate-error": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/@types/es-aggregate-error/-/es-aggregate-error-1.0.6.tgz",
+      "integrity": "sha512-qJ7LIFp06h1QE1aVxbVd+zJP2wdaugYXYfd6JxsyRMrYHaxb6itXPogW2tz+ylUJ1n1b+JF1PHyYCfYHm0dvUg==",
+      "license": "MIT",
+      "dependencies": {
+        "@types/node": "*"
+      }
+    },
+    "node_modules/@types/estree": {
+      "version": "0.0.39",
+      "resolved": "https://registry.npmjs.org/@types/estree/-/estree-0.0.39.tgz",
+      "integrity": "sha512-EYNwp3bU+98cpU4lAWYYL7Zz+2gryWH1qbdDTidVd6hkiR6weksdbMadyXKXNPEkQFhXM+hVO9ZygomHXp+AIw==",
+      "license": "MIT"
+    },
+    "node_modules/@types/json-schema": {
+      "version": "7.0.15",
+      "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz",
+      "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==",
+      "license": "MIT"
+    },
+    "node_modules/@types/long": {
+      "version": "4.0.2",
+      "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.2.tgz",
+      "integrity": "sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA==",
+      "license": "MIT"
+    },
+    "node_modules/@types/markdown-escape": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/@types/markdown-escape/-/markdown-escape-1.1.3.tgz",
+      "integrity": "sha512-JIc1+s3y5ujKnt/+N+wq6s/QdL2qZ11fP79MijrVXsAAnzSxCbT2j/3prHRouJdZ2yFLN3vkP0HytfnoCczjOw==",
+      "license": "MIT"
+    },
+    "node_modules/@types/node": {
+      "version": "25.5.0",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-25.5.0.tgz",
+      "integrity": "sha512-jp2P3tQMSxWugkCUKLRPVUpGaL5MVFwF8RDuSRztfwgN1wmqJeMSbKlnEtQqU8UrhTmzEmZdu2I6v2dpp7XIxw==",
+      "license": "MIT",
+      "dependencies": {
+        "undici-types": "~7.18.0"
+      }
+    },
+    "node_modules/@types/sarif": {
+      "version": "2.1.7",
+      "resolved": "https://registry.npmjs.org/@types/sarif/-/sarif-2.1.7.tgz",
+      "integrity": "sha512-kRz0VEkJqWLf1LLVN4pT1cg1Z9wAuvI6L97V3m2f5B76Tg8d413ddvLBPTEHAZJlnn4XSvu0FkZtViCQGVyrXQ==",
+      "license": "MIT"
+    },
+    "node_modules/@types/urijs": {
+      "version": "1.19.26",
+      "resolved": "https://registry.npmjs.org/@types/urijs/-/urijs-1.19.26.tgz",
+      "integrity": "sha512-wkXrVzX5yoqLnndOwFsieJA7oKM8cNkOKJtf/3vVGSUFkWDKZvFHpIl9Pvqb/T9UsawBBFMTTD8xu7sK5MWuvg==",
+      "license": "MIT"
+    },
+    "node_modules/@types/uuid": {
+      "version": "3.4.13",
+      "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-3.4.13.tgz",
+      "integrity": "sha512-pAeZeUbLE4Z9Vi9wsWV2bYPTweEHeJJy0G4pEjOA/FSvy1Ad5U5Km8iDV6TKre1mjBiVNfAdVHKruP8bAh4Q5A==",
+      "license": "MIT"
+    },
+    "node_modules/abort-controller": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
+      "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
+      "license": "MIT",
+      "dependencies": {
+        "event-target-shim": "^5.0.0"
+      },
+      "engines": {
+        "node": ">=6.5"
+      }
+    },
+    "node_modules/ajv": {
+      "version": "8.18.0",
+      "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.18.0.tgz",
+      "integrity": "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==",
+      "license": "MIT",
+      "peer": true,
+      "dependencies": {
+        "fast-deep-equal": "^3.1.3",
+        "fast-uri": "^3.0.1",
+        "json-schema-traverse": "^1.0.0",
+        "require-from-string": "^2.0.2"
+      },
+      "funding": {
+        "type": "github",
+        "url": "https://github.com/sponsors/epoberezkin"
+      }
+    },
+    "node_modules/ajv-draft-04": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/ajv-draft-04/-/ajv-draft-04-1.0.0.tgz",
+      "integrity": "sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==",
+      "license": "MIT",
+      "peerDependencies": {
+        "ajv": "^8.5.0"
+      },
+      "peerDependenciesMeta": {
+        "ajv": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/ajv-errors": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/ajv-errors/-/ajv-errors-3.0.0.tgz",
+      "integrity": "sha512-V3wD15YHfHz6y0KdhYFjyy9vWtEVALT9UrxfN3zqlI6dMioHnJrqOYfyPKol3oqrnCM9uwkcdCwkJ0WUcbLMTQ==",
+      "license": "MIT",
+      "peerDependencies": {
+        "ajv": "^8.0.1"
+      }
+    },
+    "node_modules/ajv-formats": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz",
+      "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==",
+      "license": "MIT",
+      "dependencies": {
+        "ajv": "^8.0.0"
+      },
+      "peerDependencies": {
+        "ajv": "^8.0.0"
+      },
+      "peerDependenciesMeta": {
+        "ajv": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/ansi-regex": {
+      "version": "5.0.1",
+      "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+      "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/ansi-styles": {
+      "version": "4.3.0",
+      "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+      "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+      "license": "MIT",
+      "dependencies": {
+        "color-convert": "^2.0.1"
+      },
+      "engines": {
+        "node": ">=8"
+      },
+      "funding": {
+        "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+      }
+    },
+    "node_modules/array-buffer-byte-length": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz",
+      "integrity": "sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3",
+        "is-array-buffer": "^3.0.5"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/arraybuffer.prototype.slice": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.4.tgz",
+      "integrity": "sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ==",
+      "license": "MIT",
+      "dependencies": {
+        "array-buffer-byte-length": "^1.0.1",
+        "call-bind": "^1.0.8",
+        "define-properties": "^1.2.1",
+        "es-abstract": "^1.23.5",
+        "es-errors": "^1.3.0",
+        "get-intrinsic": "^1.2.6",
+        "is-array-buffer": "^3.0.4"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/as-table": {
+      "version": "1.0.55",
+      "resolved": "https://registry.npmjs.org/as-table/-/as-table-1.0.55.tgz",
+      "integrity": "sha512-xvsWESUJn0JN421Xb9MQw6AsMHRCUknCe0Wjlxvjud80mU4E6hQf1A6NzQKcYNmYw62MfzEtXc+badstZP3JpQ==",
+      "license": "MIT",
+      "dependencies": {
+        "printable-characters": "^1.0.42"
+      }
+    },
+    "node_modules/ast-types": {
+      "version": "0.14.2",
+      "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.14.2.tgz",
+      "integrity": "sha512-O0yuUDnZeQDL+ncNGlJ78BiO4jnYI3bvMsD5prT0/nsgijG/LpNBIr63gTjVTNsiGkgQhiyCShTgxt8oXOrklA==",
+      "license": "MIT",
+      "dependencies": {
+        "tslib": "^2.0.1"
+      },
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/astring": {
+      "version": "1.9.0",
+      "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz",
+      "integrity": "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==",
+      "license": "MIT",
+      "bin": {
+        "astring": "bin/astring"
+      }
+    },
+    "node_modules/async-function": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/async-function/-/async-function-1.0.0.tgz",
+      "integrity": "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/available-typed-arrays": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz",
+      "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==",
+      "license": "MIT",
+      "dependencies": {
+        "possible-typed-array-names": "^1.0.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/balanced-match": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+      "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
+      "license": "MIT"
+    },
+    "node_modules/brace-expansion": {
+      "version": "1.1.12",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
+      "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
+      "license": "MIT",
+      "dependencies": {
+        "balanced-match": "^1.0.0",
+        "concat-map": "0.0.1"
+      }
+    },
+    "node_modules/braces": {
+      "version": "3.0.3",
+      "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+      "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
+      "license": "MIT",
+      "dependencies": {
+        "fill-range": "^7.1.1"
+      },
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/buffer-from": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
+      "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
+      "license": "MIT"
+    },
+    "node_modules/builtins": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/builtins/-/builtins-1.0.3.tgz",
+      "integrity": "sha512-uYBjakWipfaO/bXI7E8rq6kpwHRZK5cNYrUv2OzZSI/FvmdMyXJ2tG9dKcjEC5YHmHpUAwsargWIZNWdxb/bnQ==",
+      "license": "MIT"
+    },
+    "node_modules/call-bind": {
+      "version": "1.0.8",
+      "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz",
+      "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind-apply-helpers": "^1.0.0",
+        "es-define-property": "^1.0.0",
+        "get-intrinsic": "^1.2.4",
+        "set-function-length": "^1.2.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/call-bind-apply-helpers": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+      "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+      "license": "MIT",
+      "dependencies": {
+        "es-errors": "^1.3.0",
+        "function-bind": "^1.1.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/call-bound": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz",
+      "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind-apply-helpers": "^1.0.2",
+        "get-intrinsic": "^1.3.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/chalk": {
+      "version": "4.1.2",
+      "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+      "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+      "license": "MIT",
+      "dependencies": {
+        "ansi-styles": "^4.1.0",
+        "supports-color": "^7.1.0"
+      },
+      "engines": {
+        "node": ">=10"
+      },
+      "funding": {
+        "url": "https://github.com/chalk/chalk?sponsor=1"
+      }
+    },
+    "node_modules/cliui": {
+      "version": "7.0.4",
+      "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz",
+      "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==",
+      "license": "ISC",
+      "dependencies": {
+        "string-width": "^4.2.0",
+        "strip-ansi": "^6.0.0",
+        "wrap-ansi": "^7.0.0"
+      }
+    },
+    "node_modules/color-convert": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+      "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+      "license": "MIT",
+      "dependencies": {
+        "color-name": "~1.1.4"
+      },
+      "engines": {
+        "node": ">=7.0.0"
+      }
+    },
+    "node_modules/color-name": {
+      "version": "1.1.4",
+      "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+      "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+      "license": "MIT"
+    },
+    "node_modules/commander": {
+      "version": "11.0.0",
+      "resolved": "https://registry.npmjs.org/commander/-/commander-11.0.0.tgz",
+      "integrity": "sha512-9HMlXtt/BNoYr8ooyjjNRdIilOTkVJXB+GhxMTtOKwk0R4j4lS4NpjuqmRxroBfnfTSHQIHQB7wryHhXarNjmQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=16"
+      }
+    },
+    "node_modules/commondir": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz",
+      "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==",
+      "license": "MIT"
+    },
+    "node_modules/concat-map": {
+      "version": "0.0.1",
+      "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+      "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
+      "license": "MIT"
+    },
+    "node_modules/core-js": {
+      "version": "3.33.1",
+      "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.33.1.tgz",
+      "integrity": "sha512-qVSq3s+d4+GsqN0teRCJtM6tdEEXyWxjzbhVrCHmBS5ZTM0FS2MOS0D13dUXAWDUN6a+lHI/N1hF9Ytz6iLl9Q==",
+      "hasInstallScript": true,
+      "license": "MIT",
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/core-js"
+      }
+    },
+    "node_modules/cucumber-messages": {
+      "version": "8.0.0",
+      "resolved": "https://registry.npmjs.org/cucumber-messages/-/cucumber-messages-8.0.0.tgz",
+      "integrity": "sha512-lUnWRMjwA9+KhDec/5xRZV3Du67ISumHnVLywWQXyvzmc4P+Eqx8CoeQrBQoau3Pw1hs4kJLTDyV85hFBF00SQ==",
+      "deprecated": "This package is now published under @cucumber/messages",
+      "license": "MIT",
+      "dependencies": {
+        "@types/uuid": "^3.4.6",
+        "protobufjs": "^6.8.8",
+        "uuid": "^3.3.3"
+      }
+    },
+    "node_modules/data-uri-to-buffer": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-2.0.2.tgz",
+      "integrity": "sha512-ND9qDTLc6diwj+Xe5cdAgVTbLVdXbtxTJRXRhli8Mowuaan+0EJOtdqJ0QCHNSSPyoXGx9HX2/VMnKeC34AChA==",
+      "license": "MIT"
+    },
+    "node_modules/data-view-buffer": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.2.tgz",
+      "integrity": "sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3",
+        "es-errors": "^1.3.0",
+        "is-data-view": "^1.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/data-view-byte-length": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.2.tgz",
+      "integrity": "sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3",
+        "es-errors": "^1.3.0",
+        "is-data-view": "^1.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/inspect-js"
+      }
+    },
+    "node_modules/data-view-byte-offset": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.1.tgz",
+      "integrity": "sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.2",
+        "es-errors": "^1.3.0",
+        "is-data-view": "^1.0.1"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/define-data-property": {
+      "version": "1.1.4",
+      "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz",
+      "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==",
+      "license": "MIT",
+      "dependencies": {
+        "es-define-property": "^1.0.0",
+        "es-errors": "^1.3.0",
+        "gopd": "^1.0.1"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/define-properties": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz",
+      "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==",
+      "license": "MIT",
+      "dependencies": {
+        "define-data-property": "^1.0.1",
+        "has-property-descriptors": "^1.0.0",
+        "object-keys": "^1.1.1"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/dependency-graph": {
+      "version": "0.11.0",
+      "resolved": "https://registry.npmjs.org/dependency-graph/-/dependency-graph-0.11.0.tgz",
+      "integrity": "sha512-JeMq7fEshyepOWDfcfHK06N3MhyPhz++vtqWhMT5O9A3K42rdsEDpfdVqjaqaAhsw6a+ZqeDvQVtD0hFHQWrzg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.6.0"
+      }
+    },
+    "node_modules/dunder-proto": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+      "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind-apply-helpers": "^1.0.1",
+        "es-errors": "^1.3.0",
+        "gopd": "^1.2.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/emoji-regex": {
+      "version": "8.0.0",
+      "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+      "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+      "license": "MIT"
+    },
+    "node_modules/es-abstract": {
+      "version": "1.24.1",
+      "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.24.1.tgz",
+      "integrity": "sha512-zHXBLhP+QehSSbsS9Pt23Gg964240DPd6QCf8WpkqEXxQ7fhdZzYsocOr5u7apWonsS5EjZDmTF+/slGMyasvw==",
+      "license": "MIT",
+      "dependencies": {
+        "array-buffer-byte-length": "^1.0.2",
+        "arraybuffer.prototype.slice": "^1.0.4",
+        "available-typed-arrays": "^1.0.7",
+        "call-bind": "^1.0.8",
+        "call-bound": "^1.0.4",
+        "data-view-buffer": "^1.0.2",
+        "data-view-byte-length": "^1.0.2",
+        "data-view-byte-offset": "^1.0.1",
+        "es-define-property": "^1.0.1",
+        "es-errors": "^1.3.0",
+        "es-object-atoms": "^1.1.1",
+        "es-set-tostringtag": "^2.1.0",
+        "es-to-primitive": "^1.3.0",
+        "function.prototype.name": "^1.1.8",
+        "get-intrinsic": "^1.3.0",
+        "get-proto": "^1.0.1",
+        "get-symbol-description": "^1.1.0",
+        "globalthis": "^1.0.4",
+        "gopd": "^1.2.0",
+        "has-property-descriptors": "^1.0.2",
+        "has-proto": "^1.2.0",
+        "has-symbols": "^1.1.0",
+        "hasown": "^2.0.2",
+        "internal-slot": "^1.1.0",
+        "is-array-buffer": "^3.0.5",
+        "is-callable": "^1.2.7",
+        "is-data-view": "^1.0.2",
+        "is-negative-zero": "^2.0.3",
+        "is-regex": "^1.2.1",
+        "is-set": "^2.0.3",
+        "is-shared-array-buffer": "^1.0.4",
+        "is-string": "^1.1.1",
+        "is-typed-array": "^1.1.15",
+        "is-weakref": "^1.1.1",
+        "math-intrinsics": "^1.1.0",
+        "object-inspect": "^1.13.4",
+        "object-keys": "^1.1.1",
+        "object.assign": "^4.1.7",
+        "own-keys": "^1.0.1",
+        "regexp.prototype.flags": "^1.5.4",
+        "safe-array-concat": "^1.1.3",
+        "safe-push-apply": "^1.0.0",
+        "safe-regex-test": "^1.1.0",
+        "set-proto": "^1.0.0",
+        "stop-iteration-iterator": "^1.1.0",
+        "string.prototype.trim": "^1.2.10",
+        "string.prototype.trimend": "^1.0.9",
+        "string.prototype.trimstart": "^1.0.8",
+        "typed-array-buffer": "^1.0.3",
+        "typed-array-byte-length": "^1.0.3",
+        "typed-array-byte-offset": "^1.0.4",
+        "typed-array-length": "^1.0.7",
+        "unbox-primitive": "^1.1.0",
+        "which-typed-array": "^1.1.19"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/es-aggregate-error": {
+      "version": "1.0.14",
+      "resolved": "https://registry.npmjs.org/es-aggregate-error/-/es-aggregate-error-1.0.14.tgz",
+      "integrity": "sha512-3YxX6rVb07B5TV11AV5wsL7nQCHXNwoHPsQC8S4AmBiqYhyNCJ5BRKXkXyDJvs8QzXN20NgRtxe3dEEQD9NLHA==",
+      "license": "MIT",
+      "dependencies": {
+        "define-data-property": "^1.1.4",
+        "define-properties": "^1.2.1",
+        "es-abstract": "^1.24.0",
+        "es-errors": "^1.3.0",
+        "function-bind": "^1.1.2",
+        "globalthis": "^1.0.4",
+        "has-property-descriptors": "^1.0.2",
+        "set-function-name": "^2.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/es-define-property": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+      "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/es-errors": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+      "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/es-object-atoms": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+      "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+      "license": "MIT",
+      "dependencies": {
+        "es-errors": "^1.3.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/es-set-tostringtag": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
+      "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
+      "license": "MIT",
+      "dependencies": {
+        "es-errors": "^1.3.0",
+        "get-intrinsic": "^1.2.6",
+        "has-tostringtag": "^1.0.2",
+        "hasown": "^2.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/es-to-primitive": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.3.0.tgz",
+      "integrity": "sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==",
+      "license": "MIT",
+      "dependencies": {
+        "is-callable": "^1.2.7",
+        "is-date-object": "^1.0.5",
+        "is-symbol": "^1.0.4"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/escalade": {
+      "version": "3.2.0",
+      "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
+      "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/estree-walker": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz",
+      "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==",
+      "license": "MIT"
+    },
+    "node_modules/event-target-shim": {
+      "version": "5.0.1",
+      "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
+      "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/fast-deep-equal": {
+      "version": "3.1.3",
+      "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+      "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
+      "license": "MIT"
+    },
+    "node_modules/fast-glob": {
+      "version": "3.2.12",
+      "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz",
+      "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==",
+      "license": "MIT",
+      "dependencies": {
+        "@nodelib/fs.stat": "^2.0.2",
+        "@nodelib/fs.walk": "^1.2.3",
+        "glob-parent": "^5.1.2",
+        "merge2": "^1.3.0",
+        "micromatch": "^4.0.4"
+      },
+      "engines": {
+        "node": ">=8.6.0"
+      }
+    },
+    "node_modules/fast-memoize": {
+      "version": "2.5.2",
+      "resolved": "https://registry.npmjs.org/fast-memoize/-/fast-memoize-2.5.2.tgz",
+      "integrity": "sha512-Ue0LwpDYErFbmNnZSF0UH6eImUwDmogUO1jyE+JbN2gsQz/jICm1Ve7t9QT0rNSsfJt+Hs4/S3GnsDVjL4HVrw==",
+      "license": "MIT"
+    },
+    "node_modules/fast-uri": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz",
+      "integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/fastify"
+        },
+        {
+          "type": "opencollective",
+          "url": "https://opencollective.com/fastify"
+        }
+      ],
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/fastq": {
+      "version": "1.20.1",
+      "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz",
+      "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==",
+      "license": "ISC",
+      "dependencies": {
+        "reusify": "^1.0.4"
+      }
+    },
+    "node_modules/fill-range": {
+      "version": "7.1.1",
+      "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+      "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
+      "license": "MIT",
+      "dependencies": {
+        "to-regex-range": "^5.0.1"
+      },
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/for-each": {
+      "version": "0.3.5",
+      "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz",
+      "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==",
+      "license": "MIT",
+      "dependencies": {
+        "is-callable": "^1.2.7"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/fs-extra": {
+      "version": "10.1.0",
+      "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
+      "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
+      "license": "MIT",
+      "dependencies": {
+        "graceful-fs": "^4.2.0",
+        "jsonfile": "^6.0.1",
+        "universalify": "^2.0.0"
+      },
+      "engines": {
+        "node": ">=12"
+      }
+    },
+    "node_modules/fs.realpath": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+      "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
+      "license": "ISC"
+    },
+    "node_modules/fsevents": {
+      "version": "2.3.3",
+      "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+      "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+      "hasInstallScript": true,
+      "license": "MIT",
+      "optional": true,
+      "os": [
+        "darwin"
+      ],
+      "engines": {
+        "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+      }
+    },
+    "node_modules/function-bind": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+      "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+      "license": "MIT",
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/function.prototype.name": {
+      "version": "1.1.8",
+      "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.8.tgz",
+      "integrity": "sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind": "^1.0.8",
+        "call-bound": "^1.0.3",
+        "define-properties": "^1.2.1",
+        "functions-have-names": "^1.2.3",
+        "hasown": "^2.0.2",
+        "is-callable": "^1.2.7"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/functions-have-names": {
+      "version": "1.2.3",
+      "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz",
+      "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==",
+      "license": "MIT",
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/generator-function": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/generator-function/-/generator-function-2.0.1.tgz",
+      "integrity": "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/get-caller-file": {
+      "version": "2.0.5",
+      "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
+      "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
+      "license": "ISC",
+      "engines": {
+        "node": "6.* || 8.* || >= 10.*"
+      }
+    },
+    "node_modules/get-intrinsic": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+      "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind-apply-helpers": "^1.0.2",
+        "es-define-property": "^1.0.1",
+        "es-errors": "^1.3.0",
+        "es-object-atoms": "^1.1.1",
+        "function-bind": "^1.1.2",
+        "get-proto": "^1.0.1",
+        "gopd": "^1.2.0",
+        "has-symbols": "^1.1.0",
+        "hasown": "^2.0.2",
+        "math-intrinsics": "^1.1.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/get-proto": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+      "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+      "license": "MIT",
+      "dependencies": {
+        "dunder-proto": "^1.0.1",
+        "es-object-atoms": "^1.0.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/get-source": {
+      "version": "2.0.12",
+      "resolved": "https://registry.npmjs.org/get-source/-/get-source-2.0.12.tgz",
+      "integrity": "sha512-X5+4+iD+HoSeEED+uwrQ07BOQr0kEDFMVqqpBuI+RaZBpBpHCuXxo70bjar6f0b0u/DQJsJ7ssurpP0V60Az+w==",
+      "license": "Unlicense",
+      "dependencies": {
+        "data-uri-to-buffer": "^2.0.0",
+        "source-map": "^0.6.1"
+      }
+    },
+    "node_modules/get-symbol-description": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.1.0.tgz",
+      "integrity": "sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3",
+        "es-errors": "^1.3.0",
+        "get-intrinsic": "^1.2.6"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/gherkin": {
+      "version": "9.0.0",
+      "resolved": "https://registry.npmjs.org/gherkin/-/gherkin-9.0.0.tgz",
+      "integrity": "sha512-6xoAepoxo5vhkBXjB4RCfVnSKHu5z9SqXIQVUyj+Jw8BQX8odATlee5otXgdN8llZvyvHokuvNiBeB3naEnnIQ==",
+      "deprecated": "This package is now published under @cucumber/gherkin",
+      "license": "MIT",
+      "dependencies": {
+        "commander": "^4.0.1",
+        "cucumber-messages": "8.0.0",
+        "source-map-support": "^0.5.16"
+      },
+      "bin": {
+        "gherkin-javascript": "bin/gherkin"
+      }
+    },
+    "node_modules/gherkin-lint": {
+      "version": "4.2.4",
+      "resolved": "https://registry.npmjs.org/gherkin-lint/-/gherkin-lint-4.2.4.tgz",
+      "integrity": "sha512-iM+ECIHOF6Wh94YIF1hSHA6JH9rzcgozlMLHA/uCzGtQiMjb/uL093eh1nTpfoJ/38veL7Jfh4yY2inu7uUoFA==",
+      "license": "ISC",
+      "dependencies": {
+        "commander": "11.0.0",
+        "core-js": "3.33.1",
+        "gherkin": "9.0.0",
+        "glob": "7.1.6",
+        "lodash": "4.17.21",
+        "strip-json-comments": "3.0.1",
+        "xml-js": "^1.6.11"
+      },
+      "bin": {
+        "gherkin-lint": "dist/main.js"
+      },
+      "engines": {
+        "node": ">=10.0.0"
+      }
+    },
+    "node_modules/gherkin-lint/node_modules/glob": {
+      "version": "7.1.6",
+      "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
+      "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
+      "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me",
+      "license": "ISC",
+      "dependencies": {
+        "fs.realpath": "^1.0.0",
+        "inflight": "^1.0.4",
+        "inherits": "2",
+        "minimatch": "^3.0.4",
+        "once": "^1.3.0",
+        "path-is-absolute": "^1.0.0"
+      },
+      "engines": {
+        "node": "*"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/isaacs"
+      }
+    },
+    "node_modules/gherkin-lint/node_modules/lodash": {
+      "version": "4.17.21",
+      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+      "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
+      "license": "MIT"
+    },
+    "node_modules/gherkin/node_modules/commander": {
+      "version": "4.1.1",
+      "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz",
+      "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 6"
+      }
+    },
+    "node_modules/glob": {
+      "version": "7.2.3",
+      "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+      "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+      "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me",
+      "license": "ISC",
+      "dependencies": {
+        "fs.realpath": "^1.0.0",
+        "inflight": "^1.0.4",
+        "inherits": "2",
+        "minimatch": "^3.1.1",
+        "once": "^1.3.0",
+        "path-is-absolute": "^1.0.0"
+      },
+      "engines": {
+        "node": "*"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/isaacs"
+      }
+    },
+    "node_modules/glob-parent": {
+      "version": "5.1.2",
+      "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+      "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+      "license": "ISC",
+      "dependencies": {
+        "is-glob": "^4.0.1"
+      },
+      "engines": {
+        "node": ">= 6"
+      }
+    },
+    "node_modules/globalthis": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz",
+      "integrity": "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==",
+      "license": "MIT",
+      "dependencies": {
+        "define-properties": "^1.2.1",
+        "gopd": "^1.0.1"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/gopd": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+      "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/graceful-fs": {
+      "version": "4.2.11",
+      "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
+      "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==",
+      "license": "ISC"
+    },
+    "node_modules/has-bigints": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.1.0.tgz",
+      "integrity": "sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/has-flag": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+      "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/has-property-descriptors": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz",
+      "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==",
+      "license": "MIT",
+      "dependencies": {
+        "es-define-property": "^1.0.0"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/has-proto": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.2.0.tgz",
+      "integrity": "sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ==",
+      "license": "MIT",
+      "dependencies": {
+        "dunder-proto": "^1.0.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/has-symbols": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+      "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/has-tostringtag": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
+      "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
+      "license": "MIT",
+      "dependencies": {
+        "has-symbols": "^1.0.3"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/hasown": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+      "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+      "license": "MIT",
+      "dependencies": {
+        "function-bind": "^1.1.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/hpagent": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/hpagent/-/hpagent-1.2.0.tgz",
+      "integrity": "sha512-A91dYTeIB6NoXG+PxTQpCCDDnfHsW9kc06Lvpu1TEe9gnd6ZFeiBoRO9JvzEv6xK7EX97/dUE8g/vBMTqTS3CA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=14"
+      }
+    },
+    "node_modules/immer": {
+      "version": "9.0.21",
+      "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz",
+      "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==",
+      "license": "MIT",
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/immer"
+      }
+    },
+    "node_modules/inflight": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+      "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
+      "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.",
+      "license": "ISC",
+      "dependencies": {
+        "once": "^1.3.0",
+        "wrappy": "1"
+      }
+    },
+    "node_modules/inherits": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+      "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
+      "license": "ISC"
+    },
+    "node_modules/internal-slot": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.1.0.tgz",
+      "integrity": "sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==",
+      "license": "MIT",
+      "dependencies": {
+        "es-errors": "^1.3.0",
+        "hasown": "^2.0.2",
+        "side-channel": "^1.1.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/is-array-buffer": {
+      "version": "3.0.5",
+      "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.5.tgz",
+      "integrity": "sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind": "^1.0.8",
+        "call-bound": "^1.0.3",
+        "get-intrinsic": "^1.2.6"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-async-function": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.1.1.tgz",
+      "integrity": "sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ==",
+      "license": "MIT",
+      "dependencies": {
+        "async-function": "^1.0.0",
+        "call-bound": "^1.0.3",
+        "get-proto": "^1.0.1",
+        "has-tostringtag": "^1.0.2",
+        "safe-regex-test": "^1.1.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-bigint": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.1.0.tgz",
+      "integrity": "sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==",
+      "license": "MIT",
+      "dependencies": {
+        "has-bigints": "^1.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-boolean-object": {
+      "version": "1.2.2",
+      "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.2.tgz",
+      "integrity": "sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3",
+        "has-tostringtag": "^1.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-callable": {
+      "version": "1.2.7",
+      "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz",
+      "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-core-module": {
+      "version": "2.16.1",
+      "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz",
+      "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==",
+      "license": "MIT",
+      "dependencies": {
+        "hasown": "^2.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-data-view": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.2.tgz",
+      "integrity": "sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.2",
+        "get-intrinsic": "^1.2.6",
+        "is-typed-array": "^1.1.13"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-date-object": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.1.0.tgz",
+      "integrity": "sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.2",
+        "has-tostringtag": "^1.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-extglob": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+      "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/is-finalizationregistry": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.1.1.tgz",
+      "integrity": "sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-fullwidth-code-point": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+      "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/is-generator-function": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.2.tgz",
+      "integrity": "sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.4",
+        "generator-function": "^2.0.0",
+        "get-proto": "^1.0.1",
+        "has-tostringtag": "^1.0.2",
+        "safe-regex-test": "^1.1.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-glob": {
+      "version": "4.0.3",
+      "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
+      "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
+      "license": "MIT",
+      "dependencies": {
+        "is-extglob": "^2.1.1"
+      },
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/is-map": {
+      "version": "2.0.3",
+      "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz",
+      "integrity": "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-negative-zero": {
+      "version": "2.0.3",
+      "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz",
+      "integrity": "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-number": {
+      "version": "7.0.0",
+      "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+      "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.12.0"
+      }
+    },
+    "node_modules/is-number-object": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.1.1.tgz",
+      "integrity": "sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3",
+        "has-tostringtag": "^1.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-reference": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-1.2.1.tgz",
+      "integrity": "sha512-U82MsXXiFIrjCK4otLT+o2NA2Cd2g5MLoOVXUZjIOhLurrRxpEXzI8O0KZHr3IjLvlAH1kTPYSuqer5T9ZVBKQ==",
+      "license": "MIT",
+      "dependencies": {
+        "@types/estree": "*"
+      }
+    },
+    "node_modules/is-regex": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz",
+      "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.2",
+        "gopd": "^1.2.0",
+        "has-tostringtag": "^1.0.2",
+        "hasown": "^2.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-set": {
+      "version": "2.0.3",
+      "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz",
+      "integrity": "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-shared-array-buffer": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.4.tgz",
+      "integrity": "sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-string": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.1.1.tgz",
+      "integrity": "sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3",
+        "has-tostringtag": "^1.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-symbol": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.1.1.tgz",
+      "integrity": "sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.2",
+        "has-symbols": "^1.1.0",
+        "safe-regex-test": "^1.1.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-typed-array": {
+      "version": "1.1.15",
+      "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz",
+      "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==",
+      "license": "MIT",
+      "dependencies": {
+        "which-typed-array": "^1.1.16"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-weakmap": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz",
+      "integrity": "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-weakref": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.1.1.tgz",
+      "integrity": "sha512-6i9mGWSlqzNMEqpCp93KwRS1uUOodk2OJ6b+sq7ZPDSy2WuI5NFIxp/254TytR8ftefexkWn5xNiHUNpPOfSew==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-weakset": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.4.tgz",
+      "integrity": "sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3",
+        "get-intrinsic": "^1.2.6"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/isarray": {
+      "version": "2.0.5",
+      "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz",
+      "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==",
+      "license": "MIT"
+    },
+    "node_modules/jsep": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/jsep/-/jsep-1.4.0.tgz",
+      "integrity": "sha512-B7qPcEVE3NVkmSJbaYxvv4cHkVW7DQsZz13pUMrfS8z8Q/BuShN+gcTXrUlPiGqM2/t/EEaI030bpxMqY8gMlw==",
+      "license": "MIT",
+      "peer": true,
+      "engines": {
+        "node": ">= 10.16.0"
+      }
+    },
+    "node_modules/json-schema-traverse": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+      "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==",
+      "license": "MIT"
+    },
+    "node_modules/jsonc-parser": {
+      "version": "2.2.1",
+      "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-2.2.1.tgz",
+      "integrity": "sha512-o6/yDBYccGvTz1+QFevz6l6OBZ2+fMVu2JZ9CIhzsYRX4mjaK5IyX9eldUdCmga16zlgQxyrj5pt9kzuj2C02w==",
+      "license": "MIT"
+    },
+    "node_modules/jsonfile": {
+      "version": "6.2.0",
+      "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
+      "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
+      "license": "MIT",
+      "dependencies": {
+        "universalify": "^2.0.0"
+      },
+      "optionalDependencies": {
+        "graceful-fs": "^4.1.6"
+      }
+    },
+    "node_modules/jsonpath-plus": {
+      "version": "10.4.0",
+      "resolved": "https://registry.npmjs.org/jsonpath-plus/-/jsonpath-plus-10.4.0.tgz",
+      "integrity": "sha512-T92WWatJXmhBbKsgH/0hl+jxjdXrifi5IKeMY02DWggRxX0UElcbVzPlmgLTbvsPeW1PasQ6xE2Q75stkhGbsA==",
+      "license": "MIT",
+      "dependencies": {
+        "@jsep-plugin/assignment": "^1.3.0",
+        "@jsep-plugin/regex": "^1.0.4",
+        "jsep": "^1.4.0"
+      },
+      "bin": {
+        "jsonpath": "bin/jsonpath-cli.js",
+        "jsonpath-plus": "bin/jsonpath-cli.js"
+      },
+      "engines": {
+        "node": ">=18.0.0"
+      }
+    },
+    "node_modules/jsonpointer": {
+      "version": "5.0.1",
+      "resolved": "https://registry.npmjs.org/jsonpointer/-/jsonpointer-5.0.1.tgz",
+      "integrity": "sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/leven": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz",
+      "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/lodash": {
+      "version": "4.17.23",
+      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
+      "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
+      "license": "MIT"
+    },
+    "node_modules/lodash.topath": {
+      "version": "4.5.2",
+      "resolved": "https://registry.npmjs.org/lodash.topath/-/lodash.topath-4.5.2.tgz",
+      "integrity": "sha512-1/W4dM+35DwvE/iEd1M9ekewOSTlpFekhw9mhAtrwjVqUr83/ilQiyAvmg4tVX7Unkcfl1KC+i9WdaT4B6aQcg==",
+      "license": "MIT"
+    },
+    "node_modules/long": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz",
+      "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==",
+      "license": "Apache-2.0"
+    },
+    "node_modules/magic-string": {
+      "version": "0.25.9",
+      "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.25.9.tgz",
+      "integrity": "sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==",
+      "license": "MIT",
+      "dependencies": {
+        "sourcemap-codec": "^1.4.8"
+      }
+    },
+    "node_modules/markdown-escape": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/markdown-escape/-/markdown-escape-2.0.0.tgz",
+      "integrity": "sha512-Trz4v0+XWlwy68LJIyw3bLbsJiC8XAbRCKF9DbEtZjyndKOGVx6n+wNB0VfoRmY2LKboQLeniap3xrb6LGSJ8A==",
+      "license": "MIT"
+    },
+    "node_modules/math-intrinsics": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+      "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/merge2": {
+      "version": "1.4.1",
+      "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+      "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 8"
+      }
+    },
+    "node_modules/micromatch": {
+      "version": "4.0.8",
+      "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+      "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
+      "license": "MIT",
+      "dependencies": {
+        "braces": "^3.0.3",
+        "picomatch": "^2.3.1"
+      },
+      "engines": {
+        "node": ">=8.6"
+      }
+    },
+    "node_modules/minimatch": {
+      "version": "3.1.2",
+      "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+      "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+      "license": "ISC",
+      "dependencies": {
+        "brace-expansion": "^1.1.7"
+      },
+      "engines": {
+        "node": "*"
+      }
+    },
+    "node_modules/nimma": {
+      "version": "0.2.3",
+      "resolved": "https://registry.npmjs.org/nimma/-/nimma-0.2.3.tgz",
+      "integrity": "sha512-1ZOI8J+1PKKGceo/5CT5GfQOG6H8I2BencSK06YarZ2wXwH37BSSUWldqJmMJYA5JfqDqffxDXynt6f11AyKcA==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@jsep-plugin/regex": "^1.0.1",
+        "@jsep-plugin/ternary": "^1.0.2",
+        "astring": "^1.8.1",
+        "jsep": "^1.2.0"
+      },
+      "engines": {
+        "node": "^12.20 || >=14.13"
+      },
+      "optionalDependencies": {
+        "jsonpath-plus": "^6.0.1 || ^10.1.0",
+        "lodash.topath": "^4.5.2"
+      }
+    },
+    "node_modules/node-fetch": {
+      "version": "2.7.0",
+      "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
+      "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
+      "license": "MIT",
+      "dependencies": {
+        "whatwg-url": "^5.0.0"
+      },
+      "engines": {
+        "node": "4.x || >=6.0.0"
+      },
+      "peerDependencies": {
+        "encoding": "^0.1.0"
+      },
+      "peerDependenciesMeta": {
+        "encoding": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/node-sarif-builder": {
+      "version": "2.0.3",
+      "resolved": "https://registry.npmjs.org/node-sarif-builder/-/node-sarif-builder-2.0.3.tgz",
+      "integrity": "sha512-Pzr3rol8fvhG/oJjIq2NTVB0vmdNNlz22FENhhPojYRZ4/ee08CfK4YuKmuL54V9MLhI1kpzxfOJ/63LzmZzDg==",
+      "license": "MIT",
+      "dependencies": {
+        "@types/sarif": "^2.1.4",
+        "fs-extra": "^10.0.0"
+      },
+      "engines": {
+        "node": ">=14"
+      }
+    },
+    "node_modules/object-inspect": {
+      "version": "1.13.4",
+      "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz",
+      "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/object-keys": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
+      "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/object.assign": {
+      "version": "4.1.7",
+      "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.7.tgz",
+      "integrity": "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind": "^1.0.8",
+        "call-bound": "^1.0.3",
+        "define-properties": "^1.2.1",
+        "es-object-atoms": "^1.0.0",
+        "has-symbols": "^1.1.0",
+        "object-keys": "^1.1.1"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/once": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+      "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
+      "license": "ISC",
+      "dependencies": {
+        "wrappy": "1"
+      }
+    },
+    "node_modules/own-keys": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz",
+      "integrity": "sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg==",
+      "license": "MIT",
+      "dependencies": {
+        "get-intrinsic": "^1.2.6",
+        "object-keys": "^1.1.1",
+        "safe-push-apply": "^1.0.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/path-is-absolute": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+      "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/path-parse": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
+      "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
+      "license": "MIT"
+    },
+    "node_modules/picomatch": {
+      "version": "2.3.2",
+      "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
+      "integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=8.6"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/jonschlinkert"
+      }
+    },
+    "node_modules/pony-cause": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/pony-cause/-/pony-cause-1.1.1.tgz",
+      "integrity": "sha512-PxkIc/2ZpLiEzQXu5YRDOUgBlfGYBY8156HY5ZcRAwwonMk5W/MrJP2LLkG/hF7GEQzaHo2aS7ho6ZLCOvf+6g==",
+      "license": "0BSD",
+      "engines": {
+        "node": ">=12.0.0"
+      }
+    },
+    "node_modules/possible-typed-array-names": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz",
+      "integrity": "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/printable-characters": {
+      "version": "1.0.42",
+      "resolved": "https://registry.npmjs.org/printable-characters/-/printable-characters-1.0.42.tgz",
+      "integrity": "sha512-dKp+C4iXWK4vVYZmYSd0KBH5F/h1HoZRsbJ82AVKRO3PEo8L4lBS/vLwhVtpwwuYcoIsVY+1JYKR268yn480uQ==",
+      "license": "Unlicense"
+    },
+    "node_modules/protobufjs": {
+      "version": "6.11.4",
+      "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.4.tgz",
+      "integrity": "sha512-5kQWPaJHi1WoCpjTGszzQ32PG2F4+wRY6BmAT4Vfw56Q2FZ4YZzK20xUYQH4YkfehY1e6QSICrJquM6xXZNcrw==",
+      "hasInstallScript": true,
+      "license": "BSD-3-Clause",
+      "dependencies": {
+        "@protobufjs/aspromise": "^1.1.2",
+        "@protobufjs/base64": "^1.1.2",
+        "@protobufjs/codegen": "^2.0.4",
+        "@protobufjs/eventemitter": "^1.1.0",
+        "@protobufjs/fetch": "^1.1.0",
+        "@protobufjs/float": "^1.0.2",
+        "@protobufjs/inquire": "^1.1.0",
+        "@protobufjs/path": "^1.1.2",
+        "@protobufjs/pool": "^1.1.0",
+        "@protobufjs/utf8": "^1.1.0",
+        "@types/long": "^4.0.1",
+        "@types/node": ">=13.7.0",
+        "long": "^4.0.0"
+      },
+      "bin": {
+        "pbjs": "bin/pbjs",
+        "pbts": "bin/pbts"
+      }
+    },
+    "node_modules/queue-microtask": {
+      "version": "1.2.3",
+      "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
+      "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/feross"
+        },
+        {
+          "type": "patreon",
+          "url": "https://www.patreon.com/feross"
+        },
+        {
+          "type": "consulting",
+          "url": "https://feross.org/support"
+        }
+      ],
+      "license": "MIT"
+    },
+    "node_modules/reflect.getprototypeof": {
+      "version": "1.0.10",
+      "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz",
+      "integrity": "sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind": "^1.0.8",
+        "define-properties": "^1.2.1",
+        "es-abstract": "^1.23.9",
+        "es-errors": "^1.3.0",
+        "es-object-atoms": "^1.0.0",
+        "get-intrinsic": "^1.2.7",
+        "get-proto": "^1.0.1",
+        "which-builtin-type": "^1.2.1"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/regexp.prototype.flags": {
+      "version": "1.5.4",
+      "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.4.tgz",
+      "integrity": "sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind": "^1.0.8",
+        "define-properties": "^1.2.1",
+        "es-errors": "^1.3.0",
+        "get-proto": "^1.0.1",
+        "gopd": "^1.2.0",
+        "set-function-name": "^2.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/require-directory": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
+      "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/require-from-string": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
+      "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/reserved": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/reserved/-/reserved-0.1.2.tgz",
+      "integrity": "sha512-/qO54MWj5L8WCBP9/UNe2iefJc+L9yETbH32xO/ft/EYPOTCR5k+azvDUgdCOKwZH8hXwPd0b8XBL78Nn2U69g==",
+      "engines": {
+        "node": ">=0.8"
+      }
+    },
+    "node_modules/resolve": {
+      "version": "1.22.11",
+      "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz",
+      "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==",
+      "license": "MIT",
+      "dependencies": {
+        "is-core-module": "^2.16.1",
+        "path-parse": "^1.0.7",
+        "supports-preserve-symlinks-flag": "^1.0.0"
+      },
+      "bin": {
+        "resolve": "bin/resolve"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/reusify": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz",
+      "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==",
+      "license": "MIT",
+      "engines": {
+        "iojs": ">=1.0.0",
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/rollup": {
+      "version": "2.79.2",
+      "resolved": "https://registry.npmjs.org/rollup/-/rollup-2.79.2.tgz",
+      "integrity": "sha512-fS6iqSPZDs3dr/y7Od6y5nha8dW1YnbgtsyotCVvoFGKbERG++CVRFv1meyGDE1SNItQA8BrnCw7ScdAhRJ3XQ==",
+      "license": "MIT",
+      "peer": true,
+      "bin": {
+        "rollup": "dist/bin/rollup"
+      },
+      "engines": {
+        "node": ">=10.0.0"
+      },
+      "optionalDependencies": {
+        "fsevents": "~2.3.2"
+      }
+    },
+    "node_modules/run-parallel": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
+      "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/feross"
+        },
+        {
+          "type": "patreon",
+          "url": "https://www.patreon.com/feross"
+        },
+        {
+          "type": "consulting",
+          "url": "https://feross.org/support"
+        }
+      ],
+      "license": "MIT",
+      "dependencies": {
+        "queue-microtask": "^1.2.2"
+      }
+    },
+    "node_modules/safe-array-concat": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz",
+      "integrity": "sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind": "^1.0.8",
+        "call-bound": "^1.0.2",
+        "get-intrinsic": "^1.2.6",
+        "has-symbols": "^1.1.0",
+        "isarray": "^2.0.5"
+      },
+      "engines": {
+        "node": ">=0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/safe-push-apply": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/safe-push-apply/-/safe-push-apply-1.0.0.tgz",
+      "integrity": "sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA==",
+      "license": "MIT",
+      "dependencies": {
+        "es-errors": "^1.3.0",
+        "isarray": "^2.0.5"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/safe-regex-test": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.1.0.tgz",
+      "integrity": "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.2",
+        "es-errors": "^1.3.0",
+        "is-regex": "^1.2.1"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/safe-stable-stringify": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/safe-stable-stringify/-/safe-stable-stringify-1.1.1.tgz",
+      "integrity": "sha512-ERq4hUjKDbJfE4+XtZLFPCDi8Vb1JqaxAPTxWFLBx8XcAlf9Bda/ZJdVezs/NAfsMQScyIlUMx+Yeu7P7rx5jw==",
+      "license": "MIT"
+    },
+    "node_modules/sax": {
+      "version": "1.6.0",
+      "resolved": "https://registry.npmjs.org/sax/-/sax-1.6.0.tgz",
+      "integrity": "sha512-6R3J5M4AcbtLUdZmRv2SygeVaM7IhrLXu9BmnOGmmACak8fiUtOsYNWUS4uK7upbmHIBbLBeFeI//477BKLBzA==",
+      "license": "BlueOak-1.0.0",
+      "engines": {
+        "node": ">=11.0.0"
+      }
+    },
+    "node_modules/set-function-length": {
+      "version": "1.2.2",
+      "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz",
+      "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==",
+      "license": "MIT",
+      "dependencies": {
+        "define-data-property": "^1.1.4",
+        "es-errors": "^1.3.0",
+        "function-bind": "^1.1.2",
+        "get-intrinsic": "^1.2.4",
+        "gopd": "^1.0.1",
+        "has-property-descriptors": "^1.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/set-function-name": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.2.tgz",
+      "integrity": "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==",
+      "license": "MIT",
+      "dependencies": {
+        "define-data-property": "^1.1.4",
+        "es-errors": "^1.3.0",
+        "functions-have-names": "^1.2.3",
+        "has-property-descriptors": "^1.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/set-proto": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/set-proto/-/set-proto-1.0.0.tgz",
+      "integrity": "sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw==",
+      "license": "MIT",
+      "dependencies": {
+        "dunder-proto": "^1.0.1",
+        "es-errors": "^1.3.0",
+        "es-object-atoms": "^1.0.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/side-channel": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz",
+      "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==",
+      "license": "MIT",
+      "dependencies": {
+        "es-errors": "^1.3.0",
+        "object-inspect": "^1.13.3",
+        "side-channel-list": "^1.0.0",
+        "side-channel-map": "^1.0.1",
+        "side-channel-weakmap": "^1.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/side-channel-list": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz",
+      "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==",
+      "license": "MIT",
+      "dependencies": {
+        "es-errors": "^1.3.0",
+        "object-inspect": "^1.13.3"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/side-channel-map": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz",
+      "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.2",
+        "es-errors": "^1.3.0",
+        "get-intrinsic": "^1.2.5",
+        "object-inspect": "^1.13.3"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/side-channel-weakmap": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz",
+      "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.2",
+        "es-errors": "^1.3.0",
+        "get-intrinsic": "^1.2.5",
+        "object-inspect": "^1.13.3",
+        "side-channel-map": "^1.0.1"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/simple-eval": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/simple-eval/-/simple-eval-1.0.1.tgz",
+      "integrity": "sha512-LH7FpTAkeD+y5xQC4fzS+tFtaNlvt3Ib1zKzvhjv/Y+cioV4zIuw4IZr2yhRLu67CWL7FR9/6KXKnjRoZTvGGQ==",
+      "license": "MIT",
+      "dependencies": {
+        "jsep": "^1.3.6"
+      },
+      "engines": {
+        "node": ">=12"
+      }
+    },
+    "node_modules/source-map": {
+      "version": "0.6.1",
+      "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+      "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+      "license": "BSD-3-Clause",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/source-map-support": {
+      "version": "0.5.21",
+      "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz",
+      "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==",
+      "license": "MIT",
+      "dependencies": {
+        "buffer-from": "^1.0.0",
+        "source-map": "^0.6.0"
+      }
+    },
+    "node_modules/sourcemap-codec": {
+      "version": "1.4.8",
+      "resolved": "https://registry.npmjs.org/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz",
+      "integrity": "sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==",
+      "deprecated": "Please use @jridgewell/sourcemap-codec instead",
+      "license": "MIT"
+    },
+    "node_modules/stacktracey": {
+      "version": "2.2.0",
+      "resolved": "https://registry.npmjs.org/stacktracey/-/stacktracey-2.2.0.tgz",
+      "integrity": "sha512-ETyQEz+CzXiLjEbyJqpbp+/T79RQD/6wqFucRBIlVNZfYq2Ay7wbretD4cxpbymZlaPWx58aIhPEY1Cr8DlVvg==",
+      "license": "Unlicense",
+      "dependencies": {
+        "as-table": "^1.0.36",
+        "get-source": "^2.0.12"
+      }
+    },
+    "node_modules/stop-iteration-iterator": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.1.0.tgz",
+      "integrity": "sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ==",
+      "license": "MIT",
+      "dependencies": {
+        "es-errors": "^1.3.0",
+        "internal-slot": "^1.1.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/string-width": {
+      "version": "4.2.3",
+      "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+      "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+      "license": "MIT",
+      "dependencies": {
+        "emoji-regex": "^8.0.0",
+        "is-fullwidth-code-point": "^3.0.0",
+        "strip-ansi": "^6.0.1"
+      },
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/string.prototype.trim": {
+      "version": "1.2.10",
+      "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.10.tgz",
+      "integrity": "sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind": "^1.0.8",
+        "call-bound": "^1.0.2",
+        "define-data-property": "^1.1.4",
+        "define-properties": "^1.2.1",
+        "es-abstract": "^1.23.5",
+        "es-object-atoms": "^1.0.0",
+        "has-property-descriptors": "^1.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/string.prototype.trimend": {
+      "version": "1.0.9",
+      "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.9.tgz",
+      "integrity": "sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind": "^1.0.8",
+        "call-bound": "^1.0.2",
+        "define-properties": "^1.2.1",
+        "es-object-atoms": "^1.0.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/string.prototype.trimstart": {
+      "version": "1.0.8",
+      "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz",
+      "integrity": "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind": "^1.0.7",
+        "define-properties": "^1.2.1",
+        "es-object-atoms": "^1.0.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/strip-ansi": {
+      "version": "6.0.1",
+      "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+      "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+      "license": "MIT",
+      "dependencies": {
+        "ansi-regex": "^5.0.1"
+      },
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/strip-json-comments": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.0.1.tgz",
+      "integrity": "sha512-VTyMAUfdm047mwKl+u79WIdrZxtFtn+nBxHeb844XBQ9uMNTuTHdx2hc5RiAJYqwTj3wc/xe5HLSdJSkJ+WfZw==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/supports-color": {
+      "version": "7.2.0",
+      "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+      "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+      "license": "MIT",
+      "dependencies": {
+        "has-flag": "^4.0.0"
+      },
+      "engines": {
+        "node": ">=8"
+      }
+    },
+    "node_modules/supports-preserve-symlinks-flag": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
+      "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/text-table": {
+      "version": "0.2.0",
+      "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
+      "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==",
+      "license": "MIT"
+    },
+    "node_modules/to-regex-range": {
+      "version": "5.0.1",
+      "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+      "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+      "license": "MIT",
+      "dependencies": {
+        "is-number": "^7.0.0"
+      },
+      "engines": {
+        "node": ">=8.0"
+      }
+    },
+    "node_modules/tr46": {
+      "version": "0.0.3",
+      "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
+      "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
+      "license": "MIT"
+    },
+    "node_modules/tslib": {
+      "version": "2.8.1",
+      "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
+      "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
+      "license": "0BSD"
+    },
+    "node_modules/typed-array-buffer": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz",
+      "integrity": "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3",
+        "es-errors": "^1.3.0",
+        "is-typed-array": "^1.1.14"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/typed-array-byte-length": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.3.tgz",
+      "integrity": "sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind": "^1.0.8",
+        "for-each": "^0.3.3",
+        "gopd": "^1.2.0",
+        "has-proto": "^1.2.0",
+        "is-typed-array": "^1.1.14"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/typed-array-byte-offset": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.4.tgz",
+      "integrity": "sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ==",
+      "license": "MIT",
+      "dependencies": {
+        "available-typed-arrays": "^1.0.7",
+        "call-bind": "^1.0.8",
+        "for-each": "^0.3.3",
+        "gopd": "^1.2.0",
+        "has-proto": "^1.2.0",
+        "is-typed-array": "^1.1.15",
+        "reflect.getprototypeof": "^1.0.9"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/typed-array-length": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.7.tgz",
+      "integrity": "sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bind": "^1.0.7",
+        "for-each": "^0.3.3",
+        "gopd": "^1.0.1",
+        "is-typed-array": "^1.1.13",
+        "possible-typed-array-names": "^1.0.0",
+        "reflect.getprototypeof": "^1.0.6"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/unbox-primitive": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.1.0.tgz",
+      "integrity": "sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3",
+        "has-bigints": "^1.0.2",
+        "has-symbols": "^1.1.0",
+        "which-boxed-primitive": "^1.1.1"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/undici-types": {
+      "version": "7.18.2",
+      "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.18.2.tgz",
+      "integrity": "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==",
+      "license": "MIT"
+    },
+    "node_modules/universalify": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
+      "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 10.0.0"
+      }
+    },
+    "node_modules/urijs": {
+      "version": "1.19.11",
+      "resolved": "https://registry.npmjs.org/urijs/-/urijs-1.19.11.tgz",
+      "integrity": "sha512-HXgFDgDommxn5/bIv0cnQZsPhHDA90NPHD6+c/v21U5+Sx5hoP8+dP9IZXBU1gIfvdRfhG8cel9QNPeionfcCQ==",
+      "license": "MIT"
+    },
+    "node_modules/utility-types": {
+      "version": "3.11.0",
+      "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.11.0.tgz",
+      "integrity": "sha512-6Z7Ma2aVEWisaL6TvBCy7P8rm2LQoPv6dJ7ecIaIixHcwfbJ0x7mWdbcwlIM5IGQxPZSFYeqRCqlOOeKoJYMkw==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 4"
+      }
+    },
+    "node_modules/uuid": {
+      "version": "3.4.0",
+      "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz",
+      "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==",
+      "deprecated": "Please upgrade  to version 7 or higher.  Older versions may use Math.random() in certain circumstances, which is known to be problematic.  See https://v8.dev/blog/math-random for details.",
+      "license": "MIT",
+      "bin": {
+        "uuid": "bin/uuid"
+      }
+    },
+    "node_modules/validate-npm-package-name": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz",
+      "integrity": "sha512-M6w37eVCMMouJ9V/sdPGnC5H4uDr73/+xdq0FBLO3TFFX1+7wiUY6Es328NN+y43tmY+doUdN9g9J21vqB7iLw==",
+      "license": "ISC",
+      "dependencies": {
+        "builtins": "^1.0.3"
+      }
+    },
+    "node_modules/webidl-conversions": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
+      "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
+      "license": "BSD-2-Clause"
+    },
+    "node_modules/whatwg-url": {
+      "version": "5.0.0",
+      "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
+      "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
+      "license": "MIT",
+      "dependencies": {
+        "tr46": "~0.0.3",
+        "webidl-conversions": "^3.0.0"
+      }
+    },
+    "node_modules/which-boxed-primitive": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.1.1.tgz",
+      "integrity": "sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA==",
+      "license": "MIT",
+      "dependencies": {
+        "is-bigint": "^1.1.0",
+        "is-boolean-object": "^1.2.1",
+        "is-number-object": "^1.1.1",
+        "is-string": "^1.1.1",
+        "is-symbol": "^1.1.1"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/which-builtin-type": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.2.1.tgz",
+      "integrity": "sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.2",
+        "function.prototype.name": "^1.1.6",
+        "has-tostringtag": "^1.0.2",
+        "is-async-function": "^2.0.0",
+        "is-date-object": "^1.1.0",
+        "is-finalizationregistry": "^1.1.0",
+        "is-generator-function": "^1.0.10",
+        "is-regex": "^1.2.1",
+        "is-weakref": "^1.0.2",
+        "isarray": "^2.0.5",
+        "which-boxed-primitive": "^1.1.0",
+        "which-collection": "^1.0.2",
+        "which-typed-array": "^1.1.16"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/which-collection": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz",
+      "integrity": "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==",
+      "license": "MIT",
+      "dependencies": {
+        "is-map": "^2.0.3",
+        "is-set": "^2.0.3",
+        "is-weakmap": "^2.0.2",
+        "is-weakset": "^2.0.3"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/which-typed-array": {
+      "version": "1.1.20",
+      "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.20.tgz",
+      "integrity": "sha512-LYfpUkmqwl0h9A2HL09Mms427Q1RZWuOHsukfVcKRq9q95iQxdw0ix1JQrqbcDR9PH1QDwf5Qo8OZb5lksZ8Xg==",
+      "license": "MIT",
+      "dependencies": {
+        "available-typed-arrays": "^1.0.7",
+        "call-bind": "^1.0.8",
+        "call-bound": "^1.0.4",
+        "for-each": "^0.3.5",
+        "get-proto": "^1.0.1",
+        "gopd": "^1.2.0",
+        "has-tostringtag": "^1.0.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/wrap-ansi": {
+      "version": "7.0.0",
+      "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+      "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+      "license": "MIT",
+      "dependencies": {
+        "ansi-styles": "^4.0.0",
+        "string-width": "^4.1.0",
+        "strip-ansi": "^6.0.0"
+      },
+      "engines": {
+        "node": ">=10"
+      },
+      "funding": {
+        "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+      }
+    },
+    "node_modules/wrappy": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+      "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
+      "license": "ISC"
+    },
+    "node_modules/xml-js": {
+      "version": "1.6.11",
+      "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz",
+      "integrity": "sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==",
+      "license": "MIT",
+      "dependencies": {
+        "sax": "^1.2.4"
+      },
+      "bin": {
+        "xml-js": "bin/cli.js"
+      }
+    },
+    "node_modules/y18n": {
+      "version": "5.0.8",
+      "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
+      "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
+      "license": "ISC",
+      "engines": {
+        "node": ">=10"
+      }
+    },
+    "node_modules/yargs": {
+      "version": "17.7.2",
+      "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
+      "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
+      "license": "MIT",
+      "dependencies": {
+        "cliui": "^8.0.1",
+        "escalade": "^3.1.1",
+        "get-caller-file": "^2.0.5",
+        "require-directory": "^2.1.1",
+        "string-width": "^4.2.3",
+        "y18n": "^5.0.5",
+        "yargs-parser": "^21.1.1"
+      },
+      "engines": {
+        "node": ">=12"
+      }
+    },
+    "node_modules/yargs-parser": {
+      "version": "21.1.1",
+      "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
+      "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
+      "license": "ISC",
+      "engines": {
+        "node": ">=12"
+      }
+    },
+    "node_modules/yargs/node_modules/cliui": {
+      "version": "8.0.1",
+      "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
+      "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
+      "license": "ISC",
+      "dependencies": {
+        "string-width": "^4.2.0",
+        "strip-ansi": "^6.0.1",
+        "wrap-ansi": "^7.0.0"
+      },
+      "engines": {
+        "node": ">=12"
+      }
+    }
+  }
+}
diff --git a/validation/package.json b/validation/package.json
new file mode 100644
index 00000000..77e143ed
--- /dev/null
+++ b/validation/package.json
@@ -0,0 +1,9 @@
+{
+  "name": "camara-validation-tools",
+  "private": true,
+  "description": "Node.js tool dependencies for CAMARA validation framework",
+  "dependencies": {
+    "@stoplight/spectral-cli": "^6.14.0",
+    "gherkin-lint": "^4.2.4"
+  }
+}
diff --git a/validation/tests/test_orchestrator.py b/validation/tests/test_orchestrator.py
new file mode 100644
index 00000000..81e03e51
--- /dev/null
+++ b/validation/tests/test_orchestrator.py
@@ -0,0 +1,666 @@
+"""Unit tests for validation.orchestrator."""
+
+from __future__ import annotations
+
+import dataclasses
+import json
+from pathlib import Path
+from typing import Dict, List, Optional
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+from validation.output.commit_status import CommitStatusPayload
+from validation.orchestrator import (
+    EXIT_INFRA_ERROR,
+    EXIT_OK,
+    OrchestratorArgs,
+    ToolingPaths,
+    discover_spec_files,
+    discover_test_files,
+    main,
+    parse_args,
+    resolve_tooling_paths,
+    run_engines,
+    write_outputs,
+    write_result_json,
+    write_skip_output,
+)
+
+
+# ---------------------------------------------------------------------------
+# Fixtures
+# ---------------------------------------------------------------------------
+
+
+@pytest.fixture
+def args():
+    """Default orchestrator args."""
+    return OrchestratorArgs(
+        repo_path=Path("/repo"),
+        tooling_path=Path("/tooling"),
+        output_dir=Path("/output"),
+        repo_name="camaraproject/QualityOnDemand",
+        repo_owner="camaraproject",
+        event_name="pull_request",
+        ref_name="refs/heads/main",
+        base_ref="main",
+        mode="",
+        profile="",
+        pr_number=42,
+        release_plan_changed=False,
+        workflow_run_url="https://github.com/example/runs/1",
+        tooling_ref="abc123",
+        commit_sha="def456",
+    )
+
+
+@pytest.fixture
+def paths():
+    """Default tooling paths."""
+    return resolve_tooling_paths(Path("/tooling"))
+
+
+def _make_finding(
+    engine: str = "spectral",
+    engine_rule: str = "some-rule",
+    level: str = "warn",
+    message: str = "Something is wrong",
+    path: str = "code/API_definitions/quality-on-demand.yaml",
+    line: int = 10,
+    api_name: str | None = "quality-on-demand",
+    blocks: bool = False,
+) -> dict:
+    return {
+        "engine": engine,
+        "engine_rule": engine_rule,
+        "level": level,
+        "message": message,
+        "path": path,
+        "line": line,
+        "api_name": api_name,
+        "blocks": blocks,
+    }
+
+
+def _make_context(**overrides):
+    """Create a mock ValidationContext."""
+    defaults = {
+        "repository": "camaraproject/QualityOnDemand",
+        "branch_type": "main",
+        "trigger_type": "pr",
+        "profile": "standard",
+        "stage": "standard",
+        "target_release_type": None,
+        "commonalities_release": None,
+        "icm_release": None,
+        "is_release_review_pr": False,
+        "release_plan_changed": False,
+        "pr_number": 42,
+        "apis": (),
+        "workflow_run_url": "https://github.com/example/runs/1",
+        "tooling_ref": "abc123",
+    }
+    defaults.update(overrides)
+    ctx = MagicMock()
+    for k, v in defaults.items():
+        setattr(ctx, k, v)
+    ctx.to_dict.return_value = defaults
+    return ctx
+
+
+def _make_post_filter_result(
+    result: str = "pass",
+    summary: str = "All checks passed",
+    findings: list | None = None,
+):
+    """Create a mock PostFilterResult."""
+    mock = MagicMock()
+    mock.result = result
+    mock.summary = summary
+    mock.findings = findings or []
+    return mock
+
+
+# ---------------------------------------------------------------------------
+# TestParseArgs
+# ---------------------------------------------------------------------------
+
+
+class TestParseArgs:
+    """Tests for environment variable parsing."""
+
+    def test_defaults_when_no_env_vars(self):
+        with patch.dict("os.environ", {}, clear=True):
+            result = parse_args()
+        assert result.repo_path == Path(".")
+        assert result.tooling_path == Path(".tooling")
+        assert result.output_dir == Path("validation-output")
+        assert result.repo_name == ""
+        assert result.pr_number is None
+        assert result.release_plan_changed is None
+
+    def test_all_env_vars_set(self):
+        env = {
+            "VALIDATION_REPO_PATH": "/my/repo",
+            "VALIDATION_TOOLING_PATH": "/my/tooling",
+            "VALIDATION_OUTPUT_DIR": "/my/output",
+            "VALIDATION_REPO_NAME": "camaraproject/QoD",
+            "VALIDATION_REPO_OWNER": "camaraproject",
+            "VALIDATION_EVENT_NAME": "pull_request",
+            "VALIDATION_REF_NAME": "refs/heads/feature/foo",
+            "VALIDATION_BASE_REF": "main",
+            "VALIDATION_MODE": "pre-snapshot",
+            "VALIDATION_PROFILE": "strict",
+            "VALIDATION_PR_NUMBER": "42",
+            "VALIDATION_RELEASE_PLAN_CHANGED": "true",
+            "VALIDATION_WORKFLOW_RUN_URL": "https://example.com/runs/1",
+            "VALIDATION_TOOLING_REF": "abc123",
+            "VALIDATION_COMMIT_SHA": "def456",
+        }
+        with patch.dict("os.environ", env, clear=True):
+            result = parse_args()
+        assert result.repo_path == Path("/my/repo")
+        assert result.repo_name == "camaraproject/QoD"
+        assert result.mode == "pre-snapshot"
+        assert result.profile == "strict"
+        assert result.pr_number == 42
+        assert result.release_plan_changed is True
+        assert result.commit_sha == "def456"
+
+    def test_pr_number_non_numeric(self):
+        env = {"VALIDATION_PR_NUMBER": "not-a-number"}
+        with patch.dict("os.environ", env, clear=True):
+            result = parse_args()
+        assert result.pr_number is None
+
+    def test_release_plan_changed_false(self):
+        env = {"VALIDATION_RELEASE_PLAN_CHANGED": "false"}
+        with patch.dict("os.environ", env, clear=True):
+            result = parse_args()
+        assert result.release_plan_changed is False
+
+    def test_release_plan_changed_empty(self):
+        with patch.dict("os.environ", {}, clear=True):
+            result = parse_args()
+        assert result.release_plan_changed is None
+
+
+# ---------------------------------------------------------------------------
+# TestResolveToolingPaths
+# ---------------------------------------------------------------------------
+
+
+class TestResolveToolingPaths:
+    """Tests for path resolution within tooling checkout."""
+
+    def test_paths_resolved(self):
+        result = resolve_tooling_paths(Path("/tooling"))
+        assert result.config_file == Path("/tooling/validation/config/validation-config.yaml")
+        assert result.config_schema == Path("/tooling/validation/schemas/validation-config-schema.yaml")
+        assert result.release_plan_schema == Path("/tooling/validation/schemas/release-plan-schema.yaml")
+        assert result.linting_config_dir == Path("/tooling/linting/config")
+        assert result.rules_dir == Path("/tooling/validation/rules")
+
+
+# ---------------------------------------------------------------------------
+# TestDiscoverFiles
+# ---------------------------------------------------------------------------
+
+
+class TestDiscoverFiles:
+    """Tests for spec and test file discovery."""
+
+    def test_discover_spec_files(self, tmp_path):
+        api_dir = tmp_path / "code" / "API_definitions"
+        api_dir.mkdir(parents=True)
+        (api_dir / "quality-on-demand.yaml").write_text("openapi: 3.0.0")
+        (api_dir / "device-location.yaml").write_text("openapi: 3.0.0")
+        (api_dir / "README.md").write_text("Not a spec")  # should not match
+
+        result = discover_spec_files(tmp_path)
+        assert len(result) == 2
+        assert all(p.suffix == ".yaml" for p in result)
+        # sorted alphabetically
+        assert result[0].name == "device-location.yaml"
+        assert result[1].name == "quality-on-demand.yaml"
+
+    def test_discover_spec_files_empty(self, tmp_path):
+        result = discover_spec_files(tmp_path)
+        assert result == []
+
+    def test_discover_test_files(self, tmp_path):
+        test_dir = tmp_path / "code" / "Test_definitions"
+        test_dir.mkdir(parents=True)
+        (test_dir / "quality-on-demand.feature").write_text("Feature: QoD")
+        sub = test_dir / "subfolder"
+        sub.mkdir()
+        (sub / "nested.feature").write_text("Feature: Nested")
+
+        result = discover_test_files(tmp_path)
+        assert len(result) == 2
+
+    def test_discover_test_files_empty(self, tmp_path):
+        result = discover_test_files(tmp_path)
+        assert result == []
+
+
+# ---------------------------------------------------------------------------
+# TestRunEngines
+# ---------------------------------------------------------------------------
+
+
+class TestRunEngines:
+    """Tests for engine orchestration."""
+
+    @patch("validation.orchestrator.run_gherkin_engine")
+    @patch("validation.orchestrator.run_python_engine")
+    @patch("validation.orchestrator.run_spectral_engine")
+    @patch("validation.orchestrator.run_yamllint_engine")
+    def test_all_engines_called(
+        self, mock_yamllint, mock_spectral, mock_python, mock_gherkin, paths
+    ):
+        mock_yamllint.return_value = [_make_finding(engine="yamllint")]
+        mock_spectral.return_value = [_make_finding(engine="spectral")]
+        mock_python.return_value = [_make_finding(engine="python")]
+        mock_gherkin.return_value = [_make_finding(engine="gherkin")]
+        context = _make_context()
+        test_files = [Path("/repo/code/Test_definitions/test.feature")]
+
+        findings, statuses = run_engines(Path("/repo"), paths, context, test_files)
+
+        assert len(findings) == 4
+        assert mock_yamllint.called
+        assert mock_spectral.called
+        assert mock_python.called
+        assert mock_gherkin.called
+        assert "finding(s)" in statuses["yamllint"]
+        assert "finding(s)" in statuses["spectral"]
+        assert "finding(s)" in statuses["python"]
+        assert "finding(s)" in statuses["gherkin"]
+        assert statuses["bundling"] == "not yet implemented"
+
+    @patch("validation.orchestrator.run_gherkin_engine")
+    @patch("validation.orchestrator.run_python_engine")
+    @patch("validation.orchestrator.run_spectral_engine")
+    @patch("validation.orchestrator.run_yamllint_engine")
+    def test_release_review_skips_yamllint_and_spectral(
+        self, mock_yamllint, mock_spectral, mock_python, mock_gherkin, paths
+    ):
+        mock_python.return_value = []
+        mock_gherkin.return_value = []
+        context = _make_context(is_release_review_pr=True)
+        test_files = [Path("/repo/code/Test_definitions/test.feature")]
+
+        findings, statuses = run_engines(Path("/repo"), paths, context, test_files)
+
+        assert not mock_yamllint.called
+        assert not mock_spectral.called
+        assert "skipped" in statuses["yamllint"]
+        assert "skipped" in statuses["spectral"]
+
+    @patch("validation.orchestrator.run_gherkin_engine")
+    @patch("validation.orchestrator.run_python_engine")
+    @patch("validation.orchestrator.run_spectral_engine")
+    @patch("validation.orchestrator.run_yamllint_engine")
+    def test_no_test_files_skips_gherkin(
+        self, mock_yamllint, mock_spectral, mock_python, mock_gherkin, paths
+    ):
+        mock_yamllint.return_value = []
+        mock_spectral.return_value = []
+        mock_python.return_value = []
+        context = _make_context()
+
+        findings, statuses = run_engines(Path("/repo"), paths, context, test_files=[])
+
+        assert not mock_gherkin.called
+        assert "skipped" in statuses["gherkin"]
+
+    @patch("validation.orchestrator.run_python_engine")
+    @patch("validation.orchestrator.run_spectral_engine")
+    @patch("validation.orchestrator.run_yamllint_engine")
+    def test_engine_exception_captured(
+        self, mock_yamllint, mock_spectral, mock_python, paths
+    ):
+        mock_yamllint.side_effect = RuntimeError("yamllint boom")
+        mock_spectral.return_value = []
+        mock_python.return_value = []
+        context = _make_context()
+
+        findings, statuses = run_engines(Path("/repo"), paths, context, test_files=[])
+
+        assert "error:" in statuses["yamllint"]
+        assert "finding(s)" in statuses["spectral"]
+
+
+# ---------------------------------------------------------------------------
+# TestWriteOutputs
+# ---------------------------------------------------------------------------
+
+
+class TestWriteResultJson:
+    """Tests for result.json writing."""
+
+    def test_pass(self, tmp_path):
+        write_result_json(tmp_path, "pass", "All checks passed")
+        data = json.loads((tmp_path / "result.json").read_text())
+        assert data["result"] == "pass"
+        assert data["should_fail"] is False
+
+    def test_fail(self, tmp_path):
+        write_result_json(tmp_path, "fail", "2 errors")
+        data = json.loads((tmp_path / "result.json").read_text())
+        assert data["result"] == "fail"
+        assert data["should_fail"] is True
+
+    def test_error(self, tmp_path):
+        write_result_json(tmp_path, "error", "Engine crashed")
+        data = json.loads((tmp_path / "result.json").read_text())
+        assert data["result"] == "error"
+        assert data["should_fail"] is True
+
+
+class TestWriteSkipOutput:
+    """Tests for skip output writing."""
+
+    def test_creates_output_dir(self, tmp_path):
+        out = tmp_path / "nested" / "output"
+        write_skip_output(out, "Validation disabled")
+        assert out.exists()
+        assert (out / "summary.md").exists()
+        assert (out / "result.json").exists()
+
+    def test_skip_reason_in_summary(self, tmp_path):
+        write_skip_output(tmp_path, "Validation is advisory — use dispatch")
+        content = (tmp_path / "summary.md").read_text()
+        assert "advisory" in content
+
+    def test_skip_result_json(self, tmp_path):
+        write_skip_output(tmp_path, "Disabled")
+        data = json.loads((tmp_path / "result.json").read_text())
+        assert data["result"] == "skipped"
+        assert data["should_fail"] is False
+
+
+class TestWriteOutputs:
+    """Tests for full output writing."""
+
+    @patch("validation.orchestrator.write_diagnostics")
+    @patch("validation.orchestrator.generate_commit_status")
+    @patch("validation.orchestrator.generate_pr_comment")
+    @patch("validation.orchestrator.generate_workflow_summary")
+    @patch("validation.orchestrator.generate_annotations")
+    def test_all_files_written(
+        self,
+        mock_annotations,
+        mock_summary,
+        mock_pr_comment,
+        mock_commit_status,
+        mock_diagnostics,
+        tmp_path,
+    ):
+        # Setup mocks
+        mock_annotations.return_value = MagicMock(
+            commands=["::warning file=a.yaml::msg"],
+            total_findings=1,
+            annotations_emitted=1,
+            truncated=False,
+        )
+        mock_summary.return_value = MagicMock(
+            markdown="# Summary\nAll good",
+            truncated=False,
+            truncation_note="",
+        )
+        mock_pr_comment.return_value = "\nAll good"
+        mock_commit_status.return_value = CommitStatusPayload(
+            state="success",
+            description="All checks passed",
+            context="CAMARA Validation",
+            target_url="https://example.com",
+        )
+        mock_diagnostics.return_value = []
+
+        pfr = _make_post_filter_result()
+        ctx = _make_context()
+
+        write_outputs(pfr, ctx, tmp_path, {"spectral": "0 finding(s)"}, "abc123")
+
+        # Verify files
+        assert (tmp_path / "annotations.txt").exists()
+        assert "::warning" in (tmp_path / "annotations.txt").read_text()
+        assert (tmp_path / "summary.md").exists()
+        assert (tmp_path / "pr-comment.md").exists()
+        assert "camara-validation" in (tmp_path / "pr-comment.md").read_text()
+        assert (tmp_path / "commit-status.json").exists()
+        status = json.loads((tmp_path / "commit-status.json").read_text())
+        assert status["state"] == "success"
+        assert (tmp_path / "result.json").exists()
+
+    @patch("validation.orchestrator.write_diagnostics")
+    @patch("validation.orchestrator.generate_commit_status")
+    @patch("validation.orchestrator.generate_pr_comment")
+    @patch("validation.orchestrator.generate_workflow_summary")
+    @patch("validation.orchestrator.generate_annotations")
+    def test_no_annotations_file_when_empty(
+        self,
+        mock_annotations,
+        mock_summary,
+        mock_pr_comment,
+        mock_commit_status,
+        mock_diagnostics,
+        tmp_path,
+    ):
+        mock_annotations.return_value = MagicMock(
+            commands=[],
+            total_findings=0,
+            annotations_emitted=0,
+            truncated=False,
+        )
+        mock_summary.return_value = MagicMock(markdown="# ok", truncated=False, truncation_note="")
+        mock_pr_comment.return_value = "ok"
+        mock_commit_status.return_value = CommitStatusPayload(
+            state="success", description="ok", context="test", target_url=""
+        )
+        mock_diagnostics.return_value = []
+
+        write_outputs(
+            _make_post_filter_result(), _make_context(), tmp_path, {}, ""
+        )
+
+        assert not (tmp_path / "annotations.txt").exists()
+
+    @patch("validation.orchestrator.write_diagnostics")
+    @patch("validation.orchestrator.generate_commit_status")
+    @patch("validation.orchestrator.generate_pr_comment")
+    @patch("validation.orchestrator.generate_workflow_summary")
+    @patch("validation.orchestrator.generate_annotations")
+    def test_creates_output_dir(
+        self,
+        mock_annotations,
+        mock_summary,
+        mock_pr_comment,
+        mock_commit_status,
+        mock_diagnostics,
+        tmp_path,
+    ):
+        out = tmp_path / "nested" / "output"
+        mock_annotations.return_value = MagicMock(
+            commands=[], total_findings=0, annotations_emitted=0, truncated=False
+        )
+        mock_summary.return_value = MagicMock(markdown="ok", truncated=False, truncation_note="")
+        mock_pr_comment.return_value = "ok"
+        mock_commit_status.return_value = CommitStatusPayload(
+            state="success", description="ok", context="test", target_url=""
+        )
+        mock_diagnostics.return_value = []
+
+        write_outputs(
+            _make_post_filter_result(), _make_context(), out, {}, ""
+        )
+
+        assert out.exists()
+        assert (out / "result.json").exists()
+
+
+# ---------------------------------------------------------------------------
+# TestMainPipeline
+# ---------------------------------------------------------------------------
+
+
+class TestMainPipeline:
+    """Integration tests for the main() pipeline."""
+
+    def _set_env(self, tmp_path, **overrides):
+        """Return env dict for a standard pipeline run."""
+        env = {
+            "VALIDATION_REPO_PATH": str(tmp_path / "repo"),
+            "VALIDATION_TOOLING_PATH": str(tmp_path / "tooling"),
+            "VALIDATION_OUTPUT_DIR": str(tmp_path / "output"),
+            "VALIDATION_REPO_NAME": "camaraproject/QualityOnDemand",
+            "VALIDATION_REPO_OWNER": "camaraproject",
+            "VALIDATION_EVENT_NAME": "pull_request",
+            "VALIDATION_REF_NAME": "refs/heads/main",
+            "VALIDATION_BASE_REF": "main",
+            "VALIDATION_MODE": "",
+            "VALIDATION_PROFILE": "",
+            "VALIDATION_PR_NUMBER": "42",
+            "VALIDATION_RELEASE_PLAN_CHANGED": "false",
+            "VALIDATION_WORKFLOW_RUN_URL": "https://example.com/runs/1",
+            "VALIDATION_TOOLING_REF": "abc123",
+            "VALIDATION_COMMIT_SHA": "def456",
+        }
+        env.update(overrides)
+        return env
+
+    @patch("validation.orchestrator.run_post_filter")
+    @patch("validation.orchestrator.run_engines")
+    @patch("validation.orchestrator.build_validation_context")
+    @patch("validation.orchestrator.resolve_stage_from_files")
+    def test_full_pipeline_pass(
+        self, mock_gate, mock_context, mock_engines, mock_postfilter, tmp_path
+    ):
+        env = self._set_env(tmp_path)
+        (tmp_path / "repo").mkdir()
+        (tmp_path / "tooling").mkdir()
+
+        mock_gate.return_value = MagicMock(
+            stage="standard",
+            should_continue=True,
+            is_fork=False,
+            fork_override_applied=False,
+            reason="",
+        )
+        ctx = _make_context()
+        mock_context.return_value = ctx
+        mock_engines.return_value = ([], {"spectral": "0 finding(s)"})
+        mock_postfilter.return_value = _make_post_filter_result(
+            result="pass", summary="All checks passed", findings=[]
+        )
+
+        with patch.dict("os.environ", env, clear=True):
+            exit_code = main()
+
+        assert exit_code == EXIT_OK
+        result_file = tmp_path / "output" / "result.json"
+        assert result_file.exists()
+        data = json.loads(result_file.read_text())
+        assert data["result"] == "pass"
+        assert data["should_fail"] is False
+
+    @patch("validation.orchestrator.run_post_filter")
+    @patch("validation.orchestrator.run_engines")
+    @patch("validation.orchestrator.build_validation_context")
+    @patch("validation.orchestrator.resolve_stage_from_files")
+    def test_full_pipeline_fail(
+        self, mock_gate, mock_context, mock_engines, mock_postfilter, tmp_path
+    ):
+        env = self._set_env(tmp_path)
+        (tmp_path / "repo").mkdir()
+        (tmp_path / "tooling").mkdir()
+
+        mock_gate.return_value = MagicMock(
+            stage="standard", should_continue=True, is_fork=False,
+            fork_override_applied=False, reason="",
+        )
+        mock_context.return_value = _make_context()
+        findings = [_make_finding(level="error", blocks=True)]
+        mock_engines.return_value = (findings, {"spectral": "1 finding(s)"})
+        mock_postfilter.return_value = _make_post_filter_result(
+            result="fail", summary="1 error", findings=findings
+        )
+
+        with patch.dict("os.environ", env, clear=True):
+            exit_code = main()
+
+        assert exit_code == EXIT_OK  # orchestrator always returns 0
+        data = json.loads((tmp_path / "output" / "result.json").read_text())
+        assert data["result"] == "fail"
+        assert data["should_fail"] is True
+
+    @patch("validation.orchestrator.resolve_stage_from_files")
+    def test_config_gate_skip(self, mock_gate, tmp_path):
+        env = self._set_env(tmp_path)
+        (tmp_path / "repo").mkdir()
+        (tmp_path / "tooling").mkdir()
+
+        mock_gate.return_value = MagicMock(
+            stage="disabled", should_continue=False,
+            is_fork=False, fork_override_applied=False,
+            reason="Validation is not enabled for this repository",
+        )
+
+        with patch.dict("os.environ", env, clear=True):
+            exit_code = main()
+
+        assert exit_code == EXIT_OK
+        data = json.loads((tmp_path / "output" / "result.json").read_text())
+        assert data["result"] == "skipped"
+        assert data["should_fail"] is False
+
+    @patch("validation.orchestrator.run_post_filter")
+    @patch("validation.orchestrator.run_engines")
+    @patch("validation.orchestrator.build_validation_context")
+    @patch("validation.orchestrator.resolve_stage_from_files")
+    def test_engine_statuses_passed_to_summary(
+        self, mock_gate, mock_context, mock_engines, mock_postfilter, tmp_path
+    ):
+        env = self._set_env(tmp_path)
+        (tmp_path / "repo").mkdir()
+        (tmp_path / "tooling").mkdir()
+
+        mock_gate.return_value = MagicMock(
+            stage="standard", should_continue=True, is_fork=False,
+            fork_override_applied=False, reason="",
+        )
+        mock_context.return_value = _make_context()
+        statuses = {
+            "yamllint": "2 finding(s)",
+            "spectral": "3 finding(s)",
+            "python": "0 finding(s)",
+            "gherkin": "skipped (no test files)",
+            "bundling": "not yet implemented",
+        }
+        mock_engines.return_value = ([], statuses)
+        mock_postfilter.return_value = _make_post_filter_result()
+
+        with patch.dict("os.environ", env, clear=True):
+            exit_code = main()
+
+        assert exit_code == EXIT_OK
+        # summary.md should exist
+        assert (tmp_path / "output" / "summary.md").exists()
+
+
+# ---------------------------------------------------------------------------
+# TestExitCodes
+# ---------------------------------------------------------------------------
+
+
+class TestExitCodes:
+    """Tests for exit code semantics."""
+
+    def test_exit_ok_constant(self):
+        assert EXIT_OK == 0
+
+    def test_exit_infra_error_constant(self):
+        assert EXIT_INFRA_ERROR == 2
diff --git a/validation/workflows/validation-caller.yml b/validation/workflows/validation-caller.yml
new file mode 100644
index 00000000..12c47bdd
--- /dev/null
+++ b/validation/workflows/validation-caller.yml
@@ -0,0 +1,35 @@
+# CAMARA Validation Framework — Caller Workflow
+#
+# Copy this file to .github/workflows/camara-validation.yml in your
+# API repository.  No modification needed — all configuration is
+# centralized in the tooling repository.
+#
+# This replaces the v0 pr_validation_caller.yml for repositories that
+# have opted into the v1 validation framework.
+
+name: CAMARA Validation
+
+on:
+  pull_request:
+    branches:
+      - main
+      - release-snapshot/**
+      - maintenance/**
+  workflow_dispatch:
+
+concurrency:
+  group: ${{ github.ref }}-${{ github.workflow }}
+  cancel-in-progress: true
+
+permissions:
+  checks: write
+  pull-requests: write
+  issues: write
+  contents: read
+  statuses: write
+  id-token: write
+
+jobs:
+  validation:
+    uses: camaraproject/tooling/.github/workflows/validation.yml@v1-rc
+    secrets: inherit

From 41ae2b7b22020f4b7e231e6bf41f4dca4b3c1611 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Thu, 26 Mar 2026 23:13:20 +0100
Subject: [PATCH 013/157] fix(validation): rename conditional level "off" to
 "muted"

YAML parses unquoted "off" as boolean False, causing the post-filter
to miss the suppression check (resolved_level == "off" fails when
the actual value is False). Renamed to "muted" which is YAML-safe
and familiar from linting tools.

Found during smoke test: P-007 appeared in annotations with
level: false instead of being filtered out.
---
 validation/postfilter/engine.py              | 4 ++--
 validation/postfilter/level_resolver.py      | 2 +-
 validation/rules/python-rules.yaml           | 2 +-
 validation/schemas/findings-schema.yaml      | 2 +-
 validation/schemas/rule-metadata-schema.yaml | 6 +++---
 validation/tests/test_postfilter_engine.py   | 6 +++---
 validation/tests/test_postfilter_levels.py   | 6 +++---
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/validation/postfilter/engine.py b/validation/postfilter/engine.py
index 66dc0339..94448c9c 100644
--- a/validation/postfilter/engine.py
+++ b/validation/postfilter/engine.py
@@ -176,7 +176,7 @@ def run_post_filter(
        ``had_engine_error``.
     2. Look up ``(engine, engine_rule)`` in the metadata index.
     3. **Mapped rule**: evaluate applicability (remove if not applicable),
-       resolve conditional level (remove if ``"off"``), enrich with
+       resolve conditional level (remove if ``"muted"``), enrich with
        ``rule_id``, optional ``message_override``/``hint``, and
        adjusted ``level``.
     4. **Unmapped rule** (pass-through): keep engine severity and
@@ -230,7 +230,7 @@ def run_post_filter(
             # Conditional level resolution (skip for identity-only entries)
             if rule.conditional_level is not None:
                 resolved_level = resolve_level(rule, context, api_ctx)
-                if resolved_level == "off":
+                if resolved_level == "muted":
                     continue
                 enriched = _enrich_finding(finding, rule, resolved_level)
             else:
diff --git a/validation/postfilter/level_resolver.py b/validation/postfilter/level_resolver.py
index 947a1676..62918dc7 100644
--- a/validation/postfilter/level_resolver.py
+++ b/validation/postfilter/level_resolver.py
@@ -45,7 +45,7 @@ def resolve_level(
         api_context: Per-API context, or ``None`` for repo-level findings.
 
     Returns:
-        Resolved level: ``"error"``, ``"warn"``, ``"hint"``, or ``"off"``.
+        Resolved level: ``"error"``, ``"warn"``, ``"hint"``, or ``"muted"``.
     """
     for override in rule.conditional_level.overrides:
         if evaluate_condition(override.condition, context, api_context):
diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
index b87b698c..3da52388 100644
--- a/validation/rules/python-rules.yaml
+++ b/validation/rules/python-rules.yaml
@@ -64,7 +64,7 @@
   engine: python
   engine_rule: check-test-file-version
   conditional_level:
-    default: off
+    default: muted
 
 # P-008: check-test-directory-exists
 - id: P-008
diff --git a/validation/schemas/findings-schema.yaml b/validation/schemas/findings-schema.yaml
index c6352b1c..5c2824b4 100644
--- a/validation/schemas/findings-schema.yaml
+++ b/validation/schemas/findings-schema.yaml
@@ -44,7 +44,7 @@ properties:
     description: >
       Severity as reported by the engine, before post-filter remapping.
       The post-filter may change this based on conditional_level overrides
-      and may suppress the finding entirely (level "off").
+      and may suppress the finding entirely (level "muted").
 
   message:
     type: string
diff --git a/validation/schemas/rule-metadata-schema.yaml b/validation/schemas/rule-metadata-schema.yaml
index ae8c79c1..8c49e308 100644
--- a/validation/schemas/rule-metadata-schema.yaml
+++ b/validation/schemas/rule-metadata-schema.yaml
@@ -119,9 +119,9 @@ properties:
     properties:
       default:
         type: string
-        enum: [error, warn, hint, off]
+        enum: [error, warn, hint, muted]
         description: >
-          Base severity level.  "off" means the rule is suppressed in
+          Base severity level.  "muted" means the rule is suppressed in
           contexts where no override matches.
 
       overrides:
@@ -139,6 +139,6 @@ properties:
                 fields must match; array fields use OR logic.
             level:
               type: string
-              enum: [error, warn, hint, off]
+              enum: [error, warn, hint, muted]
 
 additionalProperties: false
diff --git a/validation/tests/test_postfilter_engine.py b/validation/tests/test_postfilter_engine.py
index 98978513..7887949d 100644
--- a/validation/tests/test_postfilter_engine.py
+++ b/validation/tests/test_postfilter_engine.py
@@ -278,12 +278,12 @@ def test_applicability_filters_finding(self, tmp_path: Path):
         assert result.findings == []
         assert result.result == "pass"
 
-    def test_level_off_removes_finding(self, tmp_path: Path):
-        """Level resolved to 'off' removes the finding."""
+    def test_level_muted_removes_finding(self, tmp_path: Path):
+        """Level resolved to 'muted' removes the finding."""
         _write_rules(tmp_path, [
             _minimal_rule(
                 engine_rule="some-rule",
-                default_level="off",
+                default_level="muted",
             )
         ])
         ctx = _make_context()
diff --git a/validation/tests/test_postfilter_levels.py b/validation/tests/test_postfilter_levels.py
index 8fb923a1..6ff24296 100644
--- a/validation/tests/test_postfilter_levels.py
+++ b/validation/tests/test_postfilter_levels.py
@@ -137,16 +137,16 @@ def test_second_override_matches(self):
         ctx = _make_context(branch_type="main")
         assert resolve_level(rule, ctx, None) == "warn"
 
-    def test_override_resolves_to_off(self):
+    def test_override_resolves_to_muted(self):
         rule = _make_rule(
             default="warn",
             overrides=[
-                ({"target_api_status": ["draft"]}, "off"),
+                ({"target_api_status": ["draft"]}, "muted"),
             ],
         )
         ctx = _make_context()
         api = _make_api(target_api_status="draft")
-        assert resolve_level(rule, ctx, api) == "off"
+        assert resolve_level(rule, ctx, api) == "muted"
 
     def test_api_context_used_in_override(self):
         rule = _make_rule(

From 54eb556b7776f7efa6dc65b3ac64bc31b6a51514 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Thu, 26 Mar 2026 23:56:22 +0100
Subject: [PATCH 014/157] fix(validation): use testing fallback for fork PR ref
 resolution
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

OIDC tokens are unavailable for fork PRs, so the fallback ref is used.
During testing the fallback must point to hdamker/tooling@validation-framework
instead of the production target camaraproject/tooling@v1-rc.

TESTING ONLY — revert to camaraproject/tooling@v1-rc before production.
---
 .github/workflows/validation.yml | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 22c50ea6..91f39294 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -117,10 +117,14 @@ jobs:
             }
 
             // Tier 3: Hardcoded fallback tag
-            core.setOutput('tooling_checkout_repo', 'camaraproject/tooling');
-            core.setOutput('tooling_checkout_ref', 'v1-rc');
+            // ┌──────────────────────────────────────────────────────────┐
+            // │ TESTING ONLY — replace before production deployment:     │
+            // │   repo: camaraproject/tooling    ref: v1-rc             │
+            // └──────────────────────────────────────────────────────────┘
+            core.setOutput('tooling_checkout_repo', 'hdamker/tooling');
+            core.setOutput('tooling_checkout_ref', 'validation-framework');
             core.setOutput('tooling_ref_source', 'fallback_tag');
-            core.info('Tooling ref: fallback tag v1-rc');
+            core.info('Tooling ref: fallback (TESTING) hdamker/tooling@validation-framework');
 
       # ── Step 3: Checkout tooling (sparse) ──────────────────────────
       - name: Checkout tooling

From 7591d09a75c3b7a208b992a2f9a6e12db0b49272 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 27 Mar 2026 00:03:51 +0100
Subject: [PATCH 015/157] config(validation): promote ReleaseTest to standard
 stage

---
 validation/config/validation-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/validation/config/validation-config.yaml b/validation/config/validation-config.yaml
index bf552f3f..9bc81586 100644
--- a/validation/config/validation-config.yaml
+++ b/validation/config/validation-config.yaml
@@ -13,4 +13,4 @@ fork_owners:
 
 repositories:
   ReleaseTest:
-    stage: advisory
+    stage: standard

From ea7dbbe2e208236855b11ee36f1de257ce3e0e64 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 27 Mar 2026 06:57:32 +0100
Subject: [PATCH 016/157] fix(validation): add --quiet to Spectral CLI to
 suppress diagnostic text

Spectral v6.15.0 appends "No results with a severity of 'error' found!"
to stdout after the JSON array, causing json.loads() to fail with
"Extra data: line 1 column 3". The --quiet flag suppresses this.

Also adds a defensive test for the trailing-diagnostic pattern.
---
 validation/engines/spectral_adapter.py    | 1 +
 validation/tests/test_spectral_adapter.py | 8 ++++++++
 2 files changed, 9 insertions(+)

diff --git a/validation/engines/spectral_adapter.py b/validation/engines/spectral_adapter.py
index 3b0bff5a..034689de 100644
--- a/validation/engines/spectral_adapter.py
+++ b/validation/engines/spectral_adapter.py
@@ -244,6 +244,7 @@ def run_spectral(
         "spectral",
         "lint",
         "--format", "json",
+        "--quiet",
         "--ruleset", str(ruleset_path),
         *spec_patterns,
     ]
diff --git a/validation/tests/test_spectral_adapter.py b/validation/tests/test_spectral_adapter.py
index 150c2e2e..033bd6d4 100644
--- a/validation/tests/test_spectral_adapter.py
+++ b/validation/tests/test_spectral_adapter.py
@@ -227,6 +227,13 @@ def test_empty_string(self):
     def test_whitespace_only(self):
         assert parse_spectral_output("   \n  ") == []
 
+    def test_json_with_trailing_diagnostic(self):
+        """Spectral appends diagnostic text after JSON when not using --quiet."""
+        findings = parse_spectral_output(
+            "[]No results with a severity of 'error' found!"
+        )
+        assert findings == []
+
     def test_invalid_json_returns_empty(self):
         findings = parse_spectral_output("not json at all")
         assert findings == []
@@ -318,6 +325,7 @@ def test_command_includes_ruleset_and_patterns(self, mock_run, tmp_path):
         call_args = mock_run.call_args
         cmd = call_args[0][0]
         assert "--ruleset" in cmd
+        assert "--quiet" in cmd
         assert str(ruleset) in cmd
         assert "code/API_definitions/*.yaml" in cmd
         assert call_args[1]["cwd"] == str(tmp_path)

From 76d1dc413b6c2c30c9d442e768f5c2275f5e552d Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 27 Mar 2026 08:34:06 +0100
Subject: [PATCH 017/157] chore(validation): bump action/runtime versions, pin
 pip deps, exclude gherkin-lint

Bump GitHub Actions to match sibling workflows: setup-python@v6,
setup-node@v6, upload-artifact@v6. Bump node-version from 20 to 24
(GitHub deprecating Node 20 from June 2026). Pin pip dependencies
(pyyaml==6.0.3, jsonschema==4.26.0) for reproducible builds.

Exclude unmaintained gherkin-lint from v1 validation pipeline. The
engine adapter is retained for re-integration with a replacement tool.
Orchestrator now unconditionally skips the gherkin engine.
---
 .github/workflows/validation.yml      |  10 +-
 validation/orchestrator.py            |  21 +-
 validation/package-lock.json          | 271 +-------------------------
 validation/package.json               |   3 +-
 validation/tests/test_orchestrator.py |  24 +--
 5 files changed, 20 insertions(+), 309 deletions(-)

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 91f39294..58355fc0 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -140,19 +140,19 @@ jobs:
 
       # ── Step 4: Setup Python ───────────────────────────────────────
       - name: Setup Python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
           python-version: "3.11"
 
       # ── Step 5: Setup Node ─────────────────────────────────────────
       - name: Setup Node
-        uses: actions/setup-node@v4
+        uses: actions/setup-node@v6
         with:
-          node-version: "20"
+          node-version: "24"
 
       # ── Step 6: Install Python dependencies ────────────────────────
       - name: Install Python dependencies
-        run: pip install --quiet pyyaml jsonschema
+        run: pip install --quiet pyyaml==6.0.3 jsonschema==4.26.0
 
       # ── Step 7: Install Node dependencies ──────────────────────────
       - name: Install Node dependencies
@@ -277,7 +277,7 @@ jobs:
       # ── Step 14: Upload diagnostics ────────────────────────────────
       - name: Upload diagnostics
         if: always() && steps.orchestrator.outcome == 'success'
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: validation-diagnostics
           path: validation-output/diagnostics/
diff --git a/validation/orchestrator.py b/validation/orchestrator.py
index bd3c714d..1b518305 100644
--- a/validation/orchestrator.py
+++ b/validation/orchestrator.py
@@ -26,7 +26,6 @@
 from validation.config.config_gate import StageGateResult, resolve_stage_from_files
 from validation.context import ValidationContext, build_validation_context
 from validation.engines import (
-    run_gherkin_engine,
     run_python_engine,
     run_spectral_engine,
     run_yamllint_engine,
@@ -246,23 +245,9 @@ def run_engines(
         engine_statuses["python"] = f"error: {exc}"
         logger.error("Python checks failed: %s", exc)
 
-    # --- gherkin-lint ---
-    if not test_files:
-        engine_statuses["gherkin"] = "skipped (no test files)"
-        logger.info("gherkin-lint: skipped (no test files)")
-    else:
-        try:
-            gherkin_config = paths.linting_config_dir / ".gherkin-lintrc"
-            findings = run_gherkin_engine(
-                repo_path=repo_path,
-                config_path=gherkin_config,
-            )
-            all_findings.extend(findings)
-            engine_statuses["gherkin"] = f"{len(findings)} finding(s)"
-            logger.info("gherkin-lint: %d finding(s)", len(findings))
-        except Exception as exc:
-            engine_statuses["gherkin"] = f"error: {exc}"
-            logger.error("gherkin-lint failed: %s", exc)
+    # --- gherkin-lint (excluded from v1 — unmaintained, seeking alternative) ---
+    engine_statuses["gherkin"] = "skipped (excluded from v1)"
+    logger.info("gherkin-lint: skipped (excluded from v1)")
 
     # --- Bundling (placeholder for WP-06.08) ---
     engine_statuses["bundling"] = "not yet implemented"
diff --git a/validation/package-lock.json b/validation/package-lock.json
index b598bf36..4ae75f7d 100644
--- a/validation/package-lock.json
+++ b/validation/package-lock.json
@@ -6,8 +6,7 @@
     "": {
       "name": "camara-validation-tools",
       "dependencies": {
-        "@stoplight/spectral-cli": "^6.14.0",
-        "gherkin-lint": "^4.2.4"
+        "@stoplight/spectral-cli": "^6.14.0"
       }
     },
     "node_modules/@asyncapi/specs": {
@@ -90,70 +89,6 @@
         "node": ">= 8"
       }
     },
-    "node_modules/@protobufjs/aspromise": {
-      "version": "1.1.2",
-      "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
-      "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==",
-      "license": "BSD-3-Clause"
-    },
-    "node_modules/@protobufjs/base64": {
-      "version": "1.1.2",
-      "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
-      "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==",
-      "license": "BSD-3-Clause"
-    },
-    "node_modules/@protobufjs/codegen": {
-      "version": "2.0.4",
-      "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
-      "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==",
-      "license": "BSD-3-Clause"
-    },
-    "node_modules/@protobufjs/eventemitter": {
-      "version": "1.1.0",
-      "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
-      "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==",
-      "license": "BSD-3-Clause"
-    },
-    "node_modules/@protobufjs/fetch": {
-      "version": "1.1.0",
-      "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
-      "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==",
-      "license": "BSD-3-Clause",
-      "dependencies": {
-        "@protobufjs/aspromise": "^1.1.1",
-        "@protobufjs/inquire": "^1.1.0"
-      }
-    },
-    "node_modules/@protobufjs/float": {
-      "version": "1.0.2",
-      "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
-      "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==",
-      "license": "BSD-3-Clause"
-    },
-    "node_modules/@protobufjs/inquire": {
-      "version": "1.1.0",
-      "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
-      "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==",
-      "license": "BSD-3-Clause"
-    },
-    "node_modules/@protobufjs/path": {
-      "version": "1.1.2",
-      "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
-      "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==",
-      "license": "BSD-3-Clause"
-    },
-    "node_modules/@protobufjs/pool": {
-      "version": "1.1.0",
-      "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
-      "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==",
-      "license": "BSD-3-Clause"
-    },
-    "node_modules/@protobufjs/utf8": {
-      "version": "1.1.0",
-      "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
-      "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==",
-      "license": "BSD-3-Clause"
-    },
     "node_modules/@rollup/plugin-commonjs": {
       "version": "22.0.2",
       "resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-22.0.2.tgz",
@@ -657,12 +592,6 @@
       "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==",
       "license": "MIT"
     },
-    "node_modules/@types/long": {
-      "version": "4.0.2",
-      "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.2.tgz",
-      "integrity": "sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA==",
-      "license": "MIT"
-    },
     "node_modules/@types/markdown-escape": {
       "version": "1.1.3",
       "resolved": "https://registry.npmjs.org/@types/markdown-escape/-/markdown-escape-1.1.3.tgz",
@@ -690,12 +619,6 @@
       "integrity": "sha512-wkXrVzX5yoqLnndOwFsieJA7oKM8cNkOKJtf/3vVGSUFkWDKZvFHpIl9Pvqb/T9UsawBBFMTTD8xu7sK5MWuvg==",
       "license": "MIT"
     },
-    "node_modules/@types/uuid": {
-      "version": "3.4.13",
-      "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-3.4.13.tgz",
-      "integrity": "sha512-pAeZeUbLE4Z9Vi9wsWV2bYPTweEHeJJy0G4pEjOA/FSvy1Ad5U5Km8iDV6TKre1mjBiVNfAdVHKruP8bAh4Q5A==",
-      "license": "MIT"
-    },
     "node_modules/abort-controller": {
       "version": "3.0.0",
       "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
@@ -908,12 +831,6 @@
         "node": ">=8"
       }
     },
-    "node_modules/buffer-from": {
-      "version": "1.1.2",
-      "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
-      "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
-      "license": "MIT"
-    },
     "node_modules/builtins": {
       "version": "1.0.3",
       "resolved": "https://registry.npmjs.org/builtins/-/builtins-1.0.3.tgz",
@@ -1012,15 +929,6 @@
       "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
       "license": "MIT"
     },
-    "node_modules/commander": {
-      "version": "11.0.0",
-      "resolved": "https://registry.npmjs.org/commander/-/commander-11.0.0.tgz",
-      "integrity": "sha512-9HMlXtt/BNoYr8ooyjjNRdIilOTkVJXB+GhxMTtOKwk0R4j4lS4NpjuqmRxroBfnfTSHQIHQB7wryHhXarNjmQ==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=16"
-      }
-    },
     "node_modules/commondir": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz",
@@ -1033,29 +941,6 @@
       "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
       "license": "MIT"
     },
-    "node_modules/core-js": {
-      "version": "3.33.1",
-      "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.33.1.tgz",
-      "integrity": "sha512-qVSq3s+d4+GsqN0teRCJtM6tdEEXyWxjzbhVrCHmBS5ZTM0FS2MOS0D13dUXAWDUN6a+lHI/N1hF9Ytz6iLl9Q==",
-      "hasInstallScript": true,
-      "license": "MIT",
-      "funding": {
-        "type": "opencollective",
-        "url": "https://opencollective.com/core-js"
-      }
-    },
-    "node_modules/cucumber-messages": {
-      "version": "8.0.0",
-      "resolved": "https://registry.npmjs.org/cucumber-messages/-/cucumber-messages-8.0.0.tgz",
-      "integrity": "sha512-lUnWRMjwA9+KhDec/5xRZV3Du67ISumHnVLywWQXyvzmc4P+Eqx8CoeQrBQoau3Pw1hs4kJLTDyV85hFBF00SQ==",
-      "deprecated": "This package is now published under @cucumber/messages",
-      "license": "MIT",
-      "dependencies": {
-        "@types/uuid": "^3.4.6",
-        "protobufjs": "^6.8.8",
-        "uuid": "^3.3.3"
-      }
-    },
     "node_modules/data-uri-to-buffer": {
       "version": "2.0.2",
       "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-2.0.2.tgz",
@@ -1586,78 +1471,6 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
-    "node_modules/gherkin": {
-      "version": "9.0.0",
-      "resolved": "https://registry.npmjs.org/gherkin/-/gherkin-9.0.0.tgz",
-      "integrity": "sha512-6xoAepoxo5vhkBXjB4RCfVnSKHu5z9SqXIQVUyj+Jw8BQX8odATlee5otXgdN8llZvyvHokuvNiBeB3naEnnIQ==",
-      "deprecated": "This package is now published under @cucumber/gherkin",
-      "license": "MIT",
-      "dependencies": {
-        "commander": "^4.0.1",
-        "cucumber-messages": "8.0.0",
-        "source-map-support": "^0.5.16"
-      },
-      "bin": {
-        "gherkin-javascript": "bin/gherkin"
-      }
-    },
-    "node_modules/gherkin-lint": {
-      "version": "4.2.4",
-      "resolved": "https://registry.npmjs.org/gherkin-lint/-/gherkin-lint-4.2.4.tgz",
-      "integrity": "sha512-iM+ECIHOF6Wh94YIF1hSHA6JH9rzcgozlMLHA/uCzGtQiMjb/uL093eh1nTpfoJ/38veL7Jfh4yY2inu7uUoFA==",
-      "license": "ISC",
-      "dependencies": {
-        "commander": "11.0.0",
-        "core-js": "3.33.1",
-        "gherkin": "9.0.0",
-        "glob": "7.1.6",
-        "lodash": "4.17.21",
-        "strip-json-comments": "3.0.1",
-        "xml-js": "^1.6.11"
-      },
-      "bin": {
-        "gherkin-lint": "dist/main.js"
-      },
-      "engines": {
-        "node": ">=10.0.0"
-      }
-    },
-    "node_modules/gherkin-lint/node_modules/glob": {
-      "version": "7.1.6",
-      "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
-      "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
-      "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me",
-      "license": "ISC",
-      "dependencies": {
-        "fs.realpath": "^1.0.0",
-        "inflight": "^1.0.4",
-        "inherits": "2",
-        "minimatch": "^3.0.4",
-        "once": "^1.3.0",
-        "path-is-absolute": "^1.0.0"
-      },
-      "engines": {
-        "node": "*"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/isaacs"
-      }
-    },
-    "node_modules/gherkin-lint/node_modules/lodash": {
-      "version": "4.17.21",
-      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
-      "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
-      "license": "MIT"
-    },
-    "node_modules/gherkin/node_modules/commander": {
-      "version": "4.1.1",
-      "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz",
-      "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==",
-      "license": "MIT",
-      "engines": {
-        "node": ">= 6"
-      }
-    },
     "node_modules/glob": {
       "version": "7.2.3",
       "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
@@ -2335,12 +2148,6 @@
       "integrity": "sha512-1/W4dM+35DwvE/iEd1M9ekewOSTlpFekhw9mhAtrwjVqUr83/ilQiyAvmg4tVX7Unkcfl1KC+i9WdaT4B6aQcg==",
       "license": "MIT"
     },
-    "node_modules/long": {
-      "version": "4.0.0",
-      "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz",
-      "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==",
-      "license": "Apache-2.0"
-    },
     "node_modules/magic-string": {
       "version": "0.25.9",
       "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.25.9.tgz",
@@ -2569,32 +2376,6 @@
       "integrity": "sha512-dKp+C4iXWK4vVYZmYSd0KBH5F/h1HoZRsbJ82AVKRO3PEo8L4lBS/vLwhVtpwwuYcoIsVY+1JYKR268yn480uQ==",
       "license": "Unlicense"
     },
-    "node_modules/protobufjs": {
-      "version": "6.11.4",
-      "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.4.tgz",
-      "integrity": "sha512-5kQWPaJHi1WoCpjTGszzQ32PG2F4+wRY6BmAT4Vfw56Q2FZ4YZzK20xUYQH4YkfehY1e6QSICrJquM6xXZNcrw==",
-      "hasInstallScript": true,
-      "license": "BSD-3-Clause",
-      "dependencies": {
-        "@protobufjs/aspromise": "^1.1.2",
-        "@protobufjs/base64": "^1.1.2",
-        "@protobufjs/codegen": "^2.0.4",
-        "@protobufjs/eventemitter": "^1.1.0",
-        "@protobufjs/fetch": "^1.1.0",
-        "@protobufjs/float": "^1.0.2",
-        "@protobufjs/inquire": "^1.1.0",
-        "@protobufjs/path": "^1.1.2",
-        "@protobufjs/pool": "^1.1.0",
-        "@protobufjs/utf8": "^1.1.0",
-        "@types/long": "^4.0.1",
-        "@types/node": ">=13.7.0",
-        "long": "^4.0.0"
-      },
-      "bin": {
-        "pbjs": "bin/pbjs",
-        "pbts": "bin/pbts"
-      }
-    },
     "node_modules/queue-microtask": {
       "version": "1.2.3",
       "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
@@ -2810,15 +2591,6 @@
       "integrity": "sha512-ERq4hUjKDbJfE4+XtZLFPCDi8Vb1JqaxAPTxWFLBx8XcAlf9Bda/ZJdVezs/NAfsMQScyIlUMx+Yeu7P7rx5jw==",
       "license": "MIT"
     },
-    "node_modules/sax": {
-      "version": "1.6.0",
-      "resolved": "https://registry.npmjs.org/sax/-/sax-1.6.0.tgz",
-      "integrity": "sha512-6R3J5M4AcbtLUdZmRv2SygeVaM7IhrLXu9BmnOGmmACak8fiUtOsYNWUS4uK7upbmHIBbLBeFeI//477BKLBzA==",
-      "license": "BlueOak-1.0.0",
-      "engines": {
-        "node": ">=11.0.0"
-      }
-    },
     "node_modules/set-function-length": {
       "version": "1.2.2",
       "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz",
@@ -2958,16 +2730,6 @@
         "node": ">=0.10.0"
       }
     },
-    "node_modules/source-map-support": {
-      "version": "0.5.21",
-      "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz",
-      "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==",
-      "license": "MIT",
-      "dependencies": {
-        "buffer-from": "^1.0.0",
-        "source-map": "^0.6.0"
-      }
-    },
     "node_modules/sourcemap-codec": {
       "version": "1.4.8",
       "resolved": "https://registry.npmjs.org/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz",
@@ -3080,15 +2842,6 @@
         "node": ">=8"
       }
     },
-    "node_modules/strip-json-comments": {
-      "version": "3.0.1",
-      "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.0.1.tgz",
-      "integrity": "sha512-VTyMAUfdm047mwKl+u79WIdrZxtFtn+nBxHeb844XBQ9uMNTuTHdx2hc5RiAJYqwTj3wc/xe5HLSdJSkJ+WfZw==",
-      "license": "MIT",
-      "engines": {
-        "node": ">=8"
-      }
-    },
     "node_modules/supports-color": {
       "version": "7.2.0",
       "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
@@ -3265,16 +3018,6 @@
         "node": ">= 4"
       }
     },
-    "node_modules/uuid": {
-      "version": "3.4.0",
-      "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz",
-      "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==",
-      "deprecated": "Please upgrade  to version 7 or higher.  Older versions may use Math.random() in certain circumstances, which is known to be problematic.  See https://v8.dev/blog/math-random for details.",
-      "license": "MIT",
-      "bin": {
-        "uuid": "bin/uuid"
-      }
-    },
     "node_modules/validate-npm-package-name": {
       "version": "3.0.0",
       "resolved": "https://registry.npmjs.org/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz",
@@ -3408,18 +3151,6 @@
       "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
       "license": "ISC"
     },
-    "node_modules/xml-js": {
-      "version": "1.6.11",
-      "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz",
-      "integrity": "sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==",
-      "license": "MIT",
-      "dependencies": {
-        "sax": "^1.2.4"
-      },
-      "bin": {
-        "xml-js": "bin/cli.js"
-      }
-    },
     "node_modules/y18n": {
       "version": "5.0.8",
       "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
diff --git a/validation/package.json b/validation/package.json
index 77e143ed..bd3efd6f 100644
--- a/validation/package.json
+++ b/validation/package.json
@@ -3,7 +3,6 @@
   "private": true,
   "description": "Node.js tool dependencies for CAMARA validation framework",
   "dependencies": {
-    "@stoplight/spectral-cli": "^6.14.0",
-    "gherkin-lint": "^4.2.4"
+    "@stoplight/spectral-cli": "^6.14.0"
   }
 }
diff --git a/validation/tests/test_orchestrator.py b/validation/tests/test_orchestrator.py
index 81e03e51..5492b5d9 100644
--- a/validation/tests/test_orchestrator.py
+++ b/validation/tests/test_orchestrator.py
@@ -253,42 +253,37 @@ def test_discover_test_files_empty(self, tmp_path):
 class TestRunEngines:
     """Tests for engine orchestration."""
 
-    @patch("validation.orchestrator.run_gherkin_engine")
     @patch("validation.orchestrator.run_python_engine")
     @patch("validation.orchestrator.run_spectral_engine")
     @patch("validation.orchestrator.run_yamllint_engine")
     def test_all_engines_called(
-        self, mock_yamllint, mock_spectral, mock_python, mock_gherkin, paths
+        self, mock_yamllint, mock_spectral, mock_python, paths
     ):
         mock_yamllint.return_value = [_make_finding(engine="yamllint")]
         mock_spectral.return_value = [_make_finding(engine="spectral")]
         mock_python.return_value = [_make_finding(engine="python")]
-        mock_gherkin.return_value = [_make_finding(engine="gherkin")]
         context = _make_context()
         test_files = [Path("/repo/code/Test_definitions/test.feature")]
 
         findings, statuses = run_engines(Path("/repo"), paths, context, test_files)
 
-        assert len(findings) == 4
+        assert len(findings) == 3
         assert mock_yamllint.called
         assert mock_spectral.called
         assert mock_python.called
-        assert mock_gherkin.called
         assert "finding(s)" in statuses["yamllint"]
         assert "finding(s)" in statuses["spectral"]
         assert "finding(s)" in statuses["python"]
-        assert "finding(s)" in statuses["gherkin"]
+        assert "skipped" in statuses["gherkin"]
         assert statuses["bundling"] == "not yet implemented"
 
-    @patch("validation.orchestrator.run_gherkin_engine")
     @patch("validation.orchestrator.run_python_engine")
     @patch("validation.orchestrator.run_spectral_engine")
     @patch("validation.orchestrator.run_yamllint_engine")
     def test_release_review_skips_yamllint_and_spectral(
-        self, mock_yamllint, mock_spectral, mock_python, mock_gherkin, paths
+        self, mock_yamllint, mock_spectral, mock_python, paths
     ):
         mock_python.return_value = []
-        mock_gherkin.return_value = []
         context = _make_context(is_release_review_pr=True)
         test_files = [Path("/repo/code/Test_definitions/test.feature")]
 
@@ -298,14 +293,15 @@ def test_release_review_skips_yamllint_and_spectral(
         assert not mock_spectral.called
         assert "skipped" in statuses["yamllint"]
         assert "skipped" in statuses["spectral"]
+        assert "skipped" in statuses["gherkin"]
 
-    @patch("validation.orchestrator.run_gherkin_engine")
     @patch("validation.orchestrator.run_python_engine")
     @patch("validation.orchestrator.run_spectral_engine")
     @patch("validation.orchestrator.run_yamllint_engine")
-    def test_no_test_files_skips_gherkin(
-        self, mock_yamllint, mock_spectral, mock_python, mock_gherkin, paths
+    def test_gherkin_always_skipped_v1(
+        self, mock_yamllint, mock_spectral, mock_python, paths
     ):
+        """gherkin-lint is excluded from v1 — always shows skipped."""
         mock_yamllint.return_value = []
         mock_spectral.return_value = []
         mock_python.return_value = []
@@ -313,8 +309,8 @@ def test_no_test_files_skips_gherkin(
 
         findings, statuses = run_engines(Path("/repo"), paths, context, test_files=[])
 
-        assert not mock_gherkin.called
         assert "skipped" in statuses["gherkin"]
+        assert "excluded" in statuses["gherkin"]
 
     @patch("validation.orchestrator.run_python_engine")
     @patch("validation.orchestrator.run_spectral_engine")
@@ -637,7 +633,7 @@ def test_engine_statuses_passed_to_summary(
             "yamllint": "2 finding(s)",
             "spectral": "3 finding(s)",
             "python": "0 finding(s)",
-            "gherkin": "skipped (no test files)",
+            "gherkin": "skipped (excluded from v1)",
             "bundling": "not yet implemented",
         }
         mock_engines.return_value = ([], statuses)

From a8eff6e2796c1935143e1d40ba6c3715d6963f8b Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 27 Mar 2026 09:25:12 +0100
Subject: [PATCH 018/157] fix(validation): restore gherkin-lint for Node 24
 compatibility testing

Revert the gherkin-lint exclusion from the previous commit. Testing
shows gherkin-lint works with Node 24 despite being unmaintained.
The deprecation warnings are cosmetic npm-level notices, not runtime
failures. Better to have gherkin linting with warnings than none.
---
 validation/orchestrator.py            |  21 +-
 validation/package-lock.json          | 271 +++++++++++++++++++++++++-
 validation/package.json               |   3 +-
 validation/tests/test_orchestrator.py |  24 ++-
 4 files changed, 304 insertions(+), 15 deletions(-)

diff --git a/validation/orchestrator.py b/validation/orchestrator.py
index 1b518305..bd3c714d 100644
--- a/validation/orchestrator.py
+++ b/validation/orchestrator.py
@@ -26,6 +26,7 @@
 from validation.config.config_gate import StageGateResult, resolve_stage_from_files
 from validation.context import ValidationContext, build_validation_context
 from validation.engines import (
+    run_gherkin_engine,
     run_python_engine,
     run_spectral_engine,
     run_yamllint_engine,
@@ -245,9 +246,23 @@ def run_engines(
         engine_statuses["python"] = f"error: {exc}"
         logger.error("Python checks failed: %s", exc)
 
-    # --- gherkin-lint (excluded from v1 — unmaintained, seeking alternative) ---
-    engine_statuses["gherkin"] = "skipped (excluded from v1)"
-    logger.info("gherkin-lint: skipped (excluded from v1)")
+    # --- gherkin-lint ---
+    if not test_files:
+        engine_statuses["gherkin"] = "skipped (no test files)"
+        logger.info("gherkin-lint: skipped (no test files)")
+    else:
+        try:
+            gherkin_config = paths.linting_config_dir / ".gherkin-lintrc"
+            findings = run_gherkin_engine(
+                repo_path=repo_path,
+                config_path=gherkin_config,
+            )
+            all_findings.extend(findings)
+            engine_statuses["gherkin"] = f"{len(findings)} finding(s)"
+            logger.info("gherkin-lint: %d finding(s)", len(findings))
+        except Exception as exc:
+            engine_statuses["gherkin"] = f"error: {exc}"
+            logger.error("gherkin-lint failed: %s", exc)
 
     # --- Bundling (placeholder for WP-06.08) ---
     engine_statuses["bundling"] = "not yet implemented"
diff --git a/validation/package-lock.json b/validation/package-lock.json
index 4ae75f7d..b598bf36 100644
--- a/validation/package-lock.json
+++ b/validation/package-lock.json
@@ -6,7 +6,8 @@
     "": {
       "name": "camara-validation-tools",
       "dependencies": {
-        "@stoplight/spectral-cli": "^6.14.0"
+        "@stoplight/spectral-cli": "^6.14.0",
+        "gherkin-lint": "^4.2.4"
       }
     },
     "node_modules/@asyncapi/specs": {
@@ -89,6 +90,70 @@
         "node": ">= 8"
       }
     },
+    "node_modules/@protobufjs/aspromise": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
+      "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/base64": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
+      "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/codegen": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
+      "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/eventemitter": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
+      "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/fetch": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
+      "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==",
+      "license": "BSD-3-Clause",
+      "dependencies": {
+        "@protobufjs/aspromise": "^1.1.1",
+        "@protobufjs/inquire": "^1.1.0"
+      }
+    },
+    "node_modules/@protobufjs/float": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
+      "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/inquire": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
+      "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/path": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
+      "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/pool": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
+      "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==",
+      "license": "BSD-3-Clause"
+    },
+    "node_modules/@protobufjs/utf8": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
+      "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==",
+      "license": "BSD-3-Clause"
+    },
     "node_modules/@rollup/plugin-commonjs": {
       "version": "22.0.2",
       "resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-22.0.2.tgz",
@@ -592,6 +657,12 @@
       "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==",
       "license": "MIT"
     },
+    "node_modules/@types/long": {
+      "version": "4.0.2",
+      "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.2.tgz",
+      "integrity": "sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA==",
+      "license": "MIT"
+    },
     "node_modules/@types/markdown-escape": {
       "version": "1.1.3",
       "resolved": "https://registry.npmjs.org/@types/markdown-escape/-/markdown-escape-1.1.3.tgz",
@@ -619,6 +690,12 @@
       "integrity": "sha512-wkXrVzX5yoqLnndOwFsieJA7oKM8cNkOKJtf/3vVGSUFkWDKZvFHpIl9Pvqb/T9UsawBBFMTTD8xu7sK5MWuvg==",
       "license": "MIT"
     },
+    "node_modules/@types/uuid": {
+      "version": "3.4.13",
+      "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-3.4.13.tgz",
+      "integrity": "sha512-pAeZeUbLE4Z9Vi9wsWV2bYPTweEHeJJy0G4pEjOA/FSvy1Ad5U5Km8iDV6TKre1mjBiVNfAdVHKruP8bAh4Q5A==",
+      "license": "MIT"
+    },
     "node_modules/abort-controller": {
       "version": "3.0.0",
       "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
@@ -831,6 +908,12 @@
         "node": ">=8"
       }
     },
+    "node_modules/buffer-from": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
+      "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
+      "license": "MIT"
+    },
     "node_modules/builtins": {
       "version": "1.0.3",
       "resolved": "https://registry.npmjs.org/builtins/-/builtins-1.0.3.tgz",
@@ -929,6 +1012,15 @@
       "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
       "license": "MIT"
     },
+    "node_modules/commander": {
+      "version": "11.0.0",
+      "resolved": "https://registry.npmjs.org/commander/-/commander-11.0.0.tgz",
+      "integrity": "sha512-9HMlXtt/BNoYr8ooyjjNRdIilOTkVJXB+GhxMTtOKwk0R4j4lS4NpjuqmRxroBfnfTSHQIHQB7wryHhXarNjmQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=16"
+      }
+    },
     "node_modules/commondir": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz",
@@ -941,6 +1033,29 @@
       "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
       "license": "MIT"
     },
+    "node_modules/core-js": {
+      "version": "3.33.1",
+      "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.33.1.tgz",
+      "integrity": "sha512-qVSq3s+d4+GsqN0teRCJtM6tdEEXyWxjzbhVrCHmBS5ZTM0FS2MOS0D13dUXAWDUN6a+lHI/N1hF9Ytz6iLl9Q==",
+      "hasInstallScript": true,
+      "license": "MIT",
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/core-js"
+      }
+    },
+    "node_modules/cucumber-messages": {
+      "version": "8.0.0",
+      "resolved": "https://registry.npmjs.org/cucumber-messages/-/cucumber-messages-8.0.0.tgz",
+      "integrity": "sha512-lUnWRMjwA9+KhDec/5xRZV3Du67ISumHnVLywWQXyvzmc4P+Eqx8CoeQrBQoau3Pw1hs4kJLTDyV85hFBF00SQ==",
+      "deprecated": "This package is now published under @cucumber/messages",
+      "license": "MIT",
+      "dependencies": {
+        "@types/uuid": "^3.4.6",
+        "protobufjs": "^6.8.8",
+        "uuid": "^3.3.3"
+      }
+    },
     "node_modules/data-uri-to-buffer": {
       "version": "2.0.2",
       "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-2.0.2.tgz",
@@ -1471,6 +1586,78 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/gherkin": {
+      "version": "9.0.0",
+      "resolved": "https://registry.npmjs.org/gherkin/-/gherkin-9.0.0.tgz",
+      "integrity": "sha512-6xoAepoxo5vhkBXjB4RCfVnSKHu5z9SqXIQVUyj+Jw8BQX8odATlee5otXgdN8llZvyvHokuvNiBeB3naEnnIQ==",
+      "deprecated": "This package is now published under @cucumber/gherkin",
+      "license": "MIT",
+      "dependencies": {
+        "commander": "^4.0.1",
+        "cucumber-messages": "8.0.0",
+        "source-map-support": "^0.5.16"
+      },
+      "bin": {
+        "gherkin-javascript": "bin/gherkin"
+      }
+    },
+    "node_modules/gherkin-lint": {
+      "version": "4.2.4",
+      "resolved": "https://registry.npmjs.org/gherkin-lint/-/gherkin-lint-4.2.4.tgz",
+      "integrity": "sha512-iM+ECIHOF6Wh94YIF1hSHA6JH9rzcgozlMLHA/uCzGtQiMjb/uL093eh1nTpfoJ/38veL7Jfh4yY2inu7uUoFA==",
+      "license": "ISC",
+      "dependencies": {
+        "commander": "11.0.0",
+        "core-js": "3.33.1",
+        "gherkin": "9.0.0",
+        "glob": "7.1.6",
+        "lodash": "4.17.21",
+        "strip-json-comments": "3.0.1",
+        "xml-js": "^1.6.11"
+      },
+      "bin": {
+        "gherkin-lint": "dist/main.js"
+      },
+      "engines": {
+        "node": ">=10.0.0"
+      }
+    },
+    "node_modules/gherkin-lint/node_modules/glob": {
+      "version": "7.1.6",
+      "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
+      "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
+      "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me",
+      "license": "ISC",
+      "dependencies": {
+        "fs.realpath": "^1.0.0",
+        "inflight": "^1.0.4",
+        "inherits": "2",
+        "minimatch": "^3.0.4",
+        "once": "^1.3.0",
+        "path-is-absolute": "^1.0.0"
+      },
+      "engines": {
+        "node": "*"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/isaacs"
+      }
+    },
+    "node_modules/gherkin-lint/node_modules/lodash": {
+      "version": "4.17.21",
+      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+      "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
+      "license": "MIT"
+    },
+    "node_modules/gherkin/node_modules/commander": {
+      "version": "4.1.1",
+      "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz",
+      "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 6"
+      }
+    },
     "node_modules/glob": {
       "version": "7.2.3",
       "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
@@ -2148,6 +2335,12 @@
       "integrity": "sha512-1/W4dM+35DwvE/iEd1M9ekewOSTlpFekhw9mhAtrwjVqUr83/ilQiyAvmg4tVX7Unkcfl1KC+i9WdaT4B6aQcg==",
       "license": "MIT"
     },
+    "node_modules/long": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz",
+      "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==",
+      "license": "Apache-2.0"
+    },
     "node_modules/magic-string": {
       "version": "0.25.9",
       "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.25.9.tgz",
@@ -2376,6 +2569,32 @@
       "integrity": "sha512-dKp+C4iXWK4vVYZmYSd0KBH5F/h1HoZRsbJ82AVKRO3PEo8L4lBS/vLwhVtpwwuYcoIsVY+1JYKR268yn480uQ==",
       "license": "Unlicense"
     },
+    "node_modules/protobufjs": {
+      "version": "6.11.4",
+      "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.4.tgz",
+      "integrity": "sha512-5kQWPaJHi1WoCpjTGszzQ32PG2F4+wRY6BmAT4Vfw56Q2FZ4YZzK20xUYQH4YkfehY1e6QSICrJquM6xXZNcrw==",
+      "hasInstallScript": true,
+      "license": "BSD-3-Clause",
+      "dependencies": {
+        "@protobufjs/aspromise": "^1.1.2",
+        "@protobufjs/base64": "^1.1.2",
+        "@protobufjs/codegen": "^2.0.4",
+        "@protobufjs/eventemitter": "^1.1.0",
+        "@protobufjs/fetch": "^1.1.0",
+        "@protobufjs/float": "^1.0.2",
+        "@protobufjs/inquire": "^1.1.0",
+        "@protobufjs/path": "^1.1.2",
+        "@protobufjs/pool": "^1.1.0",
+        "@protobufjs/utf8": "^1.1.0",
+        "@types/long": "^4.0.1",
+        "@types/node": ">=13.7.0",
+        "long": "^4.0.0"
+      },
+      "bin": {
+        "pbjs": "bin/pbjs",
+        "pbts": "bin/pbts"
+      }
+    },
     "node_modules/queue-microtask": {
       "version": "1.2.3",
       "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
@@ -2591,6 +2810,15 @@
       "integrity": "sha512-ERq4hUjKDbJfE4+XtZLFPCDi8Vb1JqaxAPTxWFLBx8XcAlf9Bda/ZJdVezs/NAfsMQScyIlUMx+Yeu7P7rx5jw==",
       "license": "MIT"
     },
+    "node_modules/sax": {
+      "version": "1.6.0",
+      "resolved": "https://registry.npmjs.org/sax/-/sax-1.6.0.tgz",
+      "integrity": "sha512-6R3J5M4AcbtLUdZmRv2SygeVaM7IhrLXu9BmnOGmmACak8fiUtOsYNWUS4uK7upbmHIBbLBeFeI//477BKLBzA==",
+      "license": "BlueOak-1.0.0",
+      "engines": {
+        "node": ">=11.0.0"
+      }
+    },
     "node_modules/set-function-length": {
       "version": "1.2.2",
       "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz",
@@ -2730,6 +2958,16 @@
         "node": ">=0.10.0"
       }
     },
+    "node_modules/source-map-support": {
+      "version": "0.5.21",
+      "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz",
+      "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==",
+      "license": "MIT",
+      "dependencies": {
+        "buffer-from": "^1.0.0",
+        "source-map": "^0.6.0"
+      }
+    },
     "node_modules/sourcemap-codec": {
       "version": "1.4.8",
       "resolved": "https://registry.npmjs.org/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz",
@@ -2842,6 +3080,15 @@
         "node": ">=8"
       }
     },
+    "node_modules/strip-json-comments": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.0.1.tgz",
+      "integrity": "sha512-VTyMAUfdm047mwKl+u79WIdrZxtFtn+nBxHeb844XBQ9uMNTuTHdx2hc5RiAJYqwTj3wc/xe5HLSdJSkJ+WfZw==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=8"
+      }
+    },
     "node_modules/supports-color": {
       "version": "7.2.0",
       "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
@@ -3018,6 +3265,16 @@
         "node": ">= 4"
       }
     },
+    "node_modules/uuid": {
+      "version": "3.4.0",
+      "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz",
+      "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==",
+      "deprecated": "Please upgrade  to version 7 or higher.  Older versions may use Math.random() in certain circumstances, which is known to be problematic.  See https://v8.dev/blog/math-random for details.",
+      "license": "MIT",
+      "bin": {
+        "uuid": "bin/uuid"
+      }
+    },
     "node_modules/validate-npm-package-name": {
       "version": "3.0.0",
       "resolved": "https://registry.npmjs.org/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz",
@@ -3151,6 +3408,18 @@
       "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
       "license": "ISC"
     },
+    "node_modules/xml-js": {
+      "version": "1.6.11",
+      "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz",
+      "integrity": "sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g==",
+      "license": "MIT",
+      "dependencies": {
+        "sax": "^1.2.4"
+      },
+      "bin": {
+        "xml-js": "bin/cli.js"
+      }
+    },
     "node_modules/y18n": {
       "version": "5.0.8",
       "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
diff --git a/validation/package.json b/validation/package.json
index bd3efd6f..77e143ed 100644
--- a/validation/package.json
+++ b/validation/package.json
@@ -3,6 +3,7 @@
   "private": true,
   "description": "Node.js tool dependencies for CAMARA validation framework",
   "dependencies": {
-    "@stoplight/spectral-cli": "^6.14.0"
+    "@stoplight/spectral-cli": "^6.14.0",
+    "gherkin-lint": "^4.2.4"
   }
 }
diff --git a/validation/tests/test_orchestrator.py b/validation/tests/test_orchestrator.py
index 5492b5d9..81e03e51 100644
--- a/validation/tests/test_orchestrator.py
+++ b/validation/tests/test_orchestrator.py
@@ -253,37 +253,42 @@ def test_discover_test_files_empty(self, tmp_path):
 class TestRunEngines:
     """Tests for engine orchestration."""
 
+    @patch("validation.orchestrator.run_gherkin_engine")
     @patch("validation.orchestrator.run_python_engine")
     @patch("validation.orchestrator.run_spectral_engine")
     @patch("validation.orchestrator.run_yamllint_engine")
     def test_all_engines_called(
-        self, mock_yamllint, mock_spectral, mock_python, paths
+        self, mock_yamllint, mock_spectral, mock_python, mock_gherkin, paths
     ):
         mock_yamllint.return_value = [_make_finding(engine="yamllint")]
         mock_spectral.return_value = [_make_finding(engine="spectral")]
         mock_python.return_value = [_make_finding(engine="python")]
+        mock_gherkin.return_value = [_make_finding(engine="gherkin")]
         context = _make_context()
         test_files = [Path("/repo/code/Test_definitions/test.feature")]
 
         findings, statuses = run_engines(Path("/repo"), paths, context, test_files)
 
-        assert len(findings) == 3
+        assert len(findings) == 4
         assert mock_yamllint.called
         assert mock_spectral.called
         assert mock_python.called
+        assert mock_gherkin.called
         assert "finding(s)" in statuses["yamllint"]
         assert "finding(s)" in statuses["spectral"]
         assert "finding(s)" in statuses["python"]
-        assert "skipped" in statuses["gherkin"]
+        assert "finding(s)" in statuses["gherkin"]
         assert statuses["bundling"] == "not yet implemented"
 
+    @patch("validation.orchestrator.run_gherkin_engine")
     @patch("validation.orchestrator.run_python_engine")
     @patch("validation.orchestrator.run_spectral_engine")
     @patch("validation.orchestrator.run_yamllint_engine")
     def test_release_review_skips_yamllint_and_spectral(
-        self, mock_yamllint, mock_spectral, mock_python, paths
+        self, mock_yamllint, mock_spectral, mock_python, mock_gherkin, paths
     ):
         mock_python.return_value = []
+        mock_gherkin.return_value = []
         context = _make_context(is_release_review_pr=True)
         test_files = [Path("/repo/code/Test_definitions/test.feature")]
 
@@ -293,15 +298,14 @@ def test_release_review_skips_yamllint_and_spectral(
         assert not mock_spectral.called
         assert "skipped" in statuses["yamllint"]
         assert "skipped" in statuses["spectral"]
-        assert "skipped" in statuses["gherkin"]
 
+    @patch("validation.orchestrator.run_gherkin_engine")
     @patch("validation.orchestrator.run_python_engine")
     @patch("validation.orchestrator.run_spectral_engine")
     @patch("validation.orchestrator.run_yamllint_engine")
-    def test_gherkin_always_skipped_v1(
-        self, mock_yamllint, mock_spectral, mock_python, paths
+    def test_no_test_files_skips_gherkin(
+        self, mock_yamllint, mock_spectral, mock_python, mock_gherkin, paths
     ):
-        """gherkin-lint is excluded from v1 — always shows skipped."""
         mock_yamllint.return_value = []
         mock_spectral.return_value = []
         mock_python.return_value = []
@@ -309,8 +313,8 @@ def test_gherkin_always_skipped_v1(
 
         findings, statuses = run_engines(Path("/repo"), paths, context, test_files=[])
 
+        assert not mock_gherkin.called
         assert "skipped" in statuses["gherkin"]
-        assert "excluded" in statuses["gherkin"]
 
     @patch("validation.orchestrator.run_python_engine")
     @patch("validation.orchestrator.run_spectral_engine")
@@ -633,7 +637,7 @@ def test_engine_statuses_passed_to_summary(
             "yamllint": "2 finding(s)",
             "spectral": "3 finding(s)",
             "python": "0 finding(s)",
-            "gherkin": "skipped (excluded from v1)",
+            "gherkin": "skipped (no test files)",
             "bundling": "not yet implemented",
         }
         mock_engines.return_value = ([], statuses)

From d7aba76178e7fd00bbe2113e3cd8ff31864c9ece Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 27 Mar 2026 15:06:59 +0100
Subject: [PATCH 019/157] fix(validation): normalize Spectral paths to
 repo-relative at finding creation

Spectral may emit absolute runner paths in its JSON output depending on
how the shell resolves globs. Previously only the annotation module
handled this; diagnostics artifacts retained absolute paths.

Now _normalize_path() strips the repo root prefix at finding creation
time so all downstream consumers get clean repo-relative paths.

Also re-exports PROFILE_ADVISORY/STANDARD/STRICT from validation.context
package and updates level_resolver.py to use the public import.
---
 validation/context/__init__.py            |  3 +
 validation/engines/spectral_adapter.py    | 37 ++++++++--
 validation/postfilter/level_resolver.py   |  5 +-
 validation/tests/test_spectral_adapter.py | 82 +++++++++++++++++++++++
 4 files changed, 120 insertions(+), 7 deletions(-)

diff --git a/validation/context/__init__.py b/validation/context/__init__.py
index 317f074b..d8ccf588 100644
--- a/validation/context/__init__.py
+++ b/validation/context/__init__.py
@@ -3,6 +3,9 @@
 # release-plan.yaml, PR metadata, and central config.
 
 from .context_builder import (  # noqa: F401
+    PROFILE_ADVISORY,
+    PROFILE_STANDARD,
+    PROFILE_STRICT,
     ApiContext,
     ValidationContext,
     build_validation_context,
diff --git a/validation/engines/spectral_adapter.py b/validation/engines/spectral_adapter.py
index 034689de..026602a5 100644
--- a/validation/engines/spectral_adapter.py
+++ b/validation/engines/spectral_adapter.py
@@ -140,7 +140,24 @@ def select_ruleset_path(
     return fallback
 
 
-def normalize_finding(raw: dict) -> dict:
+def _normalize_path(source: str, repo_root: Optional[str] = None) -> str:
+    """Strip repo-root prefix from an absolute path to make it repo-relative.
+
+    Spectral may emit absolute runner paths (e.g.
+    ``/home/runner/work/Repo/Repo/code/API_definitions/api.yaml``) depending
+    on how the shell resolves the glob.  Normalising at finding-creation time
+    ensures every downstream consumer (annotations, diagnostics, PR comment)
+    sees clean repo-relative paths.
+    """
+    if not source or not repo_root:
+        return source
+    root = repo_root.rstrip("/") + "/"
+    if source.startswith(root):
+        return source[len(root):]
+    return source
+
+
+def normalize_finding(raw: dict, repo_root: Optional[str] = None) -> dict:
     """Convert one Spectral JSON finding to the common findings model.
 
     Critical field mapping:
@@ -148,8 +165,13 @@ def normalize_finding(raw: dict) -> dict:
       which is the JSONPath within the document).
     - ``raw["range"]["start"]["line"]`` is 0-indexed; add 1 for the framework.
     - ``raw["range"]["start"]["character"]`` is 0-indexed; add 1.
+
+    Args:
+        raw: Single finding dict from Spectral JSON output.
+        repo_root: Absolute path to the repository root.  When provided,
+            absolute ``source`` paths are normalised to repo-relative.
     """
-    source = raw.get("source", "")
+    source = _normalize_path(raw.get("source", ""), repo_root)
     start = raw.get("range", {}).get("start", {})
 
     line = start.get("line", 0) + 1
@@ -172,11 +194,16 @@ def normalize_finding(raw: dict) -> dict:
     return finding
 
 
-def parse_spectral_output(raw_json: str) -> List[dict]:
+def parse_spectral_output(
+    raw_json: str,
+    repo_root: Optional[str] = None,
+) -> List[dict]:
     """Parse Spectral ``--format json`` stdout into normalised findings.
 
     Args:
         raw_json: Raw JSON string from Spectral stdout.
+        repo_root: Repository root path passed to :func:`normalize_finding`
+            for path normalisation.
 
     Returns:
         List of findings conforming to the common findings model.
@@ -198,7 +225,7 @@ def parse_spectral_output(raw_json: str) -> List[dict]:
     findings = []
     for item in data:
         try:
-            findings.append(normalize_finding(item))
+            findings.append(normalize_finding(item, repo_root=repo_root))
         except (KeyError, TypeError) as exc:
             logger.warning("Skipping malformed Spectral finding: %s", exc)
     return findings
@@ -272,7 +299,7 @@ def run_spectral(
 
     # Exit 0 or 1: normal operation (findings may or may not exist).
     if result.returncode in (0, 1):
-        findings = parse_spectral_output(result.stdout)
+        findings = parse_spectral_output(result.stdout, repo_root=str(cwd))
         return SpectralResult(findings=findings, success=True)
 
     # Exit 2+: Spectral runtime error.
diff --git a/validation/postfilter/level_resolver.py b/validation/postfilter/level_resolver.py
index 62918dc7..12efccf6 100644
--- a/validation/postfilter/level_resolver.py
+++ b/validation/postfilter/level_resolver.py
@@ -12,11 +12,12 @@
 
 from typing import Optional
 
-from validation.context import ApiContext, ValidationContext
-from validation.context.context_builder import (
+from validation.context import (
     PROFILE_ADVISORY,
     PROFILE_STANDARD,
     PROFILE_STRICT,
+    ApiContext,
+    ValidationContext,
 )
 
 from .condition_evaluator import evaluate_condition
diff --git a/validation/tests/test_spectral_adapter.py b/validation/tests/test_spectral_adapter.py
index 033bd6d4..b19f5104 100644
--- a/validation/tests/test_spectral_adapter.py
+++ b/validation/tests/test_spectral_adapter.py
@@ -13,6 +13,7 @@
     DEFAULT_RULESET,
     ENGINE_NAME,
     SpectralResult,
+    _normalize_path,
     derive_api_name,
     map_severity,
     normalize_finding,
@@ -122,6 +123,45 @@ def test_nested_api_definitions(self):
         )
 
 
+# ---------------------------------------------------------------------------
+# TestNormalizePath
+# ---------------------------------------------------------------------------
+
+
+class TestNormalizePath:
+    def test_absolute_path_stripped(self):
+        source = "/home/runner/work/Repo/Repo/code/API_definitions/api.yaml"
+        result = _normalize_path(source, "/home/runner/work/Repo/Repo")
+        assert result == "code/API_definitions/api.yaml"
+
+    def test_absolute_path_with_trailing_slash(self):
+        source = "/home/runner/work/Repo/Repo/code/API_definitions/api.yaml"
+        result = _normalize_path(source, "/home/runner/work/Repo/Repo/")
+        assert result == "code/API_definitions/api.yaml"
+
+    def test_already_relative_unchanged(self):
+        source = "code/API_definitions/api.yaml"
+        result = _normalize_path(source, "/home/runner/work/Repo/Repo")
+        assert result == "code/API_definitions/api.yaml"
+
+    def test_no_repo_root_unchanged(self):
+        source = "/absolute/path/to/file.yaml"
+        assert _normalize_path(source, None) == source
+
+    def test_empty_source(self):
+        assert _normalize_path("", "/some/root") == ""
+
+    def test_empty_repo_root(self):
+        source = "/absolute/path/to/file.yaml"
+        assert _normalize_path(source, "") == source
+
+    def test_partial_prefix_not_stripped(self):
+        """A path that starts with a substring of repo_root is not stripped."""
+        source = "/home/runner/work/RepoExtra/code/api.yaml"
+        result = _normalize_path(source, "/home/runner/work/Repo")
+        assert result == source
+
+
 # ---------------------------------------------------------------------------
 # TestSelectRulesetPath
 # ---------------------------------------------------------------------------
@@ -204,6 +244,22 @@ def test_rule_id_and_hint_not_set(self):
         assert "rule_id" not in finding
         assert "hint" not in finding
 
+    def test_absolute_path_normalised_with_repo_root(self):
+        raw = {
+            **SAMPLE_SPECTRAL_FINDING,
+            "source": "/home/runner/work/R/R/code/API_definitions/quality-on-demand.yaml",
+        }
+        finding = normalize_finding(raw, repo_root="/home/runner/work/R/R")
+        assert finding["path"] == "code/API_definitions/quality-on-demand.yaml"
+        assert finding["api_name"] == "quality-on-demand"
+
+    def test_relative_path_unchanged_with_repo_root(self):
+        finding = normalize_finding(
+            SAMPLE_SPECTRAL_FINDING,
+            repo_root="/home/runner/work/R/R",
+        )
+        assert finding["path"] == "code/API_definitions/quality-on-demand.yaml"
+
 
 # ---------------------------------------------------------------------------
 # TestParseSpectralOutput
@@ -252,6 +308,15 @@ def test_mixed_severities(self):
         levels = [f["level"] for f in findings]
         assert levels == ["error", "warn", "hint"]
 
+    def test_repo_root_normalises_paths(self):
+        abs_finding = {
+            **SAMPLE_SPECTRAL_FINDING,
+            "source": "/runner/work/code/API_definitions/quality-on-demand.yaml",
+        }
+        raw = json.dumps([abs_finding])
+        findings = parse_spectral_output(raw, repo_root="/runner/work")
+        assert findings[0]["path"] == "code/API_definitions/quality-on-demand.yaml"
+
 
 # ---------------------------------------------------------------------------
 # TestRunSpectral
@@ -315,6 +380,23 @@ def test_spectral_timeout(self, mock_run, tmp_path):
         assert result.success is False
         assert "timed out" in result.error_message
 
+    @patch("validation.engines.spectral_adapter.subprocess.run")
+    def test_findings_paths_normalised_by_cwd(self, mock_run, tmp_path):
+        """run_spectral passes cwd as repo_root to normalise absolute paths."""
+        abs_finding = {
+            **SAMPLE_SPECTRAL_FINDING,
+            "source": f"{tmp_path}/code/API_definitions/quality-on-demand.yaml",
+        }
+        mock_run.return_value = subprocess.CompletedProcess(
+            args=[], returncode=1,
+            stdout=json.dumps([abs_finding]), stderr="",
+        )
+        result = run_spectral(
+            tmp_path / ".spectral.yaml", ["*.yaml"], cwd=tmp_path,
+        )
+        assert result.success is True
+        assert result.findings[0]["path"] == "code/API_definitions/quality-on-demand.yaml"
+
     @patch("validation.engines.spectral_adapter.subprocess.run")
     def test_command_includes_ruleset_and_patterns(self, mock_run, tmp_path):
         mock_run.return_value = subprocess.CompletedProcess(

From 66b7d94bffec92cc4e93e72361a68b028ef442cf Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 27 Mar 2026 16:11:03 +0100
Subject: [PATCH 020/157] fix(validation): expand globs in yamllint/gherkin
 adapters, advisory label
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Three bugs found during smoke testing on ReleaseTest:

1. yamllint adapter passed glob patterns as literal strings to
   subprocess.run() — yamllint doesn't expand globs internally,
   resulting in 0 findings on all files.

2. gherkin-lint's feature-finder mangles ** glob patterns
   (appends /**.feature), matching nothing.

Both adapters now expand globs via Path.glob() before invoking
the subprocess.

3. Advisory profile with findings showed "PASS" in summary/PR
   comment/commit status, which is misleading when errors exist.
   compute_overall_result() now returns "advisory" when advisory
   profile has findings, rendered as "ADVISORY" in all surfaces.
---
 validation/engines/gherkin_adapter.py      | 28 +++++++++++++++++--
 validation/engines/yamllint_adapter.py     | 28 +++++++++++++++++--
 validation/output/commit_status.py         |  1 +
 validation/output/pr_comment.py            | 20 ++++++++++----
 validation/output/workflow_summary.py      | 32 ++++++++++++++++++++--
 validation/postfilter/engine.py            | 14 ++++++++--
 validation/tests/test_gherkin_adapter.py   |  5 ++++
 validation/tests/test_postfilter_engine.py |  3 +-
 validation/tests/test_yamllint_adapter.py  | 13 +++++++++
 9 files changed, 128 insertions(+), 16 deletions(-)

diff --git a/validation/engines/gherkin_adapter.py b/validation/engines/gherkin_adapter.py
index 06ea227a..aef6a174 100644
--- a/validation/engines/gherkin_adapter.py
+++ b/validation/engines/gherkin_adapter.py
@@ -16,7 +16,7 @@
 import subprocess
 from dataclasses import dataclass
 from pathlib import Path, PurePosixPath
-from typing import List, Optional
+from typing import List, Optional, Sequence
 
 logger = logging.getLogger(__name__)
 
@@ -145,6 +145,23 @@ class GherkinResult:
     error_message: str = ""
 
 
+def _expand_globs(patterns: Sequence[str], cwd: Path) -> List[str]:
+    """Expand glob patterns relative to *cwd* into concrete file paths.
+
+    ``subprocess.run()`` without ``shell=True`` does not expand globs,
+    and gherkin-lint's internal feature-finder mangles ``**`` patterns
+    (appends ``/**.feature`` to any pattern containing ``/**``).
+    Expanding in Python avoids both issues.
+
+    Returns repo-relative POSIX path strings.
+    """
+    expanded: List[str] = []
+    for pattern in patterns:
+        matches = sorted(cwd.glob(pattern))
+        expanded.extend(str(m.relative_to(cwd)) for m in matches)
+    return expanded
+
+
 def run_gherkin_lint(
     config_path: Path,
     file_patterns: List[str],
@@ -162,11 +179,18 @@ def run_gherkin_lint(
     Returns:
         :class:`GherkinResult` with parsed findings and status.
     """
+    # Expand globs in Python — gherkin-lint's feature-finder mangles
+    # ** patterns (turns "dir/**/*.feature" into "dir/**/*.feature/**.feature").
+    files = _expand_globs(file_patterns, cwd)
+    if not files:
+        logger.info("No files matched patterns: %s", file_patterns)
+        return GherkinResult(findings=[], success=True)
+
     cmd = [
         "npx", "gherkin-lint",
         "--format", "json",
         "--config", str(config_path),
-        *file_patterns,
+        *files,
     ]
 
     try:
diff --git a/validation/engines/yamllint_adapter.py b/validation/engines/yamllint_adapter.py
index 9235b0c4..d5cc897a 100644
--- a/validation/engines/yamllint_adapter.py
+++ b/validation/engines/yamllint_adapter.py
@@ -20,7 +20,7 @@
 import sys
 from dataclasses import dataclass
 from pathlib import Path, PurePosixPath
-from typing import List, Optional
+from typing import List, Optional, Sequence
 
 logger = logging.getLogger(__name__)
 
@@ -148,6 +148,23 @@ class YamllintResult:
     error_message: str = ""
 
 
+def _expand_globs(patterns: Sequence[str], cwd: Path) -> List[str]:
+    """Expand glob patterns relative to *cwd* into concrete file paths.
+
+    ``subprocess.run()`` without ``shell=True`` does not expand globs,
+    and yamllint does not expand them internally.  This helper bridges
+    the gap so that the adapter receives the same files the orchestrator
+    discovers.
+
+    Returns repo-relative POSIX path strings (matching yamllint output).
+    """
+    expanded: List[str] = []
+    for pattern in patterns:
+        matches = sorted(cwd.glob(pattern))
+        expanded.extend(str(m.relative_to(cwd)) for m in matches)
+    return expanded
+
+
 def run_yamllint(
     config_path: Path,
     file_patterns: List[str],
@@ -166,11 +183,18 @@ def run_yamllint(
     Returns:
         :class:`YamllintResult` with parsed findings and status.
     """
+    # Expand globs in Python — subprocess doesn't expand them and
+    # yamllint doesn't accept glob patterns.
+    files = _expand_globs(file_patterns, cwd)
+    if not files:
+        logger.info("No files matched patterns: %s", file_patterns)
+        return YamllintResult(findings=[], success=True)
+
     cmd = [
         sys.executable, "-m", "yamllint",
         "--format", "parsable",
         "--config-file", str(config_path),
-        *file_patterns,
+        *files,
     ]
 
     try:
diff --git a/validation/output/commit_status.py b/validation/output/commit_status.py
index 916a94ce..b75f9962 100644
--- a/validation/output/commit_status.py
+++ b/validation/output/commit_status.py
@@ -28,6 +28,7 @@
 
 _RESULT_TO_STATE = {
     "pass": "success",
+    "advisory": "success",
     "fail": "failure",
     "error": "error",
 }
diff --git a/validation/output/pr_comment.py b/validation/output/pr_comment.py
index c7497855..4567a26f 100644
--- a/validation/output/pr_comment.py
+++ b/validation/output/pr_comment.py
@@ -25,7 +25,12 @@
 
 MARKER = ""
 
-_RESULT_LABEL = {"pass": "PASS", "fail": "FAIL", "error": "ERROR"}
+_RESULT_LABEL = {
+    "pass": "PASS",
+    "fail": "FAIL",
+    "error": "ERROR",
+    "advisory": "ADVISORY",
+}
 
 
 # ---------------------------------------------------------------------------
@@ -49,11 +54,14 @@ def generate_pr_comment(
     Returns:
         Complete Markdown string ready to post as a PR comment.
     """
-    result_label = _RESULT_LABEL.get(
-        post_filter_result.result,
-        post_filter_result.result.upper(),
-    )
-    counts = count_findings(post_filter_result.findings)
+    result = post_filter_result.result
+    findings = post_filter_result.findings
+    # Advisory profile: show ADVISORY instead of PASS when findings exist
+    if result == "pass" and context.profile == "advisory" and findings:
+        result_label = _RESULT_LABEL["advisory"]
+    else:
+        result_label = _RESULT_LABEL.get(result, result.upper())
+    counts = count_findings(findings)
 
     lines = [
         MARKER,
diff --git a/validation/output/workflow_summary.py b/validation/output/workflow_summary.py
index 964fa5fe..3aec6e2e 100644
--- a/validation/output/workflow_summary.py
+++ b/validation/output/workflow_summary.py
@@ -35,7 +35,12 @@
 
 SUMMARY_SIZE_LIMIT = 900 * 1024  # 900 KB (GitHub limit is 1 MB)
 
-_RESULT_LABEL = {"pass": "PASS", "fail": "FAIL", "error": "ERROR"}
+_RESULT_LABEL = {
+    "pass": "PASS",
+    "fail": "FAIL",
+    "error": "ERROR",
+    "advisory": "ADVISORY",
+}
 
 # ---------------------------------------------------------------------------
 # Result type
@@ -63,12 +68,33 @@ class SummaryResult:
 # ---------------------------------------------------------------------------
 
 
+def _resolve_result_label(
+    result: str,
+    context: ValidationContext,
+    findings: List[dict],
+) -> str:
+    """Map result string to display label, with advisory override.
+
+    Advisory profile never blocks, so the post-filter always returns
+    ``"pass"``.  Showing **PASS** when there are errors is misleading;
+    **ADVISORY** signals that findings exist but nothing was blocked.
+    """
+    if (
+        result == "pass"
+        and context.profile == "advisory"
+        and findings
+    ):
+        return _RESULT_LABEL["advisory"]
+    return _RESULT_LABEL.get(result, result.upper())
+
+
 def _render_header(
     result: str,
     context: ValidationContext,
+    findings: List[dict],
 ) -> str:
     """Render the summary header with result and metadata."""
-    label = _RESULT_LABEL.get(result, result.upper())
+    label = _resolve_result_label(result, context, findings)
     return (
         f"## CAMARA Validation — {label}\n\n"
         f"**Profile**: {context.profile} | "
@@ -211,7 +237,7 @@ def generate_workflow_summary(
     hints = [f for f in sorted_all if f.get("level") == "hint"]
 
     # Fixed sections (always rendered)
-    header = _render_header(post_filter_result.result, context)
+    header = _render_header(post_filter_result.result, context, findings)
     api_table = _render_api_table(findings)
     engine_table = _render_engine_table(engine_statuses)
     footer = _render_footer(context, commit_sha)
diff --git a/validation/postfilter/engine.py b/validation/postfilter/engine.py
index 94448c9c..584eb4a4 100644
--- a/validation/postfilter/engine.py
+++ b/validation/postfilter/engine.py
@@ -111,24 +111,29 @@ def _passthrough_finding(finding: dict) -> dict:
 def compute_overall_result(
     findings: List[dict],
     had_engine_error: bool,
+    profile: str = "",
 ) -> str:
     """Compute the overall result from processed findings.
 
-    Priority: ``"error"`` > ``"fail"`` > ``"pass"``.
+    Priority: ``"error"`` > ``"fail"`` > ``"advisory"`` > ``"pass"``.
 
     Args:
         findings: Post-filtered findings with ``blocks`` field set.
         had_engine_error: Whether any engine execution error occurred.
+        profile: Validation profile (advisory/standard/strict).
 
     Returns:
         ``"error"`` if evaluation was incomplete (engine failure),
         ``"fail"`` if any finding has ``blocks=True``,
+        ``"advisory"`` if profile is advisory and findings exist,
         ``"pass"`` otherwise.
     """
     if had_engine_error:
         return "error"
     if any(f.get("blocks") for f in findings):
         return "fail"
+    if profile == "advisory" and findings:
+        return "advisory"
     return "pass"
 
 
@@ -150,6 +155,11 @@ def _build_summary(result: str, findings: List[dict]) -> str:
             f"Failed: {blocking} blocking out of {total} findings "
             f"({errors} errors, {warnings} warnings, {hints} hints)"
         )
+    if result == "advisory":
+        return (
+            f"Advisory: {total} findings "
+            f"({errors} errors, {warnings} warnings, {hints} hints)"
+        )
     if total == 0:
         return "Passed: no findings"
     return (
@@ -247,7 +257,7 @@ def run_post_filter(
         processed.append(enriched)
 
     # Step 6: Overall result
-    result = compute_overall_result(processed, had_engine_error)
+    result = compute_overall_result(processed, had_engine_error, context.profile)
     summary = _build_summary(result, processed)
 
     logger.info("Post-filter result: %s — %s", result, summary)
diff --git a/validation/tests/test_gherkin_adapter.py b/validation/tests/test_gherkin_adapter.py
index fff51e20..deb085d4 100644
--- a/validation/tests/test_gherkin_adapter.py
+++ b/validation/tests/test_gherkin_adapter.py
@@ -147,6 +147,11 @@ def test_json_object_instead_of_array(self):
 
 
 class TestRunGherkinLint:
+    @pytest.fixture(autouse=True)
+    def _create_feature_file(self, tmp_path):
+        """Create a dummy .feature so glob patterns match."""
+        (tmp_path / "test.feature").write_text("Feature: dummy\n")
+
     @patch("validation.engines.gherkin_adapter.subprocess.run")
     def test_exit_0_no_findings(self, mock_run, tmp_path):
         mock_run.return_value = subprocess.CompletedProcess(
diff --git a/validation/tests/test_postfilter_engine.py b/validation/tests/test_postfilter_engine.py
index 7887949d..0d2abb86 100644
--- a/validation/tests/test_postfilter_engine.py
+++ b/validation/tests/test_postfilter_engine.py
@@ -332,8 +332,9 @@ def test_advisory_profile_nothing_blocks(self, tmp_path: Path):
         ctx = _make_context(profile="advisory")
         findings = [_make_finding(level="error")]
         result = run_post_filter(findings, ctx, tmp_path)
-        assert result.result == "pass"
+        assert result.result == "advisory"
         assert result.findings[0]["blocks"] is False
+        assert "Advisory" in result.summary
 
     def test_strict_profile_warns_block(self, tmp_path: Path):
         ctx = _make_context(profile="strict")
diff --git a/validation/tests/test_yamllint_adapter.py b/validation/tests/test_yamllint_adapter.py
index c60cfe71..2c590ff4 100644
--- a/validation/tests/test_yamllint_adapter.py
+++ b/validation/tests/test_yamllint_adapter.py
@@ -159,6 +159,11 @@ def test_all_warnings(self):
 
 
 class TestRunYamllint:
+    @pytest.fixture(autouse=True)
+    def _create_yaml_file(self, tmp_path):
+        """Create a dummy .yaml so glob patterns match."""
+        (tmp_path / "f.yaml").write_text("key: value\n")
+
     @patch("validation.engines.yamllint_adapter.subprocess.run")
     def test_exit_0_no_findings(self, mock_run, tmp_path):
         mock_run.return_value = subprocess.CompletedProcess(
@@ -204,6 +209,14 @@ def test_timeout(self, mock_run, tmp_path):
         assert result.success is False
         assert "timed out" in result.error_message
 
+    def test_no_matching_files_returns_empty_success(self, tmp_path):
+        """When no files match the glob, return empty success."""
+        result = run_yamllint(
+            tmp_path / ".yamllint.yaml", ["nonexistent/*.yaml"], cwd=tmp_path,
+        )
+        assert result.success is True
+        assert result.findings == []
+
 
 # ---------------------------------------------------------------------------
 # TestRunYamllintEngine

From 14f56f00d41270ad1a99006640f4b9487165afec Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 27 Mar 2026 16:49:15 +0100
Subject: [PATCH 021/157] fix(validation): install yamllint, invoke
 gherkin-lint directly
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Root causes for yamllint/gherkin producing 0 findings on CI:

1. yamllint was never installed — pip only installed pyyaml and
   jsonschema. python3 -m yamllint exited 1 (module not found),
   which the adapter treated as "findings found, empty stdout".
   Fix: add yamllint==1.38.0 to pip install.

2. gherkin-lint was installed in .tooling/validation/node_modules
   but npx couldn't find it because cwd was the repo root.
   The workflow already adds node_modules/.bin to PATH, so invoke
   gherkin-lint directly instead of via npx.
---
 .github/workflows/validation.yml      | 2 +-
 validation/engines/gherkin_adapter.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 58355fc0..80081591 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -152,7 +152,7 @@ jobs:
 
       # ── Step 6: Install Python dependencies ────────────────────────
       - name: Install Python dependencies
-        run: pip install --quiet pyyaml==6.0.3 jsonschema==4.26.0
+        run: pip install --quiet pyyaml==6.0.3 jsonschema==4.26.0 yamllint==1.38.0
 
       # ── Step 7: Install Node dependencies ──────────────────────────
       - name: Install Node dependencies
diff --git a/validation/engines/gherkin_adapter.py b/validation/engines/gherkin_adapter.py
index aef6a174..dd091e37 100644
--- a/validation/engines/gherkin_adapter.py
+++ b/validation/engines/gherkin_adapter.py
@@ -187,7 +187,7 @@ def run_gherkin_lint(
         return GherkinResult(findings=[], success=True)
 
     cmd = [
-        "npx", "gherkin-lint",
+        "gherkin-lint",
         "--format", "json",
         "--config", str(config_path),
         *files,
@@ -205,7 +205,7 @@ def run_gherkin_lint(
         return GherkinResult(
             findings=[],
             success=False,
-            error_message="npx/gherkin-lint not found — is Node.js installed?",
+            error_message="gherkin-lint not found — is it installed and on PATH?",
         )
     except subprocess.TimeoutExpired:
         return GherkinResult(

From 9a1377056cd3547a6c73aa78877ec7c8d07ff700 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 27 Mar 2026 16:56:23 +0100
Subject: [PATCH 022/157] fix(validation): parse gherkin-lint JSON from stderr,
 not stdout

gherkin-lint writes its --format json output to stderr, not stdout.
The adapter was parsing result.stdout which was always empty.
Now reads from stderr first, falling back to stdout.
---
 validation/engines/gherkin_adapter.py    | 4 +++-
 validation/tests/test_gherkin_adapter.py | 6 +++---
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/validation/engines/gherkin_adapter.py b/validation/engines/gherkin_adapter.py
index dd091e37..3e076f0b 100644
--- a/validation/engines/gherkin_adapter.py
+++ b/validation/engines/gherkin_adapter.py
@@ -215,8 +215,10 @@ def run_gherkin_lint(
         )
 
     # Exit 0 = clean, exit 1 = findings found.  Both produce valid JSON.
+    # gherkin-lint writes JSON to stderr (not stdout).
     if result.returncode in (0, 1):
-        findings = parse_gherkin_output(result.stdout, str(cwd))
+        raw_json = result.stderr or result.stdout
+        findings = parse_gherkin_output(raw_json, str(cwd))
         return GherkinResult(findings=findings, success=True)
 
     # Other exit codes: check for config-not-found or other runtime errors.
diff --git a/validation/tests/test_gherkin_adapter.py b/validation/tests/test_gherkin_adapter.py
index deb085d4..f5fe981f 100644
--- a/validation/tests/test_gherkin_adapter.py
+++ b/validation/tests/test_gherkin_adapter.py
@@ -155,7 +155,7 @@ def _create_feature_file(self, tmp_path):
     @patch("validation.engines.gherkin_adapter.subprocess.run")
     def test_exit_0_no_findings(self, mock_run, tmp_path):
         mock_run.return_value = subprocess.CompletedProcess(
-            args=[], returncode=0, stdout="[]", stderr="",
+            args=[], returncode=0, stdout="", stderr="[]",
         )
         result = run_gherkin_lint(
             tmp_path / ".gherkin-lintrc", ["*.feature"], cwd=tmp_path,
@@ -168,11 +168,11 @@ def test_exit_1_with_findings(self, mock_run, tmp_path):
         mock_run.return_value = subprocess.CompletedProcess(
             args=[],
             returncode=1,
-            stdout=json.dumps([{
+            stdout="",
+            stderr=json.dumps([{
                 "filePath": str(tmp_path / "code/Test_definitions/api.feature"),
                 "errors": [{"message": "m", "rule": "r", "line": 1}],
             }]),
-            stderr="",
         )
         result = run_gherkin_lint(
             tmp_path / ".gherkin-lintrc", ["*.feature"], cwd=tmp_path,

From 6ed363ee500a1eda37b99f83b5f140d91100375a Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 27 Mar 2026 17:27:24 +0100
Subject: [PATCH 023/157] fix(validation): rename summary table column to "API
 / Test"

Gherkin test file findings are attributed by filename stem which
includes the operationId suffix. The column label "API" was
misleading for test file entries. Renamed to "API / Test" to
reflect that both API specs and test files appear as row keys.
---
 validation/output/workflow_summary.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/validation/output/workflow_summary.py b/validation/output/workflow_summary.py
index 3aec6e2e..f23eab32 100644
--- a/validation/output/workflow_summary.py
+++ b/validation/output/workflow_summary.py
@@ -111,8 +111,8 @@ def _render_api_table(findings: List[dict]) -> str:
 
     lines = [
         "\n### Summary\n",
-        "| API | Errors | Warnings | Hints |",
-        "|-----|--------|----------|-------|",
+        "| API / Test | Errors | Warnings | Hints |",
+        "|------------|--------|----------|-------|",
     ]
     for api_name, counts in by_api.items():
         lines.append(

From 8d8216198bbfe6d04e832894a19831f235c33a4c Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sat, 28 Mar 2026 17:55:37 +0100
Subject: [PATCH 024/157] feat(validation): add layered token resolution for
 write surfaces

Mint validation app token via create-github-app-token, probe
GITHUB_TOKEN write access as fallback, degrade to read-only when
neither is available. Token resolution, PR comment, and commit
status consolidated into a single step, gated on PR events only.
Annotations also restricted to PR events to prevent dispatch
duplicates.
---
 .github/workflows/validation.yml | 162 ++++++++++++++++++++-----------
 1 file changed, 107 insertions(+), 55 deletions(-)

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 80081591..7a8fb80a 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -195,9 +195,9 @@ jobs:
           export PATH="${PATH_NODE_MODULES}:${PATH}"
           python -m validation.orchestrator
 
-      # ── Step 10: Emit annotations ──────────────────────────────────
+      # ── Step 10: Emit annotations (PR only) ────────────────────────
       - name: Emit annotations
-        if: always() && steps.orchestrator.outcome == 'success'
+        if: always() && steps.orchestrator.outcome == 'success' && github.event_name == 'pull_request'
         run: |
           if [ -f validation-output/annotations.txt ]; then
             cat validation-output/annotations.txt
@@ -211,68 +211,120 @@ jobs:
             cat validation-output/summary.md >> "$GITHUB_STEP_SUMMARY"
           fi
 
-      # ── Step 12: Post PR comment ───────────────────────────────────
-      - name: Post PR comment
-        if: always() && steps.orchestrator.outcome == 'success' && github.event_name == 'pull_request'
+      # ── Step 12: Mint validation app token (PR only) ─────────────
+      - name: Mint validation app token
+        id: mint-token
+        if: >-
+          always() && steps.orchestrator.outcome == 'success'
+          && github.event_name == 'pull_request'
+          && vars.VALIDATION_APP_ID != ''
         continue-on-error: true
+        uses: actions/create-github-app-token@v2
+        with:
+          app-id: ${{ vars.VALIDATION_APP_ID }}
+          private-key: ${{ secrets.VALIDATION_APP_PRIVATE_KEY }}
+
+      # ── Step 13: Post findings to PR ───────────────────────────────
+      #
+      # Token resolution (design doc section 5.1), PR comment, and commit
+      # status in a single step.  Skipped entirely for non-PR events.
+      #
+      # Tier 1 (snapshot context): placeholder — RA handoff is via
+      # artifacts (DEC-014).  Reconsidered in WP-06.13.
+      - name: Post findings to PR
+        if: >-
+          always() && steps.orchestrator.outcome == 'success'
+          && github.event_name == 'pull_request'
         uses: actions/github-script@v8
         with:
+          github-token: ${{ steps.mint-token.outputs.token || github.token }}
           script: |
             const fs = require('fs');
-            const commentPath = 'validation-output/pr-comment.md';
-            if (!fs.existsSync(commentPath)) return;
-            const body = fs.readFileSync(commentPath, 'utf8');
-            const marker = '';
-            const prNumber = context.payload.pull_request.number;
-
-            // Find existing comment with marker
-            const comments = await github.paginate(
-              github.rest.issues.listComments,
-              { owner: context.repo.owner, repo: context.repo.repo, issue_number: prNumber }
-            );
-            const existing = comments.find(c => c.body && c.body.includes(marker));
-
-            if (existing) {
-              await github.rest.issues.updateComment({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                comment_id: existing.id,
-                body: body,
-              });
-              core.info(`Updated existing comment ${existing.id}`);
+
+            // ── Token source detection ────────────────────────────
+            // The github-token input already resolved via || :
+            //   tier 2 (app token) when mint succeeded, else GITHUB_TOKEN.
+            // For fork PRs GITHUB_TOKEN lacks write access → early exit.
+            const mintOutcome = '${{ steps.mint-token.outcome }}';
+            const appToken = '${{ steps.mint-token.outputs.token }}';
+            let tokenSource;
+
+            if (mintOutcome === 'success' && appToken) {
+              tokenSource = 'validation_app';
             } else {
-              const created = await github.rest.issues.createComment({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                issue_number: prNumber,
-                body: body,
-              });
-              core.info(`Created comment ${created.data.id}`);
+              if (mintOutcome === 'failure') {
+                core.info('Validation app token minting failed — falling back');
+              } else {
+                core.info('Validation app not configured — falling back');
+              }
+              const pr = context.payload.pull_request;
+              const isForkPR = pr.head.repo.full_name !== pr.base.repo.full_name;
+              if (isForkPR) {
+                core.info('Fork PR — GITHUB_TOKEN write access restricted');
+                const notice = [
+                  '',
+                  '> **Note**: No write permissions available (expected for fork PRs',
+                  '> without validation app) — PR comment and commit status skipped.',
+                  '> Showing findings in workflow summary only.',
+                ].join('\n');
+                fs.appendFileSync(process.env.GITHUB_STEP_SUMMARY, notice);
+                return;
+              }
+              tokenSource = 'github_token';
             }
 
-      # ── Step 13: Set commit status ─────────────────────────────────
-      - name: Set commit status
-        if: always() && steps.orchestrator.outcome == 'success'
-        continue-on-error: true
-        uses: actions/github-script@v8
-        with:
-          script: |
-            const fs = require('fs');
+            core.info(`Token source: ${tokenSource}`);
+            const owner = context.repo.owner;
+            const repo = context.repo.repo;
+
+            // ── PR comment (create-or-update) ─────────────────────
+            const commentPath = 'validation-output/pr-comment.md';
+            if (fs.existsSync(commentPath)) {
+              try {
+                const body = fs.readFileSync(commentPath, 'utf8');
+                const marker = '';
+                const prNumber = context.payload.pull_request.number;
+
+                const comments = await github.paginate(
+                  github.rest.issues.listComments,
+                  { owner, repo, issue_number: prNumber }
+                );
+                const existing = comments.find(c => c.body && c.body.includes(marker));
+
+                if (existing) {
+                  await github.rest.issues.updateComment({
+                    owner, repo, comment_id: existing.id, body,
+                  });
+                  core.info(`Updated existing comment ${existing.id}`);
+                } else {
+                  const created = await github.rest.issues.createComment({
+                    owner, repo, issue_number: prNumber, body,
+                  });
+                  core.info(`Created comment ${created.data.id}`);
+                }
+              } catch (e) {
+                core.warning(`PR comment failed: ${e.message}`);
+              }
+            }
+
+            // ── Commit status ─────────────────────────────────────
             const statusPath = 'validation-output/commit-status.json';
-            if (!fs.existsSync(statusPath)) return;
-            const payload = JSON.parse(fs.readFileSync(statusPath, 'utf8'));
-            const sha = '${{ github.event.pull_request.head.sha || github.sha }}';
-
-            await github.rest.repos.createCommitStatus({
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              sha: sha,
-              state: payload.state,
-              description: payload.description,
-              context: payload.context,
-              target_url: payload.target_url,
-            });
-            core.info(`Commit status: ${payload.state}`);
+            if (fs.existsSync(statusPath)) {
+              try {
+                const payload = JSON.parse(fs.readFileSync(statusPath, 'utf8'));
+                const sha = '${{ github.event.pull_request.head.sha || github.sha }}';
+                await github.rest.repos.createCommitStatus({
+                  owner, repo, sha,
+                  state: payload.state,
+                  description: payload.description,
+                  context: payload.context,
+                  target_url: payload.target_url,
+                });
+                core.info(`Commit status: ${payload.state}`);
+              } catch (e) {
+                core.warning(`Commit status failed: ${e.message}`);
+              }
+            }
 
       # ── Step 14: Upload diagnostics ────────────────────────────────
       - name: Upload diagnostics

From 0c801201923f8b49c171fedc30d3dcbcb9234b35 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sat, 28 Mar 2026 22:43:40 +0100
Subject: [PATCH 025/157] feat(validation): add bundling pipeline as workflow
 step
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Spectral resolves external $ref natively — no pre-bundling needed for
validation. Bundling is now an output/artifact step in the workflow that
produces standalone specs via Redocly CLI for reviewer download and
release automation handoff.

Changes:
- Spectral adapter: downgrade findings from external files (e.g.
  code/common/CAMARA_common.yaml) to hint level
- Workflow: add Redocly bundle step + artifact upload after validation
- Add @redocly/cli to package.json dependencies
- Update orchestrator bundling status to reflect workflow-step design
---
 .github/workflows/validation.yml          |   38 +-
 validation/engines/spectral_adapter.py    |   14 +-
 validation/orchestrator.py                |    7 +-
 validation/package-lock.json              | 2112 ++++++++++++++++++++-
 validation/package.json                   |    1 +
 validation/tests/test_orchestrator.py     |    4 +-
 validation/tests/test_spectral_adapter.py |   45 +
 7 files changed, 2190 insertions(+), 31 deletions(-)

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 7a8fb80a..29cf78e9 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -336,7 +336,43 @@ jobs:
           if-no-files-found: ignore
           retention-days: 30
 
-      # ── Step 15: Check result ──────────────────────────────────────
+      # ── Step 15: Bundle API specs ─────────────────────────────────
+      #
+      # Produce standalone bundled specs for artifact download and
+      # release automation handoff (DEC-014).  Spectral resolves $ref
+      # natively (DEC-021), so this is an output step, not validation.
+      - name: Bundle API specs
+        if: always() && steps.orchestrator.outcome == 'success'
+        run: |
+          mkdir -p validation-output/bundled
+          BUNDLED=0
+          for spec in code/API_definitions/*.yaml; do
+            [ -f "$spec" ] || continue
+            name=$(basename "$spec")
+            if redocly bundle "$spec" -o "validation-output/bundled/$name" 2>/dev/null; then
+              if ! diff -q "$spec" "validation-output/bundled/$name" > /dev/null 2>&1; then
+                BUNDLED=$((BUNDLED + 1))
+                echo "Bundled: $name"
+              else
+                rm "validation-output/bundled/$name"
+              fi
+            else
+              echo "::warning::Bundling failed for $name (likely unresolved refs)"
+            fi
+          done
+          echo "Bundled $BUNDLED spec(s)"
+
+      # ── Step 16: Upload bundled specs ──────────────────────────────
+      - name: Upload bundled specs
+        if: always() && steps.orchestrator.outcome == 'success'
+        uses: actions/upload-artifact@v6
+        with:
+          name: validation-bundled-specs
+          path: validation-output/bundled/
+          if-no-files-found: ignore
+          retention-days: 90
+
+      # ── Step 17: Check result ──────────────────────────────────────
       - name: Check result
         if: always() && steps.orchestrator.outcome == 'success'
         run: |
diff --git a/validation/engines/spectral_adapter.py b/validation/engines/spectral_adapter.py
index 026602a5..0e3b52c0 100644
--- a/validation/engines/spectral_adapter.py
+++ b/validation/engines/spectral_adapter.py
@@ -166,22 +166,34 @@ def normalize_finding(raw: dict, repo_root: Optional[str] = None) -> dict:
     - ``raw["range"]["start"]["line"]`` is 0-indexed; add 1 for the framework.
     - ``raw["range"]["start"]["character"]`` is 0-indexed; add 1.
 
+    Findings on external files (e.g. ``code/common/CAMARA_common.yaml``)
+    that Spectral followed via ``$ref`` are downgraded to ``hint`` level
+    since they are not directly actionable by the API developer.
+
     Args:
         raw: Single finding dict from Spectral JSON output.
         repo_root: Absolute path to the repository root.  When provided,
             absolute ``source`` paths are normalised to repo-relative.
     """
     source = _normalize_path(raw.get("source", ""), repo_root)
+
+    # Findings from external files that Spectral followed via $ref
+    # (e.g. code/common/CAMARA_common.yaml) are downgraded to hint —
+    # they are not directly actionable by the API developer.
+    from_external = bool(source and "API_definitions" not in source)
+
     start = raw.get("range", {}).get("start", {})
 
     line = start.get("line", 0) + 1
     character = start.get("character")
     column = (character + 1) if character is not None else None
 
+    level = "hint" if from_external else map_severity(raw.get("severity", 1))
+
     finding: dict = {
         "engine": ENGINE_NAME,
         "engine_rule": raw.get("code", "unknown"),
-        "level": map_severity(raw.get("severity", 1)),
+        "level": level,
         "message": raw.get("message", ""),
         "path": source,
         "line": line,
diff --git a/validation/orchestrator.py b/validation/orchestrator.py
index bd3c714d..9b5c364a 100644
--- a/validation/orchestrator.py
+++ b/validation/orchestrator.py
@@ -264,8 +264,11 @@ def run_engines(
             engine_statuses["gherkin"] = f"error: {exc}"
             logger.error("gherkin-lint failed: %s", exc)
 
-    # --- Bundling (placeholder for WP-06.08) ---
-    engine_statuses["bundling"] = "not yet implemented"
+    # --- Bundling ---
+    # Spectral resolves external $ref natively (DEC-021), so bundling is not
+    # a validation prerequisite.  Bundled standalone specs are produced by a
+    # separate workflow step for artifact upload and release automation handoff.
+    engine_statuses["bundling"] = "separate workflow step"
 
     return all_findings, engine_statuses
 
diff --git a/validation/package-lock.json b/validation/package-lock.json
index b598bf36..8a0adc0e 100644
--- a/validation/package-lock.json
+++ b/validation/package-lock.json
@@ -6,6 +6,7 @@
     "": {
       "name": "camara-validation-tools",
       "dependencies": {
+        "@redocly/cli": "^1.31.0",
         "@stoplight/spectral-cli": "^6.14.0",
         "gherkin-lint": "^4.2.4"
       }
@@ -19,6 +20,96 @@
         "@types/json-schema": "^7.0.11"
       }
     },
+    "node_modules/@babel/code-frame": {
+      "version": "7.29.0",
+      "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz",
+      "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==",
+      "license": "MIT",
+      "dependencies": {
+        "@babel/helper-validator-identifier": "^7.28.5",
+        "js-tokens": "^4.0.0",
+        "picocolors": "^1.1.1"
+      },
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/helper-validator-identifier": {
+      "version": "7.28.5",
+      "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
+      "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@babel/runtime": {
+      "version": "7.29.2",
+      "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.29.2.tgz",
+      "integrity": "sha512-JiDShH45zKHWyGe4ZNVRrCjBz8Nh9TMmZG1kh4QTK8hCBTWBi8Da+i7s1fJw7/lYpM4ccepSNfqzZ/QvABBi5g==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=6.9.0"
+      }
+    },
+    "node_modules/@emotion/is-prop-valid": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-1.4.0.tgz",
+      "integrity": "sha512-QgD4fyscGcbbKwJmqNvUMSE02OsHUa+lAWKdEUIJKgqe5IwRSKd7+KhibEWdaKwgjLj0DRSHA9biAIqGBk05lw==",
+      "license": "MIT",
+      "dependencies": {
+        "@emotion/memoize": "^0.9.0"
+      }
+    },
+    "node_modules/@emotion/memoize": {
+      "version": "0.9.0",
+      "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.9.0.tgz",
+      "integrity": "sha512-30FAj7/EoJ5mwVPOWhAyCX+FPfMDrVecJAM+Iw9NRoSl4BBAQeqj4cApHHUXOVvIPgLVDsCFoz/hGD+5QQD1GQ==",
+      "license": "MIT"
+    },
+    "node_modules/@emotion/unitless": {
+      "version": "0.10.0",
+      "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.10.0.tgz",
+      "integrity": "sha512-dFoMUuQA20zvtVTuxZww6OHoJYgrzfKM1t52mVySDJnMSEa08ruEvdYQbhvyu6soU+NeLVd3yKfTfT0NeV6qGg==",
+      "license": "MIT"
+    },
+    "node_modules/@exodus/schemasafe": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/@exodus/schemasafe/-/schemasafe-1.3.0.tgz",
+      "integrity": "sha512-5Aap/GaRupgNx/feGBwLLTVv8OQFfv3pq2lPRzPg9R+IOBnDgghTGW7l7EuVXOvg5cc/xSAlRW8rBrjIC3Nvqw==",
+      "license": "MIT"
+    },
+    "node_modules/@faker-js/faker": {
+      "version": "7.6.0",
+      "resolved": "https://registry.npmjs.org/@faker-js/faker/-/faker-7.6.0.tgz",
+      "integrity": "sha512-XK6BTq1NDMo9Xqw/YkYyGjSsg44fbNwYRx7QK2CuoQgyy+f1rrTDHoExVM5PsyXCtfl2vs2vVJ0MN0yN6LppRw==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=14.0.0",
+        "npm": ">=6.0.0"
+      }
+    },
+    "node_modules/@humanwhocodes/momoa": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/@humanwhocodes/momoa/-/momoa-2.0.4.tgz",
+      "integrity": "sha512-RE815I4arJFtt+FVeU1Tgp9/Xvecacji8w/V6XtXsWWH/wz/eNkNbhb+ny/+PlVZjV0rxQpRSQKNKE3lcktHEA==",
+      "license": "Apache-2.0",
+      "engines": {
+        "node": ">=10.10.0"
+      }
+    },
+    "node_modules/@jest/schemas": {
+      "version": "29.6.3",
+      "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+      "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+      "license": "MIT",
+      "dependencies": {
+        "@sinclair/typebox": "^0.27.8"
+      },
+      "engines": {
+        "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+      }
+    },
     "node_modules/@jsep-plugin/assignment": {
       "version": "1.3.0",
       "resolved": "https://registry.npmjs.org/@jsep-plugin/assignment/-/assignment-1.3.0.tgz",
@@ -90,6 +181,266 @@
         "node": ">= 8"
       }
     },
+    "node_modules/@opentelemetry/api": {
+      "version": "1.9.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz",
+      "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==",
+      "license": "Apache-2.0",
+      "peer": true,
+      "engines": {
+        "node": ">=8.0.0"
+      }
+    },
+    "node_modules/@opentelemetry/api-logs": {
+      "version": "0.53.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.53.0.tgz",
+      "integrity": "sha512-8HArjKx+RaAI8uEIgcORbZIPklyh1YLjPSBus8hjRmvLi6DeFzgOcdZ7KwPabKj8mXF8dX0hyfAyGfycz0DbFw==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@opentelemetry/api": "^1.0.0"
+      },
+      "engines": {
+        "node": ">=14"
+      }
+    },
+    "node_modules/@opentelemetry/context-async-hooks": {
+      "version": "1.26.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/context-async-hooks/-/context-async-hooks-1.26.0.tgz",
+      "integrity": "sha512-HedpXXYzzbaoutw6DFLWLDket2FwLkLpil4hGCZ1xYEIMTcivdfwEOISgdbLEWyG3HW52gTq2V9mOVJrONgiwg==",
+      "license": "Apache-2.0",
+      "engines": {
+        "node": ">=14"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": ">=1.0.0 <1.10.0"
+      }
+    },
+    "node_modules/@opentelemetry/core": {
+      "version": "1.26.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-1.26.0.tgz",
+      "integrity": "sha512-1iKxXXE8415Cdv0yjG3G6hQnB5eVEsJce3QaawX8SjDn0mAS0ZM8fAbZZJD4ajvhC15cePvosSCut404KrIIvQ==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@opentelemetry/semantic-conventions": "1.27.0"
+      },
+      "engines": {
+        "node": ">=14"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": ">=1.0.0 <1.10.0"
+      }
+    },
+    "node_modules/@opentelemetry/exporter-trace-otlp-http": {
+      "version": "0.53.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-http/-/exporter-trace-otlp-http-0.53.0.tgz",
+      "integrity": "sha512-m7F5ZTq+V9mKGWYpX8EnZ7NjoqAU7VemQ1E2HAG+W/u0wpY1x0OmbxAXfGKFHCspdJk8UKlwPGrpcB8nay3P8A==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@opentelemetry/core": "1.26.0",
+        "@opentelemetry/otlp-exporter-base": "0.53.0",
+        "@opentelemetry/otlp-transformer": "0.53.0",
+        "@opentelemetry/resources": "1.26.0",
+        "@opentelemetry/sdk-trace-base": "1.26.0"
+      },
+      "engines": {
+        "node": ">=14"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": "^1.0.0"
+      }
+    },
+    "node_modules/@opentelemetry/otlp-exporter-base": {
+      "version": "0.53.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-exporter-base/-/otlp-exporter-base-0.53.0.tgz",
+      "integrity": "sha512-UCWPreGQEhD6FjBaeDuXhiMf6kkBODF0ZQzrk/tuQcaVDJ+dDQ/xhJp192H9yWnKxVpEjFrSSLnpqmX4VwX+eA==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@opentelemetry/core": "1.26.0",
+        "@opentelemetry/otlp-transformer": "0.53.0"
+      },
+      "engines": {
+        "node": ">=14"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": "^1.0.0"
+      }
+    },
+    "node_modules/@opentelemetry/otlp-transformer": {
+      "version": "0.53.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-transformer/-/otlp-transformer-0.53.0.tgz",
+      "integrity": "sha512-rM0sDA9HD8dluwuBxLetUmoqGJKSAbWenwD65KY9iZhUxdBHRLrIdrABfNDP7aiTjcgK8XFyTn5fhDz7N+W6DA==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@opentelemetry/api-logs": "0.53.0",
+        "@opentelemetry/core": "1.26.0",
+        "@opentelemetry/resources": "1.26.0",
+        "@opentelemetry/sdk-logs": "0.53.0",
+        "@opentelemetry/sdk-metrics": "1.26.0",
+        "@opentelemetry/sdk-trace-base": "1.26.0",
+        "protobufjs": "^7.3.0"
+      },
+      "engines": {
+        "node": ">=14"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": "^1.3.0"
+      }
+    },
+    "node_modules/@opentelemetry/otlp-transformer/node_modules/long": {
+      "version": "5.3.2",
+      "resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz",
+      "integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==",
+      "license": "Apache-2.0"
+    },
+    "node_modules/@opentelemetry/otlp-transformer/node_modules/protobufjs": {
+      "version": "7.5.4",
+      "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.4.tgz",
+      "integrity": "sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==",
+      "hasInstallScript": true,
+      "license": "BSD-3-Clause",
+      "dependencies": {
+        "@protobufjs/aspromise": "^1.1.2",
+        "@protobufjs/base64": "^1.1.2",
+        "@protobufjs/codegen": "^2.0.4",
+        "@protobufjs/eventemitter": "^1.1.0",
+        "@protobufjs/fetch": "^1.1.0",
+        "@protobufjs/float": "^1.0.2",
+        "@protobufjs/inquire": "^1.1.0",
+        "@protobufjs/path": "^1.1.2",
+        "@protobufjs/pool": "^1.1.0",
+        "@protobufjs/utf8": "^1.1.0",
+        "@types/node": ">=13.7.0",
+        "long": "^5.0.0"
+      },
+      "engines": {
+        "node": ">=12.0.0"
+      }
+    },
+    "node_modules/@opentelemetry/propagator-b3": {
+      "version": "1.26.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/propagator-b3/-/propagator-b3-1.26.0.tgz",
+      "integrity": "sha512-vvVkQLQ/lGGyEy9GT8uFnI047pajSOVnZI2poJqVGD3nJ+B9sFGdlHNnQKophE3lHfnIH0pw2ubrCTjZCgIj+Q==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@opentelemetry/core": "1.26.0"
+      },
+      "engines": {
+        "node": ">=14"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": ">=1.0.0 <1.10.0"
+      }
+    },
+    "node_modules/@opentelemetry/propagator-jaeger": {
+      "version": "1.26.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/propagator-jaeger/-/propagator-jaeger-1.26.0.tgz",
+      "integrity": "sha512-DelFGkCdaxA1C/QA0Xilszfr0t4YbGd3DjxiCDPh34lfnFr+VkkrjV9S8ZTJvAzfdKERXhfOxIKBoGPJwoSz7Q==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@opentelemetry/core": "1.26.0"
+      },
+      "engines": {
+        "node": ">=14"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": ">=1.0.0 <1.10.0"
+      }
+    },
+    "node_modules/@opentelemetry/resources": {
+      "version": "1.26.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-1.26.0.tgz",
+      "integrity": "sha512-CPNYchBE7MBecCSVy0HKpUISEeJOniWqcHaAHpmasZ3j9o6V3AyBzhRc90jdmemq0HOxDr6ylhUbDhBqqPpeNw==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@opentelemetry/core": "1.26.0",
+        "@opentelemetry/semantic-conventions": "1.27.0"
+      },
+      "engines": {
+        "node": ">=14"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": ">=1.0.0 <1.10.0"
+      }
+    },
+    "node_modules/@opentelemetry/sdk-logs": {
+      "version": "0.53.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-logs/-/sdk-logs-0.53.0.tgz",
+      "integrity": "sha512-dhSisnEgIj/vJZXZV6f6KcTnyLDx/VuQ6l3ejuZpMpPlh9S1qMHiZU9NMmOkVkwwHkMy3G6mEBwdP23vUZVr4g==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@opentelemetry/api-logs": "0.53.0",
+        "@opentelemetry/core": "1.26.0",
+        "@opentelemetry/resources": "1.26.0"
+      },
+      "engines": {
+        "node": ">=14"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": ">=1.4.0 <1.10.0"
+      }
+    },
+    "node_modules/@opentelemetry/sdk-metrics": {
+      "version": "1.26.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-1.26.0.tgz",
+      "integrity": "sha512-0SvDXmou/JjzSDOjUmetAAvcKQW6ZrvosU0rkbDGpXvvZN+pQF6JbK/Kd4hNdK4q/22yeruqvukXEJyySTzyTQ==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@opentelemetry/core": "1.26.0",
+        "@opentelemetry/resources": "1.26.0"
+      },
+      "engines": {
+        "node": ">=14"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": ">=1.3.0 <1.10.0"
+      }
+    },
+    "node_modules/@opentelemetry/sdk-trace-base": {
+      "version": "1.26.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-1.26.0.tgz",
+      "integrity": "sha512-olWQldtvbK4v22ymrKLbIcBi9L2SpMO84sCPY54IVsJhP9fRsxJT194C/AVaAuJzLE30EdhhM1VmvVYR7az+cw==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@opentelemetry/core": "1.26.0",
+        "@opentelemetry/resources": "1.26.0",
+        "@opentelemetry/semantic-conventions": "1.27.0"
+      },
+      "engines": {
+        "node": ">=14"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": ">=1.0.0 <1.10.0"
+      }
+    },
+    "node_modules/@opentelemetry/sdk-trace-node": {
+      "version": "1.26.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-node/-/sdk-trace-node-1.26.0.tgz",
+      "integrity": "sha512-Fj5IVKrj0yeUwlewCRwzOVcr5avTuNnMHWf7GPc1t6WaT78J6CJyF3saZ/0RkZfdeNO8IcBl/bNcWMVZBMRW8Q==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@opentelemetry/context-async-hooks": "1.26.0",
+        "@opentelemetry/core": "1.26.0",
+        "@opentelemetry/propagator-b3": "1.26.0",
+        "@opentelemetry/propagator-jaeger": "1.26.0",
+        "@opentelemetry/sdk-trace-base": "1.26.0",
+        "semver": "^7.5.2"
+      },
+      "engines": {
+        "node": ">=14"
+      },
+      "peerDependencies": {
+        "@opentelemetry/api": ">=1.0.0 <1.10.0"
+      }
+    },
+    "node_modules/@opentelemetry/semantic-conventions": {
+      "version": "1.27.0",
+      "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.27.0.tgz",
+      "integrity": "sha512-sAay1RrB+ONOem0OZanAR1ZI/k7yDpnOQSQmTMuGImUQb2y8EbSaCJ94FQluM74xoU03vlb2d2U90hZluL6nQg==",
+      "license": "Apache-2.0",
+      "engines": {
+        "node": ">=14"
+      }
+    },
     "node_modules/@protobufjs/aspromise": {
       "version": "1.1.2",
       "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
@@ -154,6 +505,217 @@
       "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==",
       "license": "BSD-3-Clause"
     },
+    "node_modules/@redocly/ajv": {
+      "version": "8.11.2",
+      "resolved": "https://registry.npmjs.org/@redocly/ajv/-/ajv-8.11.2.tgz",
+      "integrity": "sha512-io1JpnwtIcvojV7QKDUSIuMN/ikdOUd1ReEnUnMKGfDVridQZ31J0MmIuqwuRjWDZfmvr+Q0MqCcfHM2gTivOg==",
+      "license": "MIT",
+      "dependencies": {
+        "fast-deep-equal": "^3.1.1",
+        "json-schema-traverse": "^1.0.0",
+        "require-from-string": "^2.0.2",
+        "uri-js-replace": "^1.0.1"
+      },
+      "funding": {
+        "type": "github",
+        "url": "https://github.com/sponsors/epoberezkin"
+      }
+    },
+    "node_modules/@redocly/cli": {
+      "version": "1.34.11",
+      "resolved": "https://registry.npmjs.org/@redocly/cli/-/cli-1.34.11.tgz",
+      "integrity": "sha512-NTFegt2uQ0A4xewDPcTadsZYSlmEUzF7wnvRgmFI8Ftvjpq/coIdr9EQPmCF6wjrh9UBKdz2rMNckxLo/jhViA==",
+      "license": "MIT",
+      "dependencies": {
+        "@opentelemetry/api": "1.9.0",
+        "@opentelemetry/exporter-trace-otlp-http": "0.53.0",
+        "@opentelemetry/resources": "1.26.0",
+        "@opentelemetry/sdk-trace-node": "1.26.0",
+        "@opentelemetry/semantic-conventions": "1.27.0",
+        "@redocly/config": "0.22.0",
+        "@redocly/openapi-core": "1.34.11",
+        "@redocly/respect-core": "1.34.11",
+        "abort-controller": "3.0.0",
+        "chokidar": "3.5.3",
+        "colorette": "1.4.0",
+        "core-js": "3.32.1",
+        "dotenv": "16.4.7",
+        "form-data": "4.0.4",
+        "get-port-please": "3.0.1",
+        "glob": "7.2.3",
+        "handlebars": "4.7.8",
+        "mobx": "6.12.3",
+        "pluralize": "8.0.0",
+        "react": "^17.0.0 || ^18.2.0 || ^19.2.1",
+        "react-dom": "^17.0.0 || ^18.2.0 || ^19.2.1",
+        "redoc": "2.5.0",
+        "semver": "7.7.4",
+        "simple-websocket": "9.1.0",
+        "styled-components": "6.3.9",
+        "yargs": "17.0.1"
+      },
+      "bin": {
+        "openapi": "bin/cli.js",
+        "redocly": "bin/cli.js"
+      },
+      "engines": {
+        "node": ">=18.17.0",
+        "npm": ">=9.5.0"
+      }
+    },
+    "node_modules/@redocly/cli/node_modules/core-js": {
+      "version": "3.32.1",
+      "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.32.1.tgz",
+      "integrity": "sha512-lqufgNn9NLnESg5mQeYsxQP5w7wrViSj0jr/kv6ECQiByzQkrn1MKvV0L3acttpDqfQrHLwr2KCMgX5b8X+lyQ==",
+      "hasInstallScript": true,
+      "license": "MIT",
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/core-js"
+      }
+    },
+    "node_modules/@redocly/cli/node_modules/yargs": {
+      "version": "17.0.1",
+      "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.0.1.tgz",
+      "integrity": "sha512-xBBulfCc8Y6gLFcrPvtqKz9hz8SO0l1Ni8GgDekvBX2ro0HRQImDGnikfc33cgzcYUSncapnNcZDjVFIH3f6KQ==",
+      "license": "MIT",
+      "dependencies": {
+        "cliui": "^7.0.2",
+        "escalade": "^3.1.1",
+        "get-caller-file": "^2.0.5",
+        "require-directory": "^2.1.1",
+        "string-width": "^4.2.0",
+        "y18n": "^5.0.5",
+        "yargs-parser": "^20.2.2"
+      },
+      "engines": {
+        "node": ">=12"
+      }
+    },
+    "node_modules/@redocly/cli/node_modules/yargs-parser": {
+      "version": "20.2.9",
+      "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz",
+      "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==",
+      "license": "ISC",
+      "engines": {
+        "node": ">=10"
+      }
+    },
+    "node_modules/@redocly/config": {
+      "version": "0.22.0",
+      "resolved": "https://registry.npmjs.org/@redocly/config/-/config-0.22.0.tgz",
+      "integrity": "sha512-gAy93Ddo01Z3bHuVdPWfCwzgfaYgMdaZPcfL7JZ7hWJoK9V0lXDbigTWkhiPFAaLWzbOJ+kbUQG1+XwIm0KRGQ==",
+      "license": "MIT"
+    },
+    "node_modules/@redocly/openapi-core": {
+      "version": "1.34.11",
+      "resolved": "https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.34.11.tgz",
+      "integrity": "sha512-V09ayfnb5GyysmvARbt+voFZAjGcf7hSYxOYxSkCc4fbH/DTfq5YWoec8cflvmHHqyIFbqvmGKmYFzqhr9zxDg==",
+      "license": "MIT",
+      "dependencies": {
+        "@redocly/ajv": "8.11.2",
+        "@redocly/config": "0.22.0",
+        "colorette": "1.4.0",
+        "https-proxy-agent": "7.0.6",
+        "js-levenshtein": "1.1.6",
+        "js-yaml": "4.1.1",
+        "minimatch": "5.1.9",
+        "pluralize": "8.0.0",
+        "yaml-ast-parser": "0.0.43"
+      },
+      "engines": {
+        "node": ">=18.17.0",
+        "npm": ">=9.5.0"
+      }
+    },
+    "node_modules/@redocly/openapi-core/node_modules/brace-expansion": {
+      "version": "2.0.3",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.3.tgz",
+      "integrity": "sha512-MCV/fYJEbqx68aE58kv2cA/kiky1G8vux3OR6/jbS+jIMe/6fJWa0DTzJU7dqijOWYwHi1t29FlfYI9uytqlpA==",
+      "license": "MIT",
+      "dependencies": {
+        "balanced-match": "^1.0.0"
+      }
+    },
+    "node_modules/@redocly/openapi-core/node_modules/minimatch": {
+      "version": "5.1.9",
+      "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.9.tgz",
+      "integrity": "sha512-7o1wEA2RyMP7Iu7GNba9vc0RWWGACJOCZBJX2GJWip0ikV+wcOsgVuY9uE8CPiyQhkGFSlhuSkZPavN7u1c2Fw==",
+      "license": "ISC",
+      "dependencies": {
+        "brace-expansion": "^2.0.1"
+      },
+      "engines": {
+        "node": ">=10"
+      }
+    },
+    "node_modules/@redocly/respect-core": {
+      "version": "1.34.11",
+      "resolved": "https://registry.npmjs.org/@redocly/respect-core/-/respect-core-1.34.11.tgz",
+      "integrity": "sha512-0USZA1iRkyCZN/o5ZmgJ9vFVfWe5dE6m2IabF4g7s8EWJErbRFc8LoJ20hfoR75yGUfde+0vTPNOG5T9dlqq1g==",
+      "license": "MIT",
+      "dependencies": {
+        "@faker-js/faker": "7.6.0",
+        "@redocly/ajv": "8.11.2",
+        "@redocly/openapi-core": "1.34.11",
+        "better-ajv-errors": "1.2.0",
+        "colorette": "2.0.20",
+        "concat-stream": "2.0.0",
+        "cookie": "0.7.2",
+        "dotenv": "16.4.7",
+        "form-data": "4.0.4",
+        "jest-diff": "29.7.0",
+        "jest-matcher-utils": "29.7.0",
+        "js-yaml": "4.1.0",
+        "json-pointer": "0.6.2",
+        "jsonpath-plus": "10.3.0",
+        "open": "10.1.0",
+        "openapi-sampler": "1.7.0",
+        "outdent": "0.8.0",
+        "set-cookie-parser": "2.7.1",
+        "undici": "6.24.1"
+      },
+      "engines": {
+        "node": ">=18.17.0",
+        "npm": ">=9.5.0"
+      }
+    },
+    "node_modules/@redocly/respect-core/node_modules/colorette": {
+      "version": "2.0.20",
+      "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz",
+      "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==",
+      "license": "MIT"
+    },
+    "node_modules/@redocly/respect-core/node_modules/js-yaml": {
+      "version": "4.1.0",
+      "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
+      "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+      "license": "MIT",
+      "dependencies": {
+        "argparse": "^2.0.1"
+      },
+      "bin": {
+        "js-yaml": "bin/js-yaml.js"
+      }
+    },
+    "node_modules/@redocly/respect-core/node_modules/jsonpath-plus": {
+      "version": "10.3.0",
+      "resolved": "https://registry.npmjs.org/jsonpath-plus/-/jsonpath-plus-10.3.0.tgz",
+      "integrity": "sha512-8TNmfeTCk2Le33A3vRRwtuworG/L5RrgMvdjhKZxvyShO+mBu2fP50OWUjRLNtvw344DdDarFh9buFAZs5ujeA==",
+      "license": "MIT",
+      "dependencies": {
+        "@jsep-plugin/assignment": "^1.3.0",
+        "@jsep-plugin/regex": "^1.0.4",
+        "jsep": "^1.4.0"
+      },
+      "bin": {
+        "jsonpath": "bin/jsonpath-cli.js",
+        "jsonpath-plus": "bin/jsonpath-cli.js"
+      },
+      "engines": {
+        "node": ">=18.0.0"
+      }
+    },
     "node_modules/@rollup/plugin-commonjs": {
       "version": "22.0.2",
       "resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-22.0.2.tgz",
@@ -198,6 +760,12 @@
       "integrity": "sha512-1fMXF3YP4pZZVozF8j/ZLfvnR8NSIljt56UhbZ5PeeDmmGHpgpdwQt7ITlGvYaQukCvuBRMLEiKiYC+oeIg4cg==",
       "license": "MIT"
     },
+    "node_modules/@sinclair/typebox": {
+      "version": "0.27.10",
+      "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.10.tgz",
+      "integrity": "sha512-MTBk/3jGLNB2tVxv6uLlFh1iu64iYOQ2PbdOSK3NW8JZsmlaOh2q6sdtKowBhfw8QFLmYNzTW4/oK4uATIi6ZA==",
+      "license": "MIT"
+    },
     "node_modules/@stoplight/better-ajv-errors": {
       "version": "1.0.3",
       "resolved": "https://registry.npmjs.org/@stoplight/better-ajv-errors/-/better-ajv-errors-1.0.3.tgz",
@@ -684,6 +1252,19 @@
       "integrity": "sha512-kRz0VEkJqWLf1LLVN4pT1cg1Z9wAuvI6L97V3m2f5B76Tg8d413ddvLBPTEHAZJlnn4XSvu0FkZtViCQGVyrXQ==",
       "license": "MIT"
     },
+    "node_modules/@types/stylis": {
+      "version": "4.2.7",
+      "resolved": "https://registry.npmjs.org/@types/stylis/-/stylis-4.2.7.tgz",
+      "integrity": "sha512-VgDNokpBoKF+wrdvhAAfS55OMQpL6QRglwTwNC3kIgBrzZxA4WsFj+2eLfEA/uMUDzBcEhYmjSbwQakn/i3ajA==",
+      "license": "MIT"
+    },
+    "node_modules/@types/trusted-types": {
+      "version": "2.0.7",
+      "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz",
+      "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==",
+      "license": "MIT",
+      "optional": true
+    },
     "node_modules/@types/urijs": {
       "version": "1.19.26",
       "resolved": "https://registry.npmjs.org/@types/urijs/-/urijs-1.19.26.tgz",
@@ -708,6 +1289,15 @@
         "node": ">=6.5"
       }
     },
+    "node_modules/agent-base": {
+      "version": "7.1.4",
+      "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz",
+      "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 14"
+      }
+    },
     "node_modules/ajv": {
       "version": "8.18.0",
       "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.18.0.tgz",
@@ -789,6 +1379,25 @@
         "url": "https://github.com/chalk/ansi-styles?sponsor=1"
       }
     },
+    "node_modules/anymatch": {
+      "version": "3.1.3",
+      "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
+      "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
+      "license": "ISC",
+      "dependencies": {
+        "normalize-path": "^3.0.0",
+        "picomatch": "^2.0.4"
+      },
+      "engines": {
+        "node": ">= 8"
+      }
+    },
+    "node_modules/argparse": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+      "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
+      "license": "Python-2.0"
+    },
     "node_modules/array-buffer-byte-length": {
       "version": "1.0.2",
       "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz",
@@ -865,6 +1474,12 @@
         "node": ">= 0.4"
       }
     },
+    "node_modules/asynckit": {
+      "version": "0.4.0",
+      "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+      "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
+      "license": "MIT"
+    },
     "node_modules/available-typed-arrays": {
       "version": "1.0.7",
       "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz",
@@ -886,6 +1501,37 @@
       "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
       "license": "MIT"
     },
+    "node_modules/better-ajv-errors": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/better-ajv-errors/-/better-ajv-errors-1.2.0.tgz",
+      "integrity": "sha512-UW+IsFycygIo7bclP9h5ugkNH8EjCSgqyFB/yQ4Hqqa1OEYDtb0uFIkYE0b6+CjkgJYVM5UKI/pJPxjYe9EZlA==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@babel/code-frame": "^7.16.0",
+        "@humanwhocodes/momoa": "^2.0.2",
+        "chalk": "^4.1.2",
+        "jsonpointer": "^5.0.0",
+        "leven": "^3.1.0 < 4"
+      },
+      "engines": {
+        "node": ">= 12.13.0"
+      },
+      "peerDependencies": {
+        "ajv": "4.11.8 - 8"
+      }
+    },
+    "node_modules/binary-extensions": {
+      "version": "2.3.0",
+      "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz",
+      "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=8"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
     "node_modules/brace-expansion": {
       "version": "1.1.12",
       "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
@@ -920,6 +1566,21 @@
       "integrity": "sha512-uYBjakWipfaO/bXI7E8rq6kpwHRZK5cNYrUv2OzZSI/FvmdMyXJ2tG9dKcjEC5YHmHpUAwsargWIZNWdxb/bnQ==",
       "license": "MIT"
     },
+    "node_modules/bundle-name": {
+      "version": "4.1.0",
+      "resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-4.1.0.tgz",
+      "integrity": "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==",
+      "license": "MIT",
+      "dependencies": {
+        "run-applescript": "^7.0.0"
+      },
+      "engines": {
+        "node": ">=18"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
     "node_modules/call-bind": {
       "version": "1.0.8",
       "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz",
@@ -967,6 +1628,21 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/call-me-maybe": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.2.tgz",
+      "integrity": "sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ==",
+      "license": "MIT"
+    },
+    "node_modules/camelize": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/camelize/-/camelize-1.0.1.tgz",
+      "integrity": "sha512-dU+Tx2fsypxTgtLoE36npi3UqcjSSMNYfkqgmoEhtZrraP5VWq0K7FkWVTYa8eMPtnU/G2txVsfdCJTn9uzpuQ==",
+      "license": "MIT",
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
     "node_modules/chalk": {
       "version": "4.1.2",
       "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
@@ -983,6 +1659,39 @@
         "url": "https://github.com/chalk/chalk?sponsor=1"
       }
     },
+    "node_modules/chokidar": {
+      "version": "3.5.3",
+      "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz",
+      "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==",
+      "funding": [
+        {
+          "type": "individual",
+          "url": "https://paulmillr.com/funding/"
+        }
+      ],
+      "license": "MIT",
+      "dependencies": {
+        "anymatch": "~3.1.2",
+        "braces": "~3.0.2",
+        "glob-parent": "~5.1.2",
+        "is-binary-path": "~2.1.0",
+        "is-glob": "~4.0.1",
+        "normalize-path": "~3.0.0",
+        "readdirp": "~3.6.0"
+      },
+      "engines": {
+        "node": ">= 8.10.0"
+      },
+      "optionalDependencies": {
+        "fsevents": "~2.3.2"
+      }
+    },
+    "node_modules/classnames": {
+      "version": "2.5.1",
+      "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.5.1.tgz",
+      "integrity": "sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==",
+      "license": "MIT"
+    },
     "node_modules/cliui": {
       "version": "7.0.4",
       "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz",
@@ -994,6 +1703,15 @@
         "wrap-ansi": "^7.0.0"
       }
     },
+    "node_modules/clsx": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz",
+      "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=6"
+      }
+    },
     "node_modules/color-convert": {
       "version": "2.0.1",
       "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
@@ -1012,6 +1730,24 @@
       "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
       "license": "MIT"
     },
+    "node_modules/colorette": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.4.0.tgz",
+      "integrity": "sha512-Y2oEozpomLn7Q3HFP7dpww7AtMJplbM9lGZP6RDfHqmbeRjiwRg4n6VM6j4KLmRke85uWEI7JqF17f3pqdRA0g==",
+      "license": "MIT"
+    },
+    "node_modules/combined-stream": {
+      "version": "1.0.8",
+      "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+      "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+      "license": "MIT",
+      "dependencies": {
+        "delayed-stream": "~1.0.0"
+      },
+      "engines": {
+        "node": ">= 0.8"
+      }
+    },
     "node_modules/commander": {
       "version": "11.0.0",
       "resolved": "https://registry.npmjs.org/commander/-/commander-11.0.0.tgz",
@@ -1033,17 +1769,68 @@
       "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
       "license": "MIT"
     },
+    "node_modules/concat-stream": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-2.0.0.tgz",
+      "integrity": "sha512-MWufYdFw53ccGjCA+Ol7XJYpAlW6/prSMzuPOTRnJGcGzuhLn4Scrz7qf6o8bROZ514ltazcIFJZevcfbo0x7A==",
+      "engines": [
+        "node >= 6.0"
+      ],
+      "license": "MIT",
+      "dependencies": {
+        "buffer-from": "^1.0.0",
+        "inherits": "^2.0.3",
+        "readable-stream": "^3.0.2",
+        "typedarray": "^0.0.6"
+      }
+    },
+    "node_modules/cookie": {
+      "version": "0.7.2",
+      "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz",
+      "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
     "node_modules/core-js": {
       "version": "3.33.1",
       "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.33.1.tgz",
       "integrity": "sha512-qVSq3s+d4+GsqN0teRCJtM6tdEEXyWxjzbhVrCHmBS5ZTM0FS2MOS0D13dUXAWDUN6a+lHI/N1hF9Ytz6iLl9Q==",
       "hasInstallScript": true,
       "license": "MIT",
+      "peer": true,
       "funding": {
         "type": "opencollective",
         "url": "https://opencollective.com/core-js"
       }
     },
+    "node_modules/css-color-keywords": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/css-color-keywords/-/css-color-keywords-1.0.0.tgz",
+      "integrity": "sha512-FyyrDHZKEjXDpNJYvVsV960FiqQyXc/LlYmsxl2BcdMb2WPx0OGRVgTg55rPSyLSNMqP52R9r8geSp7apN3Ofg==",
+      "license": "ISC",
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/css-to-react-native": {
+      "version": "3.2.0",
+      "resolved": "https://registry.npmjs.org/css-to-react-native/-/css-to-react-native-3.2.0.tgz",
+      "integrity": "sha512-e8RKaLXMOFii+02mOlqwjbD00KSEKqblnpO9e++1aXS1fPQOpS1YoqdVHBqPjHNoxeF2mimzVqawm2KCbEdtHQ==",
+      "license": "MIT",
+      "dependencies": {
+        "camelize": "^1.0.0",
+        "css-color-keywords": "^1.0.0",
+        "postcss-value-parser": "^4.0.2"
+      }
+    },
+    "node_modules/csstype": {
+      "version": "3.2.3",
+      "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz",
+      "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==",
+      "license": "MIT"
+    },
     "node_modules/cucumber-messages": {
       "version": "8.0.0",
       "resolved": "https://registry.npmjs.org/cucumber-messages/-/cucumber-messages-8.0.0.tgz",
@@ -1113,6 +1900,56 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/debug": {
+      "version": "4.4.3",
+      "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+      "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
+      "license": "MIT",
+      "dependencies": {
+        "ms": "^2.1.3"
+      },
+      "engines": {
+        "node": ">=6.0"
+      },
+      "peerDependenciesMeta": {
+        "supports-color": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/decko": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/decko/-/decko-1.2.0.tgz",
+      "integrity": "sha512-m8FnyHXV1QX+S1cl+KPFDIl6NMkxtKsy6+U/aYyjrOqWMuwAwYWu7ePqrsUHtDR5Y8Yk2pi/KIDSgF+vT4cPOQ=="
+    },
+    "node_modules/default-browser": {
+      "version": "5.5.0",
+      "resolved": "https://registry.npmjs.org/default-browser/-/default-browser-5.5.0.tgz",
+      "integrity": "sha512-H9LMLr5zwIbSxrmvikGuI/5KGhZ8E2zH3stkMgM5LpOWDutGM2JZaj460Udnf1a+946zc7YBgrqEWwbk7zHvGw==",
+      "license": "MIT",
+      "dependencies": {
+        "bundle-name": "^4.1.0",
+        "default-browser-id": "^5.0.0"
+      },
+      "engines": {
+        "node": ">=18"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
+    "node_modules/default-browser-id": {
+      "version": "5.0.1",
+      "resolved": "https://registry.npmjs.org/default-browser-id/-/default-browser-id-5.0.1.tgz",
+      "integrity": "sha512-x1VCxdX4t+8wVfd1so/9w+vQ4vx7lKd2Qp5tDRutErwmR85OgmfX7RlLRMWafRMY7hbEiXIbudNrjOAPa/hL8Q==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=18"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
     "node_modules/define-data-property": {
       "version": "1.1.4",
       "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz",
@@ -1130,6 +1967,18 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/define-lazy-prop": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz",
+      "integrity": "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=12"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
     "node_modules/define-properties": {
       "version": "1.2.1",
       "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz",
@@ -1147,6 +1996,15 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/delayed-stream": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+      "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.4.0"
+      }
+    },
     "node_modules/dependency-graph": {
       "version": "0.11.0",
       "resolved": "https://registry.npmjs.org/dependency-graph/-/dependency-graph-0.11.0.tgz",
@@ -1156,6 +2014,36 @@
         "node": ">= 0.6.0"
       }
     },
+    "node_modules/diff-sequences": {
+      "version": "29.6.3",
+      "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz",
+      "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==",
+      "license": "MIT",
+      "engines": {
+        "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+      }
+    },
+    "node_modules/dompurify": {
+      "version": "3.3.3",
+      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.3.3.tgz",
+      "integrity": "sha512-Oj6pzI2+RqBfFG+qOaOLbFXLQ90ARpcGG6UePL82bJLtdsa6CYJD7nmiU8MW9nQNOtCHV3lZ/Bzq1X0QYbBZCA==",
+      "license": "(MPL-2.0 OR Apache-2.0)",
+      "optionalDependencies": {
+        "@types/trusted-types": "^2.0.7"
+      }
+    },
+    "node_modules/dotenv": {
+      "version": "16.4.7",
+      "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz",
+      "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==",
+      "license": "BSD-2-Clause",
+      "engines": {
+        "node": ">=12"
+      },
+      "funding": {
+        "url": "https://dotenvx.com"
+      }
+    },
     "node_modules/dunder-proto": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
@@ -1328,6 +2216,12 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/es6-promise": {
+      "version": "3.3.1",
+      "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-3.3.1.tgz",
+      "integrity": "sha512-SOp9Phqvqn7jtEUxPWdWfWoLmyt2VaJ6MpvP9Comy1MceMXqE6bxvaTu4iaxpYYPzhny28Lc+M87/c2cPK6lDg==",
+      "license": "MIT"
+    },
     "node_modules/escalade": {
       "version": "3.2.0",
       "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
@@ -1352,6 +2246,12 @@
         "node": ">=6"
       }
     },
+    "node_modules/eventemitter3": {
+      "version": "5.0.4",
+      "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.4.tgz",
+      "integrity": "sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==",
+      "license": "MIT"
+    },
     "node_modules/fast-deep-equal": {
       "version": "3.1.3",
       "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
@@ -1380,6 +2280,12 @@
       "integrity": "sha512-Ue0LwpDYErFbmNnZSF0UH6eImUwDmogUO1jyE+JbN2gsQz/jICm1Ve7t9QT0rNSsfJt+Hs4/S3GnsDVjL4HVrw==",
       "license": "MIT"
     },
+    "node_modules/fast-safe-stringify": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz",
+      "integrity": "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==",
+      "license": "MIT"
+    },
     "node_modules/fast-uri": {
       "version": "3.1.0",
       "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz",
@@ -1396,6 +2302,41 @@
       ],
       "license": "BSD-3-Clause"
     },
+    "node_modules/fast-xml-builder": {
+      "version": "1.1.4",
+      "resolved": "https://registry.npmjs.org/fast-xml-builder/-/fast-xml-builder-1.1.4.tgz",
+      "integrity": "sha512-f2jhpN4Eccy0/Uz9csxh3Nu6q4ErKxf0XIsasomfOihuSUa3/xw6w8dnOtCDgEItQFJG8KyXPzQXzcODDrrbOg==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/NaturalIntelligence"
+        }
+      ],
+      "license": "MIT",
+      "dependencies": {
+        "path-expression-matcher": "^1.1.3"
+      }
+    },
+    "node_modules/fast-xml-parser": {
+      "version": "5.5.9",
+      "resolved": "https://registry.npmjs.org/fast-xml-parser/-/fast-xml-parser-5.5.9.tgz",
+      "integrity": "sha512-jldvxr1MC6rtiZKgrFnDSvT8xuH+eJqxqOBThUVjYrxssYTo1avZLGql5l0a0BAERR01CadYzZ83kVEkbyDg+g==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/NaturalIntelligence"
+        }
+      ],
+      "license": "MIT",
+      "dependencies": {
+        "fast-xml-builder": "^1.1.4",
+        "path-expression-matcher": "^1.2.0",
+        "strnum": "^2.2.2"
+      },
+      "bin": {
+        "fxparser": "src/cli/cli.js"
+      }
+    },
     "node_modules/fastq": {
       "version": "1.20.1",
       "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz",
@@ -1432,6 +2373,28 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/foreach": {
+      "version": "2.0.6",
+      "resolved": "https://registry.npmjs.org/foreach/-/foreach-2.0.6.tgz",
+      "integrity": "sha512-k6GAGDyqLe9JaebCsFCoudPPWfihKu8pylYXRlqP1J7ms39iPoTtk2fviNglIeQEwdh0bQeKJ01ZPyuyQvKzwg==",
+      "license": "MIT"
+    },
+    "node_modules/form-data": {
+      "version": "4.0.4",
+      "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
+      "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
+      "license": "MIT",
+      "dependencies": {
+        "asynckit": "^0.4.0",
+        "combined-stream": "^1.0.8",
+        "es-set-tostringtag": "^2.1.0",
+        "hasown": "^2.0.2",
+        "mime-types": "^2.1.12"
+      },
+      "engines": {
+        "node": ">= 6"
+      }
+    },
     "node_modules/fs-extra": {
       "version": "10.1.0",
       "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
@@ -1546,6 +2509,12 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/get-port-please": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/get-port-please/-/get-port-please-3.0.1.tgz",
+      "integrity": "sha512-R5pcVO8Z1+pVDu8Ml3xaJCEkBiiy1VQN9za0YqH8GIi1nIqD4IzQhzY6dDzMRtdS1lyiGlucRzm8IN8wtLIXng==",
+      "license": "MIT"
+    },
     "node_modules/get-proto": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
@@ -1725,6 +2694,27 @@
       "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==",
       "license": "ISC"
     },
+    "node_modules/handlebars": {
+      "version": "4.7.8",
+      "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz",
+      "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==",
+      "license": "MIT",
+      "dependencies": {
+        "minimist": "^1.2.5",
+        "neo-async": "^2.6.2",
+        "source-map": "^0.6.1",
+        "wordwrap": "^1.0.0"
+      },
+      "bin": {
+        "handlebars": "bin/handlebars"
+      },
+      "engines": {
+        "node": ">=0.4.7"
+      },
+      "optionalDependencies": {
+        "uglify-js": "^3.1.4"
+      }
+    },
     "node_modules/has-bigints": {
       "version": "1.1.0",
       "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.1.0.tgz",
@@ -1821,6 +2811,25 @@
         "node": ">=14"
       }
     },
+    "node_modules/http2-client": {
+      "version": "1.3.5",
+      "resolved": "https://registry.npmjs.org/http2-client/-/http2-client-1.3.5.tgz",
+      "integrity": "sha512-EC2utToWl4RKfs5zd36Mxq7nzHHBuomZboI0yYL6Y0RmBgT7Sgkq4rQ0ezFTYoIsSs7Tm9SJe+o2FcAg6GBhGA==",
+      "license": "MIT"
+    },
+    "node_modules/https-proxy-agent": {
+      "version": "7.0.6",
+      "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz",
+      "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==",
+      "license": "MIT",
+      "dependencies": {
+        "agent-base": "^7.1.2",
+        "debug": "4"
+      },
+      "engines": {
+        "node": ">= 14"
+      }
+    },
     "node_modules/immer": {
       "version": "9.0.21",
       "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz",
@@ -1913,6 +2922,18 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/is-binary-path": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
+      "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
+      "license": "MIT",
+      "dependencies": {
+        "binary-extensions": "^2.0.0"
+      },
+      "engines": {
+        "node": ">=8"
+      }
+    },
     "node_modules/is-boolean-object": {
       "version": "1.2.2",
       "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.2.tgz",
@@ -1989,6 +3010,21 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/is-docker": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz",
+      "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==",
+      "license": "MIT",
+      "bin": {
+        "is-docker": "cli.js"
+      },
+      "engines": {
+        "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
     "node_modules/is-extglob": {
       "version": "2.1.1",
       "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
@@ -2053,6 +3089,24 @@
         "node": ">=0.10.0"
       }
     },
+    "node_modules/is-inside-container": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz",
+      "integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==",
+      "license": "MIT",
+      "dependencies": {
+        "is-docker": "^3.0.0"
+      },
+      "bin": {
+        "is-inside-container": "cli.js"
+      },
+      "engines": {
+        "node": ">=14.16"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
     "node_modules/is-map": {
       "version": "2.0.3",
       "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz",
@@ -2231,28 +3285,109 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
-    "node_modules/is-weakset": {
-      "version": "2.0.4",
-      "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.4.tgz",
-      "integrity": "sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==",
+    "node_modules/is-weakset": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.4.tgz",
+      "integrity": "sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==",
+      "license": "MIT",
+      "dependencies": {
+        "call-bound": "^1.0.3",
+        "get-intrinsic": "^1.2.6"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/is-wsl": {
+      "version": "3.1.1",
+      "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.1.tgz",
+      "integrity": "sha512-e6rvdUCiQCAuumZslxRJWR/Doq4VpPR82kqclvcS0efgt430SlGIk05vdCN58+VrzgtIcfNODjozVielycD4Sw==",
+      "license": "MIT",
+      "dependencies": {
+        "is-inside-container": "^1.0.0"
+      },
+      "engines": {
+        "node": ">=16"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
+    "node_modules/isarray": {
+      "version": "2.0.5",
+      "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz",
+      "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==",
+      "license": "MIT"
+    },
+    "node_modules/jest-diff": {
+      "version": "29.7.0",
+      "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz",
+      "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==",
+      "license": "MIT",
+      "dependencies": {
+        "chalk": "^4.0.0",
+        "diff-sequences": "^29.6.3",
+        "jest-get-type": "^29.6.3",
+        "pretty-format": "^29.7.0"
+      },
+      "engines": {
+        "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+      }
+    },
+    "node_modules/jest-get-type": {
+      "version": "29.6.3",
+      "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz",
+      "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==",
+      "license": "MIT",
+      "engines": {
+        "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+      }
+    },
+    "node_modules/jest-matcher-utils": {
+      "version": "29.7.0",
+      "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz",
+      "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==",
       "license": "MIT",
       "dependencies": {
-        "call-bound": "^1.0.3",
-        "get-intrinsic": "^1.2.6"
+        "chalk": "^4.0.0",
+        "jest-diff": "^29.7.0",
+        "jest-get-type": "^29.6.3",
+        "pretty-format": "^29.7.0"
       },
       "engines": {
-        "node": ">= 0.4"
-      },
-      "funding": {
-        "url": "https://github.com/sponsors/ljharb"
+        "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
       }
     },
-    "node_modules/isarray": {
-      "version": "2.0.5",
-      "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz",
-      "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==",
+    "node_modules/js-levenshtein": {
+      "version": "1.1.6",
+      "resolved": "https://registry.npmjs.org/js-levenshtein/-/js-levenshtein-1.1.6.tgz",
+      "integrity": "sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/js-tokens": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+      "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
       "license": "MIT"
     },
+    "node_modules/js-yaml": {
+      "version": "4.1.1",
+      "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
+      "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
+      "license": "MIT",
+      "dependencies": {
+        "argparse": "^2.0.1"
+      },
+      "bin": {
+        "js-yaml": "bin/js-yaml.js"
+      }
+    },
     "node_modules/jsep": {
       "version": "1.4.0",
       "resolved": "https://registry.npmjs.org/jsep/-/jsep-1.4.0.tgz",
@@ -2263,6 +3398,15 @@
         "node": ">= 10.16.0"
       }
     },
+    "node_modules/json-pointer": {
+      "version": "0.6.2",
+      "resolved": "https://registry.npmjs.org/json-pointer/-/json-pointer-0.6.2.tgz",
+      "integrity": "sha512-vLWcKbOaXlO+jvRy4qNd+TI1QUPZzfJj1tpJ3vAXDych5XJf93ftpUKe5pKCrzyIIwgBJcOcCVRUfqQP25afBw==",
+      "license": "MIT",
+      "dependencies": {
+        "foreach": "^2.0.4"
+      }
+    },
     "node_modules/json-schema-traverse": {
       "version": "1.0.0",
       "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
@@ -2341,6 +3485,24 @@
       "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==",
       "license": "Apache-2.0"
     },
+    "node_modules/loose-envify": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+      "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
+      "license": "MIT",
+      "dependencies": {
+        "js-tokens": "^3.0.0 || ^4.0.0"
+      },
+      "bin": {
+        "loose-envify": "cli.js"
+      }
+    },
+    "node_modules/lunr": {
+      "version": "2.3.9",
+      "resolved": "https://registry.npmjs.org/lunr/-/lunr-2.3.9.tgz",
+      "integrity": "sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==",
+      "license": "MIT"
+    },
     "node_modules/magic-string": {
       "version": "0.25.9",
       "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.25.9.tgz",
@@ -2350,12 +3512,30 @@
         "sourcemap-codec": "^1.4.8"
       }
     },
+    "node_modules/mark.js": {
+      "version": "8.11.1",
+      "resolved": "https://registry.npmjs.org/mark.js/-/mark.js-8.11.1.tgz",
+      "integrity": "sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==",
+      "license": "MIT"
+    },
     "node_modules/markdown-escape": {
       "version": "2.0.0",
       "resolved": "https://registry.npmjs.org/markdown-escape/-/markdown-escape-2.0.0.tgz",
       "integrity": "sha512-Trz4v0+XWlwy68LJIyw3bLbsJiC8XAbRCKF9DbEtZjyndKOGVx6n+wNB0VfoRmY2LKboQLeniap3xrb6LGSJ8A==",
       "license": "MIT"
     },
+    "node_modules/marked": {
+      "version": "4.3.0",
+      "resolved": "https://registry.npmjs.org/marked/-/marked-4.3.0.tgz",
+      "integrity": "sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A==",
+      "license": "MIT",
+      "bin": {
+        "marked": "bin/marked.js"
+      },
+      "engines": {
+        "node": ">= 12"
+      }
+    },
     "node_modules/math-intrinsics": {
       "version": "1.1.0",
       "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
@@ -2387,6 +3567,27 @@
         "node": ">=8.6"
       }
     },
+    "node_modules/mime-db": {
+      "version": "1.52.0",
+      "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+      "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/mime-types": {
+      "version": "2.1.35",
+      "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+      "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+      "license": "MIT",
+      "dependencies": {
+        "mime-db": "1.52.0"
+      },
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
     "node_modules/minimatch": {
       "version": "3.1.2",
       "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
@@ -2399,6 +3600,106 @@
         "node": "*"
       }
     },
+    "node_modules/minimist": {
+      "version": "1.2.8",
+      "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz",
+      "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==",
+      "license": "MIT",
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/mobx": {
+      "version": "6.12.3",
+      "resolved": "https://registry.npmjs.org/mobx/-/mobx-6.12.3.tgz",
+      "integrity": "sha512-c8NKkO4R2lShkSXZ2Ongj1ycjugjzFFo/UswHBnS62y07DMcTc9Rvo03/3nRyszIvwPNljlkd4S828zIBv/piw==",
+      "license": "MIT",
+      "peer": true,
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/mobx"
+      }
+    },
+    "node_modules/mobx-react": {
+      "version": "9.2.1",
+      "resolved": "https://registry.npmjs.org/mobx-react/-/mobx-react-9.2.1.tgz",
+      "integrity": "sha512-WJNNm0FB2n0Z0u+jS1QHmmWyV8l2WiAj8V8I/96kbUEN2YbYCoKW+hbbqKKRUBqElu0llxM7nWKehvRIkhBVJw==",
+      "license": "MIT",
+      "dependencies": {
+        "mobx-react-lite": "^4.1.1"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/mobx"
+      },
+      "peerDependencies": {
+        "mobx": "^6.9.0",
+        "react": "^16.8.0 || ^17 || ^18 || ^19"
+      },
+      "peerDependenciesMeta": {
+        "react-dom": {
+          "optional": true
+        },
+        "react-native": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/mobx-react-lite": {
+      "version": "4.1.1",
+      "resolved": "https://registry.npmjs.org/mobx-react-lite/-/mobx-react-lite-4.1.1.tgz",
+      "integrity": "sha512-iUxiMpsvNraCKXU+yPotsOncNNmyeS2B5DKL+TL6Tar/xm+wwNJAubJmtRSeAoYawdZqwv8Z/+5nPRHeQxTiXg==",
+      "license": "MIT",
+      "dependencies": {
+        "use-sync-external-store": "^1.4.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/mobx"
+      },
+      "peerDependencies": {
+        "mobx": "^6.9.0",
+        "react": "^16.8.0 || ^17 || ^18 || ^19"
+      },
+      "peerDependenciesMeta": {
+        "react-dom": {
+          "optional": true
+        },
+        "react-native": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/ms": {
+      "version": "2.1.3",
+      "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+      "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+      "license": "MIT"
+    },
+    "node_modules/nanoid": {
+      "version": "3.3.11",
+      "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
+      "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/ai"
+        }
+      ],
+      "license": "MIT",
+      "bin": {
+        "nanoid": "bin/nanoid.cjs"
+      },
+      "engines": {
+        "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+      }
+    },
+    "node_modules/neo-async": {
+      "version": "2.6.2",
+      "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz",
+      "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==",
+      "license": "MIT"
+    },
     "node_modules/nimma": {
       "version": "0.2.3",
       "resolved": "https://registry.npmjs.org/nimma/-/nimma-0.2.3.tgz",
@@ -2438,6 +3739,27 @@
         }
       }
     },
+    "node_modules/node-fetch-h2": {
+      "version": "2.3.0",
+      "resolved": "https://registry.npmjs.org/node-fetch-h2/-/node-fetch-h2-2.3.0.tgz",
+      "integrity": "sha512-ofRW94Ab0T4AOh5Fk8t0h8OBWrmjb0SSB20xh1H8YnPV9EJ+f5AMoYSUQ2zgJ4Iq2HAK0I2l5/Nequ8YzFS3Hg==",
+      "license": "MIT",
+      "dependencies": {
+        "http2-client": "^1.2.5"
+      },
+      "engines": {
+        "node": "4.x || >=6.0.0"
+      }
+    },
+    "node_modules/node-readfiles": {
+      "version": "0.2.0",
+      "resolved": "https://registry.npmjs.org/node-readfiles/-/node-readfiles-0.2.0.tgz",
+      "integrity": "sha512-SU00ZarexNlE4Rjdm83vglt5Y9yiQ+XI1XpflWlb7q7UTN1JUItm69xMeiQCTxtTfnzt+83T8Cx+vI2ED++VDA==",
+      "license": "MIT",
+      "dependencies": {
+        "es6-promise": "^3.2.1"
+      }
+    },
     "node_modules/node-sarif-builder": {
       "version": "2.0.3",
       "resolved": "https://registry.npmjs.org/node-sarif-builder/-/node-sarif-builder-2.0.3.tgz",
@@ -2451,6 +3773,94 @@
         "node": ">=14"
       }
     },
+    "node_modules/normalize-path": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+      "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/oas-kit-common": {
+      "version": "1.0.8",
+      "resolved": "https://registry.npmjs.org/oas-kit-common/-/oas-kit-common-1.0.8.tgz",
+      "integrity": "sha512-pJTS2+T0oGIwgjGpw7sIRU8RQMcUoKCDWFLdBqKB2BNmGpbBMH2sdqAaOXUg8OzonZHU0L7vfJu1mJFEiYDWOQ==",
+      "license": "BSD-3-Clause",
+      "dependencies": {
+        "fast-safe-stringify": "^2.0.7"
+      }
+    },
+    "node_modules/oas-linter": {
+      "version": "3.2.2",
+      "resolved": "https://registry.npmjs.org/oas-linter/-/oas-linter-3.2.2.tgz",
+      "integrity": "sha512-KEGjPDVoU5K6swgo9hJVA/qYGlwfbFx+Kg2QB/kd7rzV5N8N5Mg6PlsoCMohVnQmo+pzJap/F610qTodKzecGQ==",
+      "license": "BSD-3-Clause",
+      "dependencies": {
+        "@exodus/schemasafe": "^1.0.0-rc.2",
+        "should": "^13.2.1",
+        "yaml": "^1.10.0"
+      },
+      "funding": {
+        "url": "https://github.com/Mermade/oas-kit?sponsor=1"
+      }
+    },
+    "node_modules/oas-resolver": {
+      "version": "2.5.6",
+      "resolved": "https://registry.npmjs.org/oas-resolver/-/oas-resolver-2.5.6.tgz",
+      "integrity": "sha512-Yx5PWQNZomfEhPPOphFbZKi9W93CocQj18NlD2Pa4GWZzdZpSJvYwoiuurRI7m3SpcChrnO08hkuQDL3FGsVFQ==",
+      "license": "BSD-3-Clause",
+      "dependencies": {
+        "node-fetch-h2": "^2.3.0",
+        "oas-kit-common": "^1.0.8",
+        "reftools": "^1.1.9",
+        "yaml": "^1.10.0",
+        "yargs": "^17.0.1"
+      },
+      "bin": {
+        "resolve": "resolve.js"
+      },
+      "funding": {
+        "url": "https://github.com/Mermade/oas-kit?sponsor=1"
+      }
+    },
+    "node_modules/oas-schema-walker": {
+      "version": "1.1.5",
+      "resolved": "https://registry.npmjs.org/oas-schema-walker/-/oas-schema-walker-1.1.5.tgz",
+      "integrity": "sha512-2yucenq1a9YPmeNExoUa9Qwrt9RFkjqaMAA1X+U7sbb0AqBeTIdMHky9SQQ6iN94bO5NW0W4TRYXerG+BdAvAQ==",
+      "license": "BSD-3-Clause",
+      "funding": {
+        "url": "https://github.com/Mermade/oas-kit?sponsor=1"
+      }
+    },
+    "node_modules/oas-validator": {
+      "version": "5.0.8",
+      "resolved": "https://registry.npmjs.org/oas-validator/-/oas-validator-5.0.8.tgz",
+      "integrity": "sha512-cu20/HE5N5HKqVygs3dt94eYJfBi0TsZvPVXDhbXQHiEityDN+RROTleefoKRKKJ9dFAF2JBkDHgvWj0sjKGmw==",
+      "license": "BSD-3-Clause",
+      "dependencies": {
+        "call-me-maybe": "^1.0.1",
+        "oas-kit-common": "^1.0.8",
+        "oas-linter": "^3.2.2",
+        "oas-resolver": "^2.5.6",
+        "oas-schema-walker": "^1.1.5",
+        "reftools": "^1.1.9",
+        "should": "^13.2.1",
+        "yaml": "^1.10.0"
+      },
+      "funding": {
+        "url": "https://github.com/Mermade/oas-kit?sponsor=1"
+      }
+    },
+    "node_modules/object-assign": {
+      "version": "4.1.1",
+      "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+      "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
     "node_modules/object-inspect": {
       "version": "1.13.4",
       "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz",
@@ -2501,6 +3911,41 @@
         "wrappy": "1"
       }
     },
+    "node_modules/open": {
+      "version": "10.1.0",
+      "resolved": "https://registry.npmjs.org/open/-/open-10.1.0.tgz",
+      "integrity": "sha512-mnkeQ1qP5Ue2wd+aivTD3NHd/lZ96Lu0jgf0pwktLPtx6cTZiH7tyeGRRHs0zX0rbrahXPnXlUnbeXyaBBuIaw==",
+      "license": "MIT",
+      "dependencies": {
+        "default-browser": "^5.2.1",
+        "define-lazy-prop": "^3.0.0",
+        "is-inside-container": "^1.0.0",
+        "is-wsl": "^3.1.0"
+      },
+      "engines": {
+        "node": ">=18"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
+    "node_modules/openapi-sampler": {
+      "version": "1.7.0",
+      "resolved": "https://registry.npmjs.org/openapi-sampler/-/openapi-sampler-1.7.0.tgz",
+      "integrity": "sha512-fWq32F5vqGpgRJYIarC/9Y1wC9tKnRDcCOjsDJ7MIcSv2HsE7kNifcXIZ8FVtNStBUWxYrEk/MKqVF0SwZ5gog==",
+      "license": "MIT",
+      "dependencies": {
+        "@types/json-schema": "^7.0.7",
+        "fast-xml-parser": "^5.3.4",
+        "json-pointer": "0.6.2"
+      }
+    },
+    "node_modules/outdent": {
+      "version": "0.8.0",
+      "resolved": "https://registry.npmjs.org/outdent/-/outdent-0.8.0.tgz",
+      "integrity": "sha512-KiOAIsdpUTcAXuykya5fnVVT+/5uS0Q1mrkRHcF89tpieSmY33O/tmc54CqwA+bfhbtEfZUNLHaPUiB9X3jt1A==",
+      "license": "MIT"
+    },
     "node_modules/own-keys": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz",
@@ -2518,6 +3963,27 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/path-browserify": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz",
+      "integrity": "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==",
+      "license": "MIT"
+    },
+    "node_modules/path-expression-matcher": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/path-expression-matcher/-/path-expression-matcher-1.2.0.tgz",
+      "integrity": "sha512-DwmPWeFn+tq7TiyJ2CxezCAirXjFxvaiD03npak3cRjlP9+OjTmSy1EpIrEbh+l6JgUundniloMLDQ/6VTdhLQ==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/NaturalIntelligence"
+        }
+      ],
+      "license": "MIT",
+      "engines": {
+        "node": ">=14.0.0"
+      }
+    },
     "node_modules/path-is-absolute": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
@@ -2533,6 +3999,18 @@
       "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
       "license": "MIT"
     },
+    "node_modules/perfect-scrollbar": {
+      "version": "1.5.6",
+      "resolved": "https://registry.npmjs.org/perfect-scrollbar/-/perfect-scrollbar-1.5.6.tgz",
+      "integrity": "sha512-rixgxw3SxyJbCaSpo1n35A/fwI1r2rdwMKOTCg/AcG+xOEyZcE8UHVjpZMFCVImzsFoCZeJTT+M/rdEIQYO2nw==",
+      "license": "MIT"
+    },
+    "node_modules/picocolors": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+      "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
+      "license": "ISC"
+    },
     "node_modules/picomatch": {
       "version": "2.3.2",
       "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
@@ -2545,22 +4023,103 @@
         "url": "https://github.com/sponsors/jonschlinkert"
       }
     },
-    "node_modules/pony-cause": {
-      "version": "1.1.1",
-      "resolved": "https://registry.npmjs.org/pony-cause/-/pony-cause-1.1.1.tgz",
-      "integrity": "sha512-PxkIc/2ZpLiEzQXu5YRDOUgBlfGYBY8156HY5ZcRAwwonMk5W/MrJP2LLkG/hF7GEQzaHo2aS7ho6ZLCOvf+6g==",
-      "license": "0BSD",
+    "node_modules/pluralize": {
+      "version": "8.0.0",
+      "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-8.0.0.tgz",
+      "integrity": "sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/polished": {
+      "version": "4.3.1",
+      "resolved": "https://registry.npmjs.org/polished/-/polished-4.3.1.tgz",
+      "integrity": "sha512-OBatVyC/N7SCW/FaDHrSd+vn0o5cS855TOmYi4OkdWUMSJCET/xip//ch8xGUvtr3i44X9LVyWwQlRMTN3pwSA==",
+      "license": "MIT",
+      "dependencies": {
+        "@babel/runtime": "^7.17.8"
+      },
+      "engines": {
+        "node": ">=10"
+      }
+    },
+    "node_modules/pony-cause": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/pony-cause/-/pony-cause-1.1.1.tgz",
+      "integrity": "sha512-PxkIc/2ZpLiEzQXu5YRDOUgBlfGYBY8156HY5ZcRAwwonMk5W/MrJP2LLkG/hF7GEQzaHo2aS7ho6ZLCOvf+6g==",
+      "license": "0BSD",
+      "engines": {
+        "node": ">=12.0.0"
+      }
+    },
+    "node_modules/possible-typed-array-names": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz",
+      "integrity": "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/postcss": {
+      "version": "8.4.49",
+      "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.49.tgz",
+      "integrity": "sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA==",
+      "funding": [
+        {
+          "type": "opencollective",
+          "url": "https://opencollective.com/postcss/"
+        },
+        {
+          "type": "tidelift",
+          "url": "https://tidelift.com/funding/github/npm/postcss"
+        },
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/ai"
+        }
+      ],
+      "license": "MIT",
+      "dependencies": {
+        "nanoid": "^3.3.7",
+        "picocolors": "^1.1.1",
+        "source-map-js": "^1.2.1"
+      },
+      "engines": {
+        "node": "^10 || ^12 || >=14"
+      }
+    },
+    "node_modules/postcss-value-parser": {
+      "version": "4.2.0",
+      "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz",
+      "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==",
+      "license": "MIT"
+    },
+    "node_modules/pretty-format": {
+      "version": "29.7.0",
+      "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz",
+      "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==",
+      "license": "MIT",
+      "dependencies": {
+        "@jest/schemas": "^29.6.3",
+        "ansi-styles": "^5.0.0",
+        "react-is": "^18.0.0"
+      },
       "engines": {
-        "node": ">=12.0.0"
+        "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
       }
     },
-    "node_modules/possible-typed-array-names": {
-      "version": "1.1.0",
-      "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz",
-      "integrity": "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==",
+    "node_modules/pretty-format/node_modules/ansi-styles": {
+      "version": "5.2.0",
+      "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
+      "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
       "license": "MIT",
       "engines": {
-        "node": ">= 0.4"
+        "node": ">=10"
+      },
+      "funding": {
+        "url": "https://github.com/chalk/ansi-styles?sponsor=1"
       }
     },
     "node_modules/printable-characters": {
@@ -2569,6 +4128,32 @@
       "integrity": "sha512-dKp+C4iXWK4vVYZmYSd0KBH5F/h1HoZRsbJ82AVKRO3PEo8L4lBS/vLwhVtpwwuYcoIsVY+1JYKR268yn480uQ==",
       "license": "Unlicense"
     },
+    "node_modules/prismjs": {
+      "version": "1.30.0",
+      "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.30.0.tgz",
+      "integrity": "sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/prop-types": {
+      "version": "15.8.1",
+      "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
+      "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
+      "license": "MIT",
+      "dependencies": {
+        "loose-envify": "^1.4.0",
+        "object-assign": "^4.1.1",
+        "react-is": "^16.13.1"
+      }
+    },
+    "node_modules/prop-types/node_modules/react-is": {
+      "version": "16.13.1",
+      "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
+      "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==",
+      "license": "MIT"
+    },
     "node_modules/protobufjs": {
       "version": "6.11.4",
       "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.11.4.tgz",
@@ -2615,6 +4200,123 @@
       ],
       "license": "MIT"
     },
+    "node_modules/randombytes": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz",
+      "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==",
+      "license": "MIT",
+      "dependencies": {
+        "safe-buffer": "^5.1.0"
+      }
+    },
+    "node_modules/react": {
+      "version": "19.2.4",
+      "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz",
+      "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==",
+      "license": "MIT",
+      "peer": true,
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/react-dom": {
+      "version": "19.2.4",
+      "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz",
+      "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==",
+      "license": "MIT",
+      "peer": true,
+      "dependencies": {
+        "scheduler": "^0.27.0"
+      },
+      "peerDependencies": {
+        "react": "^19.2.4"
+      }
+    },
+    "node_modules/react-is": {
+      "version": "18.3.1",
+      "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz",
+      "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==",
+      "license": "MIT"
+    },
+    "node_modules/react-tabs": {
+      "version": "6.1.1",
+      "resolved": "https://registry.npmjs.org/react-tabs/-/react-tabs-6.1.1.tgz",
+      "integrity": "sha512-CPiuKoMFf89B7QlbFfdBD9XmUWiE3qudQputMVZB8GQvPJZRX/gqjDaDWOPDwGinEfpJKEuBCkGt83Tt4efeyA==",
+      "license": "MIT",
+      "dependencies": {
+        "clsx": "^2.0.0",
+        "prop-types": "^15.5.0"
+      },
+      "peerDependencies": {
+        "react": "^18.0.0 || ^19.0.0"
+      }
+    },
+    "node_modules/readable-stream": {
+      "version": "3.6.2",
+      "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz",
+      "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==",
+      "license": "MIT",
+      "dependencies": {
+        "inherits": "^2.0.3",
+        "string_decoder": "^1.1.1",
+        "util-deprecate": "^1.0.1"
+      },
+      "engines": {
+        "node": ">= 6"
+      }
+    },
+    "node_modules/readdirp": {
+      "version": "3.6.0",
+      "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
+      "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
+      "license": "MIT",
+      "dependencies": {
+        "picomatch": "^2.2.1"
+      },
+      "engines": {
+        "node": ">=8.10.0"
+      }
+    },
+    "node_modules/redoc": {
+      "version": "2.5.0",
+      "resolved": "https://registry.npmjs.org/redoc/-/redoc-2.5.0.tgz",
+      "integrity": "sha512-NpYsOZ1PD9qFdjbLVBZJWptqE+4Y6TkUuvEOqPUmoH7AKOmPcE+hYjotLxQNTqVoWL4z0T2uxILmcc8JGDci+Q==",
+      "license": "MIT",
+      "dependencies": {
+        "@redocly/openapi-core": "^1.4.0",
+        "classnames": "^2.3.2",
+        "decko": "^1.2.0",
+        "dompurify": "^3.2.4",
+        "eventemitter3": "^5.0.1",
+        "json-pointer": "^0.6.2",
+        "lunr": "^2.3.9",
+        "mark.js": "^8.11.1",
+        "marked": "^4.3.0",
+        "mobx-react": "^9.1.1",
+        "openapi-sampler": "^1.5.0",
+        "path-browserify": "^1.0.1",
+        "perfect-scrollbar": "^1.5.5",
+        "polished": "^4.2.2",
+        "prismjs": "^1.29.0",
+        "prop-types": "^15.8.1",
+        "react-tabs": "^6.0.2",
+        "slugify": "~1.4.7",
+        "stickyfill": "^1.1.1",
+        "swagger2openapi": "^7.0.8",
+        "url-template": "^2.0.8"
+      },
+      "engines": {
+        "node": ">=6.9",
+        "npm": ">=3.0.0"
+      },
+      "peerDependencies": {
+        "core-js": "^3.1.4",
+        "mobx": "^6.0.4",
+        "react": "^16.8.4 || ^17.0.0 || ^18.0.0 || ^19.0.0",
+        "react-dom": "^16.8.4 || ^17.0.0 || ^18.0.0 || ^19.0.0",
+        "styled-components": "^4.1.1 || ^5.1.1 || ^6.0.5"
+      }
+    },
     "node_modules/reflect.getprototypeof": {
       "version": "1.0.10",
       "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz",
@@ -2637,6 +4339,15 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/reftools": {
+      "version": "1.1.9",
+      "resolved": "https://registry.npmjs.org/reftools/-/reftools-1.1.9.tgz",
+      "integrity": "sha512-OVede/NQE13xBQ+ob5CKd5KyeJYU2YInb1bmV4nRoOfquZPkAkxuOXicSe1PvqIuZZ4kD13sPKBbR7UFDmli6w==",
+      "license": "BSD-3-Clause",
+      "funding": {
+        "url": "https://github.com/Mermade/oas-kit?sponsor=1"
+      }
+    },
     "node_modules/regexp.prototype.flags": {
       "version": "1.5.4",
       "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.4.tgz",
@@ -2729,6 +4440,18 @@
         "fsevents": "~2.3.2"
       }
     },
+    "node_modules/run-applescript": {
+      "version": "7.1.0",
+      "resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-7.1.0.tgz",
+      "integrity": "sha512-DPe5pVFaAsinSaV6QjQ6gdiedWDcRCbUuiQfQa2wmWV7+xC9bGulGI8+TdRmoFkAPaBXk8CrAbnlY2ISniJ47Q==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=18"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
     "node_modules/run-parallel": {
       "version": "1.2.0",
       "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
@@ -2771,6 +4494,26 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/safe-buffer": {
+      "version": "5.2.1",
+      "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+      "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/feross"
+        },
+        {
+          "type": "patreon",
+          "url": "https://www.patreon.com/feross"
+        },
+        {
+          "type": "consulting",
+          "url": "https://feross.org/support"
+        }
+      ],
+      "license": "MIT"
+    },
     "node_modules/safe-push-apply": {
       "version": "1.0.0",
       "resolved": "https://registry.npmjs.org/safe-push-apply/-/safe-push-apply-1.0.0.tgz",
@@ -2819,6 +4562,30 @@
         "node": ">=11.0.0"
       }
     },
+    "node_modules/scheduler": {
+      "version": "0.27.0",
+      "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz",
+      "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==",
+      "license": "MIT"
+    },
+    "node_modules/semver": {
+      "version": "7.7.4",
+      "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz",
+      "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==",
+      "license": "ISC",
+      "bin": {
+        "semver": "bin/semver.js"
+      },
+      "engines": {
+        "node": ">=10"
+      }
+    },
+    "node_modules/set-cookie-parser": {
+      "version": "2.7.1",
+      "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.1.tgz",
+      "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==",
+      "license": "MIT"
+    },
     "node_modules/set-function-length": {
       "version": "1.2.2",
       "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz",
@@ -2865,6 +4632,66 @@
         "node": ">= 0.4"
       }
     },
+    "node_modules/shallowequal": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz",
+      "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==",
+      "license": "MIT"
+    },
+    "node_modules/should": {
+      "version": "13.2.3",
+      "resolved": "https://registry.npmjs.org/should/-/should-13.2.3.tgz",
+      "integrity": "sha512-ggLesLtu2xp+ZxI+ysJTmNjh2U0TsC+rQ/pfED9bUZZ4DKefP27D+7YJVVTvKsmjLpIi9jAa7itwDGkDDmt1GQ==",
+      "license": "MIT",
+      "dependencies": {
+        "should-equal": "^2.0.0",
+        "should-format": "^3.0.3",
+        "should-type": "^1.4.0",
+        "should-type-adaptors": "^1.0.1",
+        "should-util": "^1.0.0"
+      }
+    },
+    "node_modules/should-equal": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/should-equal/-/should-equal-2.0.0.tgz",
+      "integrity": "sha512-ZP36TMrK9euEuWQYBig9W55WPC7uo37qzAEmbjHz4gfyuXrEUgF8cUvQVO+w+d3OMfPvSRQJ22lSm8MQJ43LTA==",
+      "license": "MIT",
+      "dependencies": {
+        "should-type": "^1.4.0"
+      }
+    },
+    "node_modules/should-format": {
+      "version": "3.0.3",
+      "resolved": "https://registry.npmjs.org/should-format/-/should-format-3.0.3.tgz",
+      "integrity": "sha512-hZ58adtulAk0gKtua7QxevgUaXTTXxIi8t41L3zo9AHvjXO1/7sdLECuHeIN2SRtYXpNkmhoUP2pdeWgricQ+Q==",
+      "license": "MIT",
+      "dependencies": {
+        "should-type": "^1.3.0",
+        "should-type-adaptors": "^1.0.1"
+      }
+    },
+    "node_modules/should-type": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/should-type/-/should-type-1.4.0.tgz",
+      "integrity": "sha512-MdAsTu3n25yDbIe1NeN69G4n6mUnJGtSJHygX3+oN0ZbO3DTiATnf7XnYJdGT42JCXurTb1JI0qOBR65shvhPQ==",
+      "license": "MIT"
+    },
+    "node_modules/should-type-adaptors": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/should-type-adaptors/-/should-type-adaptors-1.1.0.tgz",
+      "integrity": "sha512-JA4hdoLnN+kebEp2Vs8eBe9g7uy0zbRo+RMcU0EsNy+R+k049Ki+N5tT5Jagst2g7EAja+euFuoXFCa8vIklfA==",
+      "license": "MIT",
+      "dependencies": {
+        "should-type": "^1.3.0",
+        "should-util": "^1.0.0"
+      }
+    },
+    "node_modules/should-util": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/should-util/-/should-util-1.0.1.tgz",
+      "integrity": "sha512-oXF8tfxx5cDk8r2kYqlkUJzZpDBqVY/II2WhvU0n9Y3XYvAYRmeaf1PvvIvTgPnv4KJ+ES5M0PyDq5Jp+Ygy2g==",
+      "license": "MIT"
+    },
     "node_modules/side-channel": {
       "version": "1.1.0",
       "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz",
@@ -2949,6 +4776,42 @@
         "node": ">=12"
       }
     },
+    "node_modules/simple-websocket": {
+      "version": "9.1.0",
+      "resolved": "https://registry.npmjs.org/simple-websocket/-/simple-websocket-9.1.0.tgz",
+      "integrity": "sha512-8MJPnjRN6A8UCp1I+H/dSFyjwJhp6wta4hsVRhjf8w9qBHRzxYt14RaOcjvQnhD1N4yKOddEjflwMnQM4VtXjQ==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/feross"
+        },
+        {
+          "type": "patreon",
+          "url": "https://www.patreon.com/feross"
+        },
+        {
+          "type": "consulting",
+          "url": "https://feross.org/support"
+        }
+      ],
+      "license": "MIT",
+      "dependencies": {
+        "debug": "^4.3.1",
+        "queue-microtask": "^1.2.2",
+        "randombytes": "^2.1.0",
+        "readable-stream": "^3.6.0",
+        "ws": "^7.4.2"
+      }
+    },
+    "node_modules/slugify": {
+      "version": "1.4.7",
+      "resolved": "https://registry.npmjs.org/slugify/-/slugify-1.4.7.tgz",
+      "integrity": "sha512-tf+h5W1IrjNm/9rKKj0JU2MDMruiopx0jjVA5zCdBtcGjfp0+c5rHw/zADLC3IeKlGHtVbHtpfzvYA0OYT+HKg==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=8.0.0"
+      }
+    },
     "node_modules/source-map": {
       "version": "0.6.1",
       "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
@@ -2958,6 +4821,15 @@
         "node": ">=0.10.0"
       }
     },
+    "node_modules/source-map-js": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+      "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
+      "license": "BSD-3-Clause",
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
     "node_modules/source-map-support": {
       "version": "0.5.21",
       "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz",
@@ -2985,6 +4857,11 @@
         "get-source": "^2.0.12"
       }
     },
+    "node_modules/stickyfill": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/stickyfill/-/stickyfill-1.1.1.tgz",
+      "integrity": "sha512-GCp7vHAfpao+Qh/3Flh9DXEJ/qSi0KJwJw6zYlZOtRYXWUIpMM6mC2rIep/dK8RQqwW0KxGJIllmjPIBOGN8AA=="
+    },
     "node_modules/stop-iteration-iterator": {
       "version": "1.1.0",
       "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.1.0.tgz",
@@ -2998,6 +4875,15 @@
         "node": ">= 0.4"
       }
     },
+    "node_modules/string_decoder": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
+      "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
+      "license": "MIT",
+      "dependencies": {
+        "safe-buffer": "~5.2.0"
+      }
+    },
     "node_modules/string-width": {
       "version": "4.2.3",
       "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
@@ -3089,6 +4975,58 @@
         "node": ">=8"
       }
     },
+    "node_modules/strnum": {
+      "version": "2.2.2",
+      "resolved": "https://registry.npmjs.org/strnum/-/strnum-2.2.2.tgz",
+      "integrity": "sha512-DnR90I+jtXNSTXWdwrEy9FakW7UX+qUZg28gj5fk2vxxl7uS/3bpI4fjFYVmdK9etptYBPNkpahuQnEwhwECqA==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/NaturalIntelligence"
+        }
+      ],
+      "license": "MIT"
+    },
+    "node_modules/styled-components": {
+      "version": "6.3.9",
+      "resolved": "https://registry.npmjs.org/styled-components/-/styled-components-6.3.9.tgz",
+      "integrity": "sha512-J72R4ltw0UBVUlEjTzI0gg2STOqlI9JBhQOL4Dxt7aJOnnSesy0qJDn4PYfMCafk9cWOaVg129Pesl5o+DIh0Q==",
+      "license": "MIT",
+      "peer": true,
+      "dependencies": {
+        "@emotion/is-prop-valid": "1.4.0",
+        "@emotion/unitless": "0.10.0",
+        "@types/stylis": "4.2.7",
+        "css-to-react-native": "3.2.0",
+        "csstype": "3.2.3",
+        "postcss": "8.4.49",
+        "shallowequal": "1.1.0",
+        "stylis": "4.3.6",
+        "tslib": "2.8.1"
+      },
+      "engines": {
+        "node": ">= 16"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/styled-components"
+      },
+      "peerDependencies": {
+        "react": ">= 16.8.0",
+        "react-dom": ">= 16.8.0"
+      },
+      "peerDependenciesMeta": {
+        "react-dom": {
+          "optional": true
+        }
+      }
+    },
+    "node_modules/stylis": {
+      "version": "4.3.6",
+      "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz",
+      "integrity": "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==",
+      "license": "MIT"
+    },
     "node_modules/supports-color": {
       "version": "7.2.0",
       "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
@@ -3113,6 +5051,33 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/swagger2openapi": {
+      "version": "7.0.8",
+      "resolved": "https://registry.npmjs.org/swagger2openapi/-/swagger2openapi-7.0.8.tgz",
+      "integrity": "sha512-upi/0ZGkYgEcLeGieoz8gT74oWHA0E7JivX7aN9mAf+Tc7BQoRBvnIGHoPDw+f9TXTW4s6kGYCZJtauP6OYp7g==",
+      "license": "BSD-3-Clause",
+      "dependencies": {
+        "call-me-maybe": "^1.0.1",
+        "node-fetch": "^2.6.1",
+        "node-fetch-h2": "^2.3.0",
+        "node-readfiles": "^0.2.0",
+        "oas-kit-common": "^1.0.8",
+        "oas-resolver": "^2.5.6",
+        "oas-schema-walker": "^1.1.5",
+        "oas-validator": "^5.0.8",
+        "reftools": "^1.1.9",
+        "yaml": "^1.10.0",
+        "yargs": "^17.0.1"
+      },
+      "bin": {
+        "boast": "boast.js",
+        "oas-validate": "oas-validate.js",
+        "swagger2openapi": "swagger2openapi.js"
+      },
+      "funding": {
+        "url": "https://github.com/Mermade/oas-kit?sponsor=1"
+      }
+    },
     "node_modules/text-table": {
       "version": "0.2.0",
       "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
@@ -3217,6 +5182,25 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/typedarray": {
+      "version": "0.0.6",
+      "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz",
+      "integrity": "sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==",
+      "license": "MIT"
+    },
+    "node_modules/uglify-js": {
+      "version": "3.19.3",
+      "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz",
+      "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==",
+      "license": "BSD-2-Clause",
+      "optional": true,
+      "bin": {
+        "uglifyjs": "bin/uglifyjs"
+      },
+      "engines": {
+        "node": ">=0.8.0"
+      }
+    },
     "node_modules/unbox-primitive": {
       "version": "1.1.0",
       "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.1.0.tgz",
@@ -3235,6 +5219,15 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/undici": {
+      "version": "6.24.1",
+      "resolved": "https://registry.npmjs.org/undici/-/undici-6.24.1.tgz",
+      "integrity": "sha512-sC+b0tB1whOCzbtlx20fx3WgCXwkW627p4EA9uM+/tNNPkSS+eSEld6pAs9nDv7WbY1UUljBMYPtu9BCOrCWKA==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=18.17"
+      }
+    },
     "node_modules/undici-types": {
       "version": "7.18.2",
       "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.18.2.tgz",
@@ -3250,12 +5243,39 @@
         "node": ">= 10.0.0"
       }
     },
+    "node_modules/uri-js-replace": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/uri-js-replace/-/uri-js-replace-1.0.1.tgz",
+      "integrity": "sha512-W+C9NWNLFOoBI2QWDp4UT9pv65r2w5Cx+3sTYFvtMdDBxkKt1syCqsUdSFAChbEe1uK5TfS04wt/nGwmaeIQ0g==",
+      "license": "MIT"
+    },
     "node_modules/urijs": {
       "version": "1.19.11",
       "resolved": "https://registry.npmjs.org/urijs/-/urijs-1.19.11.tgz",
       "integrity": "sha512-HXgFDgDommxn5/bIv0cnQZsPhHDA90NPHD6+c/v21U5+Sx5hoP8+dP9IZXBU1gIfvdRfhG8cel9QNPeionfcCQ==",
       "license": "MIT"
     },
+    "node_modules/url-template": {
+      "version": "2.0.8",
+      "resolved": "https://registry.npmjs.org/url-template/-/url-template-2.0.8.tgz",
+      "integrity": "sha512-XdVKMF4SJ0nP/O7XIPB0JwAEuT9lDIYnNsK8yGVe43y0AWoKeJNdv3ZNWh7ksJ6KqQFjOO6ox/VEitLnaVNufw==",
+      "license": "BSD"
+    },
+    "node_modules/use-sync-external-store": {
+      "version": "1.6.0",
+      "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz",
+      "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==",
+      "license": "MIT",
+      "peerDependencies": {
+        "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+      }
+    },
+    "node_modules/util-deprecate": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+      "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==",
+      "license": "MIT"
+    },
     "node_modules/utility-types": {
       "version": "3.11.0",
       "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.11.0.tgz",
@@ -3385,6 +5405,12 @@
         "url": "https://github.com/sponsors/ljharb"
       }
     },
+    "node_modules/wordwrap": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz",
+      "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==",
+      "license": "MIT"
+    },
     "node_modules/wrap-ansi": {
       "version": "7.0.0",
       "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
@@ -3408,6 +5434,27 @@
       "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
       "license": "ISC"
     },
+    "node_modules/ws": {
+      "version": "7.5.10",
+      "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz",
+      "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==",
+      "license": "MIT",
+      "engines": {
+        "node": ">=8.3.0"
+      },
+      "peerDependencies": {
+        "bufferutil": "^4.0.1",
+        "utf-8-validate": "^5.0.2"
+      },
+      "peerDependenciesMeta": {
+        "bufferutil": {
+          "optional": true
+        },
+        "utf-8-validate": {
+          "optional": true
+        }
+      }
+    },
     "node_modules/xml-js": {
       "version": "1.6.11",
       "resolved": "https://registry.npmjs.org/xml-js/-/xml-js-1.6.11.tgz",
@@ -3429,6 +5476,21 @@
         "node": ">=10"
       }
     },
+    "node_modules/yaml": {
+      "version": "1.10.3",
+      "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.3.tgz",
+      "integrity": "sha512-vIYeF1u3CjlhAFekPPAk2h/Kv4T3mAkMox5OymRiJQB0spDP10LHvt+K7G9Ny6NuuMAb25/6n1qyUjAcGNf/AA==",
+      "license": "ISC",
+      "engines": {
+        "node": ">= 6"
+      }
+    },
+    "node_modules/yaml-ast-parser": {
+      "version": "0.0.43",
+      "resolved": "https://registry.npmjs.org/yaml-ast-parser/-/yaml-ast-parser-0.0.43.tgz",
+      "integrity": "sha512-2PTINUwsRqSd+s8XxKaJWQlUuEMHJQyEuh2edBbW8KNJz0SJPwUSD2zRWqezFEdN7IzAgeuYHFUCF7o8zRdZ0A==",
+      "license": "Apache-2.0"
+    },
     "node_modules/yargs": {
       "version": "17.7.2",
       "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
diff --git a/validation/package.json b/validation/package.json
index 77e143ed..8686da9c 100644
--- a/validation/package.json
+++ b/validation/package.json
@@ -3,6 +3,7 @@
   "private": true,
   "description": "Node.js tool dependencies for CAMARA validation framework",
   "dependencies": {
+    "@redocly/cli": "^1.31.0",
     "@stoplight/spectral-cli": "^6.14.0",
     "gherkin-lint": "^4.2.4"
   }
diff --git a/validation/tests/test_orchestrator.py b/validation/tests/test_orchestrator.py
index 81e03e51..865864c3 100644
--- a/validation/tests/test_orchestrator.py
+++ b/validation/tests/test_orchestrator.py
@@ -278,7 +278,7 @@ def test_all_engines_called(
         assert "finding(s)" in statuses["spectral"]
         assert "finding(s)" in statuses["python"]
         assert "finding(s)" in statuses["gherkin"]
-        assert statuses["bundling"] == "not yet implemented"
+        assert statuses["bundling"] == "separate workflow step"
 
     @patch("validation.orchestrator.run_gherkin_engine")
     @patch("validation.orchestrator.run_python_engine")
@@ -638,7 +638,7 @@ def test_engine_statuses_passed_to_summary(
             "spectral": "3 finding(s)",
             "python": "0 finding(s)",
             "gherkin": "skipped (no test files)",
-            "bundling": "not yet implemented",
+            "bundling": "separate workflow step",
         }
         mock_engines.return_value = ([], statuses)
         mock_postfilter.return_value = _make_post_filter_result()
diff --git a/validation/tests/test_spectral_adapter.py b/validation/tests/test_spectral_adapter.py
index b19f5104..a7c32dca 100644
--- a/validation/tests/test_spectral_adapter.py
+++ b/validation/tests/test_spectral_adapter.py
@@ -260,6 +260,38 @@ def test_relative_path_unchanged_with_repo_root(self):
         )
         assert finding["path"] == "code/API_definitions/quality-on-demand.yaml"
 
+    def test_external_file_finding_downgraded_to_hint(self):
+        """Findings on files outside API_definitions/ (e.g. common schemas
+        followed via $ref) are downgraded to hint."""
+        raw = {
+            **SAMPLE_SPECTRAL_FINDING,
+            "source": "code/common/CAMARA_common.yaml",
+        }
+        finding = normalize_finding(raw)
+        assert finding["level"] == "hint"
+        assert finding["path"] == "code/common/CAMARA_common.yaml"
+
+    def test_external_file_absolute_path_downgraded_to_hint(self):
+        raw = {
+            **SAMPLE_SPECTRAL_FINDING,
+            "source": "/home/runner/work/R/R/code/common/CAMARA_common.yaml",
+        }
+        finding = normalize_finding(raw, repo_root="/home/runner/work/R/R")
+        assert finding["level"] == "hint"
+
+    def test_empty_source_keeps_original_severity(self):
+        """Findings with empty source (e.g. engine-level errors) keep severity."""
+        raw = {
+            "code": "some-rule",
+            "message": "msg",
+            "severity": 0,
+            "source": "",
+            "range": {"start": {"line": 0}},
+        }
+        finding = normalize_finding(raw)
+        assert finding["level"] == "error"
+        assert finding["path"] == ""
+
 
 # ---------------------------------------------------------------------------
 # TestParseSpectralOutput
@@ -317,6 +349,19 @@ def test_repo_root_normalises_paths(self):
         findings = parse_spectral_output(raw, repo_root="/runner/work")
         assert findings[0]["path"] == "code/API_definitions/quality-on-demand.yaml"
 
+    def test_external_file_findings_downgraded_to_hint(self):
+        """Findings from common schemas (followed via $ref) become hints."""
+        common_finding = {
+            **SAMPLE_SPECTRAL_FINDING,
+            "source": "code/common/CAMARA_common.yaml",
+            "code": "camara-properties-descriptions",
+        }
+        raw = json.dumps([SAMPLE_SPECTRAL_FINDING, common_finding])
+        findings = parse_spectral_output(raw)
+        assert len(findings) == 2
+        assert findings[0]["level"] == "error"  # original API finding
+        assert findings[1]["level"] == "hint"   # external finding downgraded
+
 
 # ---------------------------------------------------------------------------
 # TestRunSpectral

From be744f4d16921f818dfec6068e2bf2d5baf82650 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sat, 28 Mar 2026 23:32:21 +0100
Subject: [PATCH 026/157] fix(validation): add node_modules to PATH for
 bundling step

The Redocly CLI binary is in .tooling/validation/node_modules/.bin/
which was only on PATH for the orchestrator step. The bundling step
needs it too.
---
 .github/workflows/validation.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 29cf78e9..8af75296 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -343,7 +343,10 @@ jobs:
       # natively (DEC-021), so this is an output step, not validation.
       - name: Bundle API specs
         if: always() && steps.orchestrator.outcome == 'success'
+        env:
+          PATH_NODE_MODULES: ${{ github.workspace }}/.tooling/validation/node_modules/.bin
         run: |
+          export PATH="${PATH_NODE_MODULES}:${PATH}"
           mkdir -p validation-output/bundled
           BUNDLED=0
           for spec in code/API_definitions/*.yaml; do

From 4acc23aa9d38cc78491449f86fc1c31c1e1940f8 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sat, 28 Mar 2026 23:42:03 +0100
Subject: [PATCH 027/157] fix(validation): capture redocly bundle stderr in
 warning message

---
 .github/workflows/validation.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 8af75296..9ee48e98 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -352,7 +352,7 @@ jobs:
           for spec in code/API_definitions/*.yaml; do
             [ -f "$spec" ] || continue
             name=$(basename "$spec")
-            if redocly bundle "$spec" -o "validation-output/bundled/$name" 2>/dev/null; then
+            if err=$(redocly bundle "$spec" -o "validation-output/bundled/$name" 2>&1); then
               if ! diff -q "$spec" "validation-output/bundled/$name" > /dev/null 2>&1; then
                 BUNDLED=$((BUNDLED + 1))
                 echo "Bundled: $name"
@@ -360,7 +360,7 @@ jobs:
                 rm "validation-output/bundled/$name"
               fi
             else
-              echo "::warning::Bundling failed for $name (likely unresolved refs)"
+              echo "::warning::Bundling failed for $name: ${err##*$'\n'}"
             fi
           done
           echo "Bundled $BUNDLED spec(s)"

From 140fb2c7a40f5ea73bf330d75c5373b836b9cb8d Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Mon, 30 Mar 2026 22:35:22 +0200
Subject: [PATCH 028/157] feat(validation): shared action, full-scope
 release-review, metadata fallback

Part 1 of release automation handoff (WP-06.13):

- Remove release-review PR skip logic for yamllint/Spectral (DEC-011
  revision: all engines run on release-review PRs for defense-in-depth
  against bundling/transformation errors)

- Add release-metadata.yaml parser with field mapping to ReleasePlanData
  (enriched tag extraction, api_status derivation from version)

- Context builder falls back to release-metadata.yaml when release-plan
  is absent on snapshot branches (ensures correct Spectral ruleset and
  per-API check coverage)

- Create shared-actions/run-validation composite action for reuse by
  both validation.yml and RA workflow (pre-snapshot gate in Part 2)

- Update token resolution comment per DEC-022 architecture

596 tests passing (24 new).
---
 .github/workflows/validation.yml              |   6 +-
 shared-actions/run-validation/action.yml      | 116 +++++++++
 validation/context/context_builder.py         |  16 ++
 validation/context/release_metadata_parser.py | 158 ++++++++++++
 validation/orchestrator.py                    |  62 +++--
 validation/tests/test_context_builder.py      | 136 ++++++++++
 validation/tests/test_orchestrator.py         |  20 +-
 .../tests/test_release_metadata_parser.py     | 233 ++++++++++++++++++
 8 files changed, 704 insertions(+), 43 deletions(-)
 create mode 100644 shared-actions/run-validation/action.yml
 create mode 100644 validation/context/release_metadata_parser.py
 create mode 100644 validation/tests/test_release_metadata_parser.py

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 9ee48e98..52a696a3 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -229,8 +229,10 @@ jobs:
       # Token resolution (design doc section 5.1), PR comment, and commit
       # status in a single step.  Skipped entirely for non-PR events.
       #
-      # Tier 1 (snapshot context): placeholder — RA handoff is via
-      # artifacts (DEC-014).  Reconsidered in WP-06.13.
+      # Pre-snapshot validation runs via the shared run-validation
+      # action inside the RA workflow (DEC-022).  Release-review PRs
+      # use the standard token tiers below (validation app or
+      # GITHUB_TOKEN).  No RA-specific token tier needed.
       - name: Post findings to PR
         if: >-
           always() && steps.orchestrator.outcome == 'success'
diff --git a/shared-actions/run-validation/action.yml b/shared-actions/run-validation/action.yml
new file mode 100644
index 00000000..bac801dc
--- /dev/null
+++ b/shared-actions/run-validation/action.yml
@@ -0,0 +1,116 @@
+name: run-validation
+description: |
+  Run the CAMARA validation orchestrator and return results.
+
+  Installs Python and Node dependencies, runs the validation pipeline
+  (all engines, post-filter, output generation), writes workflow summary,
+  and outputs the overall result.
+
+  Prerequisites: Python 3.11 and Node 24 must be available (caller sets
+  up runtimes via setup-python and setup-node before invoking this action).
+
+inputs:
+  repo_path:
+    description: 'Absolute path to the API repository checkout'
+    required: true
+  tooling_path:
+    description: 'Absolute path to the tooling checkout (parent of validation/ and linting/)'
+    required: true
+  mode:
+    description: 'Execution mode: "" (default) or "pre-snapshot"'
+    required: false
+    default: ''
+  profile:
+    description: 'Profile override: advisory, standard, strict, or "" for auto-select'
+    required: false
+    default: ''
+  release_plan_changed:
+    description: 'Whether release-plan.yaml changed in this PR (true/false)'
+    required: false
+    default: 'false'
+  tooling_ref:
+    description: 'Tooling ref used for this run (for diagnostics)'
+    required: false
+    default: ''
+
+outputs:
+  result:
+    description: 'Validation result: pass, fail, or error'
+    value: ${{ steps.read-result.outputs.result }}
+  should_fail:
+    description: 'Whether the caller should fail the workflow (true/false)'
+    value: ${{ steps.read-result.outputs.should_fail }}
+  summary:
+    description: 'One-line result summary'
+    value: ${{ steps.read-result.outputs.summary }}
+  output_dir:
+    description: 'Absolute path to the validation output directory'
+    value: ${{ steps.run.outputs.output_dir }}
+
+runs:
+  using: composite
+  steps:
+    - name: Install Python dependencies
+      shell: bash
+      run: pip install --quiet pyyaml==6.0.3 jsonschema==4.26.0 yamllint==1.38.0
+
+    - name: Install Node dependencies
+      shell: bash
+      run: npm ci --ignore-scripts
+      working-directory: ${{ inputs.tooling_path }}/validation
+
+    - name: Run validation orchestrator
+      id: run
+      shell: bash
+      env:
+        PYTHONPATH: ${{ inputs.tooling_path }}
+        PATH_NODE_MODULES: ${{ inputs.tooling_path }}/validation/node_modules/.bin
+        VALIDATION_REPO_PATH: ${{ inputs.repo_path }}
+        VALIDATION_TOOLING_PATH: ${{ inputs.tooling_path }}
+        VALIDATION_OUTPUT_DIR: ${{ inputs.repo_path }}/validation-output
+        VALIDATION_REPO_NAME: ${{ github.repository }}
+        VALIDATION_REPO_OWNER: ${{ github.repository_owner }}
+        VALIDATION_EVENT_NAME: ${{ github.event_name }}
+        VALIDATION_REF_NAME: ${{ github.ref_name }}
+        VALIDATION_BASE_REF: ${{ github.base_ref }}
+        VALIDATION_MODE: ${{ inputs.mode }}
+        VALIDATION_PROFILE: ${{ inputs.profile }}
+        VALIDATION_PR_NUMBER: ${{ github.event.pull_request.number }}
+        VALIDATION_RELEASE_PLAN_CHANGED: ${{ inputs.release_plan_changed }}
+        VALIDATION_WORKFLOW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
+        VALIDATION_TOOLING_REF: ${{ inputs.tooling_ref }}
+        VALIDATION_COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
+      run: |
+        export PATH="${PATH_NODE_MODULES}:${PATH}"
+        python -m validation.orchestrator
+        echo "output_dir=${VALIDATION_OUTPUT_DIR}" >> "$GITHUB_OUTPUT"
+
+    - name: Write workflow summary
+      if: always() && steps.run.outcome == 'success'
+      shell: bash
+      env:
+        OUTPUT_DIR: ${{ steps.run.outputs.output_dir }}
+      run: |
+        if [ -f "${OUTPUT_DIR}/summary.md" ]; then
+          cat "${OUTPUT_DIR}/summary.md" >> "$GITHUB_STEP_SUMMARY"
+        fi
+
+    - name: Read result
+      id: read-result
+      if: always() && steps.run.outcome == 'success'
+      shell: bash
+      env:
+        OUTPUT_DIR: ${{ steps.run.outputs.output_dir }}
+      run: |
+        if [ -f "${OUTPUT_DIR}/result.json" ]; then
+          RESULT=$(python3 -c "import json; r=json.load(open('${OUTPUT_DIR}/result.json')); print(r.get('result', 'error'))")
+          SHOULD_FAIL=$(python3 -c "import json; r=json.load(open('${OUTPUT_DIR}/result.json')); print(str(r.get('should_fail', False)).lower())")
+          SUMMARY=$(python3 -c "import json; r=json.load(open('${OUTPUT_DIR}/result.json')); print(r.get('summary', ''))")
+          echo "result=${RESULT}" >> "$GITHUB_OUTPUT"
+          echo "should_fail=${SHOULD_FAIL}" >> "$GITHUB_OUTPUT"
+          echo "summary=${SUMMARY}" >> "$GITHUB_OUTPUT"
+        else
+          echo "result=error" >> "$GITHUB_OUTPUT"
+          echo "should_fail=true" >> "$GITHUB_OUTPUT"
+          echo "summary=Orchestrator produced no result file" >> "$GITHUB_OUTPUT"
+        fi
diff --git a/validation/context/context_builder.py b/validation/context/context_builder.py
index 52da7f1b..6245c0ae 100644
--- a/validation/context/context_builder.py
+++ b/validation/context/context_builder.py
@@ -21,6 +21,7 @@
 from typing import Optional, Tuple
 
 from .api_pattern_detector import detect_api_pattern_from_file
+from .release_metadata_parser import load_release_metadata
 from .release_plan_parser import load_release_plan
 
 logger = logging.getLogger(__name__)
@@ -253,6 +254,7 @@ def build_validation_context(
     release_plan_changed: Optional[bool] = None,
     repo_path: Optional[Path] = None,
     release_plan_schema_path: Optional[Path] = None,
+    release_metadata_schema_path: Optional[Path] = None,
     workflow_run_url: str = "",
     tooling_ref: str = "",
 ) -> ValidationContext:
@@ -281,6 +283,20 @@ def build_validation_context(
     if repo_path is not None and release_plan_schema_path is not None:
         plan_path = repo_path / "release-plan.yaml"
         release_plan = load_release_plan(plan_path, release_plan_schema_path)
+
+        # Fallback: on snapshot branches release-plan.yaml is removed and
+        # replaced with release-metadata.yaml.  Use it to populate context
+        # so Spectral gets the correct ruleset and per-API checks run.
+        if release_plan is None and is_review and release_metadata_schema_path is not None:
+            metadata_path = repo_path / "release-metadata.yaml"
+            release_plan = load_release_metadata(
+                metadata_path, release_metadata_schema_path
+            )
+            if release_plan is not None:
+                logger.info(
+                    "Using release-metadata.yaml fallback for snapshot branch context"
+                )
+
         if release_plan is not None:
             target_release_type = release_plan.target_release_type
             commonalities_release = release_plan.commonalities_release
diff --git a/validation/context/release_metadata_parser.py b/validation/context/release_metadata_parser.py
new file mode 100644
index 00000000..13766d98
--- /dev/null
+++ b/validation/context/release_metadata_parser.py
@@ -0,0 +1,158 @@
+"""Release-metadata.yaml parser for the CAMARA validation framework.
+
+On snapshot branches, release-plan.yaml is removed and replaced with
+release-metadata.yaml.  This module extracts the same fields needed by
+the context builder, mapping the metadata schema to the existing
+ReleasePlanData dataclass.
+
+Field mapping:
+  - repository.release_type          → target_release_type
+  - dependencies.commonalities_release  → commonalities_release (tag only)
+  - dependencies.identity_consent_management_release → icm_release (tag only)
+  - apis[].api_name                  → api_name
+  - apis[].api_version               → target_api_version
+  - (derived from api_version)       → target_api_status
+
+The dependencies fields in release-metadata.yaml use the enriched format
+"r4.2 (1.2.0-rc.1)"; only the release tag prefix (e.g. "r4.2") is
+extracted for Spectral ruleset selection.
+"""
+
+from __future__ import annotations
+
+import logging
+import re
+from pathlib import Path
+from typing import Optional
+
+import yaml
+from jsonschema import Draft7Validator
+
+from .release_plan_parser import ReleasePlanApi, ReleasePlanData
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+_RELEASE_TAG_RE = re.compile(r"^(r\d+\.\d+)")
+
+
+def _extract_release_tag(enriched: Optional[str]) -> Optional[str]:
+    """Extract the release tag from the enriched metadata format.
+
+    "r4.2 (1.2.0-rc.1)" → "r4.2"
+    "r4.2"               → "r4.2"
+    None / ""             → None
+    """
+    if not enriched:
+        return None
+    m = _RELEASE_TAG_RE.match(enriched.strip())
+    return m.group(1) if m else None
+
+
+def _derive_api_status(api_version: str) -> str:
+    """Derive target_api_status from the API version string.
+
+    release-metadata.yaml does not carry target_api_status, so we derive
+    it from the version's pre-release suffix:
+      "0.5.0-alpha.1"  → "alpha"
+      "1.0.0-rc.2"     → "rc"
+      "1.0.0"          → "public"
+    """
+    if "-alpha." in api_version:
+        return "alpha"
+    if "-rc." in api_version:
+        return "rc"
+    return "public"
+
+
+# ---------------------------------------------------------------------------
+# Pure parsing
+# ---------------------------------------------------------------------------
+
+
+def parse_release_metadata(data: dict) -> ReleasePlanData:
+    """Extract validation-relevant fields from a parsed release-metadata dict.
+
+    This is a pure function — no I/O.  Expects a dict that has already
+    been loaded from YAML (optionally schema-validated).
+    """
+    repo = data.get("repository", {})
+    deps = data.get("dependencies") or {}
+
+    apis_raw = data.get("apis") or []
+    apis = tuple(
+        ReleasePlanApi(
+            api_name=a["api_name"],
+            target_api_version=a["api_version"],
+            target_api_status=_derive_api_status(a["api_version"]),
+        )
+        for a in apis_raw
+        if "api_name" in a and "api_version" in a
+    )
+
+    return ReleasePlanData(
+        target_release_type=repo.get("release_type", "none"),
+        commonalities_release=_extract_release_tag(
+            deps.get("commonalities_release")
+        ),
+        icm_release=_extract_release_tag(
+            deps.get("identity_consent_management_release")
+        ),
+        apis=apis,
+    )
+
+
+# ---------------------------------------------------------------------------
+# I/O wrapper
+# ---------------------------------------------------------------------------
+
+
+def load_release_metadata(
+    metadata_path: Path, schema_path: Path
+) -> Optional[ReleasePlanData]:
+    """Load release-metadata.yaml, validate, and extract fields.
+
+    Returns None if the file is missing or empty.  If schema validation
+    fails, logs a warning and returns what can be parsed (graceful
+    degradation).
+
+    Args:
+        metadata_path: Path to release-metadata.yaml in the repo checkout.
+        schema_path: Path to release-metadata-schema.yaml.
+    """
+    if not metadata_path.is_file():
+        logger.debug("release-metadata.yaml not found at %s", metadata_path)
+        return None
+
+    try:
+        data = yaml.safe_load(metadata_path.read_text(encoding="utf-8"))
+    except yaml.YAMLError:
+        logger.warning("Failed to parse %s as YAML", metadata_path)
+        return None
+
+    if not data:
+        logger.debug("release-metadata.yaml is empty at %s", metadata_path)
+        return None
+
+    # Schema validation (warn on failure, continue with best-effort parse)
+    try:
+        schema = yaml.safe_load(schema_path.read_text(encoding="utf-8"))
+        validator = Draft7Validator(schema)
+        errors = list(validator.iter_errors(data))
+        if errors:
+            for err in errors[:3]:
+                path = ".".join(str(p) for p in err.absolute_path) or "(root)"
+                logger.warning(
+                    "release-metadata.yaml validation: %s: %s",
+                    path,
+                    err.message,
+                )
+    except Exception:
+        logger.warning(
+            "Could not validate release-metadata.yaml against schema"
+        )
+
+    return parse_release_metadata(data)
diff --git a/validation/orchestrator.py b/validation/orchestrator.py
index 9b5c364a..c56da2ab 100644
--- a/validation/orchestrator.py
+++ b/validation/orchestrator.py
@@ -146,6 +146,7 @@ class ToolingPaths:
     config_file: Path
     config_schema: Path
     release_plan_schema: Path
+    release_metadata_schema: Path
     linting_config_dir: Path
     rules_dir: Path
 
@@ -156,6 +157,7 @@ def resolve_tooling_paths(tooling_path: Path) -> ToolingPaths:
         config_file=tooling_path / "validation" / "config" / "validation-config.yaml",
         config_schema=tooling_path / "validation" / "schemas" / "validation-config-schema.yaml",
         release_plan_schema=tooling_path / "validation" / "schemas" / "release-plan-schema.yaml",
+        release_metadata_schema=tooling_path / "validation" / "schemas" / "release-metadata-schema.yaml",
         linting_config_dir=tooling_path / "linting" / "config",
         rules_dir=tooling_path / "validation" / "rules",
     )
@@ -194,44 +196,35 @@ def run_engines(
     """
     all_findings: List[dict] = []
     engine_statuses: Dict[str, str] = {}
-    is_release_review = getattr(context, "is_release_review_pr", False)
 
     # --- yamllint ---
-    if is_release_review:
-        engine_statuses["yamllint"] = "skipped (release review PR)"
-        logger.info("yamllint: skipped (release review PR)")
-    else:
-        try:
-            yamllint_config = paths.linting_config_dir / ".yamllint.yaml"
-            findings = run_yamllint_engine(
-                repo_path=repo_path,
-                config_path=yamllint_config,
-            )
-            all_findings.extend(findings)
-            engine_statuses["yamllint"] = f"{len(findings)} finding(s)"
-            logger.info("yamllint: %d finding(s)", len(findings))
-        except Exception as exc:
-            engine_statuses["yamllint"] = f"error: {exc}"
-            logger.error("yamllint failed: %s", exc)
+    try:
+        yamllint_config = paths.linting_config_dir / ".yamllint.yaml"
+        findings = run_yamllint_engine(
+            repo_path=repo_path,
+            config_path=yamllint_config,
+        )
+        all_findings.extend(findings)
+        engine_statuses["yamllint"] = f"{len(findings)} finding(s)"
+        logger.info("yamllint: %d finding(s)", len(findings))
+    except Exception as exc:
+        engine_statuses["yamllint"] = f"error: {exc}"
+        logger.error("yamllint failed: %s", exc)
 
     # --- Spectral ---
-    if is_release_review:
-        engine_statuses["spectral"] = "skipped (release review PR)"
-        logger.info("Spectral: skipped (release review PR)")
-    else:
-        try:
-            commonalities_release = getattr(context, "commonalities_release", None)
-            findings = run_spectral_engine(
-                repo_path=repo_path,
-                config_dir=paths.linting_config_dir,
-                commonalities_release=commonalities_release,
-            )
-            all_findings.extend(findings)
-            engine_statuses["spectral"] = f"{len(findings)} finding(s)"
-            logger.info("Spectral: %d finding(s)", len(findings))
-        except Exception as exc:
-            engine_statuses["spectral"] = f"error: {exc}"
-            logger.error("Spectral failed: %s", exc)
+    try:
+        commonalities_release = getattr(context, "commonalities_release", None)
+        findings = run_spectral_engine(
+            repo_path=repo_path,
+            config_dir=paths.linting_config_dir,
+            commonalities_release=commonalities_release,
+        )
+        all_findings.extend(findings)
+        engine_statuses["spectral"] = f"{len(findings)} finding(s)"
+        logger.info("Spectral: %d finding(s)", len(findings))
+    except Exception as exc:
+        engine_statuses["spectral"] = f"error: {exc}"
+        logger.error("Spectral failed: %s", exc)
 
     # --- Python checks ---
     try:
@@ -422,6 +415,7 @@ def main() -> int:
         release_plan_changed=args.release_plan_changed,
         repo_path=args.repo_path,
         release_plan_schema_path=paths.release_plan_schema,
+        release_metadata_schema_path=paths.release_metadata_schema,
         workflow_run_url=args.workflow_run_url,
         tooling_ref=args.tooling_ref,
     )
diff --git a/validation/tests/test_context_builder.py b/validation/tests/test_context_builder.py
index 8c00ce81..b6240250 100644
--- a/validation/tests/test_context_builder.py
+++ b/validation/tests/test_context_builder.py
@@ -3,10 +3,12 @@
 from pathlib import Path
 
 import pytest
+import yaml
 
 from validation.context.context_builder import (
     ApiContext,
     ValidationContext,
+    build_validation_context,
     derive_api_maturity,
     derive_branch_type,
     derive_target_branch,
@@ -208,3 +210,137 @@ def test_apis_serialized_as_list(self, sample_context):
     def test_none_values_preserved(self, sample_context):
         d = sample_context.to_dict()
         assert d["icm_release"] is None
+
+
+# ---------------------------------------------------------------------------
+# TestBuildValidationContext — metadata fallback
+# ---------------------------------------------------------------------------
+
+SCHEMAS_DIR = Path(__file__).resolve().parent.parent / "schemas"
+PLAN_SCHEMA = SCHEMAS_DIR / "release-plan-schema.yaml"
+METADATA_SCHEMA = SCHEMAS_DIR / "release-metadata-schema.yaml"
+
+
+def _write_yaml(path: Path, data) -> Path:
+    path.write_text(yaml.dump(data, default_flow_style=False), encoding="utf-8")
+    return path
+
+
+class TestBuildValidationContextMetadataFallback:
+    """Test that build_validation_context falls back to release-metadata.yaml
+    when release-plan.yaml is absent and the PR targets a snapshot branch."""
+
+    @pytest.fixture
+    def repo_with_metadata(self, tmp_path):
+        """Repo checkout with release-metadata.yaml but no release-plan.yaml."""
+        spec_dir = tmp_path / "code" / "API_definitions"
+        spec_dir.mkdir(parents=True)
+        # Minimal spec so api_pattern detection doesn't crash
+        (spec_dir / "quality-on-demand.yaml").write_text(
+            "openapi: '3.0.3'\ninfo:\n  title: QoD\n  version: wip\npaths: {}\n",
+            encoding="utf-8",
+        )
+        _write_yaml(
+            tmp_path / "release-metadata.yaml",
+            {
+                "repository": {
+                    "repository_name": "QualityOnDemand",
+                    "release_tag": "r4.1",
+                    "release_type": "pre-release-rc",
+                    "release_date": None,
+                    "src_commit_sha": "a" * 40,
+                },
+                "dependencies": {
+                    "commonalities_release": "r4.2 (1.2.0-rc.1)",
+                    "identity_consent_management_release": "r4.3 (1.1.0)",
+                },
+                "apis": [
+                    {
+                        "api_name": "quality-on-demand",
+                        "api_version": "1.0.0-rc.2",
+                        "api_title": "Quality On Demand",
+                    },
+                ],
+            },
+        )
+        return tmp_path
+
+    def test_fallback_populates_context(self, repo_with_metadata):
+        ctx = build_validation_context(
+            repo_name="camaraproject/QualityOnDemand",
+            event_name="pull_request",
+            ref_name="release-review/r4.1-abc1234",
+            base_ref="release-snapshot/r4.1-abc1234",
+            repo_path=repo_with_metadata,
+            release_plan_schema_path=PLAN_SCHEMA,
+            release_metadata_schema_path=METADATA_SCHEMA,
+        )
+        assert ctx.is_release_review_pr is True
+        assert ctx.profile == "strict"
+        assert ctx.target_release_type == "pre-release-rc"
+        assert ctx.commonalities_release == "r4.2"
+        assert ctx.icm_release == "r4.3"
+        assert len(ctx.apis) == 1
+        assert ctx.apis[0].api_name == "quality-on-demand"
+        assert ctx.apis[0].target_api_version == "1.0.0-rc.2"
+        assert ctx.apis[0].target_api_status == "rc"
+
+    def test_no_fallback_when_release_plan_exists(self, repo_with_metadata):
+        """When release-plan.yaml exists, metadata fallback is not used."""
+        _write_yaml(
+            repo_with_metadata / "release-plan.yaml",
+            {
+                "repository": {
+                    "release_track": "meta-release",
+                    "meta_release": "Spring26",
+                    "target_release_tag": "r4.1",
+                    "target_release_type": "public-release",
+                },
+                "apis": [
+                    {
+                        "api_name": "quality-on-demand",
+                        "target_api_version": "1.0.0",
+                        "target_api_status": "public",
+                    },
+                ],
+            },
+        )
+        ctx = build_validation_context(
+            repo_name="camaraproject/QualityOnDemand",
+            event_name="pull_request",
+            ref_name="release-review/r4.1-abc1234",
+            base_ref="release-snapshot/r4.1-abc1234",
+            repo_path=repo_with_metadata,
+            release_plan_schema_path=PLAN_SCHEMA,
+            release_metadata_schema_path=METADATA_SCHEMA,
+        )
+        # Should use release-plan.yaml values, not metadata
+        assert ctx.target_release_type == "public-release"
+
+    def test_no_fallback_for_non_review_pr(self, tmp_path):
+        """Metadata fallback only activates for release-review PRs."""
+        _write_yaml(
+            tmp_path / "release-metadata.yaml",
+            {
+                "repository": {
+                    "repository_name": "Foo",
+                    "release_tag": "r4.1",
+                    "release_type": "pre-release-rc",
+                    "release_date": None,
+                    "src_commit_sha": "b" * 40,
+                },
+                "apis": [],
+            },
+        )
+        ctx = build_validation_context(
+            repo_name="camaraproject/Foo",
+            event_name="pull_request",
+            ref_name="fix/something",
+            base_ref="main",
+            repo_path=tmp_path,
+            release_plan_schema_path=PLAN_SCHEMA,
+            release_metadata_schema_path=METADATA_SCHEMA,
+        )
+        # Not a release review → no fallback → target_release_type stays None
+        assert ctx.is_release_review_pr is False
+        assert ctx.target_release_type is None
diff --git a/validation/tests/test_orchestrator.py b/validation/tests/test_orchestrator.py
index 865864c3..ee48385b 100644
--- a/validation/tests/test_orchestrator.py
+++ b/validation/tests/test_orchestrator.py
@@ -284,20 +284,26 @@ def test_all_engines_called(
     @patch("validation.orchestrator.run_python_engine")
     @patch("validation.orchestrator.run_spectral_engine")
     @patch("validation.orchestrator.run_yamllint_engine")
-    def test_release_review_skips_yamllint_and_spectral(
+    def test_release_review_runs_all_engines(
         self, mock_yamllint, mock_spectral, mock_python, mock_gherkin, paths
     ):
-        mock_python.return_value = []
-        mock_gherkin.return_value = []
+        """All engines run on release-review PRs (DEC-011 revision)."""
+        mock_yamllint.return_value = [_make_finding(engine="yamllint")]
+        mock_spectral.return_value = [_make_finding(engine="spectral")]
+        mock_python.return_value = [_make_finding(engine="python")]
+        mock_gherkin.return_value = [_make_finding(engine="gherkin")]
         context = _make_context(is_release_review_pr=True)
         test_files = [Path("/repo/code/Test_definitions/test.feature")]
 
         findings, statuses = run_engines(Path("/repo"), paths, context, test_files)
 
-        assert not mock_yamllint.called
-        assert not mock_spectral.called
-        assert "skipped" in statuses["yamllint"]
-        assert "skipped" in statuses["spectral"]
+        assert mock_yamllint.called
+        assert mock_spectral.called
+        assert mock_python.called
+        assert mock_gherkin.called
+        assert len(findings) == 4
+        assert "skipped" not in statuses.get("yamllint", "")
+        assert "skipped" not in statuses.get("spectral", "")
 
     @patch("validation.orchestrator.run_gherkin_engine")
     @patch("validation.orchestrator.run_python_engine")
diff --git a/validation/tests/test_release_metadata_parser.py b/validation/tests/test_release_metadata_parser.py
new file mode 100644
index 00000000..1ddf196d
--- /dev/null
+++ b/validation/tests/test_release_metadata_parser.py
@@ -0,0 +1,233 @@
+"""Unit tests for validation.context.release_metadata_parser."""
+
+from pathlib import Path
+
+import pytest
+import yaml
+
+from validation.context.release_metadata_parser import (
+    _derive_api_status,
+    _extract_release_tag,
+    load_release_metadata,
+    parse_release_metadata,
+)
+
+SCHEMA_PATH = (
+    Path(__file__).resolve().parent.parent
+    / "schemas"
+    / "release-metadata-schema.yaml"
+)
+
+
+def _write_yaml(path: Path, data) -> Path:
+    path.write_text(yaml.dump(data, default_flow_style=False), encoding="utf-8")
+    return path
+
+
+# ---------------------------------------------------------------------------
+# Fixtures
+# ---------------------------------------------------------------------------
+
+
+@pytest.fixture
+def schema_path():
+    return SCHEMA_PATH
+
+
+@pytest.fixture
+def full_metadata_dict():
+    return {
+        "repository": {
+            "repository_name": "QualityOnDemand",
+            "release_tag": "r4.1",
+            "release_type": "pre-release-rc",
+            "release_date": None,
+            "src_commit_sha": "a" * 40,
+        },
+        "dependencies": {
+            "commonalities_release": "r4.2 (1.2.0-rc.1)",
+            "identity_consent_management_release": "r4.3 (1.1.0)",
+        },
+        "apis": [
+            {
+                "api_name": "quality-on-demand",
+                "api_version": "1.0.0-rc.2",
+                "api_title": "Quality On Demand",
+            },
+            {
+                "api_name": "qos-booking",
+                "api_version": "0.5.0-alpha.1",
+                "api_title": "QoS Booking",
+            },
+        ],
+    }
+
+
+# ---------------------------------------------------------------------------
+# TestExtractReleaseTag
+# ---------------------------------------------------------------------------
+
+
+class TestExtractReleaseTag:
+    def test_enriched_format(self):
+        assert _extract_release_tag("r4.2 (1.2.0-rc.1)") == "r4.2"
+
+    def test_plain_tag(self):
+        assert _extract_release_tag("r4.2") == "r4.2"
+
+    def test_double_digit(self):
+        assert _extract_release_tag("r12.34 (2.0.0)") == "r12.34"
+
+    def test_none(self):
+        assert _extract_release_tag(None) is None
+
+    def test_empty(self):
+        assert _extract_release_tag("") is None
+
+    def test_invalid(self):
+        assert _extract_release_tag("not-a-tag") is None
+
+    def test_whitespace(self):
+        assert _extract_release_tag("  r4.2 (1.0.0)") == "r4.2"
+
+
+# ---------------------------------------------------------------------------
+# TestDeriveApiStatus
+# ---------------------------------------------------------------------------
+
+
+class TestDeriveApiStatus:
+    def test_alpha(self):
+        assert _derive_api_status("0.5.0-alpha.1") == "alpha"
+
+    def test_rc(self):
+        assert _derive_api_status("1.0.0-rc.2") == "rc"
+
+    def test_public(self):
+        assert _derive_api_status("1.0.0") == "public"
+
+    def test_public_patch(self):
+        assert _derive_api_status("2.1.3") == "public"
+
+    def test_initial_public(self):
+        assert _derive_api_status("0.1.0") == "public"
+
+
+# ---------------------------------------------------------------------------
+# TestParseReleaseMetadata
+# ---------------------------------------------------------------------------
+
+
+class TestParseReleaseMetadata:
+    def test_full_metadata(self, full_metadata_dict):
+        result = parse_release_metadata(full_metadata_dict)
+        assert result.target_release_type == "pre-release-rc"
+        assert result.commonalities_release == "r4.2"
+        assert result.icm_release == "r4.3"
+        assert len(result.apis) == 2
+        assert result.apis[0].api_name == "quality-on-demand"
+        assert result.apis[0].target_api_version == "1.0.0-rc.2"
+        assert result.apis[0].target_api_status == "rc"
+        assert result.apis[1].api_name == "qos-booking"
+        assert result.apis[1].target_api_version == "0.5.0-alpha.1"
+        assert result.apis[1].target_api_status == "alpha"
+
+    def test_metadata_without_dependencies(self):
+        data = {
+            "repository": {
+                "repository_name": "Foo",
+                "release_tag": "r4.1",
+                "release_type": "public-release",
+                "release_date": None,
+                "src_commit_sha": "b" * 40,
+            },
+            "apis": [
+                {
+                    "api_name": "foo-api",
+                    "api_version": "1.0.0",
+                    "api_title": "Foo API",
+                },
+            ],
+        }
+        result = parse_release_metadata(data)
+        assert result.target_release_type == "public-release"
+        assert result.commonalities_release is None
+        assert result.icm_release is None
+        assert len(result.apis) == 1
+        assert result.apis[0].target_api_status == "public"
+
+    def test_metadata_without_apis(self):
+        data = {
+            "repository": {
+                "repository_name": "Foo",
+                "release_tag": "r4.1",
+                "release_type": "public-release",
+                "release_date": None,
+                "src_commit_sha": "c" * 40,
+            },
+        }
+        result = parse_release_metadata(data)
+        assert result.apis == ()
+
+    def test_api_with_missing_fields_skipped(self):
+        data = {
+            "repository": {
+                "repository_name": "Foo",
+                "release_tag": "r4.1",
+                "release_type": "public-release",
+                "release_date": None,
+                "src_commit_sha": "d" * 40,
+            },
+            "apis": [
+                {"api_name": "good-api", "api_version": "1.0.0", "api_title": "Good"},
+                {"api_name": "no-version"},  # missing api_version
+            ],
+        }
+        result = parse_release_metadata(data)
+        assert len(result.apis) == 1
+        assert result.apis[0].api_name == "good-api"
+
+
+# ---------------------------------------------------------------------------
+# TestLoadReleaseMetadata
+# ---------------------------------------------------------------------------
+
+
+class TestLoadReleaseMetadata:
+    def test_load_valid(self, tmp_path, schema_path, full_metadata_dict):
+        metadata_file = _write_yaml(tmp_path / "release-metadata.yaml", full_metadata_dict)
+        result = load_release_metadata(metadata_file, schema_path)
+        assert result is not None
+        assert result.target_release_type == "pre-release-rc"
+        assert result.commonalities_release == "r4.2"
+        assert len(result.apis) == 2
+
+    def test_missing_file(self, tmp_path, schema_path):
+        result = load_release_metadata(tmp_path / "nonexistent.yaml", schema_path)
+        assert result is None
+
+    def test_empty_file(self, tmp_path, schema_path):
+        (tmp_path / "release-metadata.yaml").write_text("", encoding="utf-8")
+        result = load_release_metadata(tmp_path / "release-metadata.yaml", schema_path)
+        assert result is None
+
+    def test_invalid_yaml(self, tmp_path, schema_path):
+        (tmp_path / "release-metadata.yaml").write_text(
+            "{{invalid yaml", encoding="utf-8"
+        )
+        result = load_release_metadata(tmp_path / "release-metadata.yaml", schema_path)
+        assert result is None
+
+    def test_schema_violation_graceful(self, tmp_path, schema_path):
+        """Schema violations log warnings but still return parsed data."""
+        data = {
+            "repository": {"release_type": "public-release"},
+            # missing required fields — schema violation
+            "apis": [
+                {"api_name": "foo", "api_version": "1.0.0", "api_title": "Foo"},
+            ],
+        }
+        metadata_file = _write_yaml(tmp_path / "release-metadata.yaml", data)
+        result = load_release_metadata(metadata_file, schema_path)
+        assert result is not None
+        assert result.target_release_type == "public-release"

From 3385a82df0b7ac09db077a25df93ea0dcc7e4d10 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Mon, 30 Mar 2026 22:41:04 +0200
Subject: [PATCH 029/157] feat(release-automation): pre-snapshot validation
 gate and bundling

Part 2 of release automation handoff:

- Add pre-snapshot validation steps to create-snapshot job: checkout
  API repo, setup runtimes, run shared validation action with mode
  pre-snapshot, gate snapshot creation on validation result

- Add bundling to snapshot creator: detect external $ref in API specs,
  run redocly bundle before mechanical transformations, remove inlined
  common/modules directories

- Add Node 24 + Redocly CLI setup to create-snapshot composite action

- Clean up internal decision identifiers from workflow comments

1161 tests passing (596 validation + 565 release automation).
---
 .../workflows/release-automation-reusable.yml | 55 ++++++++++++++-
 .github/workflows/validation.yml              | 10 +--
 .../scripts/snapshot_creator.py               | 68 +++++++++++++++++++
 shared-actions/create-snapshot/action.yml     | 11 ++-
 validation/orchestrator.py                    |  6 +-
 validation/tests/test_orchestrator.py         |  2 +-
 6 files changed, 140 insertions(+), 12 deletions(-)

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index 9e247c27..863e39cd 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -787,13 +787,13 @@ jobs:
       needs.validate-command.outputs.allowed == 'true'
     runs-on: ubuntu-latest
     outputs:
-      success: ${{ steps.create.outputs.success }}
+      success: ${{ steps.create.outputs.success || steps.validation-gate.outputs.success }}
       snapshot_id: ${{ steps.create.outputs.snapshot_id }}
       snapshot_branch: ${{ steps.create.outputs.snapshot_branch }}
       release_review_branch: ${{ steps.create.outputs.release_review_branch }}
       release_pr_number: ${{ steps.create.outputs.release_pr_number }}
       release_pr_url: ${{ steps.create.outputs.release_pr_url }}
-      error_message: ${{ steps.create.outputs.error_message }}
+      error_message: ${{ steps.create.outputs.error_message || steps.validation-gate.outputs.error_message }}
       apis_json: ${{ steps.create.outputs.apis_json }}
     steps:
       - name: Generate App Token
@@ -815,6 +815,52 @@ jobs:
             release_automation/templates
             release_automation/config
             shared-actions/create-snapshot
+            shared-actions/run-validation
+            validation
+            linting/config
+
+      # ── Pre-snapshot validation ────────────────────────────────────
+      #
+      # Run the validation framework on the source branch before
+      # creating the snapshot.  Strict profile — errors and warnings
+      # block snapshot creation.
+
+      - name: Checkout repository for validation
+        uses: actions/checkout@v6
+        with:
+          path: _repo
+
+      - name: Setup Python
+        uses: actions/setup-python@v6
+        with:
+          python-version: "3.11"
+
+      - name: Setup Node
+        uses: actions/setup-node@v6
+        with:
+          node-version: "24"
+
+      - name: Run pre-snapshot validation
+        id: validation
+        uses: ./_tooling/shared-actions/run-validation
+        with:
+          repo_path: ${{ github.workspace }}/_repo
+          tooling_path: ${{ github.workspace }}/_tooling
+          mode: pre-snapshot
+          tooling_ref: ${{ needs.check-trigger.outputs.tooling_checkout_ref }}
+
+      - name: Gate on validation result
+        id: validation-gate
+        if: always() && steps.validation.outputs.should_fail == 'true'
+        run: |
+          echo "::error::Pre-snapshot validation failed — fix issues on main before re-running /create-snapshot"
+          echo "success=false" >> "$GITHUB_OUTPUT"
+          SUMMARY="${{ steps.validation.outputs.summary }}"
+          RUN_URL="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+          echo "error_message=Pre-snapshot validation failed: ${SUMMARY}. See ${RUN_URL}" >> "$GITHUB_OUTPUT"
+          exit 1
+
+      # ── Snapshot creation ─────────────────────────────────────────
 
       - name: Resolve Bot Identity
         id: bot-identity
@@ -846,6 +892,11 @@ jobs:
       - name: Log Result
         if: always()
         run: |
+          echo "Pre-snapshot validation: ${{ steps.validation.outputs.result || 'skipped' }}"
+          if [ "${{ steps.validation-gate.outcome }}" == "failure" ]; then
+            echo "  Validation blocked snapshot creation"
+            echo "  Summary: ${{ steps.validation.outputs.summary }}"
+          fi
           echo "Create snapshot result:"
           echo "  Success: ${{ steps.create.outputs.success }}"
           echo "  Snapshot ID: ${{ steps.create.outputs.snapshot_id }}"
diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 52a696a3..9578cf89 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -230,9 +230,9 @@ jobs:
       # status in a single step.  Skipped entirely for non-PR events.
       #
       # Pre-snapshot validation runs via the shared run-validation
-      # action inside the RA workflow (DEC-022).  Release-review PRs
-      # use the standard token tiers below (validation app or
-      # GITHUB_TOKEN).  No RA-specific token tier needed.
+      # action inside the release automation workflow.  Release-review
+      # PRs use the standard token tiers below (validation app or
+      # GITHUB_TOKEN).  No release-automation-specific token needed.
       - name: Post findings to PR
         if: >-
           always() && steps.orchestrator.outcome == 'success'
@@ -341,8 +341,8 @@ jobs:
       # ── Step 15: Bundle API specs ─────────────────────────────────
       #
       # Produce standalone bundled specs for artifact download and
-      # release automation handoff (DEC-014).  Spectral resolves $ref
-      # natively (DEC-021), so this is an output step, not validation.
+      # release automation handoff.  Spectral resolves $ref natively,
+      # so this is an output step, not a validation prerequisite.
       - name: Bundle API specs
         if: always() && steps.orchestrator.outcome == 'success'
         env:
diff --git a/release_automation/scripts/snapshot_creator.py b/release_automation/scripts/snapshot_creator.py
index 6667fc1e..2c1c4863 100644
--- a/release_automation/scripts/snapshot_creator.py
+++ b/release_automation/scripts/snapshot_creator.py
@@ -5,9 +5,11 @@
 version calculation, mechanical transformations, and metadata generation.
 """
 
+import glob
 import os
 import re
 import shutil
+import subprocess
 import tempfile
 from copy import deepcopy
 from dataclasses import dataclass, field
@@ -239,6 +241,13 @@ def create_snapshot(
                 result.errors.append(wip_result.format_error_message())
                 return result
 
+            # Step 7c: Bundle API specs (resolve external $ref)
+            bundled_count = self._bundle_specs(temp_dir)
+            if bundled_count > 0:
+                result.warnings.append(
+                    f"Bundled {bundled_count} API spec(s) — external $ref resolved"
+                )
+
             # Step 8: Apply transformations
             context = TransformationContext(
                 release_tag=config.release_tag,
@@ -518,6 +527,65 @@ def generate_snapshot_id(self, release_tag: str, commit_sha: str) -> str:
         short_sha = commit_sha[: self.SHORT_SHA_LENGTH]
         return f"{release_tag}-{short_sha}"
 
+    def _bundle_specs(self, repo_path: str) -> int:
+        """Bundle API specs that contain external $ref references.
+
+        Scans each YAML file in ``code/API_definitions/`` for external
+        ``$ref`` (relative paths like ``../common/``).  For each file
+        that has them, runs ``redocly bundle`` to produce a standalone
+        spec (in-place replacement).  After bundling, removes the
+        ``code/common/`` and ``code/modules/`` directories since their
+        content is now inlined.
+
+        Returns the number of specs that were bundled.  Returns 0 if
+        no specs have external refs (no-op for repos using copy-paste).
+        """
+        api_dir = os.path.join(repo_path, "code", "API_definitions")
+        if not os.path.isdir(api_dir):
+            return 0
+
+        specs = glob.glob(os.path.join(api_dir, "*.yaml"))
+        if not specs:
+            return 0
+
+        # Detect which specs have external $ref
+        external_ref_re = re.compile(r'\$ref\s*:\s*["\']?\.\.')
+        specs_to_bundle = []
+        for spec in specs:
+            with open(spec, encoding="utf-8") as f:
+                content = f.read()
+            if external_ref_re.search(content):
+                specs_to_bundle.append(spec)
+
+        if not specs_to_bundle:
+            return 0
+
+        # Bundle each spec in-place
+        for spec in specs_to_bundle:
+            name = os.path.basename(spec)
+            print(f"Bundling {name}...")
+            result = subprocess.run(
+                ["redocly", "bundle", spec, "-o", spec],
+                capture_output=True,
+                text=True,
+                cwd=repo_path,
+                timeout=60,
+            )
+            if result.returncode != 0:
+                stderr = result.stderr.strip()
+                raise RuntimeError(
+                    f"Bundling failed for {name}: {stderr or result.stdout.strip()}"
+                )
+
+        # Remove common/modules directories (content now inlined)
+        for subdir in ("common", "modules"):
+            path = os.path.join(repo_path, "code", subdir)
+            if os.path.isdir(path):
+                shutil.rmtree(path)
+                print(f"Removed code/{subdir}/ (content inlined by bundling)")
+
+        return len(specs_to_bundle)
+
     def _extract_api_titles(
         self,
         release_plan: Dict[str, Any],
diff --git a/shared-actions/create-snapshot/action.yml b/shared-actions/create-snapshot/action.yml
index a7a02e89..3a209678 100644
--- a/shared-actions/create-snapshot/action.yml
+++ b/shared-actions/create-snapshot/action.yml
@@ -68,10 +68,19 @@ runs:
       with:
         python-version: '3.11'
 
-    - name: Install dependencies
+    - name: Install Python dependencies
       shell: bash
       run: pip install --quiet pyyaml pystache
 
+    - name: Setup Node
+      uses: actions/setup-node@v6
+      with:
+        node-version: '24'
+
+    - name: Install Redocly CLI
+      shell: bash
+      run: npm install -g @redocly/cli@^1.31.0
+
     - name: Create Snapshot
       id: create
       shell: python
diff --git a/validation/orchestrator.py b/validation/orchestrator.py
index c56da2ab..74e32df8 100644
--- a/validation/orchestrator.py
+++ b/validation/orchestrator.py
@@ -258,9 +258,9 @@ def run_engines(
             logger.error("gherkin-lint failed: %s", exc)
 
     # --- Bundling ---
-    # Spectral resolves external $ref natively (DEC-021), so bundling is not
-    # a validation prerequisite.  Bundled standalone specs are produced by a
-    # separate workflow step for artifact upload and release automation handoff.
+    # Spectral resolves external $ref natively, so bundling is not a
+    # validation prerequisite.  Bundled standalone specs are produced by
+    # a separate workflow step for artifact upload and handoff.
     engine_statuses["bundling"] = "separate workflow step"
 
     return all_findings, engine_statuses
diff --git a/validation/tests/test_orchestrator.py b/validation/tests/test_orchestrator.py
index ee48385b..357c15a4 100644
--- a/validation/tests/test_orchestrator.py
+++ b/validation/tests/test_orchestrator.py
@@ -287,7 +287,7 @@ def test_all_engines_called(
     def test_release_review_runs_all_engines(
         self, mock_yamllint, mock_spectral, mock_python, mock_gherkin, paths
     ):
-        """All engines run on release-review PRs (DEC-011 revision)."""
+        """All engines run on release-review PRs (full-scope validation)."""
         mock_yamllint.return_value = [_make_finding(engine="yamllint")]
         mock_spectral.return_value = [_make_finding(engine="spectral")]
         mock_python.return_value = [_make_finding(engine="python")]

From 36c95c5e0d7324a5822a7543b7c8ae58947ce5bb Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Mon, 30 Mar 2026 22:42:41 +0200
Subject: [PATCH 030/157] refactor(validation): use shared run-validation
 action in workflow
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Part 3 of release automation handoff:

- Replace inline dep installation + orchestrator invocation with
  shared-actions/run-validation composite action call
- Remove duplicate summary writing step (handled by action)
- Simplify result check to use action output (should_fail)
- Update all step references from orchestrator to validation

Workflow is thinner: setup → action → PR-specific output surfaces.
Same action is used by release automation for pre-snapshot gate.

596 tests passing (no behavioral change).
---
 .github/workflows/validation.yml | 88 +++++++++++---------------------
 1 file changed, 29 insertions(+), 59 deletions(-)

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 9578cf89..265729c2 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -150,16 +150,7 @@ jobs:
         with:
           node-version: "24"
 
-      # ── Step 6: Install Python dependencies ────────────────────────
-      - name: Install Python dependencies
-        run: pip install --quiet pyyaml==6.0.3 jsonschema==4.26.0 yamllint==1.38.0
-
-      # ── Step 7: Install Node dependencies ──────────────────────────
-      - name: Install Node dependencies
-        run: npm ci --ignore-scripts
-        working-directory: .tooling/validation
-
-      # ── Step 8: Detect release-plan changes (PR only) ──────────────
+      # ── Step 6: Detect release-plan changes (PR only) ──────────────
       - name: Detect release-plan changes
         id: detect-changes
         if: github.event_name == 'pull_request'
@@ -170,52 +161,34 @@ jobs:
             echo "release_plan_changed=false" >> "$GITHUB_OUTPUT"
           fi
 
-      # ── Step 9: Run validation orchestrator ────────────────────────
+      # ── Step 7: Run validation (shared action) ─────────────────────
+      #
+      # The run-validation action installs dependencies, runs the Python
+      # orchestrator, writes the workflow summary, and outputs the result.
       - name: Run validation
-        id: orchestrator
-        env:
-          PYTHONPATH: ${{ github.workspace }}/.tooling
-          PATH_NODE_MODULES: ${{ github.workspace }}/.tooling/validation/node_modules/.bin
-          VALIDATION_REPO_PATH: ${{ github.workspace }}
-          VALIDATION_TOOLING_PATH: ${{ github.workspace }}/.tooling
-          VALIDATION_OUTPUT_DIR: ${{ github.workspace }}/validation-output
-          VALIDATION_REPO_NAME: ${{ github.repository }}
-          VALIDATION_REPO_OWNER: ${{ github.repository_owner }}
-          VALIDATION_EVENT_NAME: ${{ github.event_name }}
-          VALIDATION_REF_NAME: ${{ github.ref_name }}
-          VALIDATION_BASE_REF: ${{ github.base_ref }}
-          VALIDATION_MODE: ${{ inputs.mode }}
-          VALIDATION_PROFILE: ${{ inputs.profile }}
-          VALIDATION_PR_NUMBER: ${{ github.event.pull_request.number }}
-          VALIDATION_RELEASE_PLAN_CHANGED: ${{ steps.detect-changes.outputs.release_plan_changed }}
-          VALIDATION_WORKFLOW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
-          VALIDATION_TOOLING_REF: ${{ steps.resolve-ref.outputs.tooling_checkout_ref }}
-          VALIDATION_COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
-        run: |
-          export PATH="${PATH_NODE_MODULES}:${PATH}"
-          python -m validation.orchestrator
-
-      # ── Step 10: Emit annotations (PR only) ────────────────────────
+        id: validation
+        uses: ./.tooling/shared-actions/run-validation
+        with:
+          repo_path: ${{ github.workspace }}
+          tooling_path: ${{ github.workspace }}/.tooling
+          mode: ${{ inputs.mode }}
+          profile: ${{ inputs.profile }}
+          release_plan_changed: ${{ steps.detect-changes.outputs.release_plan_changed || 'false' }}
+          tooling_ref: ${{ steps.resolve-ref.outputs.tooling_checkout_ref }}
+
+      # ── Step 8: Emit annotations (PR only) ────────────────────────
       - name: Emit annotations
-        if: always() && steps.orchestrator.outcome == 'success' && github.event_name == 'pull_request'
+        if: always() && steps.validation.outcome == 'success' && github.event_name == 'pull_request'
         run: |
           if [ -f validation-output/annotations.txt ]; then
             cat validation-output/annotations.txt
           fi
 
-      # ── Step 11: Write workflow summary ────────────────────────────
-      - name: Write workflow summary
-        if: always() && steps.orchestrator.outcome == 'success'
-        run: |
-          if [ -f validation-output/summary.md ]; then
-            cat validation-output/summary.md >> "$GITHUB_STEP_SUMMARY"
-          fi
-
-      # ── Step 12: Mint validation app token (PR only) ─────────────
+      # ── Step 9: Mint validation app token (PR only) ──────────────
       - name: Mint validation app token
         id: mint-token
         if: >-
-          always() && steps.orchestrator.outcome == 'success'
+          always() && steps.validation.outcome == 'success'
           && github.event_name == 'pull_request'
           && vars.VALIDATION_APP_ID != ''
         continue-on-error: true
@@ -235,7 +208,7 @@ jobs:
       # GITHUB_TOKEN).  No release-automation-specific token needed.
       - name: Post findings to PR
         if: >-
-          always() && steps.orchestrator.outcome == 'success'
+          always() && steps.validation.outcome == 'success'
           && github.event_name == 'pull_request'
         uses: actions/github-script@v8
         with:
@@ -330,7 +303,7 @@ jobs:
 
       # ── Step 14: Upload diagnostics ────────────────────────────────
       - name: Upload diagnostics
-        if: always() && steps.orchestrator.outcome == 'success'
+        if: always() && steps.validation.outcome == 'success'
         uses: actions/upload-artifact@v6
         with:
           name: validation-diagnostics
@@ -344,7 +317,7 @@ jobs:
       # release automation handoff.  Spectral resolves $ref natively,
       # so this is an output step, not a validation prerequisite.
       - name: Bundle API specs
-        if: always() && steps.orchestrator.outcome == 'success'
+        if: always() && steps.validation.outcome == 'success'
         env:
           PATH_NODE_MODULES: ${{ github.workspace }}/.tooling/validation/node_modules/.bin
         run: |
@@ -369,7 +342,7 @@ jobs:
 
       # ── Step 16: Upload bundled specs ──────────────────────────────
       - name: Upload bundled specs
-        if: always() && steps.orchestrator.outcome == 'success'
+        if: always() && steps.validation.outcome == 'success'
         uses: actions/upload-artifact@v6
         with:
           name: validation-bundled-specs
@@ -377,21 +350,18 @@ jobs:
           if-no-files-found: ignore
           retention-days: 90
 
-      # ── Step 17: Check result ──────────────────────────────────────
+      # ── Check result ────────────────────────────────────────────────
       - name: Check result
-        if: always() && steps.orchestrator.outcome == 'success'
+        if: always() && steps.validation.outcome == 'success'
         run: |
-          if [ -f validation-output/result.json ]; then
-            SHOULD_FAIL=$(python3 -c "import json; r=json.load(open('validation-output/result.json')); print(r.get('should_fail', False))")
-            if [ "$SHOULD_FAIL" = "True" ]; then
-              echo "::error::Validation failed — see summary and annotations above"
-              exit 1
-            fi
+          if [ "${{ steps.validation.outputs.should_fail }}" = "true" ]; then
+            echo "::error::Validation failed — see summary and annotations above"
+            exit 1
           fi
 
       # ── Fallback: orchestrator infrastructure error ────────────────
       - name: Report orchestrator failure
-        if: always() && steps.orchestrator.outcome == 'failure'
+        if: always() && steps.validation.outcome == 'failure'
         run: |
           echo "## CAMARA Validation" >> "$GITHUB_STEP_SUMMARY"
           echo "" >> "$GITHUB_STEP_SUMMARY"

From 8f6b7e791e8c494bfc038b91c6f616207b980c84 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Tue, 31 Mar 2026 09:24:09 +0200
Subject: [PATCH 031/157] chore(deps): bump actions/create-github-app-token
 from v2 to v3
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Aligns with camaraproject/tooling#141. No breaking change impact —
no proxy usage, GitHub-hosted runners already support Node 24.
Also renames caller workflow to "CAMARA Release Automation".
---
 .../workflows/release-automation-reusable.yml | 22 +++++++++----------
 .github/workflows/validation.yml              |  2 +-
 .../workflows/release-automation-caller.yml   |  2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index 863e39cd..1831e48a 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -423,7 +423,7 @@ jobs:
       - name: Generate App Token
         id: app-token
         if: steps.decide.outputs.action == 'post_comment' && vars.RELEASE_APP_ID != ''
-        uses: actions/create-github-app-token@v2
+        uses: actions/create-github-app-token@v3
         with:
           app-id: ${{ vars.RELEASE_APP_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
@@ -799,7 +799,7 @@ jobs:
       - name: Generate App Token
         id: app-token
         if: vars.RELEASE_APP_ID != ''
-        uses: actions/create-github-app-token@v2
+        uses: actions/create-github-app-token@v3
         with:
           app-id: ${{ vars.RELEASE_APP_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
@@ -923,7 +923,7 @@ jobs:
       - name: Generate App Token
         id: app-token
         if: vars.RELEASE_APP_ID != ''
-        uses: actions/create-github-app-token@v2
+        uses: actions/create-github-app-token@v3
         with:
           app-id: ${{ vars.RELEASE_APP_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
@@ -1033,7 +1033,7 @@ jobs:
       - name: Generate App Token
         id: app-token
         if: vars.RELEASE_APP_ID != ''
-        uses: actions/create-github-app-token@v2
+        uses: actions/create-github-app-token@v3
         with:
           app-id: ${{ vars.RELEASE_APP_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
@@ -1161,7 +1161,7 @@ jobs:
       - name: Generate App Token
         id: app-token
         if: vars.RELEASE_APP_ID != ''
-        uses: actions/create-github-app-token@v2
+        uses: actions/create-github-app-token@v3
         with:
           app-id: ${{ vars.RELEASE_APP_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
@@ -1240,7 +1240,7 @@ jobs:
       - name: Generate App Token
         id: app-token
         if: vars.RELEASE_APP_ID != ''
-        uses: actions/create-github-app-token@v2
+        uses: actions/create-github-app-token@v3
         with:
           app-id: ${{ vars.RELEASE_APP_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
@@ -1378,7 +1378,7 @@ jobs:
       - name: Generate App Token
         id: app-token
         if: vars.RELEASE_APP_ID != ''
-        uses: actions/create-github-app-token@v2
+        uses: actions/create-github-app-token@v3
         with:
           app-id: ${{ vars.RELEASE_APP_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
@@ -1597,7 +1597,7 @@ jobs:
       - name: Generate App Token
         id: app-token
         if: vars.RELEASE_APP_ID != ''
-        uses: actions/create-github-app-token@v2
+        uses: actions/create-github-app-token@v3
         with:
           app-id: ${{ vars.RELEASE_APP_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
@@ -1838,7 +1838,7 @@ jobs:
       - name: Generate App Token
         id: app-token
         if: vars.RELEASE_APP_ID != ''
-        uses: actions/create-github-app-token@v2
+        uses: actions/create-github-app-token@v3
         with:
           app-id: ${{ vars.RELEASE_APP_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
@@ -1947,7 +1947,7 @@ jobs:
       - name: Generate App Token
         id: app-token
         if: vars.RELEASE_APP_ID != ''
-        uses: actions/create-github-app-token@v2
+        uses: actions/create-github-app-token@v3
         with:
           app-id: ${{ vars.RELEASE_APP_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
@@ -2118,7 +2118,7 @@ jobs:
       - name: Generate App Token
         id: app-token
         if: vars.RELEASE_APP_ID != ''
-        uses: actions/create-github-app-token@v2
+        uses: actions/create-github-app-token@v3
         with:
           app-id: ${{ vars.RELEASE_APP_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 265729c2..6e61c4d3 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -192,7 +192,7 @@ jobs:
           && github.event_name == 'pull_request'
           && vars.VALIDATION_APP_ID != ''
         continue-on-error: true
-        uses: actions/create-github-app-token@v2
+        uses: actions/create-github-app-token@v3
         with:
           app-id: ${{ vars.VALIDATION_APP_ID }}
           private-key: ${{ secrets.VALIDATION_APP_PRIVATE_KEY }}
diff --git a/release_automation/workflows/release-automation-caller.yml b/release_automation/workflows/release-automation-caller.yml
index 1632410b..e9a8f38a 100644
--- a/release_automation/workflows/release-automation-caller.yml
+++ b/release_automation/workflows/release-automation-caller.yml
@@ -12,7 +12,7 @@
 # - Push to main: when release-plan.yaml changes (auto sync-issue)
 # - Manual: workflow_dispatch triggers sync-issue (reads from release-plan.yaml)
 
-name: Release Automation
+name: CAMARA Release Automation
 
 on:
   # Slash commands via issue comments

From f3a5e28dc87128c92e640fc042d6c0297e72a456 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Tue, 31 Mar 2026 11:43:02 +0200
Subject: [PATCH 032/157] feat(validation): configurable profiles and
 simplified stages

Replace hardcoded profile selection with per-repo configuration:
- Stage enum: disabled/advisory/standard -> disabled/advisory/enabled
- New config fields: pr_profile and release_profile per repo and defaults
- select_profile() now config-driven instead of trigger-based
- Fork override forces both profiles to standard
- Release-automation and release-review PRs default to standard profile

This allows repos to graduate to strict individually once rules are
reviewed, rather than blocking all repos with untuned warnings.
---
 .../workflows/release-automation-reusable.yml |   4 +-
 validation/config/config_gate.py              |  29 ++-
 validation/config/validation-config.yaml      |   2 +-
 validation/context/context_builder.py         |  25 ++-
 validation/orchestrator.py                    |   7 +-
 .../schemas/validation-config-schema.yaml     |  32 ++-
 validation/tests/test_config_gate.py          | 186 ++++++++++++++++--
 validation/tests/test_context_builder.py      |  92 +++++++--
 validation/tests/test_orchestrator.py         |  12 +-
 validation/tests/test_output_commit_status.py |   2 +-
 validation/tests/test_output_diagnostics.py   |   2 +-
 validation/tests/test_output_pr_comment.py    |   2 +-
 .../tests/test_output_workflow_summary.py     |   2 +-
 .../tests/test_postfilter_conditions.py       |   2 +-
 validation/tests/test_postfilter_engine.py    |   2 +-
 validation/tests/test_postfilter_levels.py    |   2 +-
 validation/tests/test_python_adapter.py       |   2 +-
 .../tests/test_python_checks_changelog.py     |   2 +-
 .../tests/test_python_checks_filename.py      |   2 +-
 .../tests/test_python_checks_metadata.py      |   2 +-
 .../tests/test_python_checks_release_plan.py  |   2 +-
 .../test_python_checks_release_review.py      |   2 +-
 validation/tests/test_python_checks_test.py   |   2 +-
 .../tests/test_python_checks_version.py       |   2 +-
 24 files changed, 348 insertions(+), 71 deletions(-)

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index 1831e48a..70cda467 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -822,8 +822,8 @@ jobs:
       # ── Pre-snapshot validation ────────────────────────────────────
       #
       # Run the validation framework on the source branch before
-      # creating the snapshot.  Strict profile — errors and warnings
-      # block snapshot creation.
+      # creating the snapshot.  Profile from per-repo config
+      # (default: standard — errors block snapshot creation).
 
       - name: Checkout repository for validation
         uses: actions/checkout@v6
diff --git a/validation/config/config_gate.py b/validation/config/config_gate.py
index bee25f27..d7a99434 100644
--- a/validation/config/config_gate.py
+++ b/validation/config/config_gate.py
@@ -25,7 +25,7 @@
 
 STAGE_DISABLED = "disabled"
 STAGE_ADVISORY = "advisory"
-STAGE_STANDARD = "standard"
+STAGE_ENABLED = "enabled"
 
 # ---------------------------------------------------------------------------
 # Exceptions
@@ -65,6 +65,8 @@ class StageGateResult:
     reason: str = ""
     is_fork: bool = False
     fork_override_applied: bool = False
+    pr_profile: str = "standard"
+    release_profile: str = "standard"
 
 
 # ---------------------------------------------------------------------------
@@ -147,9 +149,26 @@ def resolve_stage(
     if is_fork:
         fork_owners = config.get("fork_owners") or []
         if repo_owner in fork_owners:
-            stage = STAGE_STANDARD
+            stage = STAGE_ENABLED
             fork_override_applied = True
 
+    # Resolve profiles from config
+    defaults = config.get("defaults") or {}
+    if fork_override_applied:
+        pr_profile = "standard"
+        release_profile = "standard"
+    else:
+        pr_profile = (
+            (repo_entry or {}).get("pr_profile")
+            or defaults.get("pr_profile")
+            or "standard"
+        )
+        release_profile = (
+            (repo_entry or {}).get("release_profile")
+            or defaults.get("release_profile")
+            or "standard"
+        )
+
     # Steps 5-7: gate decisions
     if stage == STAGE_DISABLED:
         return StageGateResult(
@@ -158,6 +177,8 @@ def resolve_stage(
             reason="Validation is not enabled for this repository",
             is_fork=is_fork,
             fork_override_applied=fork_override_applied,
+            pr_profile=pr_profile,
+            release_profile=release_profile,
         )
 
     if stage == STAGE_ADVISORY and trigger_type == "pull_request":
@@ -170,6 +191,8 @@ def resolve_stage(
             ),
             is_fork=is_fork,
             fork_override_applied=fork_override_applied,
+            pr_profile=pr_profile,
+            release_profile=release_profile,
         )
 
     return StageGateResult(
@@ -177,6 +200,8 @@ def resolve_stage(
         should_continue=True,
         is_fork=is_fork,
         fork_override_applied=fork_override_applied,
+        pr_profile=pr_profile,
+        release_profile=release_profile,
     )
 
 
diff --git a/validation/config/validation-config.yaml b/validation/config/validation-config.yaml
index 9bc81586..f47fb572 100644
--- a/validation/config/validation-config.yaml
+++ b/validation/config/validation-config.yaml
@@ -13,4 +13,4 @@ fork_owners:
 
 repositories:
   ReleaseTest:
-    stage: standard
+    stage: enabled
diff --git a/validation/context/context_builder.py b/validation/context/context_builder.py
index 6245c0ae..671c0332 100644
--- a/validation/context/context_builder.py
+++ b/validation/context/context_builder.py
@@ -196,16 +196,18 @@ def select_profile(
     branch_type: str,
     is_release_review_pr: bool,
     profile_override: str = "",
+    pr_profile: str = "standard",
+    release_profile: str = "standard",
 ) -> str:
     """Auto-select the validation profile.
 
     If *profile_override* is a valid profile name it takes precedence.
 
-    Profile selection table (design doc section 8.1):
-        dispatch / local         → advisory
-        release-automation       → strict
-        pr + release + review    → strict
-        pr + any other           → standard
+    Profile selection (DEC-023):
+        dispatch / local              → advisory (hardcoded)
+        release-automation            → release_profile from config
+        pr + release + review         → release_profile from config
+        pr + any other                → pr_profile from config
     """
     if profile_override and profile_override in _VALID_PROFILES:
         return profile_override
@@ -213,11 +215,11 @@ def select_profile(
     if trigger_type in (TRIGGER_DISPATCH, TRIGGER_LOCAL):
         return PROFILE_ADVISORY
     if trigger_type == TRIGGER_RELEASE_AUTOMATION:
-        return PROFILE_STRICT
+        return release_profile
     # trigger_type == TRIGGER_PR
     if branch_type == BRANCH_RELEASE and is_release_review_pr:
-        return PROFILE_STRICT
-    return PROFILE_STANDARD
+        return release_profile
+    return pr_profile
 
 
 def derive_api_maturity(target_api_version: str) -> str:
@@ -250,6 +252,8 @@ def build_validation_context(
     mode: str = "",
     profile_override: str = "",
     stage: str = "",
+    pr_profile: str = "standard",
+    release_profile: str = "standard",
     pr_number: Optional[int] = None,
     release_plan_changed: Optional[bool] = None,
     repo_path: Optional[Path] = None,
@@ -272,7 +276,10 @@ def build_validation_context(
     is_review = is_release_review_pr_check(base_ref) if base_ref else False
 
     # Profile selection
-    profile = select_profile(trigger_type, branch_type, is_review, profile_override)
+    profile = select_profile(
+        trigger_type, branch_type, is_review, profile_override,
+        pr_profile=pr_profile, release_profile=release_profile,
+    )
 
     # Release plan
     target_release_type: Optional[str] = None
diff --git a/validation/orchestrator.py b/validation/orchestrator.py
index 74e32df8..7266f12f 100644
--- a/validation/orchestrator.py
+++ b/validation/orchestrator.py
@@ -390,11 +390,14 @@ def main() -> int:
         trigger_type=args.event_name,
     )
     logger.info(
-        "Config gate: stage=%s continue=%s fork=%s override=%s",
+        "Config gate: stage=%s continue=%s fork=%s override=%s "
+        "pr_profile=%s release_profile=%s",
         stage_result.stage,
         stage_result.should_continue,
         stage_result.is_fork,
         stage_result.fork_override_applied,
+        stage_result.pr_profile,
+        stage_result.release_profile,
     )
     if not stage_result.should_continue:
         write_skip_output(args.output_dir, stage_result.reason)
@@ -411,6 +414,8 @@ def main() -> int:
         mode=args.mode,
         profile_override=args.profile,
         stage=stage_result.stage,
+        pr_profile=stage_result.pr_profile,
+        release_profile=stage_result.release_profile,
         pr_number=args.pr_number,
         release_plan_changed=args.release_plan_changed,
         repo_path=args.repo_path,
diff --git a/validation/schemas/validation-config-schema.yaml b/validation/schemas/validation-config-schema.yaml
index 97e0821d..ac30f5dd 100644
--- a/validation/schemas/validation-config-schema.yaml
+++ b/validation/schemas/validation-config-schema.yaml
@@ -25,11 +25,24 @@ properties:
     properties:
       stage:
         type: string
-        enum: [disabled, advisory, standard]
+        enum: [disabled, advisory, enabled]
         description: >
           Default stage for repositories not listed under "repositories".
           disabled = dark (exit immediately), advisory = dispatch only,
-          standard = PRs and dispatch.
+          enabled = PRs and dispatch.
+      pr_profile:
+        type: string
+        enum: [advisory, standard, strict]
+        description: >
+          Default profile for PR-triggered validation.
+          advisory = nothing blocks, standard = errors block,
+          strict = errors and warnings block.
+      release_profile:
+        type: string
+        enum: [advisory, standard, strict]
+        description: >
+          Default profile for release-automation and release-review
+          validation.
     additionalProperties: false
 
   fork_owners:
@@ -39,7 +52,7 @@ properties:
     description: >
       GitHub usernames allowed to test in forks.  When the workflow runs
       in a fork owned by a listed user, the stage is overridden to
-      "standard" regardless of the upstream repo's configured stage.
+      "enabled" regardless of the upstream repo's configured stage.
 
   repositories:
     type: object
@@ -50,7 +63,18 @@ properties:
       properties:
         stage:
           type: string
-          enum: [disabled, advisory, standard]
+          enum: [disabled, advisory, enabled]
+        pr_profile:
+          type: string
+          enum: [advisory, standard, strict]
+          description: >
+            Profile for PR-triggered validation on this repository.
+        release_profile:
+          type: string
+          enum: [advisory, standard, strict]
+          description: >
+            Profile for release-automation and release-review validation
+            on this repository.
       additionalProperties: false
 
 additionalProperties: false
diff --git a/validation/tests/test_config_gate.py b/validation/tests/test_config_gate.py
index 1be22da8..f00d52c7 100644
--- a/validation/tests/test_config_gate.py
+++ b/validation/tests/test_config_gate.py
@@ -36,7 +36,7 @@ def sample_config():
         "fork_owners": ["hdamker", "rartych"],
         "repositories": {
             "ReleaseTest": {"stage": "advisory"},
-            "QualityOnDemand": {"stage": "standard"},
+            "QualityOnDemand": {"stage": "enabled"},
         },
     }
 
@@ -82,6 +82,13 @@ def test_invalid_stage_rejected(self, tmp_path, schema_path):
         with pytest.raises(ConfigValidationError):
             load_and_validate_config(cfg_path, schema_path)
 
+    def test_old_standard_stage_rejected(self, tmp_path, schema_path):
+        """The old 'standard' stage value is no longer valid."""
+        cfg = {"version": 1, "defaults": {"stage": "standard"}}
+        cfg_path = _write_yaml(tmp_path / "config.yaml", cfg)
+        with pytest.raises(ConfigValidationError):
+            load_and_validate_config(cfg_path, schema_path)
+
     def test_missing_defaults_rejected(self, tmp_path, schema_path):
         cfg = {"version": 1}
         cfg_path = _write_yaml(tmp_path / "config.yaml", cfg)
@@ -110,6 +117,47 @@ def test_multiple_errors_collected(self, tmp_path, schema_path):
             load_and_validate_config(cfg_path, schema_path)
         assert len(exc_info.value.errors) >= 2
 
+    def test_profiles_in_defaults_accepted(self, tmp_path, schema_path):
+        """Default profiles are valid optional fields."""
+        cfg = {
+            "version": 1,
+            "defaults": {
+                "stage": "disabled",
+                "pr_profile": "strict",
+                "release_profile": "advisory",
+            },
+        }
+        cfg_path = _write_yaml(tmp_path / "config.yaml", cfg)
+        result = load_and_validate_config(cfg_path, schema_path)
+        assert result["defaults"]["pr_profile"] == "strict"
+
+    def test_profiles_in_repo_accepted(self, tmp_path, schema_path):
+        """Per-repo profiles are valid optional fields."""
+        cfg = {
+            "version": 1,
+            "defaults": {"stage": "disabled"},
+            "repositories": {
+                "TestRepo": {
+                    "stage": "enabled",
+                    "pr_profile": "advisory",
+                    "release_profile": "strict",
+                },
+            },
+        }
+        cfg_path = _write_yaml(tmp_path / "config.yaml", cfg)
+        result = load_and_validate_config(cfg_path, schema_path)
+        assert result["repositories"]["TestRepo"]["pr_profile"] == "advisory"
+
+    def test_invalid_profile_value_rejected(self, tmp_path, schema_path):
+        """Profile values must be advisory/standard/strict."""
+        cfg = {
+            "version": 1,
+            "defaults": {"stage": "disabled", "pr_profile": "blocking"},
+        }
+        cfg_path = _write_yaml(tmp_path / "config.yaml", cfg)
+        with pytest.raises(ConfigValidationError):
+            load_and_validate_config(cfg_path, schema_path)
+
 
 # ---------------------------------------------------------------------------
 # TestResolveStage
@@ -155,7 +203,7 @@ def test_advisory_dispatch_continues(self, sample_config):
         assert result.should_continue is True
         assert result.stage == "advisory"
 
-    def test_standard_continues(self, sample_config):
+    def test_enabled_continues(self, sample_config):
         result = resolve_stage(
             sample_config,
             "camaraproject/QualityOnDemand",
@@ -163,16 +211,16 @@ def test_standard_continues(self, sample_config):
             "pull_request",
         )
         assert result.should_continue is True
-        assert result.stage == "standard"
+        assert result.stage == "enabled"
 
-    def test_fork_listed_owner_overrides_to_standard(self, sample_config):
+    def test_fork_listed_owner_overrides_to_enabled(self, sample_config):
         result = resolve_stage(
             sample_config,
             "hdamker/QualityOnDemand",
             "hdamker",
             "pull_request",
         )
-        assert result.stage == "standard"
+        assert result.stage == "enabled"
         assert result.is_fork is True
         assert result.fork_override_applied is True
         assert result.should_continue is True
@@ -184,9 +232,9 @@ def test_fork_unlisted_owner_keeps_resolved_stage(self, sample_config):
             "unknown-user",
             "pull_request",
         )
-        # QualityOnDemand is "standard" in config, but no fork override for
+        # QualityOnDemand is "enabled" in config, but no fork override for
         # an unlisted owner — the stage stays as looked up.
-        assert result.stage == "standard"
+        assert result.stage == "enabled"
         assert result.is_fork is True
         assert result.fork_override_applied is False
 
@@ -214,10 +262,10 @@ def test_repo_name_extracted_from_full_name(self, sample_config):
             "camaraproject",
             "pull_request",
         )
-        assert result.stage == "standard"
+        assert result.stage == "enabled"
 
     def test_fork_disabled_unlisted_stays_disabled(self, sample_config):
-        """Unlisted fork owner of a disabled repo → stays disabled."""
+        """Unlisted fork owner of a disabled repo -> stays disabled."""
         result = resolve_stage(
             sample_config,
             "stranger/UnknownRepo",
@@ -229,15 +277,15 @@ def test_fork_disabled_unlisted_stays_disabled(self, sample_config):
         assert result.is_fork is True
         assert result.fork_override_applied is False
 
-    def test_fork_disabled_listed_overrides_to_standard(self, sample_config):
-        """Listed fork owner of a disabled repo → overrides to standard."""
+    def test_fork_disabled_listed_overrides_to_enabled(self, sample_config):
+        """Listed fork owner of a disabled repo -> overrides to enabled."""
         result = resolve_stage(
             sample_config,
             "hdamker/UnknownRepo",
             "hdamker",
             "workflow_dispatch",
         )
-        assert result.stage == "standard"
+        assert result.stage == "enabled"
         assert result.should_continue is True
         assert result.fork_override_applied is True
 
@@ -259,9 +307,119 @@ def test_no_fork_owners_key(self):
         assert result.fork_override_applied is False
 
     def test_no_repositories_key(self):
-        cfg = {"version": 1, "defaults": {"stage": "standard"}}
+        cfg = {"version": 1, "defaults": {"stage": "enabled"}}
         result = resolve_stage(
             cfg, "camaraproject/AnyRepo", "camaraproject", "pull_request"
         )
-        assert result.stage == "standard"
+        assert result.stage == "enabled"
         assert result.should_continue is True
+
+
+# ---------------------------------------------------------------------------
+# TestResolveStageProfiles
+# ---------------------------------------------------------------------------
+
+
+class TestResolveStageProfiles:
+    """Tests for profile resolution in resolve_stage."""
+
+    def test_defaults_to_standard_when_absent(self):
+        """No profiles anywhere -> both default to standard."""
+        cfg = {
+            "version": 1,
+            "defaults": {"stage": "enabled"},
+            "repositories": {"Repo": {"stage": "enabled"}},
+        }
+        result = resolve_stage(cfg, "camaraproject/Repo", "camaraproject", "pull_request")
+        assert result.pr_profile == "standard"
+        assert result.release_profile == "standard"
+
+    def test_pr_profile_from_repo_config(self):
+        cfg = {
+            "version": 1,
+            "defaults": {"stage": "disabled"},
+            "repositories": {
+                "Repo": {"stage": "enabled", "pr_profile": "advisory"},
+            },
+        }
+        result = resolve_stage(cfg, "camaraproject/Repo", "camaraproject", "pull_request")
+        assert result.pr_profile == "advisory"
+        assert result.release_profile == "standard"  # not set -> default
+
+    def test_release_profile_from_repo_config(self):
+        cfg = {
+            "version": 1,
+            "defaults": {"stage": "disabled"},
+            "repositories": {
+                "Repo": {"stage": "enabled", "release_profile": "strict"},
+            },
+        }
+        result = resolve_stage(cfg, "camaraproject/Repo", "camaraproject", "pull_request")
+        assert result.release_profile == "strict"
+        assert result.pr_profile == "standard"  # not set -> default
+
+    def test_profiles_fallback_to_defaults(self):
+        """Repo entry has no profiles -> fall back to defaults section."""
+        cfg = {
+            "version": 1,
+            "defaults": {
+                "stage": "disabled",
+                "pr_profile": "advisory",
+                "release_profile": "strict",
+            },
+            "repositories": {"Repo": {"stage": "enabled"}},
+        }
+        result = resolve_stage(cfg, "camaraproject/Repo", "camaraproject", "pull_request")
+        assert result.pr_profile == "advisory"
+        assert result.release_profile == "strict"
+
+    def test_repo_profile_overrides_defaults(self):
+        """Repo-level profile wins over defaults."""
+        cfg = {
+            "version": 1,
+            "defaults": {
+                "stage": "disabled",
+                "pr_profile": "advisory",
+                "release_profile": "advisory",
+            },
+            "repositories": {
+                "Repo": {
+                    "stage": "enabled",
+                    "pr_profile": "strict",
+                    "release_profile": "strict",
+                },
+            },
+        }
+        result = resolve_stage(cfg, "camaraproject/Repo", "camaraproject", "pull_request")
+        assert result.pr_profile == "strict"
+        assert result.release_profile == "strict"
+
+    def test_fork_override_forces_profiles_to_standard(self):
+        """Fork override sets both profiles to standard regardless of config."""
+        cfg = {
+            "version": 1,
+            "defaults": {"stage": "disabled"},
+            "fork_owners": ["hdamker"],
+            "repositories": {
+                "Repo": {
+                    "stage": "enabled",
+                    "pr_profile": "strict",
+                    "release_profile": "strict",
+                },
+            },
+        }
+        result = resolve_stage(cfg, "hdamker/Repo", "hdamker", "pull_request")
+        assert result.fork_override_applied is True
+        assert result.pr_profile == "standard"
+        assert result.release_profile == "standard"
+
+    def test_profiles_present_even_when_disabled(self):
+        """Profiles are resolved even when stage is disabled (for diagnostics)."""
+        cfg = {
+            "version": 1,
+            "defaults": {"stage": "disabled", "pr_profile": "strict"},
+            "repositories": {},
+        }
+        result = resolve_stage(cfg, "camaraproject/Repo", "camaraproject", "pull_request")
+        assert result.should_continue is False
+        assert result.pr_profile == "strict"
diff --git a/validation/tests/test_context_builder.py b/validation/tests/test_context_builder.py
index b6240250..75b86e52 100644
--- a/validation/tests/test_context_builder.py
+++ b/validation/tests/test_context_builder.py
@@ -74,27 +74,69 @@ def test_unknown_event_fallback(self):
 
 
 class TestSelectProfile:
-    def test_dispatch_gets_advisory(self):
-        assert select_profile("dispatch", "main", False) == "advisory"
-
-    def test_release_automation_gets_strict(self):
-        assert select_profile("release-automation", "main", False) == "strict"
+    """Profile selection tests per DEC-023: config-driven profiles."""
 
-    def test_pr_release_review_gets_strict(self):
-        assert select_profile("pr", "release", True) == "strict"
+    def test_dispatch_gets_advisory(self):
+        """Dispatch always returns advisory regardless of config profiles."""
+        assert select_profile(
+            "dispatch", "main", False,
+            pr_profile="strict", release_profile="strict",
+        ) == "advisory"
 
-    def test_pr_main_gets_standard(self):
+    def test_local_gets_advisory(self):
+        """Local always returns advisory regardless of config profiles."""
+        assert select_profile(
+            "local", "main", False,
+            pr_profile="strict", release_profile="strict",
+        ) == "advisory"
+
+    def test_release_automation_uses_release_profile(self):
+        assert select_profile(
+            "release-automation", "main", False,
+            release_profile="strict",
+        ) == "strict"
+
+    def test_release_automation_defaults_to_standard(self):
+        assert select_profile("release-automation", "main", False) == "standard"
+
+    def test_pr_release_review_uses_release_profile(self):
+        assert select_profile(
+            "pr", "release", True,
+            release_profile="strict",
+        ) == "strict"
+
+    def test_pr_release_review_defaults_to_standard(self):
+        assert select_profile("pr", "release", True) == "standard"
+
+    def test_pr_main_uses_pr_profile(self):
+        assert select_profile(
+            "pr", "main", False,
+            pr_profile="advisory",
+        ) == "advisory"
+
+    def test_pr_feature_uses_pr_profile(self):
+        assert select_profile(
+            "pr", "feature", False,
+            pr_profile="strict",
+        ) == "strict"
+
+    def test_pr_defaults_to_standard(self):
         assert select_profile("pr", "main", False) == "standard"
 
-    def test_pr_feature_gets_standard(self):
-        assert select_profile("pr", "feature", False) == "standard"
-
-    def test_pr_maintenance_gets_standard(self):
-        assert select_profile("pr", "maintenance", False) == "standard"
+    def test_pr_maintenance_uses_pr_profile(self):
+        assert select_profile(
+            "pr", "maintenance", False,
+            pr_profile="advisory",
+        ) == "advisory"
 
     def test_profile_override_wins(self):
+        """Override takes precedence over config profiles."""
         assert (
-            select_profile("dispatch", "main", False, profile_override="strict")
+            select_profile(
+                "pr", "main", False,
+                profile_override="strict",
+                pr_profile="advisory",
+            )
             == "strict"
         )
 
@@ -104,9 +146,6 @@ def test_invalid_profile_override_ignored(self):
             == "advisory"
         )
 
-    def test_local_gets_advisory(self):
-        assert select_profile("local", "main", False) == "advisory"
-
 
 # ---------------------------------------------------------------------------
 # TestDeriveApiMaturity
@@ -169,7 +208,7 @@ def sample_context(self):
             branch_type="main",
             trigger_type="pr",
             profile="standard",
-            stage="standard",
+            stage="enabled",
             target_release_type="pre-release-rc",
             commonalities_release="r4.1",
             icm_release=None,
@@ -271,6 +310,7 @@ def test_fallback_populates_context(self, repo_with_metadata):
             event_name="pull_request",
             ref_name="release-review/r4.1-abc1234",
             base_ref="release-snapshot/r4.1-abc1234",
+            release_profile="strict",
             repo_path=repo_with_metadata,
             release_plan_schema_path=PLAN_SCHEMA,
             release_metadata_schema_path=METADATA_SCHEMA,
@@ -285,6 +325,20 @@ def test_fallback_populates_context(self, repo_with_metadata):
         assert ctx.apis[0].target_api_version == "1.0.0-rc.2"
         assert ctx.apis[0].target_api_status == "rc"
 
+    def test_fallback_defaults_to_standard_profile(self, repo_with_metadata):
+        """Without explicit release_profile, release-review PR defaults to standard."""
+        ctx = build_validation_context(
+            repo_name="camaraproject/QualityOnDemand",
+            event_name="pull_request",
+            ref_name="release-review/r4.1-abc1234",
+            base_ref="release-snapshot/r4.1-abc1234",
+            repo_path=repo_with_metadata,
+            release_plan_schema_path=PLAN_SCHEMA,
+            release_metadata_schema_path=METADATA_SCHEMA,
+        )
+        assert ctx.is_release_review_pr is True
+        assert ctx.profile == "standard"
+
     def test_no_fallback_when_release_plan_exists(self, repo_with_metadata):
         """When release-plan.yaml exists, metadata fallback is not used."""
         _write_yaml(
@@ -341,6 +395,6 @@ def test_no_fallback_for_non_review_pr(self, tmp_path):
             release_plan_schema_path=PLAN_SCHEMA,
             release_metadata_schema_path=METADATA_SCHEMA,
         )
-        # Not a release review → no fallback → target_release_type stays None
+        # Not a release review -> no fallback -> target_release_type stays None
         assert ctx.is_release_review_pr is False
         assert ctx.target_release_type is None
diff --git a/validation/tests/test_orchestrator.py b/validation/tests/test_orchestrator.py
index 357c15a4..8bb2845f 100644
--- a/validation/tests/test_orchestrator.py
+++ b/validation/tests/test_orchestrator.py
@@ -90,7 +90,7 @@ def _make_context(**overrides):
         "branch_type": "main",
         "trigger_type": "pr",
         "profile": "standard",
-        "stage": "standard",
+        "stage": "enabled",
         "target_release_type": None,
         "commonalities_release": None,
         "icm_release": None,
@@ -550,11 +550,13 @@ def test_full_pipeline_pass(
         (tmp_path / "tooling").mkdir()
 
         mock_gate.return_value = MagicMock(
-            stage="standard",
+            stage="enabled",
             should_continue=True,
             is_fork=False,
             fork_override_applied=False,
             reason="",
+            pr_profile="standard",
+            release_profile="standard",
         )
         ctx = _make_context()
         mock_context.return_value = ctx
@@ -585,8 +587,9 @@ def test_full_pipeline_fail(
         (tmp_path / "tooling").mkdir()
 
         mock_gate.return_value = MagicMock(
-            stage="standard", should_continue=True, is_fork=False,
+            stage="enabled", should_continue=True, is_fork=False,
             fork_override_applied=False, reason="",
+            pr_profile="standard", release_profile="standard",
         )
         mock_context.return_value = _make_context()
         findings = [_make_finding(level="error", blocks=True)]
@@ -635,8 +638,9 @@ def test_engine_statuses_passed_to_summary(
         (tmp_path / "tooling").mkdir()
 
         mock_gate.return_value = MagicMock(
-            stage="standard", should_continue=True, is_fork=False,
+            stage="enabled", should_continue=True, is_fork=False,
             fork_override_applied=False, reason="",
+            pr_profile="standard", release_profile="standard",
         )
         mock_context.return_value = _make_context()
         statuses = {
diff --git a/validation/tests/test_output_commit_status.py b/validation/tests/test_output_commit_status.py
index 482a5def..9a5c9c0e 100644
--- a/validation/tests/test_output_commit_status.py
+++ b/validation/tests/test_output_commit_status.py
@@ -24,7 +24,7 @@ def _make_context(
         branch_type="main",
         trigger_type="pr",
         profile="standard",
-        stage="standard",
+        stage="enabled",
         target_release_type=None,
         commonalities_release=None,
         icm_release=None,
diff --git a/validation/tests/test_output_diagnostics.py b/validation/tests/test_output_diagnostics.py
index 77d6f7d8..0fcd1e4d 100644
--- a/validation/tests/test_output_diagnostics.py
+++ b/validation/tests/test_output_diagnostics.py
@@ -21,7 +21,7 @@ def _make_context() -> ValidationContext:
         branch_type="main",
         trigger_type="pr",
         profile="standard",
-        stage="standard",
+        stage="enabled",
         target_release_type=None,
         commonalities_release=None,
         icm_release=None,
diff --git a/validation/tests/test_output_pr_comment.py b/validation/tests/test_output_pr_comment.py
index fa1b0ddb..3f546506 100644
--- a/validation/tests/test_output_pr_comment.py
+++ b/validation/tests/test_output_pr_comment.py
@@ -21,7 +21,7 @@ def _make_context(
         branch_type="main",
         trigger_type="pr",
         profile=profile,
-        stage="standard",
+        stage="enabled",
         target_release_type=None,
         commonalities_release=None,
         icm_release=None,
diff --git a/validation/tests/test_output_workflow_summary.py b/validation/tests/test_output_workflow_summary.py
index 05e3d701..7b432682 100644
--- a/validation/tests/test_output_workflow_summary.py
+++ b/validation/tests/test_output_workflow_summary.py
@@ -28,7 +28,7 @@ def _make_context(
         branch_type=branch_type,
         trigger_type=trigger_type,
         profile=profile,
-        stage="standard",
+        stage="enabled",
         target_release_type=None,
         commonalities_release=None,
         icm_release=None,
diff --git a/validation/tests/test_postfilter_conditions.py b/validation/tests/test_postfilter_conditions.py
index 613dffc2..14ca9a89 100644
--- a/validation/tests/test_postfilter_conditions.py
+++ b/validation/tests/test_postfilter_conditions.py
@@ -33,7 +33,7 @@ def _make_context(
         branch_type=branch_type,
         trigger_type=trigger_type,
         profile=profile,
-        stage="standard",
+        stage="enabled",
         target_release_type=target_release_type,
         commonalities_release=commonalities_release,
         icm_release=None,
diff --git a/validation/tests/test_postfilter_engine.py b/validation/tests/test_postfilter_engine.py
index 0d2abb86..149e29ed 100644
--- a/validation/tests/test_postfilter_engine.py
+++ b/validation/tests/test_postfilter_engine.py
@@ -36,7 +36,7 @@ def _make_context(
         branch_type=branch_type,
         trigger_type=trigger_type,
         profile=profile,
-        stage="standard",
+        stage="enabled",
         target_release_type=target_release_type,
         commonalities_release=commonalities_release,
         icm_release=None,
diff --git a/validation/tests/test_postfilter_levels.py b/validation/tests/test_postfilter_levels.py
index 6ff24296..ca964f01 100644
--- a/validation/tests/test_postfilter_levels.py
+++ b/validation/tests/test_postfilter_levels.py
@@ -33,7 +33,7 @@ def _make_context(
         branch_type=branch_type,
         trigger_type=trigger_type,
         profile=profile,
-        stage="standard",
+        stage="enabled",
         target_release_type=target_release_type,
         commonalities_release=commonalities_release,
         icm_release=None,
diff --git a/validation/tests/test_python_adapter.py b/validation/tests/test_python_adapter.py
index ee15d24f..7f4e3721 100644
--- a/validation/tests/test_python_adapter.py
+++ b/validation/tests/test_python_adapter.py
@@ -31,7 +31,7 @@ def _make_context(
         branch_type=branch_type,
         trigger_type="dispatch",
         profile="advisory",
-        stage="standard",
+        stage="enabled",
         target_release_type=None,
         commonalities_release=None,
         icm_release=None,
diff --git a/validation/tests/test_python_checks_changelog.py b/validation/tests/test_python_checks_changelog.py
index 55dffa4b..0f12d263 100644
--- a/validation/tests/test_python_checks_changelog.py
+++ b/validation/tests/test_python_checks_changelog.py
@@ -33,7 +33,7 @@ def _make_context(
         branch_type="main",
         trigger_type="dispatch",
         profile="advisory",
-        stage="standard",
+        stage="enabled",
         target_release_type=target_release_type,
         commonalities_release=None,
         icm_release=None,
diff --git a/validation/tests/test_python_checks_filename.py b/validation/tests/test_python_checks_filename.py
index 8e2a7634..5fd62b6d 100644
--- a/validation/tests/test_python_checks_filename.py
+++ b/validation/tests/test_python_checks_filename.py
@@ -32,7 +32,7 @@ def _make_context(api_name: str) -> ValidationContext:
         branch_type="main",
         trigger_type="dispatch",
         profile="advisory",
-        stage="standard",
+        stage="enabled",
         target_release_type=None,
         commonalities_release=None,
         icm_release=None,
diff --git a/validation/tests/test_python_checks_metadata.py b/validation/tests/test_python_checks_metadata.py
index 61d203b9..c6b45422 100644
--- a/validation/tests/test_python_checks_metadata.py
+++ b/validation/tests/test_python_checks_metadata.py
@@ -36,7 +36,7 @@ def _make_context(api_names: list[str]) -> ValidationContext:
         branch_type="main",
         trigger_type="dispatch",
         profile="advisory",
-        stage="standard",
+        stage="enabled",
         target_release_type=None,
         commonalities_release=None,
         icm_release=None,
diff --git a/validation/tests/test_python_checks_release_plan.py b/validation/tests/test_python_checks_release_plan.py
index a7055bf8..58f2471c 100644
--- a/validation/tests/test_python_checks_release_plan.py
+++ b/validation/tests/test_python_checks_release_plan.py
@@ -28,7 +28,7 @@ def _make_context() -> ValidationContext:
         branch_type="main",
         trigger_type="dispatch",
         profile="advisory",
-        stage="standard",
+        stage="enabled",
         target_release_type=None,
         commonalities_release=None,
         icm_release=None,
diff --git a/validation/tests/test_python_checks_release_review.py b/validation/tests/test_python_checks_release_review.py
index 70f808c0..a7c0aa63 100644
--- a/validation/tests/test_python_checks_release_review.py
+++ b/validation/tests/test_python_checks_release_review.py
@@ -25,7 +25,7 @@ def _make_context(is_release_review: bool = True) -> ValidationContext:
         branch_type="release",
         trigger_type="pr",
         profile="strict",
-        stage="standard",
+        stage="enabled",
         target_release_type="public-release",
         commonalities_release=None,
         icm_release=None,
diff --git a/validation/tests/test_python_checks_test.py b/validation/tests/test_python_checks_test.py
index 35de0174..23fece81 100644
--- a/validation/tests/test_python_checks_test.py
+++ b/validation/tests/test_python_checks_test.py
@@ -39,7 +39,7 @@ def _make_context(
         branch_type="main",
         trigger_type="dispatch",
         profile="advisory",
-        stage="standard",
+        stage="enabled",
         target_release_type=None,
         commonalities_release=None,
         icm_release=None,
diff --git a/validation/tests/test_python_checks_version.py b/validation/tests/test_python_checks_version.py
index 3ff7bacd..66c99a2f 100644
--- a/validation/tests/test_python_checks_version.py
+++ b/validation/tests/test_python_checks_version.py
@@ -39,7 +39,7 @@ def _make_context(
         branch_type=branch_type,
         trigger_type="dispatch",
         profile="advisory",
-        stage="standard",
+        stage="enabled",
         target_release_type=None,
         commonalities_release=None,
         icm_release=None,

From 51ab2d351fb5d896f6c55333b71d8884b3aea477 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Tue, 31 Mar 2026 13:05:14 +0200
Subject: [PATCH 033/157] =?UTF-8?q?feat(validation):=20pre-RC=20fixes=20ba?=
 =?UTF-8?q?tch=20=E2=80=94=20P-007,=20P-013,=20engine=20table,=20Checks=20?=
 =?UTF-8?q?API?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

P-007: Rewrite check-test-file-version to parse Feature line content
instead of filename suffix. Unmute with conditional level (hint default,
warn for stable releases).

P-013: New check-readme-placeholder-removal detects template README
files that should be removed when API specs are present.

Engine summary table: Replace separate per-API summary and engine
status tables with a single per-engine table showing post-filter
error/warning/hint counts alongside skip/error status.

Checks API: Add Check Run creation for inline PR annotations (no
display cap). Workflow commands retained as fork PR fallback. Commit
status kept alongside Check Run during RC period.

15 files changed, 645 tests passing.
---
 .github/workflows/validation.yml              | 101 ++++++-
 validation/engines/python_checks/__init__.py  |   2 +
 .../engines/python_checks/readme_checks.py    |  75 +++++
 .../engines/python_checks/test_checks.py      |  69 +++--
 validation/orchestrator.py                    |  16 +-
 validation/output/__init__.py                 |   1 +
 validation/output/check_run.py                | 174 +++++++++++
 validation/output/formatting.py               |  14 +
 validation/output/workflow_summary.py         |  72 ++---
 validation/rules/python-rules.yaml            |  22 +-
 validation/tests/test_output_check_run.py     | 279 ++++++++++++++++++
 .../tests/test_output_workflow_summary.py     | 104 ++++---
 validation/tests/test_python_checks_readme.py | 116 ++++++++
 validation/tests/test_python_checks_test.py   |  83 +++++-
 .../tests/test_rule_metadata_integrity.py     |   2 +-
 15 files changed, 1009 insertions(+), 121 deletions(-)
 create mode 100644 validation/engines/python_checks/readme_checks.py
 create mode 100644 validation/output/check_run.py
 create mode 100644 validation/tests/test_output_check_run.py
 create mode 100644 validation/tests/test_python_checks_readme.py

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 6e61c4d3..63abd37a 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -176,15 +176,7 @@ jobs:
           release_plan_changed: ${{ steps.detect-changes.outputs.release_plan_changed || 'false' }}
           tooling_ref: ${{ steps.resolve-ref.outputs.tooling_checkout_ref }}
 
-      # ── Step 8: Emit annotations (PR only) ────────────────────────
-      - name: Emit annotations
-        if: always() && steps.validation.outcome == 'success' && github.event_name == 'pull_request'
-        run: |
-          if [ -f validation-output/annotations.txt ]; then
-            cat validation-output/annotations.txt
-          fi
-
-      # ── Step 9: Mint validation app token (PR only) ──────────────
+      # ── Step 8: Mint validation app token (PR only) ──────────────
       - name: Mint validation app token
         id: mint-token
         if: >-
@@ -197,7 +189,96 @@ jobs:
           app-id: ${{ vars.VALIDATION_APP_ID }}
           private-key: ${{ secrets.VALIDATION_APP_PRIVATE_KEY }}
 
-      # ── Step 13: Post findings to PR ───────────────────────────────
+      # ── Step 9: Create Check Run (PR only) ─────────────────────
+      #
+      # Creates a GitHub Check Run with inline annotations via the
+      # Checks API.  Unlike workflow commands (::error etc.), Check
+      # Run annotations have no display cap in the PR Files tab.
+      #
+      # Token resolution: app token → GITHUB_TOKEN (same-repo PRs
+      # have checks:write).  Fork PRs without app token fall back
+      # to workflow command annotations in the next step.
+      - name: Create Check Run
+        id: check-run
+        if: >-
+          always() && steps.validation.outcome == 'success'
+          && github.event_name == 'pull_request'
+        continue-on-error: true
+        uses: actions/github-script@v8
+        with:
+          github-token: ${{ steps.mint-token.outputs.token || github.token }}
+          script: |
+            const fs = require('fs');
+            const path = 'validation-output/check-run.json';
+            if (!fs.existsSync(path)) {
+              core.info('No check-run.json — skipping Check Run creation');
+              return;
+            }
+
+            const payload = JSON.parse(fs.readFileSync(path, 'utf8'));
+            const owner = context.repo.owner;
+            const repo = context.repo.repo;
+            const sha = '${{ github.event.pull_request.head.sha || github.sha }}';
+            const allAnnotations = payload.annotations || [];
+
+            // Checks API accepts max 50 annotations per call.
+            // Create the Check Run with the first batch, then update
+            // with remaining batches.
+            const BATCH_SIZE = 50;
+            const firstBatch = allAnnotations.slice(0, BATCH_SIZE);
+
+            try {
+              const checkRun = await github.rest.checks.create({
+                owner, repo,
+                name: 'CAMARA Validation',
+                head_sha: sha,
+                status: 'completed',
+                conclusion: payload.conclusion,
+                output: {
+                  title: payload.title,
+                  summary: payload.summary,
+                  annotations: firstBatch,
+                },
+              });
+              core.info(`Check Run created: id=${checkRun.data.id}, conclusion=${payload.conclusion}`);
+
+              // Batch remaining annotations
+              for (let i = BATCH_SIZE; i < allAnnotations.length; i += BATCH_SIZE) {
+                const batch = allAnnotations.slice(i, i + BATCH_SIZE);
+                await github.rest.checks.update({
+                  owner, repo,
+                  check_run_id: checkRun.data.id,
+                  output: {
+                    title: payload.title,
+                    summary: payload.summary,
+                    annotations: batch,
+                  },
+                });
+                core.info(`Check Run updated: batch ${Math.floor(i / BATCH_SIZE) + 1}, ${batch.length} annotations`);
+              }
+
+              core.info(`Total annotations: ${allAnnotations.length}`);
+            } catch (e) {
+              core.warning(`Check Run creation failed: ${e.message}`);
+            }
+
+      # ── Step 10: Emit annotations fallback (fork PR only) ──────
+      #
+      # Workflow command annotations (::error etc.) as fallback when
+      # Check Run creation is not available.  Limited to ~10 per
+      # severity level in the PR view.
+      - name: Emit annotations (fallback)
+        if: >-
+          always() && steps.validation.outcome == 'success'
+          && github.event_name == 'pull_request'
+          && steps.check-run.outcome != 'success'
+        run: |
+          if [ -f validation-output/annotations.txt ]; then
+            echo "::notice::Check Run unavailable — using workflow command annotations (limited display)"
+            cat validation-output/annotations.txt
+          fi
+
+      # ── Step 11: Post findings to PR ───────────────────────────────
       #
       # Token resolution (design doc section 5.1), PR comment, and commit
       # status in a single step.  Skipped entirely for non-PR events.
diff --git a/validation/engines/python_checks/__init__.py b/validation/engines/python_checks/__init__.py
index e7e757c8..c02c2d93 100644
--- a/validation/engines/python_checks/__init__.py
+++ b/validation/engines/python_checks/__init__.py
@@ -9,6 +9,7 @@
 from .changelog_checks import check_changelog_format
 from .filename_checks import check_filename_kebab_case, check_filename_matches_api_name
 from .metadata_checks import check_license_commonalities_consistency
+from .readme_checks import check_readme_placeholder_removal
 from .release_plan_checks import check_release_plan_semantics
 from .release_review_checks import check_release_review_file_restriction
 from .test_checks import (
@@ -38,6 +39,7 @@
     CheckDescriptor("check-release-plan-semantics", CheckScope.REPO, check_release_plan_semantics),
     CheckDescriptor("check-changelog-format", CheckScope.REPO, check_changelog_format),
     CheckDescriptor("check-license-commonalities-consistency", CheckScope.REPO, check_license_commonalities_consistency),
+    CheckDescriptor("check-readme-placeholder-removal", CheckScope.REPO, check_readme_placeholder_removal),
     CheckDescriptor("check-release-review-file-restriction", CheckScope.REPO, check_release_review_file_restriction),
 ]
 
diff --git a/validation/engines/python_checks/readme_checks.py b/validation/engines/python_checks/readme_checks.py
new file mode 100644
index 00000000..ed41da69
--- /dev/null
+++ b/validation/engines/python_checks/readme_checks.py
@@ -0,0 +1,75 @@
+"""README placeholder checks.
+
+Detects template placeholder README files that should be removed once
+real API specification files have been added.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import List
+
+from validation.context import ValidationContext
+
+from ._types import make_finding
+
+_API_DEFS_DIR = "code/API_definitions"
+
+# Key phrase present in both known CAMARA template variants:
+#   "Here you can add your definition file(s). Delete this README.MD ..."
+#   "Here you can add your definitions and delete this README.MD file"
+_PLACEHOLDER_PHRASE = "delete this readme"
+
+
+def check_readme_placeholder_removal(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Check that the template placeholder README is removed.
+
+    Repo-level check.  If ``code/API_definitions/`` contains both a
+    README file with placeholder text *and* real ``.yaml``/``.yml`` spec
+    files, the placeholder should be deleted.
+    """
+    api_dir = repo_path / _API_DEFS_DIR
+    if not api_dir.is_dir():
+        return []
+
+    # Find README file (case-insensitive)
+    readme_file = None
+    for entry in api_dir.iterdir():
+        if entry.is_file() and entry.name.lower() == "readme.md":
+            readme_file = entry
+            break
+
+    if readme_file is None:
+        return []
+
+    # Check if spec files exist alongside the README
+    has_specs = any(
+        f.is_file() and f.suffix in (".yaml", ".yml")
+        for f in api_dir.iterdir()
+    )
+    if not has_specs:
+        return []
+
+    # Read and check for placeholder content
+    try:
+        content = readme_file.read_text(encoding="utf-8")
+    except (OSError, UnicodeDecodeError):
+        return []
+
+    if _PLACEHOLDER_PHRASE not in content.lower():
+        return []
+
+    return [
+        make_finding(
+            engine_rule="check-readme-placeholder-removal",
+            level="warn",
+            message=(
+                f"Placeholder README '{readme_file.name}' should be removed "
+                f"from {_API_DEFS_DIR}/ — API specification files are present"
+            ),
+            path=f"{_API_DEFS_DIR}/{readme_file.name}",
+            line=1,
+        )
+    ]
diff --git a/validation/engines/python_checks/test_checks.py b/validation/engines/python_checks/test_checks.py
index 2ee106fb..de9ed392 100644
--- a/validation/engines/python_checks/test_checks.py
+++ b/validation/engines/python_checks/test_checks.py
@@ -1,13 +1,14 @@
 """Test file checks.
 
 Validates that test files exist for each API, are located in
-``code/Test_definitions/``, and have version-aligned filenames.
+``code/Test_definitions/``, and carry version-aligned Feature lines.
 """
 
 from __future__ import annotations
 
+import re
 from pathlib import Path
-from typing import List
+from typing import List, Optional
 
 from validation.context import ValidationContext
 
@@ -106,17 +107,44 @@ def check_test_files_exist(
     ]
 
 
+# Regex to extract version from CAMARA Feature line.
+# Matches ", v{segment}" where segment runs until whitespace or " -".
+# Examples:
+#   "Feature: CAMARA Quality On Demand API, vwip - Operation deleteSession"  → "vwip"
+#   "Feature: CAMARA QoD API, v0.2alpha2"                                    → "v0.2alpha2"
+_FEATURE_VERSION_RE = re.compile(r",\s*v([^\s-]+)")
+
+
+def _extract_feature_version(file_path: Path) -> Optional[str]:
+    """Read the first line and extract the version segment.
+
+    Returns the version segment (e.g. ``"vwip"``, ``"v1"``) or ``None``
+    if no version could be parsed from the Feature line.
+    """
+    try:
+        with open(file_path, encoding="utf-8") as fh:
+            first_line = fh.readline()
+    except (OSError, UnicodeDecodeError):
+        return None
+
+    m = _FEATURE_VERSION_RE.search(first_line)
+    if m:
+        return f"v{m.group(1)}"
+    return None
+
+
 def check_test_file_version(
     repo_path: Path, context: ValidationContext
 ) -> List[dict]:
-    """Validate test file version suffix matches API version.
+    """Validate that the version in test Feature lines matches the API version.
+
+    Per-API check.  Reads the ``Feature:`` line of each ``.feature`` file
+    and extracts the version segment (e.g. ``vwip``, ``v1``).  Compares
+    against the expected version derived from the API's ``info.version``.
 
-    Per-API check.  Uses CAMARA version-to-URL mapping rules to derive
-    the expected version suffix.  Test files should be named like:
-    ``{api-name}.{version-suffix}.feature`` or
-    ``{api-name}-{operationId}.{version-suffix}.feature``.
+    Example Feature line::
 
-    Example: ``quality-on-demand.v0.2alpha2.feature``
+        Feature: CAMARA Quality On Demand API, vwip - Operation deleteSession
     """
     api = context.apis[0]
     test_dir = repo_path / _TEST_DIR
@@ -133,8 +161,7 @@ def check_test_file_version(
         f for f in test_dir.iterdir()
         if f.is_file()
         and f.suffix == ".feature"
-        and (f.stem == api.api_name or f.stem.startswith(f"{api.api_name}-")
-             or f.stem.startswith(f"{api.api_name}."))
+        and _stem_matches_api(f.stem, api.api_name)
     ]
 
     if not matching:
@@ -143,20 +170,16 @@ def check_test_file_version(
 
     findings: List[dict] = []
     for test_file in matching:
-        # Extract version suffix: everything after the first dot in the stem.
-        # e.g. "quality-on-demand.v1" -> "v1"
-        # e.g. "quality-on-demand-createSession.v0.3" -> "v0.3"
-        stem = test_file.stem
-        dot_idx = stem.find(".")
-        if dot_idx == -1:
-            # No version suffix in filename — report as finding.
+        actual_version = _extract_feature_version(test_file)
+
+        if actual_version is None:
             findings.append(
                 make_finding(
                     engine_rule="check-test-file-version",
                     level="error",
                     message=(
-                        f"Test file '{test_file.name}' has no version suffix "
-                        f"(expected '.{expected_segment}' before .feature)"
+                        f"Test file '{test_file.name}' has no version in its "
+                        f"Feature line (expected '{expected_segment}')"
                     ),
                     path=f"{_TEST_DIR}/{test_file.name}",
                     line=1,
@@ -165,15 +188,15 @@ def check_test_file_version(
             )
             continue
 
-        actual_suffix = stem[dot_idx + 1:]
-        if actual_suffix.lower() != expected_segment.lower():
+        if actual_version.lower() != expected_segment.lower():
             findings.append(
                 make_finding(
                     engine_rule="check-test-file-version",
                     level="error",
                     message=(
-                        f"Test file '{test_file.name}' has version suffix "
-                        f"'{actual_suffix}' but expected '{expected_segment}' "
+                        f"Test file '{test_file.name}' has version "
+                        f"'{actual_version}' in Feature line but expected "
+                        f"'{expected_segment}' "
                         f"(from API version '{api.target_api_version}')"
                     ),
                     path=f"{_TEST_DIR}/{test_file.name}",
diff --git a/validation/orchestrator.py b/validation/orchestrator.py
index 7266f12f..2d15236a 100644
--- a/validation/orchestrator.py
+++ b/validation/orchestrator.py
@@ -33,6 +33,7 @@
 )
 from validation.output import (
     generate_annotations,
+    generate_check_run_payload,
     generate_commit_status,
     generate_pr_comment,
     generate_workflow_summary,
@@ -305,14 +306,25 @@ def write_outputs(
     """Write all output files for the workflow to consume."""
     output_dir.mkdir(parents=True, exist_ok=True)
 
-    # --- Annotations ---
+    # --- Check Run payload (Checks API — primary) ---
+    check_run = generate_check_run_payload(post_filter_result, context)
+    (output_dir / "check-run.json").write_text(
+        json.dumps(check_run.to_dict(), indent=2) + "\n"
+    )
+    logger.info(
+        "Check Run: conclusion=%s, %d annotations",
+        check_run.conclusion,
+        len(check_run.annotations),
+    )
+
+    # --- Annotations (workflow commands — fork PR fallback) ---
     annotation_result = generate_annotations(post_filter_result)
     if annotation_result.commands:
         (output_dir / "annotations.txt").write_text(
             "\n".join(annotation_result.commands) + "\n"
         )
     logger.info(
-        "Annotations: %d emitted (of %d total, truncated=%s)",
+        "Annotations fallback: %d emitted (of %d total, truncated=%s)",
         annotation_result.annotations_emitted,
         annotation_result.total_findings,
         annotation_result.truncated,
diff --git a/validation/output/__init__.py b/validation/output/__init__.py
index 9340b496..e707dce7 100644
--- a/validation/output/__init__.py
+++ b/validation/output/__init__.py
@@ -3,6 +3,7 @@
 # commit status, and diagnostic artifacts.
 
 from .annotations import AnnotationResult, generate_annotations  # noqa: F401
+from .check_run import CheckRunPayload, generate_check_run_payload  # noqa: F401
 from .commit_status import (  # noqa: F401
     CommitStatusPayload,
     generate_commit_status,
diff --git a/validation/output/check_run.py b/validation/output/check_run.py
new file mode 100644
index 00000000..be31d5b3
--- /dev/null
+++ b/validation/output/check_run.py
@@ -0,0 +1,174 @@
+"""Check Run payload generation for the GitHub Checks API.
+
+Produces a structured payload that the workflow step uses to create a
+Check Run with inline annotations.  Unlike workflow commands (which
+have a ~10-per-level display cap), Check Run annotations are all
+visible in the PR Files tab.
+
+The workflow handles batching (50 annotations per API call) and token
+resolution.  This module produces the full payload without truncation.
+
+Design doc references:
+  - Section 9.2: check annotations (Checks API migration)
+"""
+
+from __future__ import annotations
+
+import logging
+from dataclasses import asdict, dataclass, field
+from typing import List
+
+from validation.context import ValidationContext
+from validation.postfilter.engine import PostFilterResult
+
+from .formatting import count_findings, format_rule_label, sort_findings_by_priority
+
+logger = logging.getLogger(__name__)
+
+# ---------------------------------------------------------------------------
+# Constants
+# ---------------------------------------------------------------------------
+
+# Checks API annotation levels (different from workflow command levels)
+_LEVEL_TO_ANNOTATION = {
+    "error": "failure",
+    "warn": "warning",
+    "hint": "notice",
+}
+
+_RESULT_TO_CONCLUSION = {
+    "pass": "success",
+    "fail": "failure",
+    "error": "failure",
+}
+
+
+# ---------------------------------------------------------------------------
+# Result type
+# ---------------------------------------------------------------------------
+
+
+@dataclass(frozen=True)
+class CheckRunPayload:
+    """Structured payload for creating a GitHub Check Run.
+
+    Attributes:
+        conclusion: Check Run conclusion (success, failure, neutral).
+        title: Short summary for the Check Run header.
+        summary: Brief markdown for the Check Run output.
+        annotations: All findings as Checks API annotation dicts.
+    """
+
+    conclusion: str
+    title: str
+    summary: str
+    annotations: List[dict] = field(default_factory=list)
+
+    def to_dict(self) -> dict:
+        """Serialize to a plain dict for JSON output."""
+        return asdict(self)
+
+
+# ---------------------------------------------------------------------------
+# Internal helpers
+# ---------------------------------------------------------------------------
+
+
+def _build_annotation(finding: dict) -> dict:
+    """Convert a single finding to a Checks API annotation dict."""
+    level = finding.get("level", "hint")
+    annotation_level = _LEVEL_TO_ANNOTATION.get(level, "notice")
+
+    path = finding.get("path", "")
+    line = finding.get("line", 1)
+    title = format_rule_label(finding)
+
+    message = finding.get("message", "")
+    hint = finding.get("hint")
+    if hint:
+        message = f"{message}\n\nHint: {hint}"
+
+    return {
+        "path": path,
+        "start_line": line,
+        "end_line": line,
+        "annotation_level": annotation_level,
+        "title": title,
+        "message": message,
+    }
+
+
+def _resolve_conclusion(
+    result: str,
+    profile: str,
+    has_findings: bool,
+) -> str:
+    """Map validation result to Checks API conclusion.
+
+    - pass → success
+    - fail → failure
+    - error → failure
+    - advisory override: pass + advisory profile + findings → neutral
+    """
+    if result == "pass" and profile == "advisory" and has_findings:
+        return "neutral"
+    return _RESULT_TO_CONCLUSION.get(result, "failure")
+
+
+# ---------------------------------------------------------------------------
+# Public API
+# ---------------------------------------------------------------------------
+
+
+def generate_check_run_payload(
+    post_filter_result: PostFilterResult,
+    context: ValidationContext,
+) -> CheckRunPayload:
+    """Generate the full Check Run payload from validation results.
+
+    All findings are included as annotations (no truncation).
+    The workflow step handles batching (50 per API call).
+
+    Args:
+        post_filter_result: Output of the post-filter engine.
+        context: Unified validation context.
+
+    Returns:
+        :class:`CheckRunPayload` ready for JSON serialization.
+    """
+    findings = post_filter_result.findings
+    counts = count_findings(findings)
+
+    conclusion = _resolve_conclusion(
+        post_filter_result.result,
+        context.profile,
+        bool(findings),
+    )
+
+    title = (
+        f"{counts.errors} error{'s' if counts.errors != 1 else ''}, "
+        f"{counts.warnings} warning{'s' if counts.warnings != 1 else ''}, "
+        f"{counts.hints} hint{'s' if counts.hints != 1 else ''}"
+    )
+
+    summary = (
+        f"Profile: {context.profile} | "
+        f"Branch: {context.branch_type} | "
+        f"Trigger: {context.trigger_type}"
+    )
+
+    sorted_findings = sort_findings_by_priority(findings)
+    annotations = [_build_annotation(f) for f in sorted_findings]
+
+    logger.info(
+        "Check Run payload: conclusion=%s, %d annotations",
+        conclusion,
+        len(annotations),
+    )
+
+    return CheckRunPayload(
+        conclusion=conclusion,
+        title=title,
+        summary=summary,
+        annotations=annotations,
+    )
diff --git a/validation/output/formatting.py b/validation/output/formatting.py
index 30b7a6a7..0f5de51b 100644
--- a/validation/output/formatting.py
+++ b/validation/output/formatting.py
@@ -88,6 +88,20 @@ def count_findings_by_api(
     return {api: count_findings(fs) for api, fs in groups.items()}
 
 
+def count_findings_by_engine(
+    findings: List[dict],
+) -> Dict[str, FindingCounts]:
+    """Group findings by ``engine`` and count each group.
+
+    Keys are returned in insertion order (first-seen engine).
+    """
+    groups: Dict[str, List[dict]] = {}
+    for f in findings:
+        key = f.get("engine", "unknown")
+        groups.setdefault(key, []).append(f)
+    return {engine: count_findings(fs) for engine, fs in groups.items()}
+
+
 # ---------------------------------------------------------------------------
 # Sorting
 # ---------------------------------------------------------------------------
diff --git a/validation/output/workflow_summary.py b/validation/output/workflow_summary.py
index f23eab32..29d11a84 100644
--- a/validation/output/workflow_summary.py
+++ b/validation/output/workflow_summary.py
@@ -22,7 +22,7 @@
 from .formatting import (
     REPO_LEVEL_LABEL,
     count_findings,
-    count_findings_by_api,
+    count_findings_by_engine,
     format_rule_label,
     sort_findings_by_priority,
 )
@@ -103,21 +103,45 @@ def _render_header(
     )
 
 
-def _render_api_table(findings: List[dict]) -> str:
-    """Render the per-API summary table."""
-    by_api = count_findings_by_api(findings)
-    if not by_api:
+def _render_engine_summary_table(
+    findings: List[dict],
+    engine_statuses: Optional[Dict[str, str]],
+) -> str:
+    """Render a per-engine summary table with post-filter counts.
+
+    For engines that produced findings: show error/warning/hint counts.
+    For engines that ran clean: show 0/0/0.
+    For engines that were skipped or errored: show status text instead.
+    """
+    if not engine_statuses:
         return ""
 
+    by_engine = count_findings_by_engine(findings)
+
     lines = [
         "\n### Summary\n",
-        "| API / Test | Errors | Warnings | Hints |",
-        "|------------|--------|----------|-------|",
+        "| Engine | Errors | Warnings | Hints | Status |",
+        "|--------|--------|----------|-------|--------|",
     ]
-    for api_name, counts in by_api.items():
-        lines.append(
-            f"| {api_name} | {counts.errors} | {counts.warnings} | {counts.hints} |"
-        )
+
+    for engine, status in engine_statuses.items():
+        counts = by_engine.get(engine)
+        # Engine ran and produced findings, or ran clean with "N finding(s)"
+        ran = status.endswith("finding(s)") or status.startswith("0 ")
+        if ran:
+            c = counts if counts else None
+            errors = c.errors if c else 0
+            warnings = c.warnings if c else 0
+            hints = c.hints if c else 0
+            lines.append(
+                f"| {engine} | {errors} | {warnings} | {hints} | — |"
+            )
+        else:
+            # Skipped, error, or special status
+            lines.append(
+                f"| {engine} | — | — | — | {status} |"
+            )
+
     lines.append("")
     return "\n".join(lines)
 
@@ -149,24 +173,6 @@ def _render_findings_table(
     return "\n".join(lines)
 
 
-def _render_engine_table(
-    engine_statuses: Optional[Dict[str, str]],
-) -> str:
-    """Render the engine status table."""
-    if not engine_statuses:
-        return ""
-
-    lines = [
-        "\n### Engine Status\n",
-        "| Engine | Status |",
-        "|--------|--------|",
-    ]
-    for engine, status in engine_statuses.items():
-        lines.append(f"| {engine} | {status} |")
-    lines.append("")
-    return "\n".join(lines)
-
-
 def _render_footer(
     context: ValidationContext,
     commit_sha: str,
@@ -238,12 +244,11 @@ def generate_workflow_summary(
 
     # Fixed sections (always rendered)
     header = _render_header(post_filter_result.result, context, findings)
-    api_table = _render_api_table(findings)
-    engine_table = _render_engine_table(engine_statuses)
+    engine_summary = _render_engine_summary_table(findings, engine_statuses)
     footer = _render_footer(context, commit_sha)
 
     fixed_size = sum(
-        _byte_size(s) for s in (header, api_table, engine_table, footer)
+        _byte_size(s) for s in (header, engine_summary, footer)
     )
 
     # Budget for findings sections
@@ -289,11 +294,10 @@ def generate_workflow_summary(
     # Assemble
     markdown = (
         header
-        + api_table
+        + engine_summary
         + errors_section
         + warnings_section
         + hints_section
-        + engine_table
         + footer
     )
 
diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
index 3da52388..e68b7706 100644
--- a/validation/rules/python-rules.yaml
+++ b/validation/rules/python-rules.yaml
@@ -56,15 +56,20 @@
           target_release_type: [pre-release-rc, public-release]
         level: warn
 
-# P-007: check-test-file-version — SUPPRESSED pending fix
-# Current check validates filename version suffix, but the version belongs
-# inside the .feature file (first line), not the filename. Needs rewrite
-# to parse file content (same approach as P-003 for spec info.version).
+# P-007: check-test-file-version
+# Parses the Feature line of .feature files to extract the version
+# (e.g. "Feature: CAMARA API, vwip - Operation foo") and compares
+# against the expected version from info.version.
 - id: P-007
   engine: python
   engine_rule: check-test-file-version
   conditional_level:
-    default: muted
+    default: hint
+    overrides:
+      - condition:
+          target_api_maturity: [stable]
+          target_release_type: [pre-release-rc, public-release]
+        level: warn
 
 # P-008: check-test-directory-exists
 - id: P-008
@@ -108,3 +113,10 @@
     is_release_review_pr: true
   conditional_level:
     default: error
+
+# P-013: check-readme-placeholder-removal
+- id: P-013
+  engine: python
+  engine_rule: check-readme-placeholder-removal
+  conditional_level:
+    default: warn
diff --git a/validation/tests/test_output_check_run.py b/validation/tests/test_output_check_run.py
new file mode 100644
index 00000000..cb597203
--- /dev/null
+++ b/validation/tests/test_output_check_run.py
@@ -0,0 +1,279 @@
+"""Unit tests for validation.output.check_run."""
+
+from __future__ import annotations
+
+from validation.context import ValidationContext
+from validation.output.check_run import (
+    CheckRunPayload,
+    generate_check_run_payload,
+)
+from validation.postfilter.engine import PostFilterResult
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(
+    profile: str = "standard",
+    branch_type: str = "main",
+    trigger_type: str = "pr",
+) -> ValidationContext:
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type=branch_type,
+        trigger_type=trigger_type,
+        profile=profile,
+        stage="enabled",
+        target_release_type=None,
+        commonalities_release=None,
+        icm_release=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=(),
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+def _make_finding(
+    level: str = "warn",
+    path: str = "code/API_definitions/quality-on-demand.yaml",
+    line: int = 10,
+    message: str = "Something is wrong",
+    rule_id: str | None = None,
+    engine_rule: str = "some-rule",
+    hint: str | None = None,
+) -> dict:
+    f: dict = {
+        "engine": "spectral",
+        "engine_rule": engine_rule,
+        "level": level,
+        "message": message,
+        "path": path,
+        "line": line,
+        "api_name": "quality-on-demand",
+        "blocks": False,
+    }
+    if rule_id is not None:
+        f["rule_id"] = rule_id
+    if hint is not None:
+        f["hint"] = hint
+    return f
+
+
+def _make_result(
+    findings: list[dict] | None = None,
+    result: str = "pass",
+) -> PostFilterResult:
+    return PostFilterResult(
+        findings=findings or [],
+        result=result,
+        summary="test summary",
+    )
+
+
+# ---------------------------------------------------------------------------
+# Conclusion mapping
+# ---------------------------------------------------------------------------
+
+
+class TestConclusion:
+    def test_pass_result(self):
+        payload = generate_check_run_payload(_make_result(), _make_context())
+        assert payload.conclusion == "success"
+
+    def test_fail_result(self):
+        findings = [_make_finding(level="error")]
+        payload = generate_check_run_payload(
+            _make_result(findings, result="fail"),
+            _make_context(),
+        )
+        assert payload.conclusion == "failure"
+
+    def test_error_result(self):
+        payload = generate_check_run_payload(
+            _make_result(result="error"),
+            _make_context(),
+        )
+        assert payload.conclusion == "failure"
+
+    def test_advisory_with_findings(self):
+        """Advisory profile + pass + findings → neutral."""
+        findings = [_make_finding()]
+        payload = generate_check_run_payload(
+            _make_result(findings, result="pass"),
+            _make_context(profile="advisory"),
+        )
+        assert payload.conclusion == "neutral"
+
+    def test_advisory_no_findings(self):
+        """Advisory profile + pass + no findings → success."""
+        payload = generate_check_run_payload(
+            _make_result(),
+            _make_context(profile="advisory"),
+        )
+        assert payload.conclusion == "success"
+
+
+# ---------------------------------------------------------------------------
+# Annotation level mapping
+# ---------------------------------------------------------------------------
+
+
+class TestAnnotationLevel:
+    def test_error_to_failure(self):
+        findings = [_make_finding(level="error")]
+        payload = generate_check_run_payload(
+            _make_result(findings), _make_context(),
+        )
+        assert payload.annotations[0]["annotation_level"] == "failure"
+
+    def test_warn_to_warning(self):
+        findings = [_make_finding(level="warn")]
+        payload = generate_check_run_payload(
+            _make_result(findings), _make_context(),
+        )
+        assert payload.annotations[0]["annotation_level"] == "warning"
+
+    def test_hint_to_notice(self):
+        findings = [_make_finding(level="hint")]
+        payload = generate_check_run_payload(
+            _make_result(findings), _make_context(),
+        )
+        assert payload.annotations[0]["annotation_level"] == "notice"
+
+
+# ---------------------------------------------------------------------------
+# Annotation content
+# ---------------------------------------------------------------------------
+
+
+class TestAnnotationContent:
+    def test_path_and_line(self):
+        findings = [_make_finding(path="spec.yaml", line=42)]
+        payload = generate_check_run_payload(
+            _make_result(findings), _make_context(),
+        )
+        ann = payload.annotations[0]
+        assert ann["path"] == "spec.yaml"
+        assert ann["start_line"] == 42
+        assert ann["end_line"] == 42
+
+    def test_title_uses_rule_id(self):
+        findings = [_make_finding(rule_id="S-042")]
+        payload = generate_check_run_payload(
+            _make_result(findings), _make_context(),
+        )
+        assert payload.annotations[0]["title"] == "S-042"
+
+    def test_title_falls_back_to_engine_rule(self):
+        findings = [_make_finding(engine_rule="my-check")]
+        payload = generate_check_run_payload(
+            _make_result(findings), _make_context(),
+        )
+        assert payload.annotations[0]["title"] == "my-check"
+
+    def test_message_content(self):
+        findings = [_make_finding(message="Bad pattern")]
+        payload = generate_check_run_payload(
+            _make_result(findings), _make_context(),
+        )
+        assert "Bad pattern" in payload.annotations[0]["message"]
+
+    def test_hint_appended_to_message(self):
+        findings = [_make_finding(message="Bad", hint="Use kebab-case")]
+        payload = generate_check_run_payload(
+            _make_result(findings), _make_context(),
+        )
+        msg = payload.annotations[0]["message"]
+        assert "Bad" in msg
+        assert "Hint: Use kebab-case" in msg
+
+
+# ---------------------------------------------------------------------------
+# Title and summary
+# ---------------------------------------------------------------------------
+
+
+class TestTitleAndSummary:
+    def test_title_counts(self):
+        findings = [
+            _make_finding(level="error"),
+            _make_finding(level="warn"),
+            _make_finding(level="warn"),
+            _make_finding(level="hint"),
+        ]
+        payload = generate_check_run_payload(
+            _make_result(findings), _make_context(),
+        )
+        assert "1 error" in payload.title
+        assert "2 warnings" in payload.title
+        assert "1 hint" in payload.title
+
+    def test_title_singular(self):
+        findings = [_make_finding(level="error")]
+        payload = generate_check_run_payload(
+            _make_result(findings), _make_context(),
+        )
+        assert "1 error," in payload.title
+        assert "0 warnings" in payload.title
+        assert "0 hints" in payload.title
+
+    def test_empty_findings_title(self):
+        payload = generate_check_run_payload(_make_result(), _make_context())
+        assert "0 errors" in payload.title
+
+    def test_summary_contains_profile(self):
+        payload = generate_check_run_payload(
+            _make_result(),
+            _make_context(profile="strict"),
+        )
+        assert "strict" in payload.summary
+
+
+# ---------------------------------------------------------------------------
+# All findings included (no truncation)
+# ---------------------------------------------------------------------------
+
+
+class TestNoTruncation:
+    def test_all_findings_included(self):
+        findings = [_make_finding(line=i) for i in range(100)]
+        payload = generate_check_run_payload(
+            _make_result(findings), _make_context(),
+        )
+        assert len(payload.annotations) == 100
+
+    def test_empty_findings(self):
+        payload = generate_check_run_payload(_make_result(), _make_context())
+        assert payload.annotations == []
+        assert payload.conclusion == "success"
+
+
+# ---------------------------------------------------------------------------
+# Serialization
+# ---------------------------------------------------------------------------
+
+
+class TestSerialization:
+    def test_to_dict(self):
+        payload = generate_check_run_payload(_make_result(), _make_context())
+        d = payload.to_dict()
+        assert isinstance(d, dict)
+        assert "conclusion" in d
+        assert "title" in d
+        assert "summary" in d
+        assert "annotations" in d
+
+    def test_frozen(self):
+        payload = CheckRunPayload(
+            conclusion="success", title="t", summary="s", annotations=[],
+        )
+        try:
+            payload.conclusion = "failure"  # type: ignore[misc]
+            assert False, "Should not mutate frozen dataclass"
+        except AttributeError:
+            pass
diff --git a/validation/tests/test_output_workflow_summary.py b/validation/tests/test_output_workflow_summary.py
index 7b432682..baddac5d 100644
--- a/validation/tests/test_output_workflow_summary.py
+++ b/validation/tests/test_output_workflow_summary.py
@@ -116,30 +116,83 @@ def test_metadata_in_header(self):
 
 
 # ---------------------------------------------------------------------------
-# API summary table
+# Engine summary table
 # ---------------------------------------------------------------------------
 
 
-class TestApiTable:
-    def test_multi_api(self):
+class TestEngineSummaryTable:
+    def test_engine_with_findings(self):
         findings = [
-            _make_finding(level="error", api_name="api-a"),
-            _make_finding(level="warn", api_name="api-a"),
-            _make_finding(level="hint", api_name="api-b"),
+            _make_finding(level="error"),
+            _make_finding(level="warn"),
         ]
-        ctx = _make_context()
-        sr = generate_workflow_summary(_make_result(findings), ctx)
-        assert "| api-a |" in sr.markdown
-        assert "| api-b |" in sr.markdown
+        statuses = {"spectral": "2 finding(s)"}
+        sr = generate_workflow_summary(
+            _make_result(findings), _make_context(), engine_statuses=statuses,
+        )
+        assert "### Summary" in sr.markdown
+        assert "| spectral | 1 | 1 | 0 | — |" in sr.markdown
+
+    def test_engine_ran_clean(self):
+        statuses = {"yamllint": "0 finding(s)"}
+        sr = generate_workflow_summary(
+            _make_result(), _make_context(), engine_statuses=statuses,
+        )
+        assert "| yamllint | 0 | 0 | 0 | — |" in sr.markdown
+
+    def test_engine_skipped(self):
+        statuses = {"gherkin": "skipped (no test files)"}
+        sr = generate_workflow_summary(
+            _make_result(), _make_context(), engine_statuses=statuses,
+        )
+        assert "| gherkin | — | — | — | skipped (no test files) |" in sr.markdown
 
-    def test_empty_findings_no_table(self):
+    def test_engine_errored(self):
+        statuses = {"spectral": "error: timeout"}
+        sr = generate_workflow_summary(
+            _make_result(), _make_context(), engine_statuses=statuses,
+        )
+        assert "| spectral | — | — | — | error: timeout |" in sr.markdown
+
+    def test_mixed_engines(self):
+        findings = [
+            _make_finding(level="error"),  # engine=spectral
+            _make_finding(level="warn"),   # engine=spectral
+        ]
+        statuses = {
+            "yamllint": "0 finding(s)",
+            "spectral": "2 finding(s)",
+            "python": "0 finding(s)",
+            "gherkin": "skipped (no test files)",
+            "bundling": "separate workflow step",
+        }
+        sr = generate_workflow_summary(
+            _make_result(findings), _make_context(), engine_statuses=statuses,
+        )
+        assert "| yamllint | 0 | 0 | 0 | — |" in sr.markdown
+        assert "| spectral | 1 | 1 | 0 | — |" in sr.markdown
+        assert "| python | 0 | 0 | 0 | — |" in sr.markdown
+        assert "| gherkin | — | — | — | skipped (no test files) |" in sr.markdown
+        assert "| bundling | — | — | — | separate workflow step |" in sr.markdown
+
+    def test_no_statuses_no_table(self):
         sr = generate_workflow_summary(_make_result(), _make_context())
         assert "### Summary" not in sr.markdown
 
-    def test_repo_level_findings(self):
-        findings = [_make_finding(api_name=None, level="warn")]
-        sr = generate_workflow_summary(_make_result(findings), _make_context())
-        assert "(repository)" in sr.markdown
+    def test_all_findings_filtered_by_postfilter(self):
+        """Engine ran and reported findings but all were filtered → 0/0/0."""
+        statuses = {"python": "3 finding(s)"}
+        sr = generate_workflow_summary(
+            _make_result(), _make_context(), engine_statuses=statuses,
+        )
+        assert "| python | 0 | 0 | 0 | — |" in sr.markdown
+
+    def test_table_header(self):
+        statuses = {"spectral": "0 finding(s)"}
+        sr = generate_workflow_summary(
+            _make_result(), _make_context(), engine_statuses=statuses,
+        )
+        assert "| Engine | Errors | Warnings | Hints | Status |" in sr.markdown
 
 
 # ---------------------------------------------------------------------------
@@ -195,27 +248,6 @@ def test_absent_levels_not_rendered(self):
         assert "### Hints" not in sr.markdown
 
 
-# ---------------------------------------------------------------------------
-# Engine status table
-# ---------------------------------------------------------------------------
-
-
-class TestEngineTable:
-    def test_with_statuses(self):
-        sr = generate_workflow_summary(
-            _make_result(),
-            _make_context(),
-            engine_statuses={"Spectral": "completed", "yamllint": "error"},
-        )
-        assert "### Engine Status" in sr.markdown
-        assert "| Spectral | completed |" in sr.markdown
-        assert "| yamllint | error |" in sr.markdown
-
-    def test_none_statuses(self):
-        sr = generate_workflow_summary(_make_result(), _make_context())
-        assert "### Engine Status" not in sr.markdown
-
-
 # ---------------------------------------------------------------------------
 # Footer
 # ---------------------------------------------------------------------------
diff --git a/validation/tests/test_python_checks_readme.py b/validation/tests/test_python_checks_readme.py
new file mode 100644
index 00000000..0aeb66c0
--- /dev/null
+++ b/validation/tests/test_python_checks_readme.py
@@ -0,0 +1,116 @@
+"""Unit tests for validation.engines.python_checks.readme_checks."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from validation.context import ValidationContext
+from validation.engines.python_checks.readme_checks import (
+    check_readme_placeholder_removal,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context() -> ValidationContext:
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type="main",
+        trigger_type="dispatch",
+        profile="advisory",
+        stage="enabled",
+        target_release_type=None,
+        commonalities_release=None,
+        icm_release=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=(),
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+def _make_api_defs(tmp_path: Path) -> Path:
+    api_dir = tmp_path / "code" / "API_definitions"
+    api_dir.mkdir(parents=True)
+    return api_dir
+
+
+# ---------------------------------------------------------------------------
+# Tests
+# ---------------------------------------------------------------------------
+
+
+class TestCheckReadmePlaceholderRemoval:
+    def test_placeholder_with_specs(self, tmp_path: Path):
+        """Placeholder README + spec files → finding."""
+        api_dir = _make_api_defs(tmp_path)
+        (api_dir / "README.MD").write_text(
+            "Here you can add your definition file(s). "
+            "Delete this README.MD file after the first file is added.\n"
+        )
+        (api_dir / "quality-on-demand.yaml").touch()
+
+        findings = check_readme_placeholder_removal(tmp_path, _make_context())
+        assert len(findings) == 1
+        assert findings[0]["level"] == "warn"
+        assert findings[0]["engine_rule"] == "check-readme-placeholder-removal"
+        assert "README.MD" in findings[0]["path"]
+
+    def test_placeholder_variant_with_specs(self, tmp_path: Path):
+        """Second placeholder variant also detected."""
+        api_dir = _make_api_defs(tmp_path)
+        (api_dir / "README.MD").write_text(
+            "Here you can add your definitions and delete this README.MD file\n"
+        )
+        (api_dir / "my-api.yaml").touch()
+
+        findings = check_readme_placeholder_removal(tmp_path, _make_context())
+        assert len(findings) == 1
+
+    def test_placeholder_no_specs(self, tmp_path: Path):
+        """Placeholder README but no spec files → no finding (expected state)."""
+        api_dir = _make_api_defs(tmp_path)
+        (api_dir / "README.MD").write_text(
+            "Here you can add your definition file(s). "
+            "Delete this README.MD file after the first file is added.\n"
+        )
+
+        assert check_readme_placeholder_removal(tmp_path, _make_context()) == []
+
+    def test_no_readme(self, tmp_path: Path):
+        """No README at all → no finding."""
+        api_dir = _make_api_defs(tmp_path)
+        (api_dir / "quality-on-demand.yaml").touch()
+
+        assert check_readme_placeholder_removal(tmp_path, _make_context()) == []
+
+    def test_real_readme_content(self, tmp_path: Path):
+        """README with real content (not placeholder) → no finding."""
+        api_dir = _make_api_defs(tmp_path)
+        (api_dir / "README.md").write_text(
+            "# API Definitions\n\nThis directory contains the OpenAPI specs.\n"
+        )
+        (api_dir / "quality-on-demand.yaml").touch()
+
+        assert check_readme_placeholder_removal(tmp_path, _make_context()) == []
+
+    def test_case_insensitive_filename(self, tmp_path: Path):
+        """Lowercase readme.md is also detected."""
+        api_dir = _make_api_defs(tmp_path)
+        (api_dir / "readme.md").write_text(
+            "Delete this README.MD file after adding specs.\n"
+        )
+        (api_dir / "my-api.yml").touch()
+
+        findings = check_readme_placeholder_removal(tmp_path, _make_context())
+        assert len(findings) == 1
+        assert "readme.md" in findings[0]["path"]
+
+    def test_no_api_defs_directory(self, tmp_path: Path):
+        """No API_definitions directory → no finding."""
+        assert check_readme_placeholder_removal(tmp_path, _make_context()) == []
diff --git a/validation/tests/test_python_checks_test.py b/validation/tests/test_python_checks_test.py
index 23fece81..f3f30523 100644
--- a/validation/tests/test_python_checks_test.py
+++ b/validation/tests/test_python_checks_test.py
@@ -128,46 +128,77 @@ def test_non_feature_file_ignored(self, tmp_path: Path):
 
 
 class TestCheckTestFileVersion:
+    """Tests for check_test_file_version — parses Feature line content."""
+
+    def _write_feature(self, path: Path, feature_line: str) -> None:
+        path.write_text(f"{feature_line}\n  Background: setup\n")
+
     def test_matching_version(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
-        (test_dir / "qod.v1.feature").touch()
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: CAMARA QoD API, v1 - Operation createSession",
+        )
         ctx = _make_context("qod", version="1.0.0")
         assert check_test_file_version(tmp_path, ctx) == []
 
     def test_matching_initial_version(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
-        (test_dir / "qod.v0.3.feature").touch()
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: CAMARA QoD API, v0.3 - Operation createSession",
+        )
         ctx = _make_context("qod", version="0.3.0")
         assert check_test_file_version(tmp_path, ctx) == []
 
     def test_matching_wip_version(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
-        (test_dir / "qod.vwip.feature").touch()
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: CAMARA QoD API, vwip - Operation createSession",
+        )
         ctx = _make_context("qod", version="wip")
         assert check_test_file_version(tmp_path, ctx) == []
 
     def test_matching_alpha_version(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
-        (test_dir / "qod.v0.2alpha2.feature").touch()
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: CAMARA QoD API, v0.2alpha2 - Operation createSession",
+        )
         ctx = _make_context("qod", version="0.2.0-alpha.2")
         assert check_test_file_version(tmp_path, ctx) == []
 
     def test_mismatched_version(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
-        (test_dir / "qod.v2.feature").touch()
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: CAMARA QoD API, v2 - Operation createSession",
+        )
         ctx = _make_context("qod", version="1.0.0")
         findings = check_test_file_version(tmp_path, ctx)
         assert len(findings) == 1
         assert "v2" in findings[0]["message"]
         assert "v1" in findings[0]["message"]
 
-    def test_no_version_suffix(self, tmp_path: Path):
+    def test_no_version_in_feature_line(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
-        (test_dir / "qod.feature").touch()
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: QoD API tests",
+        )
         ctx = _make_context("qod", version="1.0.0")
         findings = check_test_file_version(tmp_path, ctx)
         assert len(findings) == 1
-        assert "no version suffix" in findings[0]["message"]
+        assert "no version" in findings[0]["message"]
+
+    def test_empty_file(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        (test_dir / "qod.feature").write_text("")
+        ctx = _make_context("qod", version="1.0.0")
+        findings = check_test_file_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "no version" in findings[0]["message"]
 
     def test_no_test_dir(self, tmp_path: Path):
         ctx = _make_context("qod")
@@ -176,12 +207,44 @@ def test_no_test_dir(self, tmp_path: Path):
     def test_no_matching_files(self, tmp_path: Path):
         """No test files for this API => skip (other check reports it)."""
         test_dir = _make_test_dir(tmp_path)
-        (test_dir / "other-api.v1.feature").touch()
+        self._write_feature(
+            test_dir / "other-api.feature",
+            "Feature: CAMARA Other API, v1 - Operation foo",
+        )
         ctx = _make_context("qod")
         assert check_test_file_version(tmp_path, ctx) == []
 
     def test_operation_specific_file(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
-        (test_dir / "qod-createSession.v1.feature").touch()
+        self._write_feature(
+            test_dir / "qod-createSession.feature",
+            "Feature: CAMARA QoD API, v1 - Operation createSession",
+        )
+        ctx = _make_context("qod", version="1.0.0")
+        assert check_test_file_version(tmp_path, ctx) == []
+
+    def test_feature_line_without_operation(self, tmp_path: Path):
+        """Feature line with version but no operation suffix."""
+        test_dir = _make_test_dir(tmp_path)
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: CAMARA QoD API, v1",
+        )
         ctx = _make_context("qod", version="1.0.0")
         assert check_test_file_version(tmp_path, ctx) == []
+
+    def test_multiple_files_mixed(self, tmp_path: Path):
+        """Two files: one matching, one mismatched."""
+        test_dir = _make_test_dir(tmp_path)
+        self._write_feature(
+            test_dir / "qod-createSession.feature",
+            "Feature: CAMARA QoD API, v1 - Operation createSession",
+        )
+        self._write_feature(
+            test_dir / "qod-deleteSession.feature",
+            "Feature: CAMARA QoD API, v2 - Operation deleteSession",
+        )
+        ctx = _make_context("qod", version="1.0.0")
+        findings = check_test_file_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "deleteSession" in findings[0]["path"]
diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py
index 9e76b174..1f113486 100644
--- a/validation/tests/test_rule_metadata_integrity.py
+++ b/validation/tests/test_rule_metadata_integrity.py
@@ -75,7 +75,7 @@ def test_expected_rule_counts(self, all_rules):
         counts = {}
         for r in all_rules:
             counts[r.engine] = counts.get(r.engine, 0) + 1
-        assert counts["python"] == 12
+        assert counts["python"] == 13
         assert counts["spectral"] == 46
         assert counts["gherkin"] == 25
         assert counts["yamllint"] == 13

From f72e274d28965a730a28eb7cf19b78caac7dbbfe Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Tue, 31 Mar 2026 15:46:00 +0200
Subject: [PATCH 034/157] fix(validation): P-007 use branch-based version
 expectation

On main/maintenance branches, Feature line must always carry vwip.
On release branches, must match target_api_version from
release-metadata.yaml. Feature branches skipped.

Previously P-007 used target_api_version on all branches, causing
false positives on main (expected v1 instead of vwip).
---
 .../engines/python_checks/test_checks.py      |  27 +++--
 validation/tests/test_python_checks_test.py   | 106 ++++++++++++++----
 2 files changed, 102 insertions(+), 31 deletions(-)

diff --git a/validation/engines/python_checks/test_checks.py b/validation/engines/python_checks/test_checks.py
index de9ed392..d48ba628 100644
--- a/validation/engines/python_checks/test_checks.py
+++ b/validation/engines/python_checks/test_checks.py
@@ -12,7 +12,7 @@
 
 from validation.context import ValidationContext
 
-from ._types import make_finding
+from ._types import load_yaml_safe, make_finding
 from .version_checks import build_version_segment
 
 _TEST_DIR = "code/Test_definitions"
@@ -136,11 +136,16 @@ def _extract_feature_version(file_path: Path) -> Optional[str]:
 def check_test_file_version(
     repo_path: Path, context: ValidationContext
 ) -> List[dict]:
-    """Validate that the version in test Feature lines matches the API version.
+    """Validate that the version in test Feature lines matches the branch.
 
-    Per-API check.  Reads the ``Feature:`` line of each ``.feature`` file
-    and extracts the version segment (e.g. ``vwip``, ``v1``).  Compares
-    against the expected version derived from the API's ``info.version``.
+    Per-API check.  On main and maintenance the Feature line must carry
+    ``vwip``.  On release branches it must match the version derived
+    from ``target_api_version`` (sourced from release-metadata.yaml).
+    Feature branches are skipped.
+
+    This avoids cascading with P-003 (info.version format): on
+    main/maintenance the expected value is hardcoded, not derived from
+    the spec.
 
     Example Feature line::
 
@@ -152,8 +157,14 @@ def check_test_file_version(
     if not test_dir.is_dir():
         return []
 
-    expected_segment = build_version_segment(api.target_api_version)
-    if expected_segment is None:
+    if context.branch_type in ("main", "maintenance"):
+        expected_segment = "vwip"
+    elif context.branch_type == "release":
+        expected_segment = build_version_segment(api.target_api_version)
+        if expected_segment is None:
+            return []
+    else:
+        # Feature branches: no constraint.
         return []
 
     # Find all .feature files matching this API.
@@ -197,7 +208,7 @@ def check_test_file_version(
                         f"Test file '{test_file.name}' has version "
                         f"'{actual_version}' in Feature line but expected "
                         f"'{expected_segment}' "
-                        f"(from API version '{api.target_api_version}')"
+                        f"(on {context.branch_type} branch)"
                     ),
                     path=f"{_TEST_DIR}/{test_file.name}",
                     line=1,
diff --git a/validation/tests/test_python_checks_test.py b/validation/tests/test_python_checks_test.py
index f3f30523..b06c64c4 100644
--- a/validation/tests/test_python_checks_test.py
+++ b/validation/tests/test_python_checks_test.py
@@ -23,6 +23,7 @@ def _make_context(
     api_name: str = "quality-on-demand",
     version: str = "1.0.0",
     apis: tuple[ApiContext, ...] | None = None,
+    branch_type: str = "main",
 ) -> ValidationContext:
     if apis is None:
         api = ApiContext(
@@ -36,7 +37,7 @@ def _make_context(
         apis = (api,)
     return ValidationContext(
         repository="TestRepo",
-        branch_type="main",
+        branch_type=branch_type,
         trigger_type="dispatch",
         profile="advisory",
         stage="enabled",
@@ -128,66 +129,125 @@ def test_non_feature_file_ignored(self, tmp_path: Path):
 
 
 class TestCheckTestFileVersion:
-    """Tests for check_test_file_version — parses Feature line content."""
+    """Tests for check_test_file_version — parses Feature line content.
+
+    Branch rules:
+    - main/maintenance: Feature line must have vwip
+    - release: Feature line must match target_api_version
+    - feature: skipped (no constraint)
+    """
 
     def _write_feature(self, path: Path, feature_line: str) -> None:
         path.write_text(f"{feature_line}\n  Background: setup\n")
 
-    def test_matching_version(self, tmp_path: Path):
+    # --- main branch: always vwip ---
+
+    def test_main_vwip_passes(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: CAMARA QoD API, vwip - Operation createSession",
+        )
+        ctx = _make_context("qod", branch_type="main")
+        assert check_test_file_version(tmp_path, ctx) == []
+
+    def test_main_real_version_fails(self, tmp_path: Path):
+        """On main, v1 is wrong even when target_api_version is 1.0.0."""
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod.feature",
             "Feature: CAMARA QoD API, v1 - Operation createSession",
         )
-        ctx = _make_context("qod", version="1.0.0")
+        ctx = _make_context("qod", version="1.0.0", branch_type="main")
+        findings = check_test_file_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "v1" in findings[0]["message"]
+        assert "vwip" in findings[0]["message"]
+
+    # --- maintenance branch: always vwip ---
+
+    def test_maintenance_vwip_passes(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: CAMARA QoD API, vwip - Operation createSession",
+        )
+        ctx = _make_context("qod", branch_type="maintenance")
         assert check_test_file_version(tmp_path, ctx) == []
 
-    def test_matching_initial_version(self, tmp_path: Path):
+    # --- release branch: must match target_api_version ---
+
+    def test_release_matching_version(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod.feature",
-            "Feature: CAMARA QoD API, v0.3 - Operation createSession",
+            "Feature: CAMARA QoD API, v1 - Operation createSession",
         )
-        ctx = _make_context("qod", version="0.3.0")
+        ctx = _make_context("qod", version="1.0.0", branch_type="release")
         assert check_test_file_version(tmp_path, ctx) == []
 
-    def test_matching_wip_version(self, tmp_path: Path):
+    def test_release_matching_initial_version(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod.feature",
-            "Feature: CAMARA QoD API, vwip - Operation createSession",
+            "Feature: CAMARA QoD API, v0.3 - Operation createSession",
         )
-        ctx = _make_context("qod", version="wip")
+        ctx = _make_context("qod", version="0.3.0", branch_type="release")
         assert check_test_file_version(tmp_path, ctx) == []
 
-    def test_matching_alpha_version(self, tmp_path: Path):
+    def test_release_matching_alpha_version(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod.feature",
             "Feature: CAMARA QoD API, v0.2alpha2 - Operation createSession",
         )
-        ctx = _make_context("qod", version="0.2.0-alpha.2")
+        ctx = _make_context("qod", version="0.2.0-alpha.2", branch_type="release")
         assert check_test_file_version(tmp_path, ctx) == []
 
-    def test_mismatched_version(self, tmp_path: Path):
+    def test_release_mismatched_version(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod.feature",
             "Feature: CAMARA QoD API, v2 - Operation createSession",
         )
-        ctx = _make_context("qod", version="1.0.0")
+        ctx = _make_context("qod", version="1.0.0", branch_type="release")
         findings = check_test_file_version(tmp_path, ctx)
         assert len(findings) == 1
         assert "v2" in findings[0]["message"]
         assert "v1" in findings[0]["message"]
 
+    def test_release_vwip_fails(self, tmp_path: Path):
+        """On release, vwip is wrong — must be the release version."""
+        test_dir = _make_test_dir(tmp_path)
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: CAMARA QoD API, vwip - Operation createSession",
+        )
+        ctx = _make_context("qod", version="1.0.0", branch_type="release")
+        findings = check_test_file_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "vwip" in findings[0]["message"]
+
+    # --- feature branch: skipped ---
+
+    def test_feature_branch_skipped(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: CAMARA QoD API, v999 - Operation createSession",
+        )
+        ctx = _make_context("qod", branch_type="feature")
+        assert check_test_file_version(tmp_path, ctx) == []
+
+    # --- common edge cases ---
+
     def test_no_version_in_feature_line(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod.feature",
             "Feature: QoD API tests",
         )
-        ctx = _make_context("qod", version="1.0.0")
+        ctx = _make_context("qod", branch_type="main")
         findings = check_test_file_version(tmp_path, ctx)
         assert len(findings) == 1
         assert "no version" in findings[0]["message"]
@@ -195,7 +255,7 @@ def test_no_version_in_feature_line(self, tmp_path: Path):
     def test_empty_file(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
         (test_dir / "qod.feature").write_text("")
-        ctx = _make_context("qod", version="1.0.0")
+        ctx = _make_context("qod", branch_type="main")
         findings = check_test_file_version(tmp_path, ctx)
         assert len(findings) == 1
         assert "no version" in findings[0]["message"]
@@ -209,7 +269,7 @@ def test_no_matching_files(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "other-api.feature",
-            "Feature: CAMARA Other API, v1 - Operation foo",
+            "Feature: CAMARA Other API, vwip - Operation foo",
         )
         ctx = _make_context("qod")
         assert check_test_file_version(tmp_path, ctx) == []
@@ -218,9 +278,9 @@ def test_operation_specific_file(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod-createSession.feature",
-            "Feature: CAMARA QoD API, v1 - Operation createSession",
+            "Feature: CAMARA QoD API, vwip - Operation createSession",
         )
-        ctx = _make_context("qod", version="1.0.0")
+        ctx = _make_context("qod", branch_type="main")
         assert check_test_file_version(tmp_path, ctx) == []
 
     def test_feature_line_without_operation(self, tmp_path: Path):
@@ -228,13 +288,13 @@ def test_feature_line_without_operation(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod.feature",
-            "Feature: CAMARA QoD API, v1",
+            "Feature: CAMARA QoD API, vwip",
         )
-        ctx = _make_context("qod", version="1.0.0")
+        ctx = _make_context("qod", branch_type="main")
         assert check_test_file_version(tmp_path, ctx) == []
 
     def test_multiple_files_mixed(self, tmp_path: Path):
-        """Two files: one matching, one mismatched."""
+        """Two files on release: one matching, one mismatched."""
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod-createSession.feature",
@@ -244,7 +304,7 @@ def test_multiple_files_mixed(self, tmp_path: Path):
             test_dir / "qod-deleteSession.feature",
             "Feature: CAMARA QoD API, v2 - Operation deleteSession",
         )
-        ctx = _make_context("qod", version="1.0.0")
+        ctx = _make_context("qod", version="1.0.0", branch_type="release")
         findings = check_test_file_version(tmp_path, ctx)
         assert len(findings) == 1
         assert "deleteSession" in findings[0]["path"]

From 5a3bd2392aef89a209784ffca2897394496c717d Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Tue, 31 Mar 2026 15:49:14 +0200
Subject: [PATCH 035/157] fix(validation): remove bundling row from engine
 summary table

Bundling runs as a separate workflow step (Redocly CLI), not as a
validation engine. Showing it in the engine table was confusing.
If bundling fails, the workflow step itself blocks the snapshot.
---
 validation/orchestrator.py                       | 6 ------
 validation/tests/test_orchestrator.py            | 2 --
 validation/tests/test_output_workflow_summary.py | 2 --
 3 files changed, 10 deletions(-)

diff --git a/validation/orchestrator.py b/validation/orchestrator.py
index 2d15236a..cd2b0b0c 100644
--- a/validation/orchestrator.py
+++ b/validation/orchestrator.py
@@ -258,12 +258,6 @@ def run_engines(
             engine_statuses["gherkin"] = f"error: {exc}"
             logger.error("gherkin-lint failed: %s", exc)
 
-    # --- Bundling ---
-    # Spectral resolves external $ref natively, so bundling is not a
-    # validation prerequisite.  Bundled standalone specs are produced by
-    # a separate workflow step for artifact upload and handoff.
-    engine_statuses["bundling"] = "separate workflow step"
-
     return all_findings, engine_statuses
 
 
diff --git a/validation/tests/test_orchestrator.py b/validation/tests/test_orchestrator.py
index 8bb2845f..a228432b 100644
--- a/validation/tests/test_orchestrator.py
+++ b/validation/tests/test_orchestrator.py
@@ -278,7 +278,6 @@ def test_all_engines_called(
         assert "finding(s)" in statuses["spectral"]
         assert "finding(s)" in statuses["python"]
         assert "finding(s)" in statuses["gherkin"]
-        assert statuses["bundling"] == "separate workflow step"
 
     @patch("validation.orchestrator.run_gherkin_engine")
     @patch("validation.orchestrator.run_python_engine")
@@ -648,7 +647,6 @@ def test_engine_statuses_passed_to_summary(
             "spectral": "3 finding(s)",
             "python": "0 finding(s)",
             "gherkin": "skipped (no test files)",
-            "bundling": "separate workflow step",
         }
         mock_engines.return_value = ([], statuses)
         mock_postfilter.return_value = _make_post_filter_result()
diff --git a/validation/tests/test_output_workflow_summary.py b/validation/tests/test_output_workflow_summary.py
index baddac5d..848c25df 100644
--- a/validation/tests/test_output_workflow_summary.py
+++ b/validation/tests/test_output_workflow_summary.py
@@ -164,7 +164,6 @@ def test_mixed_engines(self):
             "spectral": "2 finding(s)",
             "python": "0 finding(s)",
             "gherkin": "skipped (no test files)",
-            "bundling": "separate workflow step",
         }
         sr = generate_workflow_summary(
             _make_result(findings), _make_context(), engine_statuses=statuses,
@@ -173,7 +172,6 @@ def test_mixed_engines(self):
         assert "| spectral | 1 | 1 | 0 | — |" in sr.markdown
         assert "| python | 0 | 0 | 0 | — |" in sr.markdown
         assert "| gherkin | — | — | — | skipped (no test files) |" in sr.markdown
-        assert "| bundling | — | — | — | separate workflow step |" in sr.markdown
 
     def test_no_statuses_no_table(self):
         sr = generate_workflow_summary(_make_result(), _make_context())

From 4bb30ec601e353ccd51ae64ead5e604ae97a9d45 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Tue, 31 Mar 2026 16:11:37 +0200
Subject: [PATCH 036/157] fix(validation): T1b transformer and P-007 use
 v{api_version} format

T1b snapshot transformer was producing "2.2.0-alpha.5" (no v prefix)
in Feature lines. Fixed to produce "v2.2.0-alpha.5".

P-007 on release branches now expects "v{api_version}" (full semver
with v prefix), matching the T1b output. Regex updated to capture
versions with dots and hyphens (e.g., v2.2.0-alpha.5).
---
 .../config/transformations.yaml               |  4 +--
 .../engines/python_checks/test_checks.py      | 28 ++++++++++---------
 validation/tests/test_python_checks_test.py   | 18 ++++++------
 3 files changed, 26 insertions(+), 24 deletions(-)

diff --git a/release_automation/config/transformations.yaml b/release_automation/config/transformations.yaml
index be08f69b..e5597acd 100644
--- a/release_automation/config/transformations.yaml
+++ b/release_automation/config/transformations.yaml
@@ -49,14 +49,14 @@ transformations:
     replacement: "/{url_version}"
 
   # T1b: Test definition API version in Feature line
-  # Replaces "vwip" with full api_version (e.g., "1.1.0") in Feature declarations
+  # Replaces "vwip" with "v{api_version}" (e.g., "v1.1.0") in Feature declarations
   # Handles variations: "CAMARA/Camara", "Operation/Operation:", comments before Feature
   - name: test_def_api_version
     description: Replace vwip in test definition Feature line
     type: regex
     file_pattern: "code/Test_definitions/*.feature"
     pattern: "(Feature: [^,]+, )vwip"
-    replacement: "\\g<1>{api_version}"
+    replacement: "\\g<1>v{api_version}"
 
   # T3: Commonalities reference in x-camara-commonalities
   - name: commonalities_ref
diff --git a/validation/engines/python_checks/test_checks.py b/validation/engines/python_checks/test_checks.py
index d48ba628..ab790c64 100644
--- a/validation/engines/python_checks/test_checks.py
+++ b/validation/engines/python_checks/test_checks.py
@@ -12,8 +12,7 @@
 
 from validation.context import ValidationContext
 
-from ._types import load_yaml_safe, make_finding
-from .version_checks import build_version_segment
+from ._types import make_finding
 
 _TEST_DIR = "code/Test_definitions"
 
@@ -108,28 +107,30 @@ def check_test_files_exist(
 
 
 # Regex to extract version from CAMARA Feature line.
-# Matches ", v{segment}" where segment runs until whitespace or " -".
+# Matches ", v{segment}" where segment runs until " - " or end of line.
+# On main: "vwip".  On release: "v2.2.0-alpha.5" (full semver with v).
 # Examples:
-#   "Feature: CAMARA Quality On Demand API, vwip - Operation deleteSession"  → "vwip"
-#   "Feature: CAMARA QoD API, v0.2alpha2"                                    → "v0.2alpha2"
-_FEATURE_VERSION_RE = re.compile(r",\s*v([^\s-]+)")
+#   "Feature: CAMARA QoD API, vwip - Operation deleteSession"       → "vwip"
+#   "Feature: CAMARA QoD API, v2.2.0-alpha.5 - Operation create"    → "v2.2.0-alpha.5"
+#   "Feature: CAMARA QoD API, v1.0.0"                               → "v1.0.0"
+_FEATURE_VERSION_RE = re.compile(r",\s*(v\S+?)(?:\s+-\s| *$)")
 
 
 def _extract_feature_version(file_path: Path) -> Optional[str]:
     """Read the first line and extract the version segment.
 
-    Returns the version segment (e.g. ``"vwip"``, ``"v1"``) or ``None``
-    if no version could be parsed from the Feature line.
+    Returns the version string (e.g. ``"vwip"``, ``"v2.2.0-alpha.5"``)
+    or ``None`` if no version could be parsed from the Feature line.
     """
     try:
         with open(file_path, encoding="utf-8") as fh:
-            first_line = fh.readline()
+            first_line = fh.readline().rstrip()
     except (OSError, UnicodeDecodeError):
         return None
 
     m = _FEATURE_VERSION_RE.search(first_line)
     if m:
-        return f"v{m.group(1)}"
+        return m.group(1)
     return None
 
 
@@ -160,9 +161,10 @@ def check_test_file_version(
     if context.branch_type in ("main", "maintenance"):
         expected_segment = "vwip"
     elif context.branch_type == "release":
-        expected_segment = build_version_segment(api.target_api_version)
-        if expected_segment is None:
-            return []
+        # Snapshot transformer T1b produces "v{api_version}" in Feature
+        # lines.  api.target_api_version holds the full calculated
+        # version (incl. pre-release extension) from release-metadata.
+        expected_segment = f"v{api.target_api_version}"
     else:
         # Feature branches: no constraint.
         return []
diff --git a/validation/tests/test_python_checks_test.py b/validation/tests/test_python_checks_test.py
index b06c64c4..f69a0d87 100644
--- a/validation/tests/test_python_checks_test.py
+++ b/validation/tests/test_python_checks_test.py
@@ -175,13 +175,13 @@ def test_maintenance_vwip_passes(self, tmp_path: Path):
         ctx = _make_context("qod", branch_type="maintenance")
         assert check_test_file_version(tmp_path, ctx) == []
 
-    # --- release branch: must match target_api_version ---
+    # --- release branch: must match v{api_version} from T1b transformer ---
 
     def test_release_matching_version(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod.feature",
-            "Feature: CAMARA QoD API, v1 - Operation createSession",
+            "Feature: CAMARA QoD API, v1.0.0 - Operation createSession",
         )
         ctx = _make_context("qod", version="1.0.0", branch_type="release")
         assert check_test_file_version(tmp_path, ctx) == []
@@ -190,7 +190,7 @@ def test_release_matching_initial_version(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod.feature",
-            "Feature: CAMARA QoD API, v0.3 - Operation createSession",
+            "Feature: CAMARA QoD API, v0.3.0 - Operation createSession",
         )
         ctx = _make_context("qod", version="0.3.0", branch_type="release")
         assert check_test_file_version(tmp_path, ctx) == []
@@ -199,7 +199,7 @@ def test_release_matching_alpha_version(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod.feature",
-            "Feature: CAMARA QoD API, v0.2alpha2 - Operation createSession",
+            "Feature: CAMARA QoD API, v0.2.0-alpha.2 - Operation createSession",
         )
         ctx = _make_context("qod", version="0.2.0-alpha.2", branch_type="release")
         assert check_test_file_version(tmp_path, ctx) == []
@@ -208,13 +208,13 @@ def test_release_mismatched_version(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod.feature",
-            "Feature: CAMARA QoD API, v2 - Operation createSession",
+            "Feature: CAMARA QoD API, v2.0.0 - Operation createSession",
         )
         ctx = _make_context("qod", version="1.0.0", branch_type="release")
         findings = check_test_file_version(tmp_path, ctx)
         assert len(findings) == 1
-        assert "v2" in findings[0]["message"]
-        assert "v1" in findings[0]["message"]
+        assert "v2.0.0" in findings[0]["message"]
+        assert "v1.0.0" in findings[0]["message"]
 
     def test_release_vwip_fails(self, tmp_path: Path):
         """On release, vwip is wrong — must be the release version."""
@@ -298,11 +298,11 @@ def test_multiple_files_mixed(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod-createSession.feature",
-            "Feature: CAMARA QoD API, v1 - Operation createSession",
+            "Feature: CAMARA QoD API, v1.0.0 - Operation createSession",
         )
         self._write_feature(
             test_dir / "qod-deleteSession.feature",
-            "Feature: CAMARA QoD API, v2 - Operation deleteSession",
+            "Feature: CAMARA QoD API, v2.0.0 - Operation deleteSession",
         )
         ctx = _make_context("qod", version="1.0.0", branch_type="release")
         findings = check_test_file_version(tmp_path, ctx)

From 6ec21969c80a1b78c33fec8b9bd0383ddfc3cb44 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 1 Apr 2026 07:47:02 +0200
Subject: [PATCH 037/157] chore(validation): remove private tracking references
 from code

---
 validation/context/context_builder.py    | 2 +-
 validation/rules/README.md               | 6 +++---
 validation/rules/python-rules.yaml       | 2 +-
 validation/rules/rule-inventory.yaml     | 2 +-
 validation/rules/spectral-rules.yaml     | 2 +-
 validation/tests/test_context_builder.py | 2 +-
 6 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/validation/context/context_builder.py b/validation/context/context_builder.py
index 671c0332..93a09b66 100644
--- a/validation/context/context_builder.py
+++ b/validation/context/context_builder.py
@@ -203,7 +203,7 @@ def select_profile(
 
     If *profile_override* is a valid profile name it takes precedence.
 
-    Profile selection (DEC-023):
+    Profile selection:
         dispatch / local              → advisory (hardcoded)
         release-automation            → release_profile from config
         pr + release + review         → release_profile from config
diff --git a/validation/rules/README.md b/validation/rules/README.md
index 06adb8fd..ec7b8ad5 100644
--- a/validation/rules/README.md
+++ b/validation/rules/README.md
@@ -7,9 +7,9 @@ Schema: [../schemas/rule-metadata-schema.yaml](../schemas/rule-metadata-schema.y
 
 ## Files
 
-- `spectral-rules.yaml` — Spectral rule metadata (WP-06.14)
-- `gherkin-rules.yaml` — gherkin-lint rule metadata (WP-06.14)
-- `python-rules.yaml` — Python check rule metadata (WP-06.14)
+- `spectral-rules.yaml` — Spectral rule metadata
+- `gherkin-rules.yaml` — gherkin-lint rule metadata
+- `python-rules.yaml` — Python check rule metadata
 
 ## ID Assignment
 
diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
index e68b7706..cc525732 100644
--- a/validation/rules/python-rules.yaml
+++ b/validation/rules/python-rules.yaml
@@ -3,7 +3,7 @@
 # branch type, API maturity, and release context; applicability conditions
 # for release-review and release-plan-changed contexts.
 # Engine messages from make_finding() are preserved by default (design doc 8.4.1).
-# message_override and hint can be added per DEC-018 when engine messages are insufficient.
+# message_override and hint can be added when engine messages are insufficient.
 
 # P-001: check-filename-kebab-case
 - id: P-001
diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index c4bdb9e6..53bb15aa 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -177,7 +177,7 @@ pending_rules:
   - source: tooling#95
     description: OWASP API security rules (17 rules)
     target_engine: spectral
-    notes: Parked per DEC-013. Introduce with v1 + bundling.
+    notes: Parked. Introduce with v1 + bundling.
     estimated_count: 17
 
 # ---------------------------------------------------------------------------
diff --git a/validation/rules/spectral-rules.yaml b/validation/rules/spectral-rules.yaml
index d029dae6..93830cf2 100644
--- a/validation/rules/spectral-rules.yaml
+++ b/validation/rules/spectral-rules.yaml
@@ -2,7 +2,7 @@
 # S-001 through S-199: reserved for CAMARA custom rules
 # S-200 through S-xxx: Built-in OAS rules
 # All entries are identity-only initially, can be extended if needed.
-# Engine messages are preserved by default; message_override and hint can be added per DEC-018.
+# Engine messages are preserved by default; message_override and hint can be added as needed.
 
 # ===== CAMARA custom rules (S-001+) =====
 
diff --git a/validation/tests/test_context_builder.py b/validation/tests/test_context_builder.py
index 75b86e52..5dd53ca6 100644
--- a/validation/tests/test_context_builder.py
+++ b/validation/tests/test_context_builder.py
@@ -74,7 +74,7 @@ def test_unknown_event_fallback(self):
 
 
 class TestSelectProfile:
-    """Profile selection tests per DEC-023: config-driven profiles."""
+    """Profile selection tests: config-driven profiles."""
 
     def test_dispatch_gets_advisory(self):
         """Dispatch always returns advisory regardless of config profiles."""

From ebc92cb5c6280359cfcf7f188f7e253b1a3e3950 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 1 Apr 2026 07:47:36 +0200
Subject: [PATCH 038/157] chore(validation): update Tier 3 fallback to upstream
 for integration testing

---
 .github/workflows/validation.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 63abd37a..eb135ce8 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -118,13 +118,13 @@ jobs:
 
             // Tier 3: Hardcoded fallback tag
             // ┌──────────────────────────────────────────────────────────┐
-            // │ TESTING ONLY — replace before production deployment:     │
-            // │   repo: camaraproject/tooling    ref: v1-rc             │
+            // │ PRE-RC: branch ref — replace with v1-rc tag after tagging │
+            // │   repo: camaraproject/tooling  ref: validation-framework│
             // └──────────────────────────────────────────────────────────┘
-            core.setOutput('tooling_checkout_repo', 'hdamker/tooling');
+            core.setOutput('tooling_checkout_repo', 'camaraproject/tooling');
             core.setOutput('tooling_checkout_ref', 'validation-framework');
             core.setOutput('tooling_ref_source', 'fallback_tag');
-            core.info('Tooling ref: fallback (TESTING) hdamker/tooling@validation-framework');
+            core.info('Tooling ref: fallback camaraproject/tooling@validation-framework');
 
       # ── Step 3: Checkout tooling (sparse) ──────────────────────────
       - name: Checkout tooling

From fd6efd20fa366b17b4687e7a346f74018eefa659 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 1 Apr 2026 10:35:29 +0200
Subject: [PATCH 039/157] fix(validation): restructure PR output for clean
 write-access split
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Replaces the 4-step output sequence (mint → Check Run → annotations
fallback → post findings) with a 3-step design:

1. Mint validation app token (unchanged)
2. Resolve write access (app token or upstream GITHUB_TOKEN → write;
   fork PR without app → no write)
3a. Post findings to PR (write): Check Run + PR comment
3b. Emit annotations and status (no write): workflow commands + commit
    status (best effort)

Fixes:
- Annotations fallback never triggered (continue-on-error masked
  Check Run failure)
- Commit status unnecessarily skipped for fork PRs
- Duplicate merge-area entries (Check Run + commit status) on
  upstream PRs — commit status now only in no-write path
- Noisy "Resource not accessible by integration" warning on fork PRs
---
 .github/workflows/validation.yml | 228 +++++++++++++++----------------
 1 file changed, 109 insertions(+), 119 deletions(-)

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index eb135ce8..a68fc8c3 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -189,150 +189,106 @@ jobs:
           app-id: ${{ vars.VALIDATION_APP_ID }}
           private-key: ${{ secrets.VALIDATION_APP_PRIVATE_KEY }}
 
-      # ── Step 9: Create Check Run (PR only) ─────────────────────
+      # ── Step 9: Resolve write access ───────────────────────────
       #
-      # Creates a GitHub Check Run with inline annotations via the
-      # Checks API.  Unlike workflow commands (::error etc.), Check
-      # Run annotations have no display cap in the PR Files tab.
-      #
-      # Token resolution: app token → GITHUB_TOKEN (same-repo PRs
-      # have checks:write).  Fork PRs without app token fall back
-      # to workflow command annotations in the next step.
-      - name: Create Check Run
-        id: check-run
+      # Determines whether the workflow has write access to the
+      # Checks and PR APIs.  Write access is available when:
+      #   - App token was minted (any PR type), or
+      #   - Upstream PR (GITHUB_TOKEN has write permissions)
+      # Fork PRs without app token have read-only GITHUB_TOKEN.
+      - name: Resolve write access
+        id: write-access
         if: >-
           always() && steps.validation.outcome == 'success'
           && github.event_name == 'pull_request'
-        continue-on-error: true
         uses: actions/github-script@v8
         with:
-          github-token: ${{ steps.mint-token.outputs.token || github.token }}
           script: |
-            const fs = require('fs');
-            const path = 'validation-output/check-run.json';
-            if (!fs.existsSync(path)) {
-              core.info('No check-run.json — skipping Check Run creation');
+            const mintOutcome = '${{ steps.mint-token.outcome }}';
+            const appToken = '${{ steps.mint-token.outputs.token }}';
+
+            if (mintOutcome === 'success' && appToken) {
+              core.setOutput('has_write', 'true');
+              core.setOutput('source', 'validation_app');
+              core.info('Write access: validation app token');
               return;
             }
 
-            const payload = JSON.parse(fs.readFileSync(path, 'utf8'));
-            const owner = context.repo.owner;
-            const repo = context.repo.repo;
-            const sha = '${{ github.event.pull_request.head.sha || github.sha }}';
-            const allAnnotations = payload.annotations || [];
-
-            // Checks API accepts max 50 annotations per call.
-            // Create the Check Run with the first batch, then update
-            // with remaining batches.
-            const BATCH_SIZE = 50;
-            const firstBatch = allAnnotations.slice(0, BATCH_SIZE);
+            const pr = context.payload.pull_request;
+            const isForkPR = pr.head.repo.full_name !== pr.base.repo.full_name;
 
-            try {
-              const checkRun = await github.rest.checks.create({
-                owner, repo,
-                name: 'CAMARA Validation',
-                head_sha: sha,
-                status: 'completed',
-                conclusion: payload.conclusion,
-                output: {
-                  title: payload.title,
-                  summary: payload.summary,
-                  annotations: firstBatch,
-                },
-              });
-              core.info(`Check Run created: id=${checkRun.data.id}, conclusion=${payload.conclusion}`);
-
-              // Batch remaining annotations
-              for (let i = BATCH_SIZE; i < allAnnotations.length; i += BATCH_SIZE) {
-                const batch = allAnnotations.slice(i, i + BATCH_SIZE);
-                await github.rest.checks.update({
-                  owner, repo,
-                  check_run_id: checkRun.data.id,
-                  output: {
-                    title: payload.title,
-                    summary: payload.summary,
-                    annotations: batch,
-                  },
-                });
-                core.info(`Check Run updated: batch ${Math.floor(i / BATCH_SIZE) + 1}, ${batch.length} annotations`);
-              }
-
-              core.info(`Total annotations: ${allAnnotations.length}`);
-            } catch (e) {
-              core.warning(`Check Run creation failed: ${e.message}`);
+            if (isForkPR) {
+              core.setOutput('has_write', 'false');
+              core.setOutput('source', 'none');
+              core.info('No write access: fork PR with read-only GITHUB_TOKEN');
+            } else {
+              core.setOutput('has_write', 'true');
+              core.setOutput('source', 'github_token');
+              core.info('Write access: GITHUB_TOKEN (upstream PR)');
             }
 
-      # ── Step 10: Emit annotations fallback (fork PR only) ──────
-      #
-      # Workflow command annotations (::error etc.) as fallback when
-      # Check Run creation is not available.  Limited to ~10 per
-      # severity level in the PR view.
-      - name: Emit annotations (fallback)
-        if: >-
-          always() && steps.validation.outcome == 'success'
-          && github.event_name == 'pull_request'
-          && steps.check-run.outcome != 'success'
-        run: |
-          if [ -f validation-output/annotations.txt ]; then
-            echo "::notice::Check Run unavailable — using workflow command annotations (limited display)"
-            cat validation-output/annotations.txt
-          fi
-
-      # ── Step 11: Post findings to PR ───────────────────────────────
+      # ── Step 10: Post findings to PR (write access) ────────────
       #
-      # Token resolution (design doc section 5.1), PR comment, and commit
-      # status in a single step.  Skipped entirely for non-PR events.
-      #
-      # Pre-snapshot validation runs via the shared run-validation
-      # action inside the release automation workflow.  Release-review
-      # PRs use the standard token tiers below (validation app or
-      # GITHUB_TOKEN).  No release-automation-specific token needed.
+      # Creates a Check Run with inline annotations (Checks API,
+      # no display cap) and posts/updates the PR comment.  The
+      # Check Run replaces commit status in the merge area.
       - name: Post findings to PR
         if: >-
           always() && steps.validation.outcome == 'success'
           && github.event_name == 'pull_request'
+          && steps.write-access.outputs.has_write == 'true'
         uses: actions/github-script@v8
         with:
           github-token: ${{ steps.mint-token.outputs.token || github.token }}
           script: |
             const fs = require('fs');
+            const owner = context.repo.owner;
+            const repo = context.repo.repo;
 
-            // ── Token source detection ────────────────────────────
-            // The github-token input already resolved via || :
-            //   tier 2 (app token) when mint succeeded, else GITHUB_TOKEN.
-            // For fork PRs GITHUB_TOKEN lacks write access → early exit.
-            const mintOutcome = '${{ steps.mint-token.outcome }}';
-            const appToken = '${{ steps.mint-token.outputs.token }}';
-            let tokenSource;
+            // ── Check Run with annotations ────────────────────────
+            const checkRunPath = 'validation-output/check-run.json';
+            if (fs.existsSync(checkRunPath)) {
+              try {
+                const payload = JSON.parse(fs.readFileSync(checkRunPath, 'utf8'));
+                const sha = '${{ github.event.pull_request.head.sha || github.sha }}';
+                const allAnnotations = payload.annotations || [];
 
-            if (mintOutcome === 'success' && appToken) {
-              tokenSource = 'validation_app';
-            } else {
-              if (mintOutcome === 'failure') {
-                core.info('Validation app token minting failed — falling back');
-              } else {
-                core.info('Validation app not configured — falling back');
-              }
-              const pr = context.payload.pull_request;
-              const isForkPR = pr.head.repo.full_name !== pr.base.repo.full_name;
-              if (isForkPR) {
-                core.info('Fork PR — GITHUB_TOKEN write access restricted');
-                const notice = [
-                  '',
-                  '> **Note**: No write permissions available (expected for fork PRs',
-                  '> without validation app) — PR comment and commit status skipped.',
-                  '> Showing findings in workflow summary only.',
-                ].join('\n');
-                fs.appendFileSync(process.env.GITHUB_STEP_SUMMARY, notice);
-                return;
+                const BATCH_SIZE = 50;
+                const firstBatch = allAnnotations.slice(0, BATCH_SIZE);
+
+                const checkRun = await github.rest.checks.create({
+                  owner, repo,
+                  name: 'CAMARA Validation',
+                  head_sha: sha,
+                  status: 'completed',
+                  conclusion: payload.conclusion,
+                  output: {
+                    title: payload.title,
+                    summary: payload.summary,
+                    annotations: firstBatch,
+                  },
+                });
+                core.info(`Check Run created: id=${checkRun.data.id}, conclusion=${payload.conclusion}`);
+
+                for (let i = BATCH_SIZE; i < allAnnotations.length; i += BATCH_SIZE) {
+                  const batch = allAnnotations.slice(i, i + BATCH_SIZE);
+                  await github.rest.checks.update({
+                    owner, repo,
+                    check_run_id: checkRun.data.id,
+                    output: {
+                      title: payload.title,
+                      summary: payload.summary,
+                      annotations: batch,
+                    },
+                  });
+                  core.info(`Check Run batch ${Math.floor(i / BATCH_SIZE) + 1}: ${batch.length} annotations`);
+                }
+                core.info(`Total annotations: ${allAnnotations.length}`);
+              } catch (e) {
+                core.warning(`Check Run failed: ${e.message}`);
               }
-              tokenSource = 'github_token';
             }
 
-            core.info(`Token source: ${tokenSource}`);
-            const owner = context.repo.owner;
-            const repo = context.repo.repo;
-
             // ── PR comment (create-or-update) ─────────────────────
             const commentPath = 'validation-output/pr-comment.md';
             if (fs.existsSync(commentPath)) {
@@ -363,14 +319,48 @@ jobs:
               }
             }
 
-            // ── Commit status ─────────────────────────────────────
+      # ── Step 11: Emit annotations and status (no write access) ─
+      #
+      # Fallback for fork PRs without app token: workflow command
+      # annotations (limited to ~10 per severity in PR view) and
+      # commit status via GITHUB_TOKEN (may fail for fork PRs with
+      # read-only token — fails gracefully).
+      - name: Emit annotations and status
+        if: >-
+          always() && steps.validation.outcome == 'success'
+          && github.event_name == 'pull_request'
+          && steps.write-access.outputs.has_write != 'true'
+        uses: actions/github-script@v8
+        with:
+          script: |
+            const fs = require('fs');
+
+            // ── Workflow command annotations ───────────────────────
+            const annotationsPath = 'validation-output/annotations.txt';
+            if (fs.existsSync(annotationsPath)) {
+              const lines = fs.readFileSync(annotationsPath, 'utf8').trim();
+              if (lines) {
+                core.info('Emitting workflow command annotations (limited display)');
+                console.log(lines);
+              }
+            }
+
+            // ── Summary note ──────────────────────────────────────
+            const notice = [
+              '',
+              '> **Note**: No write permissions available (expected for fork PRs)',
+              '> — PR comment skipped, showing findings in workflow summary and annotations.',
+            ].join('\n');
+            fs.appendFileSync(process.env.GITHUB_STEP_SUMMARY, notice);
+
+            // ── Commit status (best effort) ───────────────────────
             const statusPath = 'validation-output/commit-status.json';
             if (fs.existsSync(statusPath)) {
               try {
                 const payload = JSON.parse(fs.readFileSync(statusPath, 'utf8'));
                 const sha = '${{ github.event.pull_request.head.sha || github.sha }}';
                 await github.rest.repos.createCommitStatus({
-                  owner, repo, sha,
+                  ...context.repo, sha,
                   state: payload.state,
                   description: payload.description,
                   context: payload.context,
@@ -378,7 +368,7 @@ jobs:
                 });
                 core.info(`Commit status: ${payload.state}`);
               } catch (e) {
-                core.warning(`Commit status failed: ${e.message}`);
+                core.info(`Commit status skipped: ${e.message}`);
               }
             }
 

From 18d46844042324d0981b3606764587ea187df485 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 1 Apr 2026 10:49:26 +0200
Subject: [PATCH 040/157] fix(validation): remove commit status from
 no-write-access path

GITHUB_TOKEN on fork PRs cannot create commit statuses either.
The workflow exit status is the only pass/fail signal for fork PRs.
---
 .github/workflows/validation.yml | 28 ++++------------------------
 1 file changed, 4 insertions(+), 24 deletions(-)

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index a68fc8c3..173c6fad 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -319,13 +319,12 @@ jobs:
               }
             }
 
-      # ── Step 11: Emit annotations and status (no write access) ─
+      # ── Step 11: Emit annotations (no write access) ────────────
       #
       # Fallback for fork PRs without app token: workflow command
-      # annotations (limited to ~10 per severity in PR view) and
-      # commit status via GITHUB_TOKEN (may fail for fork PRs with
-      # read-only token — fails gracefully).
-      - name: Emit annotations and status
+      # annotations (limited to ~10 per severity in PR view).
+      # The workflow exit status is the only pass/fail signal.
+      - name: Emit annotations
         if: >-
           always() && steps.validation.outcome == 'success'
           && github.event_name == 'pull_request'
@@ -353,25 +352,6 @@ jobs:
             ].join('\n');
             fs.appendFileSync(process.env.GITHUB_STEP_SUMMARY, notice);
 
-            // ── Commit status (best effort) ───────────────────────
-            const statusPath = 'validation-output/commit-status.json';
-            if (fs.existsSync(statusPath)) {
-              try {
-                const payload = JSON.parse(fs.readFileSync(statusPath, 'utf8'));
-                const sha = '${{ github.event.pull_request.head.sha || github.sha }}';
-                await github.rest.repos.createCommitStatus({
-                  ...context.repo, sha,
-                  state: payload.state,
-                  description: payload.description,
-                  context: payload.context,
-                  target_url: payload.target_url,
-                });
-                core.info(`Commit status: ${payload.state}`);
-              } catch (e) {
-                core.info(`Commit status skipped: ${e.message}`);
-              }
-            }
-
       # ── Step 14: Upload diagnostics ────────────────────────────────
       - name: Upload diagnostics
         if: always() && steps.validation.outcome == 'success'

From fc8f18caffac410bf6216e0d5aa1f07c28b2e4af Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 1 Apr 2026 15:45:39 +0200
Subject: [PATCH 041/157] fix(validation): phase 0 quick fixes for annotation,
 severity, diff, and comment presentation

- P-004 (check-server-url-version): remove conditional override, always error
  (vwip mismatch on any branch is a release blocker)
- Annotation title: move rule ID from title to message body to reduce visual
  weight in Check Run annotations (title shows human-readable message instead)
- File restriction git diff: use merge-base diff (origin/{base_ref}...HEAD)
  instead of fragile HEAD~1; add base_ref to ValidationContext
- Snapshot failed template: replace code block with blockquote for word-wrap;
  use markdown link for workflow URL instead of bare URL
---
 .../workflows/release-automation-reusable.yml |  2 +-
 .../templates/bot_messages/snapshot_failed.md |  4 +-
 validation/context/context_builder.py         |  2 +
 .../python_checks/release_review_checks.py    | 41 +++++++++++++++----
 validation/output/annotations.py              |  7 ++--
 validation/output/check_run.py                |  7 +++-
 validation/rules/python-rules.yaml            |  4 --
 validation/tests/test_context_builder.py      |  5 ++-
 validation/tests/test_orchestrator.py         |  1 +
 validation/tests/test_output_annotations.py   | 17 +++++---
 validation/tests/test_output_check_run.py     | 20 ++++++---
 validation/tests/test_output_commit_status.py |  1 +
 validation/tests/test_output_diagnostics.py   |  1 +
 validation/tests/test_output_pr_comment.py    |  1 +
 .../tests/test_output_workflow_summary.py     |  1 +
 .../tests/test_postfilter_conditions.py       |  1 +
 validation/tests/test_postfilter_engine.py    |  1 +
 validation/tests/test_postfilter_levels.py    |  1 +
 validation/tests/test_python_adapter.py       |  1 +
 .../tests/test_python_checks_changelog.py     |  1 +
 .../tests/test_python_checks_filename.py      |  1 +
 .../tests/test_python_checks_metadata.py      |  1 +
 validation/tests/test_python_checks_readme.py |  1 +
 .../tests/test_python_checks_release_plan.py  |  1 +
 .../test_python_checks_release_review.py      |  6 ++-
 validation/tests/test_python_checks_test.py   |  1 +
 .../tests/test_python_checks_version.py       |  1 +
 27 files changed, 96 insertions(+), 35 deletions(-)

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index 70cda467..82b92b84 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -857,7 +857,7 @@ jobs:
           echo "success=false" >> "$GITHUB_OUTPUT"
           SUMMARY="${{ steps.validation.outputs.summary }}"
           RUN_URL="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
-          echo "error_message=Pre-snapshot validation failed: ${SUMMARY}. See ${RUN_URL}" >> "$GITHUB_OUTPUT"
+          echo "error_message=Pre-snapshot validation failed: ${SUMMARY}. [View workflow logs](${RUN_URL})" >> "$GITHUB_OUTPUT"
           exit 1
 
       # ── Snapshot creation ─────────────────────────────────────────
diff --git a/release_automation/templates/bot_messages/snapshot_failed.md b/release_automation/templates/bot_messages/snapshot_failed.md
index 7859874f..0f860b33 100644
--- a/release_automation/templates/bot_messages/snapshot_failed.md
+++ b/release_automation/templates/bot_messages/snapshot_failed.md
@@ -2,9 +2,7 @@
 {{#workflow_run_url}}[View workflow logs]({{workflow_run_url}}){{/workflow_run_url}}
 
 {{#error_message}}
-```
-{{error_message}}
-```
+> {{error_message}}
 {{/error_message}}
 
 **Valid actions:**
→ Fix issues on `main`, or contact Release Management for unexpected errors
→ **`/create-snapshot` — retry after fixes are merged** diff --git a/validation/context/context_builder.py b/validation/context/context_builder.py index 93a09b66..a9555acf 100644 --- a/validation/context/context_builder.py +++ b/validation/context/context_builder.py @@ -110,6 +110,7 @@ class ValidationContext: icm_release: Optional[str] # PR-specific (None / False for non-PR triggers) + base_ref: Optional[str] is_release_review_pr: bool release_plan_changed: Optional[bool] pr_number: Optional[int] @@ -337,6 +338,7 @@ def build_validation_context( target_release_type=target_release_type, commonalities_release=commonalities_release, icm_release=icm_release, + base_ref=base_ref or None, is_release_review_pr=is_review, release_plan_changed=release_plan_changed, pr_number=pr_number, diff --git a/validation/engines/python_checks/release_review_checks.py b/validation/engines/python_checks/release_review_checks.py index 8b2ff687..87b96a47 100644 --- a/validation/engines/python_checks/release_review_checks.py +++ b/validation/engines/python_checks/release_review_checks.py @@ -9,7 +9,7 @@ import logging import subprocess from pathlib import Path -from typing import List +from typing import List, Optional from validation.context import ValidationContext @@ -22,15 +22,42 @@ _ALLOWED_PREFIXES = ("CHANGELOG/",) -def _get_changed_files(repo_path: Path) -> List[str]: +def _get_changed_files( + repo_path: Path, base_ref: Optional[str] = None +) -> List[str]: """Get files changed in the current PR via git diff. - Compares HEAD against the merge-base with the target branch. - Falls back to diffing HEAD~1 if git operations fail. + Uses three-dot diff against ``origin/{base_ref}`` when available + (merge-base comparison — works regardless of checkout merge strategy). + Falls back to ``HEAD~1`` when base_ref is not provided. """ + # Primary: merge-base diff against the target branch + if base_ref: + try: + result = subprocess.run( + [ + "git", "diff", "--name-only", "--diff-filter=ACMR", + f"origin/{base_ref}...HEAD", + ], + capture_output=True, + text=True, + cwd=str(repo_path), + timeout=30, + ) + if result.returncode == 0: + return [ + f.strip() for f in result.stdout.strip().split("\n") + if f.strip() + ] + logger.warning( + "Merge-base diff failed (rc=%d), falling back to HEAD~1", + result.returncode, + ) + except (FileNotFoundError, subprocess.TimeoutExpired, OSError) as exc: + logger.warning("Merge-base diff error: %s, falling back to HEAD~1", exc) + + # Fallback: diff against first parent (assumes merge commit) try: - # In a PR context, the diff against origin/base shows changed files. - # Use --diff-filter=ACMR to only show added/copied/modified/renamed. result = subprocess.run( ["git", "diff", "--name-only", "--diff-filter=ACMR", "HEAD~1"], capture_output=True, @@ -73,7 +100,7 @@ def check_release_review_file_restriction( if not context.is_release_review_pr: return [] - changed_files = _get_changed_files(repo_path) + changed_files = _get_changed_files(repo_path, context.base_ref) if not changed_files: return [] diff --git a/validation/output/annotations.py b/validation/output/annotations.py index 8a558279..ff23cd22 100644 --- a/validation/output/annotations.py +++ b/validation/output/annotations.py @@ -83,10 +83,11 @@ def _build_command(finding: dict) -> str: line = finding.get("line", 1) col = finding.get("column") - title = format_rule_label(finding) + rule_label = format_rule_label(finding) - # Message: main message + optional hint - message = finding.get("message", "") + # Title: human-readable message. Rule ID in message body. + title = finding.get("message", "") + message = f"[{rule_label}] {title}" hint = finding.get("hint") if hint: message = f"{message} | Hint: {hint}" diff --git a/validation/output/check_run.py b/validation/output/check_run.py index be31d5b3..41b97376 100644 --- a/validation/output/check_run.py +++ b/validation/output/check_run.py @@ -81,9 +81,12 @@ def _build_annotation(finding: dict) -> dict: path = finding.get("path", "") line = finding.get("line", 1) - title = format_rule_label(finding) + rule_label = format_rule_label(finding) - message = finding.get("message", "") + # Title: human-readable message (rendered as bold heading in Check Run). + # Rule ID goes into the message body to reduce visual weight. + title = finding.get("message", "") + message = f"[{rule_label}] {title}" hint = finding.get("hint") if hint: message = f"{message}\n\nHint: {hint}" diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml index cc525732..c993c17a 100644 --- a/validation/rules/python-rules.yaml +++ b/validation/rules/python-rules.yaml @@ -32,10 +32,6 @@ engine_rule: check-server-url-version conditional_level: default: error - overrides: - - condition: - branch_types: [main, feature] - level: warn # P-005: check-server-url-api-name - id: P-005 diff --git a/validation/tests/test_context_builder.py b/validation/tests/test_context_builder.py index 5dd53ca6..ada2af45 100644 --- a/validation/tests/test_context_builder.py +++ b/validation/tests/test_context_builder.py @@ -212,6 +212,7 @@ def sample_context(self): target_release_type="pre-release-rc", commonalities_release="r4.1", icm_release=None, + base_ref="main", is_release_review_pr=False, release_plan_changed=True, pr_number=42, @@ -234,8 +235,8 @@ def test_all_keys_present(self, sample_context): expected_keys = { "repository", "branch_type", "trigger_type", "profile", "stage", "target_release_type", "commonalities_release", "icm_release", - "is_release_review_pr", "release_plan_changed", "pr_number", - "apis", "workflow_run_url", "tooling_ref", + "base_ref", "is_release_review_pr", "release_plan_changed", + "pr_number", "apis", "workflow_run_url", "tooling_ref", } assert set(d.keys()) == expected_keys diff --git a/validation/tests/test_orchestrator.py b/validation/tests/test_orchestrator.py index a228432b..6851ad2a 100644 --- a/validation/tests/test_orchestrator.py +++ b/validation/tests/test_orchestrator.py @@ -94,6 +94,7 @@ def _make_context(**overrides): "target_release_type": None, "commonalities_release": None, "icm_release": None, + "base_ref": None, "is_release_review_pr": False, "release_plan_changed": False, "pr_number": 42, diff --git a/validation/tests/test_output_annotations.py b/validation/tests/test_output_annotations.py index 651df71c..f384793a 100644 --- a/validation/tests/test_output_annotations.py +++ b/validation/tests/test_output_annotations.py @@ -114,15 +114,20 @@ def test_column_omitted_when_none(self): cmd = _build_command(f) assert "col=" not in cmd - def test_title_uses_rule_id(self): - f = _make_finding(rule_id="S-042", engine_rule="some-spectral-rule") + def test_title_uses_message(self): + f = _make_finding(rule_id="S-042", message="Bad path") cmd = _build_command(f) - assert "title=S-042" in cmd + assert "title=Bad path" in cmd - def test_title_falls_back_to_engine_rule(self): - f = _make_finding(engine_rule="camara-path-casing") + def test_rule_id_in_message_body(self): + f = _make_finding(rule_id="S-042", message="Bad path") cmd = _build_command(f) - assert "title=camara-path-casing" in cmd + assert "[S-042] Bad path" in cmd + + def test_rule_id_fallback_in_message_body(self): + f = _make_finding(engine_rule="camara-path-casing", message="Bad path") + cmd = _build_command(f) + assert "[camara-path-casing] Bad path" in cmd def test_hint_appended(self): f = _make_finding(message="Bad path", hint="Use kebab-case") diff --git a/validation/tests/test_output_check_run.py b/validation/tests/test_output_check_run.py index cb597203..d3f1201e 100644 --- a/validation/tests/test_output_check_run.py +++ b/validation/tests/test_output_check_run.py @@ -29,6 +29,7 @@ def _make_context( target_release_type=None, commonalities_release=None, icm_release=None, + base_ref=None, is_release_review_pr=False, release_plan_changed=None, pr_number=None, @@ -162,19 +163,26 @@ def test_path_and_line(self): assert ann["start_line"] == 42 assert ann["end_line"] == 42 - def test_title_uses_rule_id(self): - findings = [_make_finding(rule_id="S-042")] + def test_title_uses_message(self): + findings = [_make_finding(rule_id="S-042", message="Bad pattern")] payload = generate_check_run_payload( _make_result(findings), _make_context(), ) - assert payload.annotations[0]["title"] == "S-042" + assert payload.annotations[0]["title"] == "Bad pattern" - def test_title_falls_back_to_engine_rule(self): - findings = [_make_finding(engine_rule="my-check")] + def test_rule_id_in_message_body(self): + findings = [_make_finding(rule_id="S-042", message="Bad pattern")] payload = generate_check_run_payload( _make_result(findings), _make_context(), ) - assert payload.annotations[0]["title"] == "my-check" + assert "[S-042] Bad pattern" in payload.annotations[0]["message"] + + def test_rule_id_fallback_in_message_body(self): + findings = [_make_finding(engine_rule="my-check", message="Bad pattern")] + payload = generate_check_run_payload( + _make_result(findings), _make_context(), + ) + assert "[my-check] Bad pattern" in payload.annotations[0]["message"] def test_message_content(self): findings = [_make_finding(message="Bad pattern")] diff --git a/validation/tests/test_output_commit_status.py b/validation/tests/test_output_commit_status.py index 9a5c9c0e..02674f04 100644 --- a/validation/tests/test_output_commit_status.py +++ b/validation/tests/test_output_commit_status.py @@ -28,6 +28,7 @@ def _make_context( target_release_type=None, commonalities_release=None, icm_release=None, + base_ref=None, is_release_review_pr=False, release_plan_changed=None, pr_number=None, diff --git a/validation/tests/test_output_diagnostics.py b/validation/tests/test_output_diagnostics.py index 0fcd1e4d..d1d17800 100644 --- a/validation/tests/test_output_diagnostics.py +++ b/validation/tests/test_output_diagnostics.py @@ -25,6 +25,7 @@ def _make_context() -> ValidationContext: target_release_type=None, commonalities_release=None, icm_release=None, + base_ref=None, is_release_review_pr=False, release_plan_changed=None, pr_number=None, diff --git a/validation/tests/test_output_pr_comment.py b/validation/tests/test_output_pr_comment.py index 3f546506..f7c9da6d 100644 --- a/validation/tests/test_output_pr_comment.py +++ b/validation/tests/test_output_pr_comment.py @@ -25,6 +25,7 @@ def _make_context( target_release_type=None, commonalities_release=None, icm_release=None, + base_ref=None, is_release_review_pr=False, release_plan_changed=None, pr_number=None, diff --git a/validation/tests/test_output_workflow_summary.py b/validation/tests/test_output_workflow_summary.py index 848c25df..95e214ee 100644 --- a/validation/tests/test_output_workflow_summary.py +++ b/validation/tests/test_output_workflow_summary.py @@ -32,6 +32,7 @@ def _make_context( target_release_type=None, commonalities_release=None, icm_release=None, + base_ref=None, is_release_review_pr=False, release_plan_changed=None, pr_number=None, diff --git a/validation/tests/test_postfilter_conditions.py b/validation/tests/test_postfilter_conditions.py index 14ca9a89..3498dc03 100644 --- a/validation/tests/test_postfilter_conditions.py +++ b/validation/tests/test_postfilter_conditions.py @@ -37,6 +37,7 @@ def _make_context( target_release_type=target_release_type, commonalities_release=commonalities_release, icm_release=None, + base_ref=None, is_release_review_pr=is_release_review_pr, release_plan_changed=release_plan_changed, pr_number=None, diff --git a/validation/tests/test_postfilter_engine.py b/validation/tests/test_postfilter_engine.py index 149e29ed..18eb72ba 100644 --- a/validation/tests/test_postfilter_engine.py +++ b/validation/tests/test_postfilter_engine.py @@ -40,6 +40,7 @@ def _make_context( target_release_type=target_release_type, commonalities_release=commonalities_release, icm_release=None, + base_ref=None, is_release_review_pr=is_release_review_pr, release_plan_changed=None, pr_number=None, diff --git a/validation/tests/test_postfilter_levels.py b/validation/tests/test_postfilter_levels.py index ca964f01..3114ff54 100644 --- a/validation/tests/test_postfilter_levels.py +++ b/validation/tests/test_postfilter_levels.py @@ -37,6 +37,7 @@ def _make_context( target_release_type=target_release_type, commonalities_release=commonalities_release, icm_release=None, + base_ref=None, is_release_review_pr=False, release_plan_changed=None, pr_number=None, diff --git a/validation/tests/test_python_adapter.py b/validation/tests/test_python_adapter.py index 7f4e3721..2353bb34 100644 --- a/validation/tests/test_python_adapter.py +++ b/validation/tests/test_python_adapter.py @@ -35,6 +35,7 @@ def _make_context( target_release_type=None, commonalities_release=None, icm_release=None, + base_ref=None, is_release_review_pr=False, release_plan_changed=None, pr_number=None, diff --git a/validation/tests/test_python_checks_changelog.py b/validation/tests/test_python_checks_changelog.py index 0f12d263..3b297936 100644 --- a/validation/tests/test_python_checks_changelog.py +++ b/validation/tests/test_python_checks_changelog.py @@ -37,6 +37,7 @@ def _make_context( target_release_type=target_release_type, commonalities_release=None, icm_release=None, + base_ref=None, is_release_review_pr=False, release_plan_changed=None, pr_number=None, diff --git a/validation/tests/test_python_checks_filename.py b/validation/tests/test_python_checks_filename.py index 5fd62b6d..66017cb5 100644 --- a/validation/tests/test_python_checks_filename.py +++ b/validation/tests/test_python_checks_filename.py @@ -36,6 +36,7 @@ def _make_context(api_name: str) -> ValidationContext: target_release_type=None, commonalities_release=None, icm_release=None, + base_ref=None, is_release_review_pr=False, release_plan_changed=None, pr_number=None, diff --git a/validation/tests/test_python_checks_metadata.py b/validation/tests/test_python_checks_metadata.py index c6b45422..172ecdbd 100644 --- a/validation/tests/test_python_checks_metadata.py +++ b/validation/tests/test_python_checks_metadata.py @@ -40,6 +40,7 @@ def _make_context(api_names: list[str]) -> ValidationContext: target_release_type=None, commonalities_release=None, icm_release=None, + base_ref=None, is_release_review_pr=False, release_plan_changed=None, pr_number=None, diff --git a/validation/tests/test_python_checks_readme.py b/validation/tests/test_python_checks_readme.py index 0aeb66c0..526a0d7e 100644 --- a/validation/tests/test_python_checks_readme.py +++ b/validation/tests/test_python_checks_readme.py @@ -25,6 +25,7 @@ def _make_context() -> ValidationContext: target_release_type=None, commonalities_release=None, icm_release=None, + base_ref=None, is_release_review_pr=False, release_plan_changed=None, pr_number=None, diff --git a/validation/tests/test_python_checks_release_plan.py b/validation/tests/test_python_checks_release_plan.py index 58f2471c..2dff698f 100644 --- a/validation/tests/test_python_checks_release_plan.py +++ b/validation/tests/test_python_checks_release_plan.py @@ -32,6 +32,7 @@ def _make_context() -> ValidationContext: target_release_type=None, commonalities_release=None, icm_release=None, + base_ref=None, is_release_review_pr=False, release_plan_changed=None, pr_number=None, diff --git a/validation/tests/test_python_checks_release_review.py b/validation/tests/test_python_checks_release_review.py index a7c0aa63..5e957217 100644 --- a/validation/tests/test_python_checks_release_review.py +++ b/validation/tests/test_python_checks_release_review.py @@ -19,7 +19,10 @@ # --------------------------------------------------------------------------- -def _make_context(is_release_review: bool = True) -> ValidationContext: +def _make_context( + is_release_review: bool = True, + base_ref: str = "release-snapshot/r1.0", +) -> ValidationContext: return ValidationContext( repository="TestRepo", branch_type="release", @@ -29,6 +32,7 @@ def _make_context(is_release_review: bool = True) -> ValidationContext: target_release_type="public-release", commonalities_release=None, icm_release=None, + base_ref=base_ref, is_release_review_pr=is_release_review, release_plan_changed=None, pr_number=42, diff --git a/validation/tests/test_python_checks_test.py b/validation/tests/test_python_checks_test.py index f69a0d87..2e5622ca 100644 --- a/validation/tests/test_python_checks_test.py +++ b/validation/tests/test_python_checks_test.py @@ -44,6 +44,7 @@ def _make_context( target_release_type=None, commonalities_release=None, icm_release=None, + base_ref=None, is_release_review_pr=False, release_plan_changed=None, pr_number=None, diff --git a/validation/tests/test_python_checks_version.py b/validation/tests/test_python_checks_version.py index 66c99a2f..90b9315e 100644 --- a/validation/tests/test_python_checks_version.py +++ b/validation/tests/test_python_checks_version.py @@ -43,6 +43,7 @@ def _make_context( target_release_type=None, commonalities_release=None, icm_release=None, + base_ref=None, is_release_review_pr=False, release_plan_changed=None, pr_number=None, From 58345caf48964433197d43eeca9abf11a0784295 Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Wed, 1 Apr 2026 19:02:59 +0200 Subject: [PATCH 042/157] feat(validation): add version-specific Spectral rulesets and annotation dedup Create .spectral-r3.4.yaml (frozen Fall25) and .spectral-r4.yaml (r4.1 baseline) for version-specific rule selection per DEC-004. Both start identical with discriminator muted; r4.x evolves with gap rules in Phase 2. Fix ruleset fallback logic: repos without release-plan.yaml now default to r3.4 (conservative), unrecognised versions default to latest (r4). Add finding deduplication in the output pipeline: findings with the same (path, line, engine_rule) are merged with highest severity and concatenated messages, reducing annotation noise from oas3-schema. Add tested_rules tracking section to rule-inventory.yaml for Phase 1b regression coverage. Add metadata coverage tests for both version- specific rulesets. 663 tests passed. --- linting/config/.spectral-r3.4.yaml | 283 +++++++++++++++++ linting/config/.spectral-r4.yaml | 285 ++++++++++++++++++ validation/engines/spectral_adapter.py | 26 +- validation/output/annotations.py | 5 +- validation/output/check_run.py | 10 +- validation/output/formatting.py | 70 +++++ validation/rules/rule-inventory.yaml | 13 + validation/tests/test_output_annotations.py | 10 +- validation/tests/test_output_formatting.py | 110 +++++++ .../tests/test_rule_metadata_integrity.py | 37 ++- validation/tests/test_spectral_adapter.py | 8 +- 11 files changed, 833 insertions(+), 24 deletions(-) create mode 100644 linting/config/.spectral-r3.4.yaml create mode 100644 linting/config/.spectral-r4.yaml diff --git a/linting/config/.spectral-r3.4.yaml b/linting/config/.spectral-r3.4.yaml new file mode 100644 index 00000000..9d42924c --- /dev/null +++ b/linting/config/.spectral-r3.4.yaml @@ -0,0 +1,283 @@ +# CAMARA Project - Spectral linting ruleset for Commonalities r3.4 (Fall25, 0.6.x) +# Frozen — only maintenance fixes allowed. +# +# Based on: https://github.com/camaraproject/Commonalities/blob/r3.4/artifacts/linting_rules/.spectral.yml +# Changelog: +# - 31.01.2024: Initial version +# - 19.03.2024: Corrected camara-http-methods rule +# - 03.12.2024: Corrected camara-path-param-id and camara-discriminator-use to handle null values error in example fields +# - 09.01.2025: Updated info-contact rule +# - 21.07.2025: Added camara-schema-type-check rule + + +extends: "spectral:oas" +functions: + - camara-reserved-words + - camara-language-avoid-telco + - camara-security-no-secrets-in-path-or-query-parameters +functionsDir: "./lint_function" +rules: + # Built-in OpenAPI Specification ruleset. Each rule then can be enabled individually. + # The severity keyword is optional in rule definition and can be error, warn, info, hint, or off. The default value is warn. + contact-properties: false + duplicated-entry-in-enum: true + info-contact: false + info-description: true + info-license: true + license-url: true + no-$ref-siblings: error + no-eval-in-markdown: true + no-script-tags-in-markdown: true + openapi-tags: false + openapi-tags-alphabetical: false + openapi-tags-uniqueness: error + operation-description: true + operation-operationId: true + operation-operationId-unique: error + operation-operationId-valid-in-url: true + operation-parameters: true + operation-singular-tag: true + operation-success-response: true + operation-tags: true + operation-tag-defined: true + path-declarations-must-exist: true + path-keys-no-trailing-slash: true + path-not-include-query: true + path-params: error + tag-description: false + typed-enum: true + oas3-api-servers: true + oas3-examples-value-or-externalValue: true + oas3-operation-security-defined: false + oas3-parameter-description: false + oas3-schema: true + oas3-server-not-example.com: false + oas3-server-trailing-slash: true + oas3-unused-component: true + oas3-valid-media-example: true + oas3-valid-schema-example: true + # oas3-server-variables: true + + # Custom Rules Utilizing Spectral's Built-in Functions and JavaScript Implementations + + camara-language-avoid-telco: + message: "{{error}}" + severity: hint + description: | + This rule checks for telco-specific terminology in your API definitions and suggests more inclusive terms. + given: "$..*.*" + then: + function: camara-language-avoid-telco + recommended: false # Set to true/false to enable/disable this rule + + camara-oas-version: + message: "OpenAPI Version Error: The OpenAPI specification must adhere to version 3.0.3." + severity: error + description: | + This rule validates the OpenAPI version in your specification and requires compliance with version 3.0.3. + given: "$" + then: + field: openapi + function: pattern + functionOptions: + match: 3.0.3 + recommended: true # Set to true/false to enable/disable this rule + + camara-path-param-id: + message: "Path Parameter Naming Warning: Use 'resource_id' instead of just 'id' in path parameters." + severity: warn + description: | + This rule ensures consistent and descriptive naming for path parameters in your OpenAPI specification. + Please use 'resource_id' instead of just 'id' for your path parameters. + given: "$.paths[*][*].parameters[?(@.in == 'path')].name" + then: + field: name + function: pattern + functionOptions: + match: "^(?!.*\\b(id|Id|ID|iD)\\b).*$" + recommended: true # Set to true/false to enable/disable this rule + + camara-security-no-secrets-in-path-or-query-parameters: + message: "Sensitive data found in path: {{error}} Consider avoiding the use of sensitive data " + severity: warn + description: | + This rule checks for sensitive data ('MSISDN' and 'IMSI') in API paths and suggests avoiding their use. + given: + - "$.paths" + then: + function: camara-security-no-secrets-in-path-or-query-parameters + recommended: true # Set to true/false to enable/disable this rule + + camara-http-methods: + description: "Ensure that all path URLs have valid HTTP methods (GET, PUT, POST, DELETE, PATCH, OPTIONS)." + message: "Invalid HTTP method for '{{path}}'. Must be one of get, put, post, delete, patch, options." + severity: error + given: $.paths[*][*]~ + then: + function: pattern + functionOptions: + match: "^(get|put|post|delete|patch|options|parameters)$" + recommended: true # Set to true/false to enable/disable this rule + + camara-get-no-request-body: + message: There must be no request body for Get and DELETE + severity: error + given: + - "$.paths.*.get" + - "$.paths.*.delete" + then: + field: requestBody + function: falsy + recommended: true # Set to true/false to enable/disable this rule + + camara-reserved-words: + message: "Reserved words found {{error}} Consider avoiding the use of reserved word " + severity: warn + description: | + This rule checks Reserved words must not be used in the following parts of an API specification [Paths, Request Body properties, Component, Operation Id, Security Schema] + given: + - "$.paths" # Paths + - "$..parameters[*]" # Path or Query Parameter Names: + - "$..components.schemas.*.properties.*" # Request and Response body parameter + - "$.paths.*." # Path and Operation Names: + - "$.components.securitySchemes" # Security Schemes: + - "$.components.*.*" # Component Names: + - "$.paths.*.*.operationId" # OperationIds: + then: + function: camara-reserved-words + recommended: true # Set to true/false to enable/disable this rule + + camara-routes-description: + message: "Functionality method description Warning: Each method should have description." + severity: warn + description: | + This rule checks if each operation (POST, GET, DELETE, PUT, PATCH, OPTIONS) in your API specification has a description. + Ensure that you have added a 'summary' field for each operation in your OpenAPI specification. + given: + - "$.paths.*.post" + - "$.paths.*.get" + - "$.paths.*.delete" + - "$.paths.*.put" + - "$.paths.*.patch" + - "$.paths.*.options" + then: + field: description + function: truthy + recommended: true # Set to true/false to enable/disable this rule + + camara-parameters-descriptions: + message: "Parameter description is missing or empty: {{error}}" + severity: warn + description: | + This Spectral rule ensures that each path parameter in the API specification has a descriptive and meaningful description. + given: + - "$.paths..parameters.*" + then: + field: description + function: truthy + recommended: true # Set to true/false to enable/disable this rule + + camara-response-descriptions: + message: "Parameter description is missing or empty: {{error}}" + severity: warn + description: | + This Spectral rule ensures that each responese object in the API specification has a descriptive and meaningful description. + given: + - "$.paths..responses.*" + then: + field: description + function: truthy + recommended: true # Set to true/false to enable/disable this rule + + camara-properties-descriptions: + message: "Property description is missing or empty: {{error}}" + severity: warn + description: | + This Spectral rule ensures that each propoerty within objects in the API specification has a descriptive and meaningful description. + given: + - "$.components.*.*" + - "$.components.*.*.properties.*" + then: + field: description + function: truthy + recommended: true # Set to true/false to enable/disable this rule + + camara-operation-summary: + message: "Operation Summary Warning: Each operation should include a short summary for better understanding." + severity: warn + description: | + This rule checks if each operation (POST, GET, DELETE, PUT, PATCH, OPTIONS) in your API specification has a meaningful summary. + Ensure that you have added a 'summary' field for each operation in your OpenAPI specification. + given: + - "$.paths.*.post" + - "$.paths.*.get" + - "$.paths.*.delete" + - "$.paths.*.put" + - "$.paths.*.patch" + - "$.paths.*.options" + then: + field: summary + function: truthy + recommended: true # Set to true/false to enable/disable this rule + + camara-discriminator-use: + description: | + Ensure that API definition YAML files with oneOf or anyOf sections include a discriminator object for serialization, deserialization, and validation. + severity: hint + given: "$.components.schemas[*]" + then: + - field: oneOf + function: truthy + message: "Schemas with 'oneOf' should include a 'discriminator' for type identification." + - field: anyOf + function: truthy + message: "Schemas with 'anyOf' should include a 'discriminator' for type identification." + - field: discriminator + function: truthy + message: "A 'discriminator' object is required when using 'oneOf' or 'anyOf'." + recommended: false # Muted — findings were always ignored in r3.4 + + camara-operationid-casing-convention: + message: Operation Id must be in Camel case "{{error}}" + severity: hint + description: | + This rule checks Operation ids should follow a specific case convention: camel case. + given: "$.paths.*.*.operationId" + then: + function: casing + functionOptions: + type: camel + recommended: true # Set to true/false to enable/disable this rule + + camara-schema-casing-convention: + description: This rule checks schema should follow a specific case convention pascal case. + message: "{{property}} should be pascal case (UppperCamelCase)" + severity: warn + given: $.components.schemas[*]~ + then: + function: casing + functionOptions: + type: pascal + recommended: true # Set to true/false to enable/disable this rule + + camara-parameter-casing-convention: + description: Paths should be kebab-case. + severity: error + message: "{{property}} is not kebab-case: {{error}}" + given: $.paths[*]~ + then: + function: pattern + functionOptions: + match: "^\/([a-z0-9]+(-[a-z0-9]+)*)?(\/[a-z0-9]+(-[a-z0-9]+)*|\/{.+})*$" # doesn't allow /asasd{asdas}sadas pattern or not closed braces + recommended: true # Set to true/false to enable/disable this rule + + camara-schema-type-check: + message: "Invalid type in schema definition." + severity: error + given: "$.components.schemas.*" + then: + field: type + function: pattern + functionOptions: + match: "^(string|number|integer|boolean|array|object)$" + recommended: true diff --git a/linting/config/.spectral-r4.yaml b/linting/config/.spectral-r4.yaml new file mode 100644 index 00000000..df191314 --- /dev/null +++ b/linting/config/.spectral-r4.yaml @@ -0,0 +1,285 @@ +# CAMARA Project - Spectral linting ruleset for Commonalities r4.x (Spring26+) +# r4.1 baseline (Commonalities 0.7.0-rc.1), evolves with pre-releases per DEC-025. +# Stabilizes at Commonalities 0.7.0 public release. +# +# Based on: https://github.com/camaraproject/Commonalities/blob/main/artifacts/linting_rules/.spectral.yml +# Changelog: +# - 31.01.2024: Initial version +# - 19.03.2024: Corrected camara-http-methods rule +# - 03.12.2024: Corrected camara-path-param-id and camara-discriminator-use to handle null values error in example fields +# - 09.01.2025: Updated info-contact rule +# - 21.07.2025: Added camara-schema-type-check rule +# - 12.01.2026: camara-discriminator-use deprecated + + +extends: "spectral:oas" +functions: + - camara-reserved-words + - camara-language-avoid-telco + - camara-security-no-secrets-in-path-or-query-parameters +functionsDir: "./lint_function" +rules: + # Built-in OpenAPI Specification ruleset. Each rule then can be enabled individually. + # The severity keyword is optional in rule definition and can be error, warn, info, hint, or off. The default value is warn. + contact-properties: false + duplicated-entry-in-enum: true + info-contact: false + info-description: true + info-license: true + license-url: true + no-$ref-siblings: error + no-eval-in-markdown: true + no-script-tags-in-markdown: true + openapi-tags: false + openapi-tags-alphabetical: false + openapi-tags-uniqueness: error + operation-description: true + operation-operationId: true + operation-operationId-unique: error + operation-operationId-valid-in-url: true + operation-parameters: true + operation-singular-tag: true + operation-success-response: true + operation-tags: true + operation-tag-defined: true + path-declarations-must-exist: true + path-keys-no-trailing-slash: true + path-not-include-query: true + path-params: error + tag-description: false + typed-enum: true + oas3-api-servers: true + oas3-examples-value-or-externalValue: true + oas3-operation-security-defined: false + oas3-parameter-description: false + oas3-schema: true + oas3-server-not-example.com: false + oas3-server-trailing-slash: true + oas3-unused-component: true + oas3-valid-media-example: true + oas3-valid-schema-example: true + # oas3-server-variables: true + + # Custom Rules Utilizing Spectral's Built-in Functions and JavaScript Implementations + + camara-language-avoid-telco: + message: "{{error}}" + severity: hint + description: | + This rule checks for telco-specific terminology in your API definitions and suggests more inclusive terms. + given: "$..*.*" + then: + function: camara-language-avoid-telco + recommended: false # Set to true/false to enable/disable this rule + + camara-oas-version: + message: "OpenAPI Version Error: The OpenAPI specification must adhere to version 3.0.3." + severity: error + description: | + This rule validates the OpenAPI version in your specification and requires compliance with version 3.0.3. + given: "$" + then: + field: openapi + function: pattern + functionOptions: + match: 3.0.3 + recommended: true # Set to true/false to enable/disable this rule + + camara-path-param-id: + message: "Path Parameter Naming Warning: Use 'resource_id' instead of just 'id' in path parameters." + severity: warn + description: | + This rule ensures consistent and descriptive naming for path parameters in your OpenAPI specification. + Please use 'resource_id' instead of just 'id' for your path parameters. + given: "$.paths[*][*].parameters[?(@.in == 'path')].name" + then: + field: name + function: pattern + functionOptions: + match: "^(?!.*\\b(id|Id|ID|iD)\\b).*$" + recommended: true # Set to true/false to enable/disable this rule + + camara-security-no-secrets-in-path-or-query-parameters: + message: "Sensitive data found in path: {{error}} Consider avoiding the use of sensitive data " + severity: warn + description: | + This rule checks for sensitive data ('MSISDN' and 'IMSI') in API paths and suggests avoiding their use. + given: + - "$.paths" + then: + function: camara-security-no-secrets-in-path-or-query-parameters + recommended: true # Set to true/false to enable/disable this rule + + camara-http-methods: + description: "Ensure that all path URLs have valid HTTP methods (GET, PUT, POST, DELETE, PATCH, OPTIONS)." + message: "Invalid HTTP method for '{{path}}'. Must be one of get, put, post, delete, patch, options." + severity: error + given: $.paths[*][*]~ + then: + function: pattern + functionOptions: + match: "^(get|put|post|delete|patch|options|parameters)$" + recommended: true # Set to true/false to enable/disable this rule + + camara-get-no-request-body: + message: There must be no request body for Get and DELETE + severity: error + given: + - "$.paths.*.get" + - "$.paths.*.delete" + then: + field: requestBody + function: falsy + recommended: true # Set to true/false to enable/disable this rule + + camara-reserved-words: + message: "Reserved words found {{error}} Consider avoiding the use of reserved word " + severity: warn + description: | + This rule checks Reserved words must not be used in the following parts of an API specification [Paths, Request Body properties, Component, Operation Id, Security Schema] + given: + - "$.paths" # Paths + - "$..parameters[*]" # Path or Query Parameter Names: + - "$..components.schemas.*.properties.*" # Request and Response body parameter + - "$.paths.*." # Path and Operation Names: + - "$.components.securitySchemes" # Security Schemes: + - "$.components.*.*" # Component Names: + - "$.paths.*.*.operationId" # OperationIds: + then: + function: camara-reserved-words + recommended: true # Set to true/false to enable/disable this rule + + camara-routes-description: + message: "Functionality method description Warning: Each method should have description." + severity: warn + description: | + This rule checks if each operation (POST, GET, DELETE, PUT, PATCH, OPTIONS) in your API specification has a description. + Ensure that you have added a 'summary' field for each operation in your OpenAPI specification. + given: + - "$.paths.*.post" + - "$.paths.*.get" + - "$.paths.*.delete" + - "$.paths.*.put" + - "$.paths.*.patch" + - "$.paths.*.options" + then: + field: description + function: truthy + recommended: true # Set to true/false to enable/disable this rule + + camara-parameters-descriptions: + message: "Parameter description is missing or empty: {{error}}" + severity: warn + description: | + This Spectral rule ensures that each path parameter in the API specification has a descriptive and meaningful description. + given: + - "$.paths..parameters.*" + then: + field: description + function: truthy + recommended: true # Set to true/false to enable/disable this rule + + camara-response-descriptions: + message: "Parameter description is missing or empty: {{error}}" + severity: warn + description: | + This Spectral rule ensures that each responese object in the API specification has a descriptive and meaningful description. + given: + - "$.paths..responses.*" + then: + field: description + function: truthy + recommended: true # Set to true/false to enable/disable this rule + + camara-properties-descriptions: + message: "Property description is missing or empty: {{error}}" + severity: warn + description: | + This Spectral rule ensures that each propoerty within objects in the API specification has a descriptive and meaningful description. + given: + - "$.components.*.*" + - "$.components.*.*.properties.*" + then: + field: description + function: truthy + recommended: true # Set to true/false to enable/disable this rule + + camara-operation-summary: + message: "Operation Summary Warning: Each operation should include a short summary for better understanding." + severity: warn + description: | + This rule checks if each operation (POST, GET, DELETE, PUT, PATCH, OPTIONS) in your API specification has a meaningful summary. + Ensure that you have added a 'summary' field for each operation in your OpenAPI specification. + given: + - "$.paths.*.post" + - "$.paths.*.get" + - "$.paths.*.delete" + - "$.paths.*.put" + - "$.paths.*.patch" + - "$.paths.*.options" + then: + field: summary + function: truthy + recommended: true # Set to true/false to enable/disable this rule + + camara-discriminator-use: + description: | + Ensure that API definition YAML files with oneOf or anyOf sections include a discriminator object for serialization, deserialization, and validation. + severity: hint + given: "$.components.schemas[*]" + then: + - field: oneOf + function: truthy + message: "Schemas with 'oneOf' should include a 'discriminator' for type identification." + - field: anyOf + function: truthy + message: "Schemas with 'anyOf' should include a 'discriminator' for type identification." + - field: discriminator + function: truthy + message: "A 'discriminator' object is required when using 'oneOf' or 'anyOf'." + recommended: false # Deprecated in r4.x (12.01.2026) + + camara-operationid-casing-convention: + message: Operation Id must be in Camel case "{{error}}" + severity: hint + description: | + This rule checks Operation ids should follow a specific case convention: camel case. + given: "$.paths.*.*.operationId" + then: + function: casing + functionOptions: + type: camel + recommended: true # Set to true/false to enable/disable this rule + + camara-schema-casing-convention: + description: This rule checks schema should follow a specific case convention pascal case. + message: "{{property}} should be pascal case (UppperCamelCase)" + severity: warn + given: $.components.schemas[*]~ + then: + function: casing + functionOptions: + type: pascal + recommended: true # Set to true/false to enable/disable this rule + + camara-parameter-casing-convention: + description: Paths should be kebab-case. + severity: error + message: "{{property}} is not kebab-case: {{error}}" + given: $.paths[*]~ + then: + function: pattern + functionOptions: + match: "^\/([a-z0-9]+(-[a-z0-9]+)*)?(\/[a-z0-9]+(-[a-z0-9]+)*|\/{.+})*$" # doesn't allow /asasd{asdas}sadas pattern or not closed braces + recommended: true # Set to true/false to enable/disable this rule + + camara-schema-type-check: + message: "Invalid type in schema definition." + severity: error + given: "$.components.schemas.*" + then: + field: type + function: pattern + functionOptions: + match: "^(string|number|integer|boolean|array|object)$" + recommended: true diff --git a/validation/engines/spectral_adapter.py b/validation/engines/spectral_adapter.py index 0e3b52c0..d740d529 100644 --- a/validation/engines/spectral_adapter.py +++ b/validation/engines/spectral_adapter.py @@ -47,7 +47,13 @@ "r4": ".spectral-r4.yaml", } -# Latest version line used when commonalities_release is absent. +# When commonalities_release is absent (no release-plan.yaml), default to the +# oldest supported version line — conservative choice for repos that haven't +# declared a Commonalities dependency yet. +_DEFAULT_VERSION_LINE = "r3" + +# When commonalities_release is present but unrecognised (likely a newer version +# than what we support), default to the latest available version line. _LATEST_VERSION_LINE = "r4" # Sentinel rule name for adapter-level errors. @@ -103,9 +109,13 @@ def select_ruleset_path( Resolution order: 1. Map *commonalities_release* prefix to a version-specific filename (e.g. ``r4.1`` -> ``.spectral-r4.yaml``). - 2. If *commonalities_release* is absent or unrecognised, default to the - latest version line (currently r4). - 3. If the version-specific file does not exist on disk, fall back to + 2. If *commonalities_release* is ``None`` (no release-plan.yaml), + default to the oldest supported version line (currently r3 — + conservative choice for repos without a Commonalities dependency). + 3. If *commonalities_release* is present but unrecognised (likely + newer than supported), default to the latest version line + (currently r4). + 4. If the version-specific file does not exist on disk, fall back to ``.spectral.yaml``. Args: @@ -117,8 +127,12 @@ def select_ruleset_path( Absolute path to the selected ruleset file. """ # Determine target version line. - version_line = _LATEST_VERSION_LINE - if commonalities_release: + if commonalities_release is None: + # No release-plan.yaml or no commonalities dependency declared. + version_line = _DEFAULT_VERSION_LINE + else: + # Start with latest; override if a known prefix matches. + version_line = _LATEST_VERSION_LINE for prefix in _VERSION_RULESET_MAP: if commonalities_release.startswith(prefix): version_line = prefix diff --git a/validation/output/annotations.py b/validation/output/annotations.py index ff23cd22..6cc035c9 100644 --- a/validation/output/annotations.py +++ b/validation/output/annotations.py @@ -15,7 +15,7 @@ from validation.postfilter.engine import PostFilterResult -from .formatting import format_rule_label, sort_findings_by_priority +from .formatting import deduplicate_findings, format_rule_label, sort_findings_by_priority logger = logging.getLogger(__name__) @@ -120,7 +120,8 @@ def generate_annotations( Returns: :class:`AnnotationResult` with workflow command strings. """ - sorted_findings = sort_findings_by_priority(post_filter_result.findings) + deduped = deduplicate_findings(post_filter_result.findings) + sorted_findings = sort_findings_by_priority(deduped) total = len(sorted_findings) selected = sorted_findings[:ANNOTATION_LIMIT] diff --git a/validation/output/check_run.py b/validation/output/check_run.py index 41b97376..715e0ad5 100644 --- a/validation/output/check_run.py +++ b/validation/output/check_run.py @@ -21,7 +21,12 @@ from validation.context import ValidationContext from validation.postfilter.engine import PostFilterResult -from .formatting import count_findings, format_rule_label, sort_findings_by_priority +from .formatting import ( + count_findings, + deduplicate_findings, + format_rule_label, + sort_findings_by_priority, +) logger = logging.getLogger(__name__) @@ -160,7 +165,8 @@ def generate_check_run_payload( f"Trigger: {context.trigger_type}" ) - sorted_findings = sort_findings_by_priority(findings) + deduped = deduplicate_findings(findings) + sorted_findings = sort_findings_by_priority(deduped) annotations = [_build_annotation(f) for f in sorted_findings] logger.info( diff --git a/validation/output/formatting.py b/validation/output/formatting.py index 0f5de51b..36de87d5 100644 --- a/validation/output/formatting.py +++ b/validation/output/formatting.py @@ -102,6 +102,76 @@ def count_findings_by_engine( return {engine: count_findings(fs) for engine, fs in groups.items()} +# --------------------------------------------------------------------------- +# Deduplication +# --------------------------------------------------------------------------- + +# Cap on the number of messages to concatenate when merging duplicates. +_MAX_MERGED_MESSAGES = 3 + + +def deduplicate_findings(findings: List[dict]) -> List[dict]: + """Merge findings that share the same ``(path, line, engine_rule)`` key. + + Spectral's ``oas3-schema`` (and similar meta-rules) can fire multiple + times on the same source line with different messages. Merging them + reduces annotation noise without losing information. + + For each group of duplicates: + - The highest severity (error > warn > hint) is kept. + - Distinct messages are concatenated with ``" | "``, capped at + :data:`_MAX_MERGED_MESSAGES` (extras noted as ``"... and N more"``). + - All other fields come from the first finding in the group. + + Order of first occurrence is preserved. + """ + groups: dict[tuple, List[dict]] = {} + order: list[tuple] = [] + + for f in findings: + key = (f.get("path", ""), f.get("line", 0), f.get("engine_rule", "")) + if key not in groups: + groups[key] = [] + order.append(key) + groups[key].append(f) + + result: List[dict] = [] + for key in order: + group = groups[key] + if len(group) == 1: + result.append(group[0]) + continue + + merged = dict(group[0]) + + # Highest severity wins. + best_priority = min( + _LEVEL_PRIORITY.get(f.get("level", ""), 99) for f in group + ) + for level_name, priority in _LEVEL_PRIORITY.items(): + if priority == best_priority: + merged["level"] = level_name + break + + # Concatenate distinct messages. + seen_messages: list[str] = [] + for f in group: + msg = f.get("message", "") + if msg and msg not in seen_messages: + seen_messages.append(msg) + + if len(seen_messages) <= _MAX_MERGED_MESSAGES: + merged["message"] = " | ".join(seen_messages) + else: + shown = " | ".join(seen_messages[:_MAX_MERGED_MESSAGES]) + extra = len(seen_messages) - _MAX_MERGED_MESSAGES + merged["message"] = f"{shown} | ... and {extra} more" + + result.append(merged) + + return result + + # --------------------------------------------------------------------------- # Sorting # --------------------------------------------------------------------------- diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml index 53bb15aa..431b1595 100644 --- a/validation/rules/rule-inventory.yaml +++ b/validation/rules/rule-inventory.yaml @@ -18,6 +18,7 @@ summary: total_gap: 21 total_manual: 25 total_pending: 17 + total_tested: 0 by_engine: spectral: 46 gherkin: 25 @@ -185,6 +186,18 @@ pending_rules: # --------------------------------------------------------------------------- # Source: private-dev-docs/validation-framework/reviews/testing-guidelines-audit.md +# --------------------------------------------------------------------------- +# Tested rules — verified via regression branches (Phase 1b) +# --------------------------------------------------------------------------- +# Updated as regression branches verify rules. +# Format: rule_id: regression_branch (or list of branches) + +tested_rules: {} + +# --------------------------------------------------------------------------- +# Manual rules — require human judgment +# --------------------------------------------------------------------------- + manual_rules: count: 25 categories: diff --git a/validation/tests/test_output_annotations.py b/validation/tests/test_output_annotations.py index f384793a..220c351e 100644 --- a/validation/tests/test_output_annotations.py +++ b/validation/tests/test_output_annotations.py @@ -162,9 +162,9 @@ def test_single_finding(self): def test_priority_ordering(self): findings = [ - _make_finding(level="hint", path="a.yaml", line=1), - _make_finding(level="error", path="a.yaml", line=1), - _make_finding(level="warn", path="a.yaml", line=1), + _make_finding(level="hint", path="a.yaml", line=1, engine_rule="rule-a"), + _make_finding(level="error", path="a.yaml", line=1, engine_rule="rule-b"), + _make_finding(level="warn", path="a.yaml", line=1, engine_rule="rule-c"), ] result = generate_annotations(_make_result(findings)) assert result.commands[0].startswith("::error ") @@ -181,8 +181,8 @@ def test_limit_enforced(self): assert result.truncated def test_limit_prioritises_errors(self): - errors = [_make_finding(level="error", line=i) for i in range(30)] - warnings = [_make_finding(level="warn", line=i) for i in range(30)] + errors = [_make_finding(level="error", line=i, engine_rule="err-rule") for i in range(30)] + warnings = [_make_finding(level="warn", line=i, engine_rule="warn-rule") for i in range(30)] findings = warnings + errors # Interleave — warnings first in input result = generate_annotations(_make_result(findings)) # All 30 errors should be in the first 30 commands diff --git a/validation/tests/test_output_formatting.py b/validation/tests/test_output_formatting.py index 4da4e2ad..1c90daa8 100644 --- a/validation/tests/test_output_formatting.py +++ b/validation/tests/test_output_formatting.py @@ -7,6 +7,7 @@ FindingCounts, count_findings, count_findings_by_api, + deduplicate_findings, format_finding_location, format_rule_label, sort_findings_by_priority, @@ -45,6 +46,115 @@ def _make_finding( return f +# --------------------------------------------------------------------------- +# deduplicate_findings +# --------------------------------------------------------------------------- + + +class TestDeduplicateFindings: + def test_no_duplicates_passthrough(self): + """Non-duplicate findings pass through unchanged.""" + findings = [ + _make_finding(path="a.yaml", line=10, engine_rule="rule-a"), + _make_finding(path="a.yaml", line=20, engine_rule="rule-a"), + _make_finding(path="a.yaml", line=10, engine_rule="rule-b"), + ] + result = deduplicate_findings(findings) + assert len(result) == 3 + + def test_same_rule_same_line_merged(self): + """Findings with same (path, line, engine_rule) are merged.""" + f1 = _make_finding(path="a.yaml", line=10, engine_rule="oas3-schema") + f1["message"] = "type is not valid" + f2 = _make_finding(path="a.yaml", line=10, engine_rule="oas3-schema") + f2["message"] = "format is not valid" + + result = deduplicate_findings([f1, f2]) + assert len(result) == 1 + assert "type is not valid" in result[0]["message"] + assert "format is not valid" in result[0]["message"] + assert " | " in result[0]["message"] + + def test_different_lines_not_merged(self): + """Same rule on different lines stays separate.""" + f1 = _make_finding(path="a.yaml", line=10, engine_rule="oas3-schema") + f1["message"] = "msg1" + f2 = _make_finding(path="a.yaml", line=20, engine_rule="oas3-schema") + f2["message"] = "msg2" + + result = deduplicate_findings([f1, f2]) + assert len(result) == 2 + + def test_different_rules_not_merged(self): + """Different rules on same line stay separate.""" + f1 = _make_finding(path="a.yaml", line=10, engine_rule="rule-a") + f2 = _make_finding(path="a.yaml", line=10, engine_rule="rule-b") + + result = deduplicate_findings([f1, f2]) + assert len(result) == 2 + + def test_severity_promotion(self): + """Merged group gets the highest severity.""" + f1 = _make_finding( + path="a.yaml", line=10, engine_rule="oas3-schema", level="hint", + ) + f1["message"] = "msg1" + f2 = _make_finding( + path="a.yaml", line=10, engine_rule="oas3-schema", level="error", + ) + f2["message"] = "msg2" + + result = deduplicate_findings([f1, f2]) + assert len(result) == 1 + assert result[0]["level"] == "error" + + def test_duplicate_messages_not_repeated(self): + """Identical messages within a group appear only once.""" + f1 = _make_finding(path="a.yaml", line=10, engine_rule="oas3-schema") + f1["message"] = "same message" + f2 = _make_finding(path="a.yaml", line=10, engine_rule="oas3-schema") + f2["message"] = "same message" + + result = deduplicate_findings([f1, f2]) + assert len(result) == 1 + assert result[0]["message"] == "same message" + + def test_message_cap_at_three(self): + """More than 3 distinct messages are truncated.""" + findings = [] + for i in range(5): + f = _make_finding(path="a.yaml", line=10, engine_rule="oas3-schema") + f["message"] = f"msg{i}" + findings.append(f) + + result = deduplicate_findings(findings) + assert len(result) == 1 + assert "... and 2 more" in result[0]["message"] + assert "msg0" in result[0]["message"] + assert "msg1" in result[0]["message"] + assert "msg2" in result[0]["message"] + + def test_empty_list(self): + assert deduplicate_findings([]) == [] + + def test_single_finding(self): + f = _make_finding() + result = deduplicate_findings([f]) + assert result == [f] + + def test_order_preserved(self): + """First occurrence order is preserved.""" + f1 = _make_finding(path="b.yaml", line=5, engine_rule="rule-x") + f2 = _make_finding(path="a.yaml", line=1, engine_rule="rule-y") + f3 = _make_finding(path="b.yaml", line=5, engine_rule="rule-x") + f3["message"] = "extra msg" + + result = deduplicate_findings([f1, f2, f3]) + assert len(result) == 2 + assert result[0]["path"] == "b.yaml" # first occurrence + assert result[1]["path"] == "a.yaml" + + # --------------------------------------------------------------------------- # count_findings # --------------------------------------------------------------------------- diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py index 1f113486..db2ee90f 100644 --- a/validation/tests/test_rule_metadata_integrity.py +++ b/validation/tests/test_rule_metadata_integrity.py @@ -29,6 +29,8 @@ _LINTING_DIR = _REPO_ROOT / "linting" / "config" _SPECTRAL_CONFIG = _LINTING_DIR / ".spectral.yaml" +_SPECTRAL_R34_CONFIG = _LINTING_DIR / ".spectral-r3.4.yaml" +_SPECTRAL_R4_CONFIG = _LINTING_DIR / ".spectral-r4.yaml" _GHERKIN_CONFIG = _LINTING_DIR / ".gherkin-lintrc" _YAMLLINT_CONFIG = _LINTING_DIR / ".yamllint.yaml" @@ -153,11 +155,12 @@ def test_ids_sequential_within_ranges(self, all_rules): class TestEngineCoverage: """Verify rule metadata covers all enabled engine rules.""" - def _get_spectral_enabled_rules(self) -> set[str]: - """Extract enabled rules from .spectral.yaml.""" - if not _SPECTRAL_CONFIG.is_file(): - pytest.skip("Spectral config not found") - data = yaml.safe_load(_SPECTRAL_CONFIG.read_text(encoding="utf-8")) + @staticmethod + def _get_spectral_enabled_rules_from(config_path: Path) -> set[str]: + """Extract enabled rules from a Spectral config file.""" + if not config_path.is_file(): + pytest.skip(f"Spectral config not found: {config_path.name}") + data = yaml.safe_load(config_path.read_text(encoding="utf-8")) rules = data.get("rules", {}) enabled = set() for name, value in rules.items(): @@ -171,6 +174,10 @@ def _get_spectral_enabled_rules(self) -> set[str]: enabled.add(name) return enabled + def _get_spectral_enabled_rules(self) -> set[str]: + """Extract enabled rules from the fallback .spectral.yaml.""" + return self._get_spectral_enabled_rules_from(_SPECTRAL_CONFIG) + def _get_gherkin_enabled_rules(self) -> set[str]: """Extract enabled rules from .gherkin-lintrc.""" if not _GHERKIN_CONFIG.is_file(): @@ -219,7 +226,7 @@ def _get_python_check_names(self) -> set[str]: return {c.name for c in CHECKS} def test_spectral_coverage(self, rule_index): - """Every enabled Spectral rule has a metadata entry.""" + """Every enabled Spectral rule in the fallback ruleset has metadata.""" enabled = self._get_spectral_enabled_rules() indexed = {er for (eng, er) in rule_index if eng == "spectral"} missing = enabled - indexed @@ -227,6 +234,24 @@ def test_spectral_coverage(self, rule_index): f"Spectral rules without metadata: {sorted(missing)}" ) + def test_spectral_r34_coverage(self, rule_index): + """Every enabled Spectral rule in .spectral-r3.4.yaml has metadata.""" + enabled = self._get_spectral_enabled_rules_from(_SPECTRAL_R34_CONFIG) + indexed = {er for (eng, er) in rule_index if eng == "spectral"} + missing = enabled - indexed + assert not missing, ( + f"Spectral r3.4 rules without metadata: {sorted(missing)}" + ) + + def test_spectral_r4_coverage(self, rule_index): + """Every enabled Spectral rule in .spectral-r4.yaml has metadata.""" + enabled = self._get_spectral_enabled_rules_from(_SPECTRAL_R4_CONFIG) + indexed = {er for (eng, er) in rule_index if eng == "spectral"} + missing = enabled - indexed + assert not missing, ( + f"Spectral r4.x rules without metadata: {sorted(missing)}" + ) + def test_gherkin_coverage(self, rule_index): """Every enabled gherkin-lint rule has a metadata entry.""" enabled = self._get_gherkin_enabled_rules() diff --git a/validation/tests/test_spectral_adapter.py b/validation/tests/test_spectral_adapter.py index a7c32dca..27f232fb 100644 --- a/validation/tests/test_spectral_adapter.py +++ b/validation/tests/test_spectral_adapter.py @@ -178,12 +178,14 @@ def test_r3_release_selects_r3_ruleset(self, tmp_path): result = select_ruleset_path("r3.4", tmp_path) assert result.name == ".spectral-r3.4.yaml" - def test_none_defaults_to_latest(self, tmp_path): - (tmp_path / ".spectral-r4.yaml").touch() + def test_none_defaults_to_oldest(self, tmp_path): + """No release-plan.yaml → conservative default (r3.4).""" + (tmp_path / ".spectral-r3.4.yaml").touch() result = select_ruleset_path(None, tmp_path) - assert result.name == ".spectral-r4.yaml" + assert result.name == ".spectral-r3.4.yaml" def test_unrecognised_version_defaults_to_latest(self, tmp_path): + """Unknown version (likely newer) → latest available (r4).""" (tmp_path / ".spectral-r4.yaml").touch() result = select_ruleset_path("r99.0", tmp_path) assert result.name == ".spectral-r4.yaml" From f3cc1930d9b35e3c52b88fdc8f0ce0c048e4081f Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Wed, 1 Apr 2026 20:40:04 +0200 Subject: [PATCH 043/157] chore(validation): add apiRoot variable gap rule to inventory Add NEW-002: apiRoot default and description must match Design Guide values (hint-level Spectral check, low priority). --- validation/rules/rule-inventory.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml index 431b1595..33022f83 100644 --- a/validation/rules/rule-inventory.yaml +++ b/validation/rules/rule-inventory.yaml @@ -15,7 +15,7 @@ generated: 2026-03-26 summary: total_implemented: 96 - total_gap: 21 + total_gap: 22 total_manual: 25 total_pending: 17 total_tested: 0 @@ -152,6 +152,12 @@ gap_rules: target_engine: python priority: medium + - audit_id: NEW-002 + description: "apiRoot variable: default and description MUST match Design Guide values" + target_engine: spectral + priority: low + notes: "Hint level. Standard values: default 'http://localhost:9091', description 'API root, defined by the service provider, e.g. `api.example.com` or `api.example.com/somepath`'" + # --------------------------------------------------------------------------- # Fixes needed — implemented rules with incorrect behavior # --------------------------------------------------------------------------- From ce9723c370068bf758ced706d8ce45f4fe0906f7 Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Wed, 1 Apr 2026 21:11:42 +0200 Subject: [PATCH 044/157] chore(validation): mute P-011 and add gap rules for DG-026/027/028 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mute P-011 (license-commonalities-consistency): cross-API consistency part is meaningless — Release Automation overwrites the field. Presence of info.license already covered by Spectral. Add gap rules: - DG-026: info.license.name must be "Apache 2.0" (Spectral) - DG-027: info.license.url must be Apache License URL (Spectral) - DG-028: x-camara-commonalities valid version (Python, rewrite P-011) - NEW-002: apiRoot variable default/description match Design Guide --- validation/rules/python-rules.yaml | 4 ++-- validation/rules/rule-inventory.yaml | 24 +++++++++++++++++++++++- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml index c993c17a..87709d9e 100644 --- a/validation/rules/python-rules.yaml +++ b/validation/rules/python-rules.yaml @@ -94,12 +94,12 @@ conditional_level: default: warn -# P-011: check-license-commonalities-consistency +# P-011: check-license-commonalities-consistency (muted — likely to be retired) - id: P-011 engine: python engine_rule: check-license-commonalities-consistency conditional_level: - default: warn + default: muted # P-012: check-release-review-file-restriction - id: P-012 diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml index 33022f83..117d77de 100644 --- a/validation/rules/rule-inventory.yaml +++ b/validation/rules/rule-inventory.yaml @@ -15,7 +15,7 @@ generated: 2026-03-26 summary: total_implemented: 96 - total_gap: 22 + total_gap: 25 total_manual: 25 total_pending: 17 total_tested: 0 @@ -65,6 +65,28 @@ gap_rules: priority: medium notes: commonalities_release >=r4.0 + - audit_id: DG-026 + description: "info.license.name MUST be 'Apache 2.0'" + target_engine: spectral + priority: medium + notes: Static value check, previously v0_6 only (V6-005) + + - audit_id: DG-027 + description: info.license.url MUST be Apache License URL + target_engine: spectral + priority: medium + notes: Value check (not just presence). Spectral license-url only checks existence. Previously v0_6 (V6-006) + + - audit_id: DG-028 + description: x-camara-commonalities MUST specify valid version + target_engine: python + priority: medium + notes: > + Rewrite P-011. Presence check + value validation. On main: wip, tbd, + X.Y (e.g. 0.7), or X.Y.Z (e.g. 0.7.0) allowed — if X.Y/X.Y.Z then + must match declared commonalities_release. On release: real version + required, must match commonalities_release. Previously v0_6 (V6-007) + - audit_id: DG-015 description: "API-specific error: API_NAME.SPECIFIC_CODE format" target_engine: spectral From ed8208a9baf02c406f115a2a46eb43f1f1b0c69e Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Wed, 1 Apr 2026 21:51:38 +0200 Subject: [PATCH 045/157] chore(validation): add NEW-003 orphan API definition gap rule to inventory --- validation/rules/rule-inventory.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml index 117d77de..7186e9a5 100644 --- a/validation/rules/rule-inventory.yaml +++ b/validation/rules/rule-inventory.yaml @@ -180,6 +180,12 @@ gap_rules: priority: low notes: "Hint level. Standard values: default 'http://localhost:9091', description 'API root, defined by the service provider, e.g. `api.example.com` or `api.example.com/somepath`'" + - audit_id: NEW-003 + description: "Orphan API definitions: YAML files in code/API_definitions/ not listed in release-plan.yaml" + target_engine: python + priority: low + notes: "Compare filenames against release-plan.yaml APIs. Also detect missing files (declared but absent)." + # --------------------------------------------------------------------------- # Fixes needed — implemented rules with incorrect behavior # --------------------------------------------------------------------------- From 7f7e733b3a626ce8ed0783e52b1fd58ab759d9a1 Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Wed, 1 Apr 2026 22:26:15 +0200 Subject: [PATCH 046/157] fix(validation): skip bundling for specs without external $ref MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The RA snapshot_creator already checks for external $ref before bundling (_bundle_specs). The validation workflow step was missing this guard — Redocly normalizes formatting even without external refs, producing misleading diff artifacts. --- .github/workflows/validation.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml index 173c6fad..d1d3f2d1 100644 --- a/.github/workflows/validation.yml +++ b/.github/workflows/validation.yml @@ -378,6 +378,12 @@ jobs: for spec in code/API_definitions/*.yaml; do [ -f "$spec" ] || continue name=$(basename "$spec") + # Skip specs with no external $ref — bundling would only + # normalize formatting, producing a misleading diff artifact. + if ! grep -qE '\$ref:\s*["'"'"'][^#]' "$spec"; then + echo "Skipped (no external \$ref): $name" + continue + fi if err=$(redocly bundle "$spec" -o "validation-output/bundled/$name" 2>&1); then if ! diff -q "$spec" "validation-output/bundled/$name" > /dev/null 2>&1; then BUNDLED=$((BUNDLED + 1)) From 709445d6fdd82c65dc99046fe013a84f124f27dc Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Fri, 3 Apr 2026 08:44:41 +0200 Subject: [PATCH 047/157] feat(validation): integrate OWASP API Security Top 10 2023 rules Add 20 OWASP Spectral rules to .spectral-r4.yaml (r4.x scope only), disable 12 rules not applicable to CAMARA, and create rule metadata for all enabled rules (S-300..S-319). Implements the agreed configuration from Commonalities Linting-rules.md section 5 (issues #539, #548, #551, #552): - 8 rules at error (auth, HTTPS, 401 response, numeric IDs) - 11 rules at warn (resource consumption, mass assignment) - 1 rule at info (SSRF parameter review) - 12 rules disabled (OpenID-only, gateway-level, OAS 3.1-only) Resource consumption rules (string-limit, array-limit, integer-format, integer-limit-legacy) set to warn for 2026 with planned escalation to error in 2027 per Commonalities #551. --- linting/config/.spectral-r4.yaml | 60 ++++++++++++- shared-actions/run-validation/action.yml | 5 ++ validation/package-lock.json | 11 +++ validation/package.json | 1 + validation/rules/rule-inventory.yaml | 12 +-- validation/rules/spectral-rules.yaml | 89 +++++++++++++++++++ .../tests/test_rule_metadata_integrity.py | 6 +- 7 files changed, 172 insertions(+), 12 deletions(-) diff --git a/linting/config/.spectral-r4.yaml b/linting/config/.spectral-r4.yaml index df191314..3c969b4c 100644 --- a/linting/config/.spectral-r4.yaml +++ b/linting/config/.spectral-r4.yaml @@ -12,7 +12,12 @@ # - 12.01.2026: camara-discriminator-use deprecated -extends: "spectral:oas" +# Note: @stoplight/spectral-owasp-ruleset is installed via validation/package.json. +# Spectral resolves this package via NODE_PATH (set in shared-actions/run-validation/action.yml) +# because the ruleset file lives in linting/config/ while node_modules is in validation/. +extends: + - "spectral:oas" + - "@stoplight/spectral-owasp-ruleset" functions: - camara-reserved-words - camara-language-avoid-telco @@ -283,3 +288,56 @@ rules: functionOptions: match: "^(string|number|integer|boolean|array|object)$" recommended: true + + # ===== OWASP API Security Top 10 2023 ===== + # Source: Commonalities Linting-rules.md section 5 + # Severity overrides per CAMARA agreement (Commonalities #539, #548, #551, #552) + # Target severity escalation for API4 resource consumption rules: warn → error in 2027 + + # --- API1: Broken Object Level Authorization --- + owasp:api1:2023-no-numeric-ids: error + + # --- API2: Broken Authentication --- + owasp:api2:2023-auth-insecure-schemes: off # OpenID used exclusively + owasp:api2:2023-jwt-best-practices: off # OpenID handles JWT + owasp:api2:2023-no-api-keys-in-url: off # No API keys — OpenID used + owasp:api2:2023-no-credentials-in-url: error + owasp:api2:2023-no-http-basic: off # No HTTP basic — OpenID used + owasp:api2:2023-short-lived-access-tokens: error + owasp:api2:2023-write-restricted: error + owasp:api2:2023-read-restricted: warn + + # --- API3: Broken Object Property Level Authorization --- + owasp:api3:2023-constrained-additionalProperties: warn # OAS 3.0 only + owasp:api3:2023-constrained-unevaluatedProperties: warn # OAS 3.1 only (no-op for 3.0.3) + owasp:api3:2023-no-additionalProperties: warn # OAS 3.0 only + owasp:api3:2023-no-unevaluatedProperties: warn # OAS 3.1 only (no-op for 3.0.3) + + # --- API4: Unrestricted Resource Consumption --- + owasp:api4:2023-array-limit: warn # → error in 2027 + owasp:api4:2023-integer-format: warn # → error in 2027 + owasp:api4:2023-integer-limit: off # OAS 3.1 only; legacy covers 3.0 + owasp:api4:2023-integer-limit-legacy: warn # → error in 2027 + owasp:api4:2023-rate-limit: off # Gateway-level + owasp:api4:2023-rate-limit-retry-after: off # Gateway-level + owasp:api4:2023-rate-limit-responses-429: off # Gateway-level + owasp:api4:2023-string-limit: warn # → error in 2027 + owasp:api4:2023-string-restricted: warn + + # --- API5: Broken Function Level Authorization --- + owasp:api5:2023-admin-security-unique: error + + # --- API7: Server Side Request Forgery --- + owasp:api7:2023-concerning-url-parameter: info + + # --- API8: Security Misconfiguration --- + owasp:api8:2023-define-cors-origin: off # Gateway/implementation-level + owasp:api8:2023-define-error-responses-401: error # Elevated from upstream warn + owasp:api8:2023-define-error-responses-500: off # Not recommended in CAMARA + owasp:api8:2023-define-error-validation: warn + owasp:api8:2023-no-scheme-http: error # OAS 2 only (no-op for 3.0.3) + owasp:api8:2023-no-server-http: error + + # --- API9: Improper Inventory Management --- + owasp:api9:2023-inventory-access: off # x-internal not used + owasp:api9:2023-inventory-environment: off # Environment terms not used diff --git a/shared-actions/run-validation/action.yml b/shared-actions/run-validation/action.yml index bac801dc..a39854ea 100644 --- a/shared-actions/run-validation/action.yml +++ b/shared-actions/run-validation/action.yml @@ -65,6 +65,11 @@ runs: env: PYTHONPATH: ${{ inputs.tooling_path }} PATH_NODE_MODULES: ${{ inputs.tooling_path }}/validation/node_modules/.bin + # NODE_PATH: Spectral rulesets live in linting/config/ but npm packages + # (including @stoplight/spectral-owasp-ruleset) are installed in + # validation/node_modules/. NODE_PATH bridges this gap for Node's + # module resolution. + NODE_PATH: ${{ inputs.tooling_path }}/validation/node_modules VALIDATION_REPO_PATH: ${{ inputs.repo_path }} VALIDATION_TOOLING_PATH: ${{ inputs.tooling_path }} VALIDATION_OUTPUT_DIR: ${{ inputs.repo_path }}/validation-output diff --git a/validation/package-lock.json b/validation/package-lock.json index 8a0adc0e..d7af1907 100644 --- a/validation/package-lock.json +++ b/validation/package-lock.json @@ -8,6 +8,7 @@ "dependencies": { "@redocly/cli": "^1.31.0", "@stoplight/spectral-cli": "^6.14.0", + "@stoplight/spectral-owasp-ruleset": "^2.0.0", "gherkin-lint": "^4.2.4" } }, @@ -996,6 +997,16 @@ "node": "^16.20 || ^18.18 || >= 20.17" } }, + "node_modules/@stoplight/spectral-owasp-ruleset": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@stoplight/spectral-owasp-ruleset/-/spectral-owasp-ruleset-2.0.1.tgz", + "integrity": "sha512-9S6Z3lMwIh5oe5X5xS867iQm8xYMgCwY08FiR4At6Ivu78lQ7okFS7+I9RUs01Zawd0PtageQeZ83Ety/vlJQQ==", + "license": "MIT", + "dependencies": { + "@stoplight/spectral-formats": "^1.6.0", + "@stoplight/spectral-functions": "^1.7.2" + } + }, "node_modules/@stoplight/spectral-parsers": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/@stoplight/spectral-parsers/-/spectral-parsers-1.0.5.tgz", diff --git a/validation/package.json b/validation/package.json index 8686da9c..89420434 100644 --- a/validation/package.json +++ b/validation/package.json @@ -5,6 +5,7 @@ "dependencies": { "@redocly/cli": "^1.31.0", "@stoplight/spectral-cli": "^6.14.0", + "@stoplight/spectral-owasp-ruleset": "^2.0.0", "gherkin-lint": "^4.2.4" } } diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml index 7186e9a5..c127364e 100644 --- a/validation/rules/rule-inventory.yaml +++ b/validation/rules/rule-inventory.yaml @@ -14,13 +14,13 @@ version: 1 generated: 2026-03-26 summary: - total_implemented: 96 + total_implemented: 116 total_gap: 25 total_manual: 25 - total_pending: 17 + total_pending: 0 total_tested: 0 by_engine: - spectral: 46 + spectral: 66 gherkin: 25 python: 12 yamllint: 13 @@ -209,11 +209,7 @@ fixes: # Source: tooling#95 (OWASP Spectral rules) pending_rules: - - source: tooling#95 - description: OWASP API security rules (17 rules) - target_engine: spectral - notes: Parked. Introduce with v1 + bundling. - estimated_count: 17 + # OWASP rules (tooling#95) implemented in WS07 Phase 1b — see spectral-rules.yaml S-300..S-319 # --------------------------------------------------------------------------- # Manual rules — require human judgment diff --git a/validation/rules/spectral-rules.yaml b/validation/rules/spectral-rules.yaml index 93830cf2..1a57f71e 100644 --- a/validation/rules/spectral-rules.yaml +++ b/validation/rules/spectral-rules.yaml @@ -191,3 +191,92 @@ - id: S-228 engine: spectral engine_rule: typed-enum + +# ===== OWASP API Security Top 10 2023 (S-300+) ===== + +- id: S-300 + engine: spectral + engine_rule: "owasp:api1:2023-no-numeric-ids" + hint: "Use non-numeric (e.g. string) resource identifiers to prevent enumeration attacks." + +- id: S-301 + engine: spectral + engine_rule: "owasp:api2:2023-no-credentials-in-url" + +- id: S-302 + engine: spectral + engine_rule: "owasp:api2:2023-short-lived-access-tokens" + +- id: S-303 + engine: spectral + engine_rule: "owasp:api2:2023-write-restricted" + +- id: S-304 + engine: spectral + engine_rule: "owasp:api5:2023-admin-security-unique" + +- id: S-305 + engine: spectral + engine_rule: "owasp:api8:2023-no-scheme-http" + +- id: S-306 + engine: spectral + engine_rule: "owasp:api8:2023-no-server-http" + +- id: S-307 + engine: spectral + engine_rule: "owasp:api8:2023-define-error-responses-401" + hint: "All secured operations must document a 401 Unauthorized response (CAMARA Design Guide section 3.2)." + +- id: S-308 + engine: spectral + engine_rule: "owasp:api2:2023-read-restricted" + +- id: S-309 + engine: spectral + engine_rule: "owasp:api4:2023-array-limit" + hint: "Add maxItems to constrain array size (CAMARA Design Guide section 2.2)." + +- id: S-310 + engine: spectral + engine_rule: "owasp:api4:2023-integer-format" + hint: "Add format: int32 or int64 to integer properties (CAMARA Design Guide section 2.2)." + +- id: S-311 + engine: spectral + engine_rule: "owasp:api4:2023-integer-limit-legacy" + hint: "Add minimum and maximum to integer properties (CAMARA Design Guide section 2.2)." + +- id: S-312 + engine: spectral + engine_rule: "owasp:api4:2023-string-limit" + hint: "Add maxLength, enum, or const to string properties (CAMARA Design Guide section 2.2)." + +- id: S-313 + engine: spectral + engine_rule: "owasp:api4:2023-string-restricted" + +- id: S-314 + engine: spectral + engine_rule: "owasp:api3:2023-constrained-additionalProperties" + +- id: S-315 + engine: spectral + engine_rule: "owasp:api3:2023-constrained-unevaluatedProperties" + +- id: S-316 + engine: spectral + engine_rule: "owasp:api3:2023-no-additionalProperties" + +- id: S-317 + engine: spectral + engine_rule: "owasp:api3:2023-no-unevaluatedProperties" + +- id: S-318 + engine: spectral + engine_rule: "owasp:api8:2023-define-error-validation" + hint: "Document 400 or 422 error responses for input validation (CAMARA Design Guide section 3.2)." + +- id: S-319 + engine: spectral + engine_rule: "owasp:api7:2023-concerning-url-parameter" diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py index db2ee90f..d46f5fa9 100644 --- a/validation/tests/test_rule_metadata_integrity.py +++ b/validation/tests/test_rule_metadata_integrity.py @@ -78,7 +78,7 @@ def test_expected_rule_counts(self, all_rules): for r in all_rules: counts[r.engine] = counts.get(r.engine, 0) + 1 assert counts["python"] == 13 - assert counts["spectral"] == 46 + assert counts["spectral"] == 66 assert counts["gherkin"] == 25 assert counts["yamllint"] == 13 @@ -306,8 +306,8 @@ def test_hints_are_exception_not_norm(self, all_rules): """ with_hints = [r.id for r in all_rules if r.hint is not None] with_overrides = [r.id for r in all_rules if r.message_override is not None] - assert len(with_hints) == 0, ( - f"Expected 0 explicit hints (update test if adding hints): " + assert len(with_hints) == 7, ( + f"Expected 7 explicit hints (update test if adding hints): " f"{with_hints}" ) assert len(with_overrides) == 0, ( From 109df623c5eb011923e8ce2babeb3f66de2e69bb Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Fri, 3 Apr 2026 11:37:46 +0200 Subject: [PATCH 048/157] fix(validation): add OWASP changelog entry to .spectral-r4.yaml header --- linting/config/.spectral-r4.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/linting/config/.spectral-r4.yaml b/linting/config/.spectral-r4.yaml index 3c969b4c..8571fabb 100644 --- a/linting/config/.spectral-r4.yaml +++ b/linting/config/.spectral-r4.yaml @@ -10,6 +10,7 @@ # - 09.01.2025: Updated info-contact rule # - 21.07.2025: Added camara-schema-type-check rule # - 12.01.2026: camara-discriminator-use deprecated +# - 03.04.2026: Added OWASP API Security Top 10 2023 rules (Linting-rules.md section 5) # Note: @stoplight/spectral-owasp-ruleset is installed via validation/package.json. From 199da4406e2815f4266e9e3b94cc1361a71a4bc2 Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Fri, 3 Apr 2026 12:01:34 +0200 Subject: [PATCH 049/157] fix(validation): drop phantom string-restricted findings from resolved $ref The OWASP string-restricted rule's deep recursive JSONPath traverses Spectral's internally-resolved $ref copies, producing phantom findings with no source file and zeroed range. Filter these specifically for the string-restricted rule in parse_spectral_output(). Observed: 44 phantom warnings on ReleaseTest (actions/runs/23941828563). --- validation/engines/spectral_adapter.py | 14 ++++++++++ validation/tests/test_spectral_adapter.py | 31 +++++++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/validation/engines/spectral_adapter.py b/validation/engines/spectral_adapter.py index d740d529..fe46969e 100644 --- a/validation/engines/spectral_adapter.py +++ b/validation/engines/spectral_adapter.py @@ -251,6 +251,20 @@ def parse_spectral_output( findings = [] for item in data: try: + # The OWASP string-restricted rule uses a deep recursive JSONPath + # that can traverse Spectral's internally-resolved $ref copies, + # producing phantom findings with no source file and range 0:0. + # Drop these — they duplicate real findings on the actual source. + if ( + item.get("code") == "owasp:api4:2023-string-restricted" + and not item.get("source") + ): + start = item.get("range", {}).get("start", {}) + if start.get("line", 0) == 0 and start.get("character", 0) == 0: + logger.debug( + "Dropping phantom string-restricted finding (resolved $ref)" + ) + continue findings.append(normalize_finding(item, repo_root=repo_root)) except (KeyError, TypeError) as exc: logger.warning("Skipping malformed Spectral finding: %s", exc) diff --git a/validation/tests/test_spectral_adapter.py b/validation/tests/test_spectral_adapter.py index 27f232fb..e18c6656 100644 --- a/validation/tests/test_spectral_adapter.py +++ b/validation/tests/test_spectral_adapter.py @@ -351,6 +351,37 @@ def test_repo_root_normalises_paths(self): findings = parse_spectral_output(raw, repo_root="/runner/work") assert findings[0]["path"] == "code/API_definitions/quality-on-demand.yaml" + def test_string_restricted_phantom_dropped(self): + """Phantom string-restricted findings (no source, range 0:0) are dropped.""" + phantom = { + "code": "owasp:api4:2023-string-restricted", + "message": "Schema of type string should specify a format.", + "severity": 1, + "source": "", + "path": ["components", "schemas", "Foo", "properties", "bar"], + "range": {"start": {"line": 0, "character": 0}, + "end": {"line": 0, "character": 0}}, + } + raw = json.dumps([SAMPLE_SPECTRAL_FINDING, phantom]) + findings = parse_spectral_output(raw) + assert len(findings) == 1 + assert findings[0]["engine_rule"] == "camara-parameter-casing-convention" + + def test_other_rule_sourceless_not_dropped(self): + """Sourceless findings from other rules are kept (only string-restricted filtered).""" + other = { + "code": "owasp:api4:2023-string-limit", + "message": "Schema of type string must specify maxLength.", + "severity": 1, + "source": "", + "path": ["components", "schemas", "Foo", "properties", "bar"], + "range": {"start": {"line": 0, "character": 0}, + "end": {"line": 0, "character": 0}}, + } + raw = json.dumps([other]) + findings = parse_spectral_output(raw) + assert len(findings) == 1 + def test_external_file_findings_downgraded_to_hint(self): """Findings from common schemas (followed via $ref) become hints.""" common_finding = { From 979d5706b1cc6dd553fbb168a9817485c8e877c9 Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Fri, 3 Apr 2026 13:18:47 +0200 Subject: [PATCH 050/157] chore(validation): track proposed rule changes needing upstream discussion Add proposed_changes section to rule-inventory.yaml for: - oas3-unused-component: downgrade to hint (discriminator false positives) - owasp:api4:2023-string-restricted: downgrade to hint (free-text strings cannot have format/pattern without backward-compatibility risk; resource consumption already covered by string-limit) - camara-schema-casing-convention: overrides for CloudEvents abbreviations (HTTPSettings, HTTPSubscriptionRequest/Response, PrivateKeyJWTCredential) All three require Commonalities discussion before implementation. --- validation/rules/rule-inventory.yaml | 42 +++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml index c127364e..c87e4445 100644 --- a/validation/rules/rule-inventory.yaml +++ b/validation/rules/rule-inventory.yaml @@ -203,10 +203,50 @@ fixes: target_api_maturity: [stable] level: warn +# --------------------------------------------------------------------------- +# Proposed changes — need upstream discussion before implementation +# --------------------------------------------------------------------------- + +proposed_changes: + - rule_id: S-211 + engine_rule: oas3-unused-component + current_level: warn + proposed_level: hint + reason: > + Spectral does not follow discriminator mappings — schemas referenced + only via discriminator appear as unused. False positives on subscription + event schemas (e.g. EventApiSpecific1, EventSubscriptionStarted). + Unused schemas are cosmetic, not harmful. + discuss_in: Commonalities + + - rule_id: S-313 + engine_rule: "owasp:api4:2023-string-restricted" + current_level: warn + proposed_level: hint + reason: > + Free-text string properties (ErrorInfo.message, CloudEvent.id, + SubscriptionId, *Description fields) cannot meaningfully have format + or pattern without risking backward-incompatibility (e.g. localized + messages). The resource consumption concern is already covered by + string-limit (maxLength). string-restricted adds noise on fields + where no actionable fix exists. + discuss_in: Commonalities + + - engine_rule: camara-schema-casing-convention + rule_id: S-015 + change: Add overrides for CloudEvents abbreviation prefixes (HTTP, JWT) + reason: > + CloudEvents convention uses uppercase protocol prefixes in schema names + (HTTPSettings, HTTPSubscriptionRequest, HTTPSubscriptionResponse). + PrivateKeyJWTCredential follows the same pattern. Renaming to HttpSettings + etc. would deviate from CloudEvents spec. Overrides for these 4 schemas + would suppress false positives while keeping PascalCase enforced for all + other schemas. + discuss_in: Commonalities + # --------------------------------------------------------------------------- # Pending rules — in open PRs # --------------------------------------------------------------------------- -# Source: tooling#95 (OWASP Spectral rules) pending_rules: # OWASP rules (tooling#95) implemented in WS07 Phase 1b — see spectral-rules.yaml S-300..S-319 From 36f613602de6137d3486d031a22a8f1dccbc0a90 Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Fri, 3 Apr 2026 15:56:28 +0200 Subject: [PATCH 051/157] feat(validation): add commonalities_version to ValidationContext Resolve the concrete Commonalities version (e.g. 0.7.0) from the declared commonalities_release tag (e.g. r4.1) via GitHub API. This enables Python checks to validate x-camara-commonalities values against the actual version and will be reused by future checks (e.g. CHANGELOG validation). Changes: - Add commonalities_version field to ValidationContext dataclass - Add VALIDATION_COMMONALITIES_VERSION env var to orchestrator - Add resolution step in run-validation action (fetches VERSION.yaml from Commonalities repo at release tag ref) - Update all 18 test files with new field --- shared-actions/run-validation/action.yml | 29 +++++++++++++++++++ validation/context/context_builder.py | 3 ++ validation/orchestrator.py | 3 ++ validation/tests/test_context_builder.py | 4 ++- validation/tests/test_output_check_run.py | 1 + validation/tests/test_output_commit_status.py | 1 + validation/tests/test_output_diagnostics.py | 1 + validation/tests/test_output_pr_comment.py | 1 + .../tests/test_output_workflow_summary.py | 1 + .../tests/test_postfilter_conditions.py | 1 + validation/tests/test_postfilter_engine.py | 1 + validation/tests/test_postfilter_levels.py | 1 + validation/tests/test_python_adapter.py | 1 + .../tests/test_python_checks_changelog.py | 1 + .../tests/test_python_checks_filename.py | 1 + .../tests/test_python_checks_metadata.py | 1 + validation/tests/test_python_checks_readme.py | 1 + .../tests/test_python_checks_release_plan.py | 1 + .../test_python_checks_release_review.py | 1 + validation/tests/test_python_checks_test.py | 1 + .../tests/test_python_checks_version.py | 1 + 21 files changed, 55 insertions(+), 1 deletion(-) diff --git a/shared-actions/run-validation/action.yml b/shared-actions/run-validation/action.yml index a39854ea..46395396 100644 --- a/shared-actions/run-validation/action.yml +++ b/shared-actions/run-validation/action.yml @@ -59,6 +59,35 @@ runs: run: npm ci --ignore-scripts working-directory: ${{ inputs.tooling_path }}/validation + - name: Resolve Commonalities version + id: resolve-commonalities + shell: bash + env: + REPO_PATH: ${{ inputs.repo_path }} + run: | + # Extract commonalities_release tag from release-plan.yaml (if present) + PLAN_FILE="${REPO_PATH}/release-plan.yaml" + if [ -f "$PLAN_FILE" ]; then + RELEASE_TAG=$(python3 -c " + import yaml, sys + try: + plan = yaml.safe_load(open('${PLAN_FILE}')) + tag = plan.get('dependencies', {}).get('commonalities_release', '') + print(tag) + except Exception: + print('') + ") + if [ -n "$RELEASE_TAG" ]; then + VERSION=$(gh api "repos/camaraproject/Commonalities/contents/VERSION.yaml?ref=${RELEASE_TAG}" \ + --jq '.content' 2>/dev/null | base64 -d 2>/dev/null | \ + python3 -c "import yaml,sys; print(yaml.safe_load(sys.stdin).get('version',''))" 2>/dev/null || echo "") + if [ -n "$VERSION" ]; then + echo "VALIDATION_COMMONALITIES_VERSION=$VERSION" >> "$GITHUB_ENV" + echo "Resolved commonalities_release ${RELEASE_TAG} -> version ${VERSION}" + fi + fi + fi + - name: Run validation orchestrator id: run shell: bash diff --git a/validation/context/context_builder.py b/validation/context/context_builder.py index a9555acf..07c489ba 100644 --- a/validation/context/context_builder.py +++ b/validation/context/context_builder.py @@ -107,6 +107,7 @@ class ValidationContext: # Release context (from release-plan.yaml; None if absent) target_release_type: Optional[str] commonalities_release: Optional[str] + commonalities_version: Optional[str] icm_release: Optional[str] # PR-specific (None / False for non-PR triggers) @@ -262,6 +263,7 @@ def build_validation_context( release_metadata_schema_path: Optional[Path] = None, workflow_run_url: str = "", tooling_ref: str = "", + commonalities_version: Optional[str] = None, ) -> ValidationContext: """Assemble the unified validation context. @@ -337,6 +339,7 @@ def build_validation_context( stage=stage, target_release_type=target_release_type, commonalities_release=commonalities_release, + commonalities_version=commonalities_version, icm_release=icm_release, base_ref=base_ref or None, is_release_review_pr=is_review, diff --git a/validation/orchestrator.py b/validation/orchestrator.py index cd2b0b0c..abc67d64 100644 --- a/validation/orchestrator.py +++ b/validation/orchestrator.py @@ -86,6 +86,7 @@ class OrchestratorArgs: workflow_run_url: str tooling_ref: str commit_sha: str + commonalities_version: Optional[str] def _env(name: str, default: str = "") -> str: @@ -132,6 +133,7 @@ def parse_args() -> OrchestratorArgs: workflow_run_url=_env("WORKFLOW_RUN_URL"), tooling_ref=_env("TOOLING_REF"), commit_sha=_env("COMMIT_SHA"), + commonalities_version=_env("COMMONALITIES_VERSION") or None, ) @@ -429,6 +431,7 @@ def main() -> int: release_metadata_schema_path=paths.release_metadata_schema, workflow_run_url=args.workflow_run_url, tooling_ref=args.tooling_ref, + commonalities_version=args.commonalities_version, ) logger.info( "Context: branch=%s trigger=%s profile=%s release_review=%s apis=%d", diff --git a/validation/tests/test_context_builder.py b/validation/tests/test_context_builder.py index ada2af45..71cd5fe3 100644 --- a/validation/tests/test_context_builder.py +++ b/validation/tests/test_context_builder.py @@ -211,6 +211,7 @@ def sample_context(self): stage="enabled", target_release_type="pre-release-rc", commonalities_release="r4.1", + commonalities_version=None, icm_release=None, base_ref="main", is_release_review_pr=False, @@ -234,7 +235,8 @@ def test_all_keys_present(self, sample_context): d = sample_context.to_dict() expected_keys = { "repository", "branch_type", "trigger_type", "profile", "stage", - "target_release_type", "commonalities_release", "icm_release", + "target_release_type", "commonalities_release", + "commonalities_version", "icm_release", "base_ref", "is_release_review_pr", "release_plan_changed", "pr_number", "apis", "workflow_run_url", "tooling_ref", } diff --git a/validation/tests/test_output_check_run.py b/validation/tests/test_output_check_run.py index d3f1201e..70dc7c06 100644 --- a/validation/tests/test_output_check_run.py +++ b/validation/tests/test_output_check_run.py @@ -28,6 +28,7 @@ def _make_context( stage="enabled", target_release_type=None, commonalities_release=None, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=False, diff --git a/validation/tests/test_output_commit_status.py b/validation/tests/test_output_commit_status.py index 02674f04..21ea41ee 100644 --- a/validation/tests/test_output_commit_status.py +++ b/validation/tests/test_output_commit_status.py @@ -27,6 +27,7 @@ def _make_context( stage="enabled", target_release_type=None, commonalities_release=None, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=False, diff --git a/validation/tests/test_output_diagnostics.py b/validation/tests/test_output_diagnostics.py index d1d17800..ff7b0d99 100644 --- a/validation/tests/test_output_diagnostics.py +++ b/validation/tests/test_output_diagnostics.py @@ -24,6 +24,7 @@ def _make_context() -> ValidationContext: stage="enabled", target_release_type=None, commonalities_release=None, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=False, diff --git a/validation/tests/test_output_pr_comment.py b/validation/tests/test_output_pr_comment.py index f7c9da6d..9250bfb1 100644 --- a/validation/tests/test_output_pr_comment.py +++ b/validation/tests/test_output_pr_comment.py @@ -24,6 +24,7 @@ def _make_context( stage="enabled", target_release_type=None, commonalities_release=None, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=False, diff --git a/validation/tests/test_output_workflow_summary.py b/validation/tests/test_output_workflow_summary.py index 95e214ee..859b48b6 100644 --- a/validation/tests/test_output_workflow_summary.py +++ b/validation/tests/test_output_workflow_summary.py @@ -31,6 +31,7 @@ def _make_context( stage="enabled", target_release_type=None, commonalities_release=None, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=False, diff --git a/validation/tests/test_postfilter_conditions.py b/validation/tests/test_postfilter_conditions.py index 3498dc03..d2172d81 100644 --- a/validation/tests/test_postfilter_conditions.py +++ b/validation/tests/test_postfilter_conditions.py @@ -36,6 +36,7 @@ def _make_context( stage="enabled", target_release_type=target_release_type, commonalities_release=commonalities_release, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=is_release_review_pr, diff --git a/validation/tests/test_postfilter_engine.py b/validation/tests/test_postfilter_engine.py index 18eb72ba..d9e1b14e 100644 --- a/validation/tests/test_postfilter_engine.py +++ b/validation/tests/test_postfilter_engine.py @@ -39,6 +39,7 @@ def _make_context( stage="enabled", target_release_type=target_release_type, commonalities_release=commonalities_release, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=is_release_review_pr, diff --git a/validation/tests/test_postfilter_levels.py b/validation/tests/test_postfilter_levels.py index 3114ff54..1597896c 100644 --- a/validation/tests/test_postfilter_levels.py +++ b/validation/tests/test_postfilter_levels.py @@ -36,6 +36,7 @@ def _make_context( stage="enabled", target_release_type=target_release_type, commonalities_release=commonalities_release, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=False, diff --git a/validation/tests/test_python_adapter.py b/validation/tests/test_python_adapter.py index 2353bb34..986f4c03 100644 --- a/validation/tests/test_python_adapter.py +++ b/validation/tests/test_python_adapter.py @@ -34,6 +34,7 @@ def _make_context( stage="enabled", target_release_type=None, commonalities_release=None, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=False, diff --git a/validation/tests/test_python_checks_changelog.py b/validation/tests/test_python_checks_changelog.py index 3b297936..c7dba1e4 100644 --- a/validation/tests/test_python_checks_changelog.py +++ b/validation/tests/test_python_checks_changelog.py @@ -36,6 +36,7 @@ def _make_context( stage="enabled", target_release_type=target_release_type, commonalities_release=None, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=False, diff --git a/validation/tests/test_python_checks_filename.py b/validation/tests/test_python_checks_filename.py index 66017cb5..659cfaff 100644 --- a/validation/tests/test_python_checks_filename.py +++ b/validation/tests/test_python_checks_filename.py @@ -35,6 +35,7 @@ def _make_context(api_name: str) -> ValidationContext: stage="enabled", target_release_type=None, commonalities_release=None, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=False, diff --git a/validation/tests/test_python_checks_metadata.py b/validation/tests/test_python_checks_metadata.py index 172ecdbd..927dcf7a 100644 --- a/validation/tests/test_python_checks_metadata.py +++ b/validation/tests/test_python_checks_metadata.py @@ -39,6 +39,7 @@ def _make_context(api_names: list[str]) -> ValidationContext: stage="enabled", target_release_type=None, commonalities_release=None, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=False, diff --git a/validation/tests/test_python_checks_readme.py b/validation/tests/test_python_checks_readme.py index 526a0d7e..2a37c68c 100644 --- a/validation/tests/test_python_checks_readme.py +++ b/validation/tests/test_python_checks_readme.py @@ -24,6 +24,7 @@ def _make_context() -> ValidationContext: stage="enabled", target_release_type=None, commonalities_release=None, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=False, diff --git a/validation/tests/test_python_checks_release_plan.py b/validation/tests/test_python_checks_release_plan.py index 2dff698f..afb3af30 100644 --- a/validation/tests/test_python_checks_release_plan.py +++ b/validation/tests/test_python_checks_release_plan.py @@ -31,6 +31,7 @@ def _make_context() -> ValidationContext: stage="enabled", target_release_type=None, commonalities_release=None, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=False, diff --git a/validation/tests/test_python_checks_release_review.py b/validation/tests/test_python_checks_release_review.py index 5e957217..a8b77a60 100644 --- a/validation/tests/test_python_checks_release_review.py +++ b/validation/tests/test_python_checks_release_review.py @@ -31,6 +31,7 @@ def _make_context( stage="enabled", target_release_type="public-release", commonalities_release=None, + commonalities_version=None, icm_release=None, base_ref=base_ref, is_release_review_pr=is_release_review, diff --git a/validation/tests/test_python_checks_test.py b/validation/tests/test_python_checks_test.py index 2e5622ca..57458b3e 100644 --- a/validation/tests/test_python_checks_test.py +++ b/validation/tests/test_python_checks_test.py @@ -43,6 +43,7 @@ def _make_context( stage="enabled", target_release_type=None, commonalities_release=None, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=False, diff --git a/validation/tests/test_python_checks_version.py b/validation/tests/test_python_checks_version.py index 90b9315e..0b5eaabc 100644 --- a/validation/tests/test_python_checks_version.py +++ b/validation/tests/test_python_checks_version.py @@ -42,6 +42,7 @@ def _make_context( stage="enabled", target_release_type=None, commonalities_release=None, + commonalities_version=None, icm_release=None, base_ref=None, is_release_review_pr=False, From 34f354ac427d7628cb18d5e694336bd43ff80989 Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Fri, 3 Apr 2026 16:00:56 +0200 Subject: [PATCH 052/157] feat(validation): add shared spec traversal helpers for Python checks Provides reusable utilities for navigating OpenAPI spec structures: - resolve_local_ref: resolve #/components/schemas/Foo within a spec - collect_schema_properties: walk properties + allOf one level - extract_event_types_from_spec: find EventType enum values - find_enum_value_in_schemas: search all schema enums for a value - find_properties_by_name: find schemas containing a named property - iter_response_schemas: iterate response schemas with $ref resolution Handles both inline schemas and external $ref patterns (property found by name regardless of whether value is inline or $ref). 33 unit tests covering all helpers. --- .../engines/python_checks/_spec_helpers.py | 323 ++++++++++++ validation/tests/test_spec_helpers.py | 476 ++++++++++++++++++ 2 files changed, 799 insertions(+) create mode 100644 validation/engines/python_checks/_spec_helpers.py create mode 100644 validation/tests/test_spec_helpers.py diff --git a/validation/engines/python_checks/_spec_helpers.py b/validation/engines/python_checks/_spec_helpers.py new file mode 100644 index 00000000..70fbabdc --- /dev/null +++ b/validation/engines/python_checks/_spec_helpers.py @@ -0,0 +1,323 @@ +"""Shared OpenAPI spec traversal helpers for Python checks. + +Provides utilities for navigating CAMARA OpenAPI spec structures: +local $ref resolution, schema property collection, event type +extraction, and enum value search. + +Design constraints: + - Only resolves local $ref (starting with ``#/``). + - Follows ``allOf`` one level deep — sufficient for CAMARA patterns. + - No external I/O — operates on parsed spec dicts only. +""" + +from __future__ import annotations + +import logging +from typing import Dict, Iterable, List, Optional, Tuple + +logger = logging.getLogger(__name__) + + +# --------------------------------------------------------------------------- +# Local $ref resolution +# --------------------------------------------------------------------------- + + +def resolve_local_ref(spec: dict, ref: str) -> Optional[dict]: + """Resolve a local JSON Reference within a parsed OpenAPI spec. + + Handles ``#/components/schemas/Foo`` style references by walking + the spec dict along the path segments. + + Returns ``None`` if the reference is external, malformed, or the + target path does not exist. + """ + if not ref or not ref.startswith("#/"): + return None + parts = ref[2:].split("/") + current: object = spec + for part in parts: + if not isinstance(current, dict): + return None + current = current.get(part) + if current is None: + return None + return current if isinstance(current, dict) else None + + +# --------------------------------------------------------------------------- +# Schema property collection +# --------------------------------------------------------------------------- + + +def collect_schema_properties( + spec: dict, schema: dict +) -> Dict[str, dict]: + """Collect all property definitions from a schema. + + Walks direct ``properties`` and follows ``allOf`` compositions one + level deep (resolving ``$ref`` within ``allOf`` entries). + + Returns a dict mapping property names to their schema definitions. + """ + props: Dict[str, dict] = {} + + # Direct properties + direct = schema.get("properties") + if isinstance(direct, dict): + props.update(direct) + + # allOf compositions + all_of = schema.get("allOf") + if isinstance(all_of, list): + for entry in all_of: + if not isinstance(entry, dict): + continue + # Resolve $ref if present + if "$ref" in entry: + resolved = resolve_local_ref(spec, entry["$ref"]) + if resolved is not None: + sub_props = resolved.get("properties") + if isinstance(sub_props, dict): + props.update(sub_props) + else: + sub_props = entry.get("properties") + if isinstance(sub_props, dict): + props.update(sub_props) + + return props + + +# --------------------------------------------------------------------------- +# Event type extraction +# --------------------------------------------------------------------------- + + +def extract_event_types_from_spec(spec: dict) -> List[str]: + """Extract all event type enum values from a CAMARA subscription spec. + + Searches ``components/schemas`` for schemas whose name contains + ``EventType`` (case-insensitive) and collects their ``enum`` values. + + This covers the standard CAMARA patterns: + - ``SubscriptionEventType`` (API-specific events for subscription requests) + - ``EventTypeNotification`` (all events including lifecycle) + - ``ApiEventType`` (template pattern) + - ``SubscriptionLifecycleEventType`` (template pattern) + + Returns a deduplicated list of event type strings. + """ + schemas = spec.get("components", {}).get("schemas", {}) + if not isinstance(schemas, dict): + return [] + + event_types: set[str] = set() + for schema_name, schema_def in schemas.items(): + if not isinstance(schema_def, dict): + continue + if "eventtype" not in schema_name.lower(): + continue + enum_values = schema_def.get("enum") + if isinstance(enum_values, list): + for val in enum_values: + if isinstance(val, str): + event_types.add(val) + + return sorted(event_types) + + +# --------------------------------------------------------------------------- +# Enum value search +# --------------------------------------------------------------------------- + + +def find_enum_value_in_schemas( + spec: dict, target: str +) -> List[Tuple[str, List[str]]]: + """Search all component schemas for a specific enum value. + + Walks ``components/schemas`` and checks every ``enum`` list + (including nested ``properties`` and ``allOf`` compositions). + + Args: + spec: Parsed OpenAPI spec dict. + target: The enum value to search for (exact match). + + Returns: + List of ``(schema_path, enum_values)`` tuples where *target* + was found. *schema_path* is a dot-separated location string. + """ + schemas = spec.get("components", {}).get("schemas", {}) + if not isinstance(schemas, dict): + return [] + + results: List[Tuple[str, List[str]]] = [] + for schema_name, schema_def in schemas.items(): + if not isinstance(schema_def, dict): + continue + _search_enum_recursive( + spec, schema_def, f"components.schemas.{schema_name}", + target, results, + ) + return results + + +def _search_enum_recursive( + spec: dict, + node: dict, + path: str, + target: str, + results: List[Tuple[str, List[str]]], +) -> None: + """Recursively search a schema node for enums containing *target*.""" + # Check direct enum + enum_values = node.get("enum") + if isinstance(enum_values, list) and target in enum_values: + results.append((path, list(enum_values))) + + # Check properties + props = node.get("properties") + if isinstance(props, dict): + for prop_name, prop_def in props.items(): + if isinstance(prop_def, dict): + _search_enum_recursive( + spec, prop_def, f"{path}.{prop_name}", + target, results, + ) + + # Check allOf entries + all_of = node.get("allOf") + if isinstance(all_of, list): + for i, entry in enumerate(all_of): + if not isinstance(entry, dict): + continue + if "$ref" in entry: + resolved = resolve_local_ref(spec, entry["$ref"]) + if resolved is not None: + _search_enum_recursive( + spec, resolved, f"{path}.allOf[{i}]", + target, results, + ) + else: + _search_enum_recursive( + spec, entry, f"{path}.allOf[{i}]", + target, results, + ) + + +# --------------------------------------------------------------------------- +# Property name search +# --------------------------------------------------------------------------- + + +def find_properties_by_name( + spec: dict, property_name: str +) -> List[Tuple[str, dict]]: + """Find all schemas in ``components/schemas`` containing a named property. + + Walks top-level schemas and their ``allOf`` compositions (one level). + + Args: + spec: Parsed OpenAPI spec dict. + property_name: The property name to search for. + + Returns: + List of ``(schema_name, property_schema)`` tuples. + """ + schemas = spec.get("components", {}).get("schemas", {}) + if not isinstance(schemas, dict): + return [] + + results: List[Tuple[str, dict]] = [] + for schema_name, schema_def in schemas.items(): + if not isinstance(schema_def, dict): + continue + all_props = collect_schema_properties(spec, schema_def) + if property_name in all_props: + results.append((schema_name, all_props[property_name])) + + return results + + +# --------------------------------------------------------------------------- +# Response schema iteration +# --------------------------------------------------------------------------- + + +def iter_response_schemas( + spec: dict, path_prefix: str = "" +) -> Iterable[Tuple[str, str, str, dict]]: + """Yield response schemas from OpenAPI paths. + + For each path matching *path_prefix*, yields + ``(path, method, status_code, resolved_schema)`` for every + response that has ``content.application/json.schema``. + + Schema ``$ref`` is resolved one level. + + Args: + spec: Parsed OpenAPI spec dict. + path_prefix: Only yield from paths containing this string. + Empty string matches all paths. + """ + paths = spec.get("paths") + if not isinstance(paths, dict): + return + + for path_key, path_item in paths.items(): + if path_prefix and path_prefix not in path_key: + continue + if not isinstance(path_item, dict): + continue + + for method in ("get", "post", "put", "patch", "delete"): + operation = path_item.get(method) + if not isinstance(operation, dict): + continue + + responses = operation.get("responses") + if not isinstance(responses, dict): + continue + + for status_code, response_def in responses.items(): + if not isinstance(response_def, dict): + continue + + # Resolve response-level $ref + if "$ref" in response_def: + resolved = resolve_local_ref(spec, response_def["$ref"]) + if resolved is None: + continue + response_def = resolved + + content = response_def.get("content", {}) + if not isinstance(content, dict): + continue + + json_content = content.get("application/json") + if not isinstance(json_content, dict): + continue + + schema = json_content.get("schema") + if not isinstance(schema, dict): + continue + + # Resolve schema-level $ref + if "$ref" in schema: + resolved = resolve_local_ref(spec, schema["$ref"]) + if resolved is not None: + schema = resolved + + # Handle array responses (e.g. GET /subscriptions) + if schema.get("type") == "array": + items = schema.get("items") + if isinstance(items, dict): + if "$ref" in items: + resolved = resolve_local_ref(spec, items["$ref"]) + if resolved is not None: + yield (path_key, method, status_code, resolved) + else: + yield (path_key, method, status_code, items) + continue + + yield (path_key, method, status_code, schema) diff --git a/validation/tests/test_spec_helpers.py b/validation/tests/test_spec_helpers.py new file mode 100644 index 00000000..639dde95 --- /dev/null +++ b/validation/tests/test_spec_helpers.py @@ -0,0 +1,476 @@ +"""Unit tests for validation.engines.python_checks._spec_helpers.""" + +from __future__ import annotations + +from validation.engines.python_checks._spec_helpers import ( + collect_schema_properties, + extract_event_types_from_spec, + find_enum_value_in_schemas, + find_properties_by_name, + iter_response_schemas, + resolve_local_ref, +) + + +# --------------------------------------------------------------------------- +# resolve_local_ref +# --------------------------------------------------------------------------- + + +class TestResolveLocalRef: + def test_simple_ref(self): + spec = {"components": {"schemas": {"Foo": {"type": "object"}}}} + result = resolve_local_ref(spec, "#/components/schemas/Foo") + assert result == {"type": "object"} + + def test_nested_ref(self): + spec = {"components": {"responses": {"NotFound": {"description": "nf"}}}} + result = resolve_local_ref(spec, "#/components/responses/NotFound") + assert result == {"description": "nf"} + + def test_missing_path(self): + spec = {"components": {"schemas": {}}} + assert resolve_local_ref(spec, "#/components/schemas/Missing") is None + + def test_external_ref(self): + assert resolve_local_ref({}, "../common/Foo.yaml#/bar") is None + + def test_empty_ref(self): + assert resolve_local_ref({}, "") is None + + def test_non_dict_target(self): + spec = {"components": {"schemas": {"Foo": "not-a-dict"}}} + assert resolve_local_ref(spec, "#/components/schemas/Foo") is None + + +# --------------------------------------------------------------------------- +# collect_schema_properties +# --------------------------------------------------------------------------- + + +class TestCollectSchemaProperties: + def test_direct_properties(self): + spec = {} + schema = {"properties": {"name": {"type": "string"}, "age": {"type": "integer"}}} + props = collect_schema_properties(spec, schema) + assert set(props.keys()) == {"name", "age"} + + def test_allof_inline(self): + spec = {} + schema = { + "allOf": [ + {"properties": {"base": {"type": "string"}}}, + {"properties": {"extra": {"type": "integer"}}}, + ] + } + props = collect_schema_properties(spec, schema) + assert set(props.keys()) == {"base", "extra"} + + def test_allof_with_ref(self): + spec = { + "components": { + "schemas": { + "Base": {"properties": {"id": {"type": "string"}}} + } + } + } + schema = { + "allOf": [ + {"$ref": "#/components/schemas/Base"}, + {"properties": {"extra": {"type": "integer"}}}, + ] + } + props = collect_schema_properties(spec, schema) + assert set(props.keys()) == {"id", "extra"} + + def test_mixed_direct_and_allof(self): + spec = {} + schema = { + "properties": {"direct": {"type": "string"}}, + "allOf": [{"properties": {"composed": {"type": "integer"}}}], + } + props = collect_schema_properties(spec, schema) + assert set(props.keys()) == {"direct", "composed"} + + def test_external_ref_in_allof(self): + """External $ref in allOf is skipped (returns None from resolve).""" + spec = {} + schema = { + "allOf": [ + {"$ref": "../common/Foo.yaml#/components/schemas/Bar"}, + {"properties": {"local": {"type": "string"}}}, + ] + } + props = collect_schema_properties(spec, schema) + assert set(props.keys()) == {"local"} + + def test_empty_schema(self): + assert collect_schema_properties({}, {}) == {} + + +# --------------------------------------------------------------------------- +# extract_event_types_from_spec +# --------------------------------------------------------------------------- + + +class TestExtractEventTypes: + def test_subscription_event_type(self): + spec = { + "components": { + "schemas": { + "SubscriptionEventType": { + "type": "string", + "enum": [ + "org.camaraproject.device-status.v0.roaming-on", + "org.camaraproject.device-status.v0.roaming-off", + ], + } + } + } + } + result = extract_event_types_from_spec(spec) + assert len(result) == 2 + assert "org.camaraproject.device-status.v0.roaming-on" in result + + def test_multiple_event_type_schemas(self): + """Both SubscriptionEventType and EventTypeNotification are found.""" + spec = { + "components": { + "schemas": { + "SubscriptionEventType": { + "type": "string", + "enum": ["org.camaraproject.foo.v0.bar"], + }, + "EventTypeNotification": { + "type": "string", + "enum": [ + "org.camaraproject.foo.v0.bar", + "org.camaraproject.foo.v0.subscription-ended", + ], + }, + } + } + } + result = extract_event_types_from_spec(spec) + assert len(result) == 2 # deduplicated + + def test_api_event_type_template_pattern(self): + """Template uses ApiEventType schema name.""" + spec = { + "components": { + "schemas": { + "ApiEventType": { + "type": "string", + "enum": [ + "org.camaraproject.api-name.v0.event-type1", + "org.camaraproject.api-name.v0.event-type2", + ], + }, + "SubscriptionLifecycleEventType": { + "type": "string", + "enum": [ + "org.camaraproject.api-name.v0.subscription-started", + "org.camaraproject.api-name.v0.subscription-ended", + ], + }, + } + } + } + result = extract_event_types_from_spec(spec) + assert len(result) == 4 + + def test_no_event_type_schemas(self): + spec = {"components": {"schemas": {"Foo": {"type": "object"}}}} + assert extract_event_types_from_spec(spec) == [] + + def test_no_components(self): + assert extract_event_types_from_spec({}) == [] + + def test_schema_without_enum(self): + spec = { + "components": { + "schemas": { + "EventTypeNotification": {"type": "string"} + } + } + } + assert extract_event_types_from_spec(spec) == [] + + +# --------------------------------------------------------------------------- +# find_enum_value_in_schemas +# --------------------------------------------------------------------------- + + +class TestFindEnumValueInSchemas: + def test_find_in_top_level_enum(self): + spec = { + "components": { + "schemas": { + "ErrorCode": {"type": "string", "enum": ["INVALID", "CONFLICT", "NOT_FOUND"]} + } + } + } + results = find_enum_value_in_schemas(spec, "CONFLICT") + assert len(results) == 1 + assert "ErrorCode" in results[0][0] + + def test_find_in_nested_property(self): + spec = { + "components": { + "schemas": { + "ErrorInfo": { + "type": "object", + "properties": { + "code": { + "type": "string", + "enum": ["INVALID", "CONFLICT"], + } + }, + } + } + } + } + results = find_enum_value_in_schemas(spec, "CONFLICT") + assert len(results) == 1 + assert "code" in results[0][0] + + def test_not_found(self): + spec = { + "components": { + "schemas": { + "ErrorCode": {"type": "string", "enum": ["INVALID", "NOT_FOUND"]} + } + } + } + assert find_enum_value_in_schemas(spec, "CONFLICT") == [] + + def test_find_in_allof_ref(self): + spec = { + "components": { + "schemas": { + "Base": { + "properties": { + "status": {"type": "string", "enum": ["OK", "CONFLICT"]} + } + }, + "Extended": { + "allOf": [{"$ref": "#/components/schemas/Base"}] + }, + } + } + } + results = find_enum_value_in_schemas(spec, "CONFLICT") + # Found in Base directly and in Extended via allOf + assert len(results) >= 1 + + def test_empty_spec(self): + assert find_enum_value_in_schemas({}, "CONFLICT") == [] + + +# --------------------------------------------------------------------------- +# find_properties_by_name +# --------------------------------------------------------------------------- + + +class TestFindPropertiesByName: + def test_direct_property(self): + spec = { + "components": { + "schemas": { + "Subscription": { + "properties": { + "sinkCredential": {"$ref": "#/components/schemas/SinkCredential"}, + "sink": {"type": "string"}, + } + } + } + } + } + results = find_properties_by_name(spec, "sinkCredential") + assert len(results) == 1 + assert results[0][0] == "Subscription" + + def test_property_via_allof(self): + spec = { + "components": { + "schemas": { + "Base": { + "properties": {"sinkCredential": {"type": "object"}} + }, + "Extended": { + "allOf": [ + {"$ref": "#/components/schemas/Base"}, + {"properties": {"extra": {"type": "string"}}}, + ] + }, + } + } + } + results = find_properties_by_name(spec, "sinkCredential") + assert len(results) == 2 # Found in both Base and Extended + + def test_property_not_found(self): + spec = { + "components": { + "schemas": { + "Foo": {"properties": {"bar": {"type": "string"}}} + } + } + } + assert find_properties_by_name(spec, "sinkCredential") == [] + + def test_external_ref_property(self): + """Property with external $ref is still found by name.""" + spec = { + "components": { + "schemas": { + "SubscriptionRequest": { + "properties": { + "sinkCredential": { + "$ref": "../common/CAMARA_event_common.yaml#/components/schemas/SinkCredential" + } + } + } + } + } + } + results = find_properties_by_name(spec, "sinkCredential") + assert len(results) == 1 + + +# --------------------------------------------------------------------------- +# iter_response_schemas +# --------------------------------------------------------------------------- + + +class TestIterResponseSchemas: + def test_simple_response(self): + spec = { + "paths": { + "/subscriptions": { + "post": { + "responses": { + "201": { + "content": { + "application/json": { + "schema": {"type": "object", "properties": {"id": {"type": "string"}}} + } + } + } + } + } + } + } + } + results = list(iter_response_schemas(spec, "/subscriptions")) + assert len(results) == 1 + path, method, code, schema = results[0] + assert path == "/subscriptions" + assert method == "post" + assert code == "201" + assert "id" in schema.get("properties", {}) + + def test_ref_response_schema(self): + spec = { + "components": { + "schemas": { + "Subscription": {"type": "object", "properties": {"id": {"type": "string"}}} + } + }, + "paths": { + "/subscriptions/{id}": { + "get": { + "responses": { + "200": { + "content": { + "application/json": { + "schema": {"$ref": "#/components/schemas/Subscription"} + } + } + } + } + } + } + }, + } + results = list(iter_response_schemas(spec, "/subscriptions")) + assert len(results) == 1 + assert results[0][3].get("properties", {}).get("id") is not None + + def test_array_response(self): + """GET /subscriptions returns array — items are yielded.""" + spec = { + "components": { + "schemas": { + "Subscription": {"type": "object", "properties": {"id": {"type": "string"}}} + } + }, + "paths": { + "/subscriptions": { + "get": { + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "type": "array", + "items": {"$ref": "#/components/schemas/Subscription"}, + } + } + } + } + } + } + } + }, + } + results = list(iter_response_schemas(spec, "/subscriptions")) + assert len(results) == 1 + assert "id" in results[0][3].get("properties", {}) + + def test_path_prefix_filter(self): + spec = { + "paths": { + "/subscriptions": { + "get": { + "responses": { + "200": {"content": {"application/json": {"schema": {"type": "object"}}}} + } + } + }, + "/other": { + "get": { + "responses": { + "200": {"content": {"application/json": {"schema": {"type": "object"}}}} + } + } + }, + } + } + results = list(iter_response_schemas(spec, "/subscriptions")) + assert len(results) == 1 + + def test_no_paths(self): + assert list(iter_response_schemas({}, "/subscriptions")) == [] + + def test_error_responses_included(self): + """Error responses (4xx) are also yielded when matching path.""" + spec = { + "paths": { + "/subscriptions": { + "post": { + "responses": { + "201": { + "content": {"application/json": {"schema": {"type": "object"}}} + }, + "400": { + "content": {"application/json": {"schema": {"type": "object"}}} + }, + } + } + } + } + } + results = list(iter_response_schemas(spec, "/subscriptions")) + assert len(results) == 2 From a382b8f633190c4bdd9df68d259db1ce8d57f60d Mon Sep 17 00:00:00 2001 From: Herbert Damker <52109189+hdamker@users.noreply.github.com> Date: Fri, 3 Apr 2026 16:08:49 +0200 Subject: [PATCH 053/157] feat(validation): rewrite P-011 as check-commonalities-version (DG-028) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the muted check-license-commonalities-consistency with a context-aware x-camara-commonalities version check per Design Guide section 5.3.7. Validates: - Presence: field must exist in info object (error) - Format by branch type: main/feature accept wip/tbd/version; release/maintenance require full semver X.Y.Z[-pre] - Version match: concrete values compared against resolved commonalities_version from release-plan.yaml (warn on mismatch) License checks dropped — Spectral already covers presence. Changed scope from REPO to API (runs per API spec file). 26 unit tests covering all branch types and edge cases. --- validation/engines/python_checks/__init__.py | 4 +- .../engines/python_checks/metadata_checks.py | 222 ++++++++------ validation/rules/python-rules.yaml | 8 +- .../tests/test_python_checks_metadata.py | 279 +++++++++++++----- 4 files changed, 340 insertions(+), 173 deletions(-) diff --git a/validation/engines/python_checks/__init__.py b/validation/engines/python_checks/__init__.py index c02c2d93..2cc45e81 100644 --- a/validation/engines/python_checks/__init__.py +++ b/validation/engines/python_checks/__init__.py @@ -8,7 +8,7 @@ from .changelog_checks import check_changelog_format from .filename_checks import check_filename_kebab_case, check_filename_matches_api_name -from .metadata_checks import check_license_commonalities_consistency +from .metadata_checks import check_commonalities_version from .readme_checks import check_readme_placeholder_removal from .release_plan_checks import check_release_plan_semantics from .release_review_checks import check_release_review_file_restriction @@ -38,7 +38,7 @@ CheckDescriptor("check-test-directory-exists", CheckScope.REPO, check_test_directory_exists), CheckDescriptor("check-release-plan-semantics", CheckScope.REPO, check_release_plan_semantics), CheckDescriptor("check-changelog-format", CheckScope.REPO, check_changelog_format), - CheckDescriptor("check-license-commonalities-consistency", CheckScope.REPO, check_license_commonalities_consistency), + CheckDescriptor("check-commonalities-version", CheckScope.API, check_commonalities_version), CheckDescriptor("check-readme-placeholder-removal", CheckScope.REPO, check_readme_placeholder_removal), CheckDescriptor("check-release-review-file-restriction", CheckScope.REPO, check_release_review_file_restriction), ] diff --git a/validation/engines/python_checks/metadata_checks.py b/validation/engines/python_checks/metadata_checks.py index f10f1bc1..3aee8d36 100644 --- a/validation/engines/python_checks/metadata_checks.py +++ b/validation/engines/python_checks/metadata_checks.py @@ -1,119 +1,165 @@ -"""Metadata consistency checks. +"""Commonalities version check. -Validates that ``info.license`` and ``info.x-camara-commonalities`` are -present in all API spec files and consistent across them. +Validates that ``info.x-camara-commonalities`` is present and contains +a valid version value appropriate for the branch type. + +Design Guide section 5.3.7: "The API SHALL specify the Commonalities +release version they are compliant to, by including the +x-camara-commonalities extension field." """ from __future__ import annotations +import re from pathlib import Path -from typing import Any, List, Optional +from typing import List from validation.context import ValidationContext from ._types import load_yaml_safe, make_finding +_ENGINE_RULE = "check-commonalities-version" -def _extract_metadata(spec: dict) -> tuple[Optional[Any], Optional[Any]]: - """Extract license and x-camara-commonalities from a spec.""" - info = spec.get("info", {}) - license_val = info.get("license") - commonalities_val = info.get("x-camara-commonalities") - return license_val, commonalities_val +# Full semver: 0.7.0, 0.7.0-rc.1, 1.0.0-alpha.2 +_SEMVER_RE = re.compile( + r"^(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)" + r"(?:-(?P
[a-zA-Z0-9]+(?:\.[a-zA-Z0-9]+)*))?$"
+)
+
+# Short form: 0.7, 4.1 (allowed on main/feature only)
+_SHORT_VERSION_RE = re.compile(
+    r"^(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)$"
+)
+
+# Placeholder values allowed on main/feature branches
+_PLACEHOLDERS = frozenset({"wip", "tbd"})
+
+
+def _is_valid_format(value: str, branch_type: str) -> bool:
+    """Check if the value is a valid x-camara-commonalities format.
+
+    Main/feature: wip, tbd, X.Y, or X.Y.Z[-pre] are all valid.
+    Release/maintenance: only X.Y.Z[-pre] is valid.
+    """
+    if branch_type in ("main", "feature"):
+        if value in _PLACEHOLDERS:
+            return True
+        if _SHORT_VERSION_RE.match(value):
+            return True
+        if _SEMVER_RE.match(value):
+            return True
+        return False
 
+    # release / maintenance — must be full semver
+    return _SEMVER_RE.match(value) is not None
 
-def check_license_commonalities_consistency(
+
+def _is_concrete_version(value: str) -> bool:
+    """True if the value is a concrete version (not a placeholder)."""
+    return value not in _PLACEHOLDERS and (
+        _SEMVER_RE.match(value) is not None
+        or _SHORT_VERSION_RE.match(value) is not None
+    )
+
+
+def check_commonalities_version(
     repo_path: Path, context: ValidationContext
 ) -> List[dict]:
-    """Verify license and x-camara-commonalities are present and consistent.
+    """Validate info.x-camara-commonalities presence and value.
 
-    Repo-level check.  Loads all spec files referenced in ``context.apis``,
-    checks that each has ``info.license`` and ``info.x-camara-commonalities``,
-    and verifies the values are identical across all API files.
-    """
-    if not context.apis:
-        return []
+    Per-API check (runs once per API in context.apis).
 
-    findings: List[dict] = []
-    first_license: Optional[Any] = None
-    first_commonalities: Optional[Any] = None
-    first_api: Optional[str] = None
+    Checks:
+    1. Field must be present in info object.
+    2. Value must be a valid format for the branch type.
+    3. If a concrete version and context.commonalities_version is set,
+       the values must match.
+    """
+    api = context.apis[0]
+    spec_path = repo_path / api.spec_file
+    spec = load_yaml_safe(spec_path)
 
-    for api in context.apis:
-        spec_path = repo_path / api.spec_file
-        spec = load_yaml_safe(spec_path)
+    if spec is None:
+        # Missing file — filename check reports this.
+        return []
 
-        if spec is None:
-            # Missing file — filename check reports this.
-            continue
+    info = spec.get("info", {})
+    raw_value = info.get("x-camara-commonalities")
+
+    # Check 1: presence
+    if raw_value is None:
+        return [
+            make_finding(
+                engine_rule=_ENGINE_RULE,
+                level="error",
+                message=(
+                    f"info.x-camara-commonalities is missing in "
+                    f"{api.spec_file}"
+                ),
+                path=api.spec_file,
+                line=1,
+                api_name=api.api_name,
+            )
+        ]
 
-        license_val, commonalities_val = _extract_metadata(spec)
+    value = str(raw_value).strip()
 
-        # Check presence.
-        if license_val is None:
-            findings.append(
-                make_finding(
-                    engine_rule="check-license-commonalities-consistency",
-                    level="error",
-                    message=f"info.license is missing in {api.spec_file}",
-                    path=api.spec_file,
-                    line=1,
-                    api_name=api.api_name,
-                )
+    # Check 2: format validation
+    if not _is_valid_format(value, context.branch_type):
+        if context.branch_type in ("release", "maintenance"):
+            detail = (
+                f"must be a full version (X.Y.Z or X.Y.Z-pre) on "
+                f"{context.branch_type} branch"
             )
-        if commonalities_val is None:
-            findings.append(
+        else:
+            detail = (
+                "must be 'wip', 'tbd', or a valid version "
+                "(X.Y, X.Y.Z, X.Y.Z-pre)"
+            )
+        return [
+            make_finding(
+                engine_rule=_ENGINE_RULE,
+                level="error",
+                message=(
+                    f"info.x-camara-commonalities '{value}' has invalid "
+                    f"format — {detail}"
+                ),
+                path=api.spec_file,
+                line=1,
+                api_name=api.api_name,
+            )
+        ]
+
+    # Check 3: version mismatch against resolved commonalities_version
+    if (
+        context.commonalities_version
+        and _is_concrete_version(value)
+    ):
+        # Normalize short form for comparison: "0.7" matches "0.7.0"
+        expected = context.commonalities_version
+        actual = value
+        if _SHORT_VERSION_RE.match(actual):
+            actual = f"{actual}.0"
+
+        # Strip pre-release for prefix comparison if short form was used
+        expected_base = expected.split("-")[0]
+        actual_base = actual.split("-")[0]
+
+        if actual_base != expected_base and actual != expected:
+            return [
                 make_finding(
-                    engine_rule="check-license-commonalities-consistency",
-                    level="error",
+                    engine_rule=_ENGINE_RULE,
+                    level="warn",
                     message=(
-                        f"info.x-camara-commonalities is missing in "
-                        f"{api.spec_file}"
+                        f"info.x-camara-commonalities '{value}' does not "
+                        f"match the declared Commonalities version "
+                        f"'{context.commonalities_version}' from "
+                        f"release-plan.yaml"
                     ),
                     path=api.spec_file,
                     line=1,
                     api_name=api.api_name,
                 )
-            )
-
-        # Track first values for consistency check.
-        if first_api is None:
-            first_api = api.api_name
-            first_license = license_val
-            first_commonalities = commonalities_val
-            continue
-
-        # Consistency: compare against first API's values.
-        if license_val is not None and first_license is not None:
-            if license_val != first_license:
-                findings.append(
-                    make_finding(
-                        engine_rule="check-license-commonalities-consistency",
-                        level="error",
-                        message=(
-                            f"info.license in {api.spec_file} differs from "
-                            f"{first_api}"
-                        ),
-                        path=api.spec_file,
-                        line=1,
-                        api_name=api.api_name,
-                    )
-                )
-
-        if commonalities_val is not None and first_commonalities is not None:
-            if commonalities_val != first_commonalities:
-                findings.append(
-                    make_finding(
-                        engine_rule="check-license-commonalities-consistency",
-                        level="error",
-                        message=(
-                            f"info.x-camara-commonalities in {api.spec_file} "
-                            f"differs from {first_api}"
-                        ),
-                        path=api.spec_file,
-                        line=1,
-                        api_name=api.api_name,
-                    )
-                )
+            ]
 
-    return findings
+    return []
diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
index 87709d9e..7e8fc8ec 100644
--- a/validation/rules/python-rules.yaml
+++ b/validation/rules/python-rules.yaml
@@ -94,12 +94,14 @@
   conditional_level:
     default: warn
 
-# P-011: check-license-commonalities-consistency (muted — likely to be retired)
+# P-011: check-commonalities-version (DG-028 rewrite)
+# Validates info.x-camara-commonalities presence and version format.
+# Error for missing or invalid format; warn for version mismatch.
 - id: P-011
   engine: python
-  engine_rule: check-license-commonalities-consistency
+  engine_rule: check-commonalities-version
   conditional_level:
-    default: muted
+    default: error
 
 # P-012: check-release-review-file-restriction
 - id: P-012
diff --git a/validation/tests/test_python_checks_metadata.py b/validation/tests/test_python_checks_metadata.py
index 927dcf7a..80123834 100644
--- a/validation/tests/test_python_checks_metadata.py
+++ b/validation/tests/test_python_checks_metadata.py
@@ -1,15 +1,15 @@
-"""Unit tests for validation.engines.python_checks.metadata_checks."""
+"""Unit tests for validation.engines.python_checks.metadata_checks (DG-028)."""
 
 from __future__ import annotations
 
 from pathlib import Path
+from typing import Optional
 
-import pytest
 import yaml
 
 from validation.context import ApiContext, ValidationContext
 from validation.engines.python_checks.metadata_checks import (
-    check_license_commonalities_consistency,
+    check_commonalities_version,
 )
 
 
@@ -18,34 +18,34 @@
 # ---------------------------------------------------------------------------
 
 
-def _make_api(name: str) -> ApiContext:
-    return ApiContext(
-        api_name=name,
+def _make_context(
+    api_name: str = "quality-on-demand",
+    branch_type: str = "main",
+    commonalities_version: Optional[str] = None,
+) -> ValidationContext:
+    api = ApiContext(
+        api_name=api_name,
         target_api_version="1.0.0",
         target_api_status="public",
         target_api_maturity="stable",
         api_pattern="request-response",
-        spec_file=f"code/API_definitions/{name}.yaml",
+        spec_file=f"code/API_definitions/{api_name}.yaml",
     )
-
-
-def _make_context(api_names: list[str]) -> ValidationContext:
-    apis = tuple(_make_api(n) for n in api_names)
     return ValidationContext(
         repository="TestRepo",
-        branch_type="main",
+        branch_type=branch_type,
         trigger_type="dispatch",
         profile="advisory",
         stage="enabled",
         target_release_type=None,
         commonalities_release=None,
-        commonalities_version=None,
+        commonalities_version=commonalities_version,
         icm_release=None,
         base_ref=None,
         is_release_review_pr=False,
         release_plan_changed=None,
         pr_number=None,
-        apis=apis,
+        apis=(api,),
         workflow_run_url="",
         tooling_ref="",
     )
@@ -53,27 +53,26 @@ def _make_context(api_names: list[str]) -> ValidationContext:
 
 def _write_spec(
     tmp_path: Path,
-    api_name: str,
-    license_val: dict | None = None,
-    commonalities_val: str | None = None,
+    api_name: str = "quality-on-demand",
+    commonalities_value: object = "0.7.0",
+    include_field: bool = True,
 ) -> None:
     spec: dict = {
         "openapi": "3.0.3",
-        "info": {"title": api_name, "version": "1.0.0"},
+        "info": {
+            "title": "Test API",
+            "version": "1.0.0",
+        },
+        "paths": {},
     }
-    if license_val is not None:
-        spec["info"]["license"] = license_val
-    if commonalities_val is not None:
-        spec["info"]["x-camara-commonalities"] = commonalities_val
-    api_dir = tmp_path / "code" / "API_definitions"
-    api_dir.mkdir(parents=True, exist_ok=True)
-    (api_dir / f"{api_name}.yaml").write_text(
-        yaml.dump(spec, default_flow_style=False)
-    )
+    if include_field:
+        spec["info"]["x-camara-commonalities"] = commonalities_value
 
-
-LICENSE_A = {"name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0"}
-LICENSE_B = {"name": "MIT", "url": "https://opensource.org/licenses/MIT"}
+    spec_dir = tmp_path / "code" / "API_definitions"
+    spec_dir.mkdir(parents=True, exist_ok=True)
+    (spec_dir / f"{api_name}.yaml").write_text(
+        yaml.dump(spec, default_flow_style=False), encoding="utf-8"
+    )
 
 
 # ---------------------------------------------------------------------------
@@ -81,61 +80,181 @@ def _write_spec(
 # ---------------------------------------------------------------------------
 
 
-class TestCheckLicenseCommonalitiesConsistency:
-    def test_no_apis(self, tmp_path: Path):
-        ctx = _make_context([])
-        assert check_license_commonalities_consistency(tmp_path, ctx) == []
+class TestCheckCommonalitiesVersion:
+
+    # --- Presence ---
+
+    def test_missing_field_error(self, tmp_path: Path):
+        _write_spec(tmp_path, include_field=False)
+        ctx = _make_context()
+        findings = check_commonalities_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert "missing" in findings[0]["message"]
+
+    def test_missing_field_error_on_release(self, tmp_path: Path):
+        _write_spec(tmp_path, include_field=False)
+        ctx = _make_context(branch_type="release")
+        findings = check_commonalities_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+
+    # --- Main branch: valid formats ---
+
+    def test_wip_on_main_ok(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="wip")
+        ctx = _make_context(branch_type="main")
+        assert check_commonalities_version(tmp_path, ctx) == []
+
+    def test_tbd_on_main_ok(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="tbd")
+        ctx = _make_context(branch_type="main")
+        assert check_commonalities_version(tmp_path, ctx) == []
+
+    def test_short_version_on_main_ok(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="0.7")
+        ctx = _make_context(branch_type="main")
+        assert check_commonalities_version(tmp_path, ctx) == []
+
+    def test_full_version_on_main_ok(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="0.7.0")
+        ctx = _make_context(branch_type="main")
+        assert check_commonalities_version(tmp_path, ctx) == []
+
+    def test_prerelease_on_main_ok(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="0.7.0-rc.1")
+        ctx = _make_context(branch_type="main")
+        assert check_commonalities_version(tmp_path, ctx) == []
+
+    # --- Main branch: invalid formats ---
+
+    def test_garbage_on_main_error(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="foo")
+        ctx = _make_context(branch_type="main")
+        findings = check_commonalities_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert "invalid format" in findings[0]["message"]
+
+    def test_empty_string_on_main_error(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="")
+        ctx = _make_context(branch_type="main")
+        findings = check_commonalities_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+
+    # --- Feature branch: same as main ---
+
+    def test_wip_on_feature_ok(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="wip")
+        ctx = _make_context(branch_type="feature")
+        assert check_commonalities_version(tmp_path, ctx) == []
 
-    def test_single_api_all_present(self, tmp_path: Path):
-        _write_spec(tmp_path, "qod", license_val=LICENSE_A, commonalities_val="r4.1")
-        ctx = _make_context(["qod"])
-        assert check_license_commonalities_consistency(tmp_path, ctx) == []
+    def test_full_version_on_feature_ok(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="0.7.0")
+        ctx = _make_context(branch_type="feature")
+        assert check_commonalities_version(tmp_path, ctx) == []
 
-    def test_single_api_missing_license(self, tmp_path: Path):
-        _write_spec(tmp_path, "qod", commonalities_val="r4.1")
-        ctx = _make_context(["qod"])
-        findings = check_license_commonalities_consistency(tmp_path, ctx)
+    def test_garbage_on_feature_error(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="xyz")
+        ctx = _make_context(branch_type="feature")
+        findings = check_commonalities_version(tmp_path, ctx)
         assert len(findings) == 1
-        assert "license" in findings[0]["message"]
+        assert findings[0]["level"] == "error"
+
+    # --- Release branch: valid formats ---
+
+    def test_full_version_on_release_ok(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="0.7.0")
+        ctx = _make_context(branch_type="release")
+        assert check_commonalities_version(tmp_path, ctx) == []
+
+    def test_prerelease_on_release_ok(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="0.7.0-rc.1")
+        ctx = _make_context(branch_type="release")
+        assert check_commonalities_version(tmp_path, ctx) == []
+
+    # --- Release branch: invalid formats ---
 
-    def test_single_api_missing_commonalities(self, tmp_path: Path):
-        _write_spec(tmp_path, "qod", license_val=LICENSE_A)
-        ctx = _make_context(["qod"])
-        findings = check_license_commonalities_consistency(tmp_path, ctx)
+    def test_wip_on_release_error(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="wip")
+        ctx = _make_context(branch_type="release")
+        findings = check_commonalities_version(tmp_path, ctx)
         assert len(findings) == 1
-        assert "x-camara-commonalities" in findings[0]["message"]
-
-    def test_single_api_both_missing(self, tmp_path: Path):
-        _write_spec(tmp_path, "qod")
-        ctx = _make_context(["qod"])
-        findings = check_license_commonalities_consistency(tmp_path, ctx)
-        assert len(findings) == 2
-
-    def test_two_apis_consistent(self, tmp_path: Path):
-        _write_spec(tmp_path, "api-a", license_val=LICENSE_A, commonalities_val="r4.1")
-        _write_spec(tmp_path, "api-b", license_val=LICENSE_A, commonalities_val="r4.1")
-        ctx = _make_context(["api-a", "api-b"])
-        assert check_license_commonalities_consistency(tmp_path, ctx) == []
-
-    def test_two_apis_license_mismatch(self, tmp_path: Path):
-        _write_spec(tmp_path, "api-a", license_val=LICENSE_A, commonalities_val="r4.1")
-        _write_spec(tmp_path, "api-b", license_val=LICENSE_B, commonalities_val="r4.1")
-        ctx = _make_context(["api-a", "api-b"])
-        findings = check_license_commonalities_consistency(tmp_path, ctx)
+        assert findings[0]["level"] == "error"
+        assert "full version" in findings[0]["message"]
+
+    def test_tbd_on_release_error(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="tbd")
+        ctx = _make_context(branch_type="release")
+        findings = check_commonalities_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+
+    def test_short_version_on_release_error(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="0.7")
+        ctx = _make_context(branch_type="release")
+        findings = check_commonalities_version(tmp_path, ctx)
         assert len(findings) == 1
-        assert "license" in findings[0]["message"]
-        assert "differs" in findings[0]["message"]
-
-    def test_two_apis_commonalities_mismatch(self, tmp_path: Path):
-        _write_spec(tmp_path, "api-a", license_val=LICENSE_A, commonalities_val="r4.1")
-        _write_spec(tmp_path, "api-b", license_val=LICENSE_A, commonalities_val="r3.4")
-        ctx = _make_context(["api-a", "api-b"])
-        findings = check_license_commonalities_consistency(tmp_path, ctx)
+        assert findings[0]["level"] == "error"
+
+    # --- Maintenance branch: same as release ---
+
+    def test_wip_on_maintenance_error(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="wip")
+        ctx = _make_context(branch_type="maintenance")
+        findings = check_commonalities_version(tmp_path, ctx)
         assert len(findings) == 1
-        assert "x-camara-commonalities" in findings[0]["message"]
-        assert "differs" in findings[0]["message"]
+        assert findings[0]["level"] == "error"
+
+    # --- Version mismatch ---
+
+    def test_version_match_ok(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="0.7.0")
+        ctx = _make_context(commonalities_version="0.7.0")
+        assert check_commonalities_version(tmp_path, ctx) == []
+
+    def test_version_mismatch_warn(self, tmp_path: Path):
+        _write_spec(tmp_path, commonalities_value="0.6.0")
+        ctx = _make_context(commonalities_version="0.7.0")
+        findings = check_commonalities_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "warn"
+        assert "does not match" in findings[0]["message"]
+
+    def test_short_version_matches_full(self, tmp_path: Path):
+        """Short form 0.7 should match 0.7.0."""
+        _write_spec(tmp_path, commonalities_value="0.7")
+        ctx = _make_context(commonalities_version="0.7.0")
+        assert check_commonalities_version(tmp_path, ctx) == []
+
+    def test_prerelease_matches_exact(self, tmp_path: Path):
+        """0.7.0-rc.1 in spec matches 0.7.0-rc.1 from context."""
+        _write_spec(tmp_path, commonalities_value="0.7.0-rc.1")
+        ctx = _make_context(commonalities_version="0.7.0-rc.1")
+        assert check_commonalities_version(tmp_path, ctx) == []
+
+    def test_no_commonalities_version_skips_mismatch(self, tmp_path: Path):
+        """When context has no commonalities_version, skip mismatch check."""
+        _write_spec(tmp_path, commonalities_value="0.7.0")
+        ctx = _make_context(commonalities_version=None)
+        assert check_commonalities_version(tmp_path, ctx) == []
+
+    def test_wip_skips_mismatch_check(self, tmp_path: Path):
+        """Placeholder values don't trigger mismatch check."""
+        _write_spec(tmp_path, commonalities_value="wip")
+        ctx = _make_context(commonalities_version="0.7.0")
+        assert check_commonalities_version(tmp_path, ctx) == []
+
+    # --- Edge cases ---
+
+    def test_missing_spec_file(self, tmp_path: Path):
+        ctx = _make_context()
+        assert check_commonalities_version(tmp_path, ctx) == []
 
-    def test_missing_spec_file_skipped(self, tmp_path: Path):
-        """Missing spec file is silently skipped (filename check reports)."""
-        ctx = _make_context(["qod"])
-        assert check_license_commonalities_consistency(tmp_path, ctx) == []
+    def test_numeric_value(self, tmp_path: Path):
+        """YAML may parse bare 0.7 as float — should be handled via str()."""
+        _write_spec(tmp_path, commonalities_value=0.7)
+        ctx = _make_context(branch_type="main")
+        # 0.7 as float becomes "0.7" as string — valid short form
+        assert check_commonalities_version(tmp_path, ctx) == []

From 8d8bc51730cd25f38002e2579cb0f4cb7e976f38 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 3 Apr 2026 16:13:11 +0200
Subject: [PATCH 054/157] feat(validation): add subscription checks P-014,
 P-015, P-016

P-014 (DG-088): check-subscription-filename
  Explicit subscription APIs must end with '-subscriptions'.

P-015 (DG-086): check-event-type-format
  Event types must follow org.camaraproject....
  Validates format, api-name match, and version segment.

P-016 (DG-092): check-sinkcredential-not-in-response
  sinkCredential must not appear in subscription 2xx response schemas.
  Handles direct properties, allOf compositions, and external $ref.

21 unit tests covering all three checks.
---
 .../python_checks/subscription_checks.py      | 222 ++++++++++
 .../tests/test_python_checks_subscription.py  | 394 ++++++++++++++++++
 2 files changed, 616 insertions(+)
 create mode 100644 validation/engines/python_checks/subscription_checks.py
 create mode 100644 validation/tests/test_python_checks_subscription.py

diff --git a/validation/engines/python_checks/subscription_checks.py b/validation/engines/python_checks/subscription_checks.py
new file mode 100644
index 00000000..fbc30788
--- /dev/null
+++ b/validation/engines/python_checks/subscription_checks.py
@@ -0,0 +1,222 @@
+"""Subscription API checks.
+
+Validates naming conventions, event type formats, and response schema
+constraints specific to CAMARA subscription APIs.
+
+Design doc references:
+  - Event Subscription Guide section 2.2: explicit subscription naming
+  - Event Subscription Guide section 2.3: event type format
+  - Event Subscription Guide section 2.2.3: sinkCredential in responses
+"""
+
+from __future__ import annotations
+
+import re
+from pathlib import Path
+from typing import List
+
+from validation.context import ValidationContext
+
+from ._spec_helpers import (
+    collect_schema_properties,
+    extract_event_types_from_spec,
+    iter_response_schemas,
+    resolve_local_ref,
+)
+from ._types import load_yaml_safe, make_finding
+
+# ---------------------------------------------------------------------------
+# Constants
+# ---------------------------------------------------------------------------
+
+# Event type format: org.camaraproject...
+_EVENT_TYPE_RE = re.compile(
+    r"^org\.camaraproject\."
+    r"(?P[a-z0-9][a-z0-9-]*)"
+    r"\.(?Pv\d+)"
+    r"\.(?P[a-z][a-z0-9-]*)$"
+)
+
+
+# ---------------------------------------------------------------------------
+# P-014 (DG-088): check-subscription-filename
+# ---------------------------------------------------------------------------
+
+
+def check_subscription_filename(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Validate subscription API filename ends with '-subscriptions'.
+
+    Event Subscription Guide section 2.2: "it is mandatory to append
+    the keyword 'subscriptions' at the end of the API name."
+
+    Only applies to explicit-subscription APIs.
+    """
+    api = context.apis[0]
+
+    if api.api_pattern != "explicit-subscription":
+        return []
+
+    if api.api_name.endswith("-subscriptions"):
+        return []
+
+    return [
+        make_finding(
+            engine_rule="check-subscription-filename",
+            level="warn",
+            message=(
+                f"Explicit subscription API name '{api.api_name}' should "
+                f"end with '-subscriptions' (e.g. "
+                f"'{api.api_name}-subscriptions')"
+            ),
+            path=api.spec_file,
+            line=1,
+            api_name=api.api_name,
+        )
+    ]
+
+
+# ---------------------------------------------------------------------------
+# P-015 (DG-086): check-event-type-format
+# ---------------------------------------------------------------------------
+
+
+def check_event_type_format(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Validate event type values follow the CAMARA format.
+
+    Event Subscription Guide section 2.3: event type MUST follow
+    ``org.camaraproject...``.
+    Event version format is ``vN`` (v0, v1, etc.).
+
+    Applies to explicit-subscription and implicit-subscription APIs.
+    """
+    api = context.apis[0]
+
+    if api.api_pattern not in ("explicit-subscription", "implicit-subscription"):
+        return []
+
+    spec = load_yaml_safe(repo_path / api.spec_file)
+    if spec is None:
+        return []
+
+    event_types = extract_event_types_from_spec(spec)
+    if not event_types:
+        return [
+            make_finding(
+                engine_rule="check-event-type-format",
+                level="hint",
+                message=(
+                    f"No event type enum values found in {api.spec_file} "
+                    f"— subscription APIs should define EventType schemas"
+                ),
+                path=api.spec_file,
+                line=1,
+                api_name=api.api_name,
+            )
+        ]
+
+    findings: List[dict] = []
+    for event_type in event_types:
+        m = _EVENT_TYPE_RE.match(event_type)
+        if m is None:
+            findings.append(
+                make_finding(
+                    engine_rule="check-event-type-format",
+                    level="error",
+                    message=(
+                        f"Event type '{event_type}' does not match expected "
+                        f"format 'org.camaraproject...'"
+                    ),
+                    path=api.spec_file,
+                    line=1,
+                    api_name=api.api_name,
+                )
+            )
+            continue
+
+        # Verify api-name segment matches the API name from context
+        type_api_name = m.group("api_name")
+        if type_api_name != api.api_name:
+            findings.append(
+                make_finding(
+                    engine_rule="check-event-type-format",
+                    level="error",
+                    message=(
+                        f"Event type '{event_type}' has api-name segment "
+                        f"'{type_api_name}' which does not match API name "
+                        f"'{api.api_name}'"
+                    ),
+                    path=api.spec_file,
+                    line=1,
+                    api_name=api.api_name,
+                )
+            )
+
+    return findings
+
+
+# ---------------------------------------------------------------------------
+# P-016 (DG-092): check-sinkcredential-not-in-response
+# ---------------------------------------------------------------------------
+
+
+def check_sinkcredential_not_in_response(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Validate sinkCredential does not appear in subscription responses.
+
+    Event Subscription Guide section 2.2.3: "The sinkCredential MUST
+    NOT be present in POST and GET responses."
+
+    Inspects 2xx response schemas for paths containing ``/subscriptions``
+    and checks for ``sinkCredential`` in their properties (including
+    ``allOf`` compositions).
+    """
+    api = context.apis[0]
+
+    if api.api_pattern != "explicit-subscription":
+        return []
+
+    spec = load_yaml_safe(repo_path / api.spec_file)
+    if spec is None:
+        return []
+
+    findings: List[dict] = []
+    seen_schemas: set[str] = set()
+
+    for path, method, status_code, schema in iter_response_schemas(
+        spec, "/subscriptions"
+    ):
+        # Only check 2xx success responses
+        if not status_code.startswith("2"):
+            continue
+
+        # Collect properties from the resolved schema
+        all_props = collect_schema_properties(spec, schema)
+        if "sinkCredential" in all_props:
+            # Deduplicate: same underlying schema may appear in multiple
+            # responses (e.g. POST 201 and GET 200 both use Subscription)
+            schema_id = id(schema)
+            if schema_id in seen_schemas:
+                continue
+            seen_schemas.add(schema_id)
+
+            findings.append(
+                make_finding(
+                    engine_rule="check-sinkcredential-not-in-response",
+                    level="error",
+                    message=(
+                        f"sinkCredential found in {method.upper()} "
+                        f"{path} {status_code} response schema — "
+                        f"sinkCredential MUST NOT appear in responses"
+                    ),
+                    path=api.spec_file,
+                    line=1,
+                    api_name=api.api_name,
+                )
+            )
+
+    return findings
diff --git a/validation/tests/test_python_checks_subscription.py b/validation/tests/test_python_checks_subscription.py
new file mode 100644
index 00000000..5e2eb82b
--- /dev/null
+++ b/validation/tests/test_python_checks_subscription.py
@@ -0,0 +1,394 @@
+"""Unit tests for subscription checks (P-014, P-015, P-016)."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+import yaml
+
+from validation.context import ApiContext, ValidationContext
+from validation.engines.python_checks.subscription_checks import (
+    check_event_type_format,
+    check_sinkcredential_not_in_response,
+    check_subscription_filename,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(
+    api_name: str = "device-status-subscriptions",
+    api_pattern: str = "explicit-subscription",
+) -> ValidationContext:
+    api = ApiContext(
+        api_name=api_name,
+        target_api_version="0.1.0",
+        target_api_status="alpha",
+        target_api_maturity="initial",
+        api_pattern=api_pattern,
+        spec_file=f"code/API_definitions/{api_name}.yaml",
+    )
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type="main",
+        trigger_type="dispatch",
+        profile="advisory",
+        stage="enabled",
+        target_release_type=None,
+        commonalities_release=None,
+        commonalities_version=None,
+        icm_release=None,
+        base_ref=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=(api,),
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+def _write_spec(
+    tmp_path: Path,
+    api_name: str = "device-status-subscriptions",
+    spec_content: dict | None = None,
+) -> None:
+    if spec_content is None:
+        spec_content = {"openapi": "3.0.3", "info": {"title": "Test", "version": "wip"}, "paths": {}}
+
+    spec_dir = tmp_path / "code" / "API_definitions"
+    spec_dir.mkdir(parents=True, exist_ok=True)
+    (spec_dir / f"{api_name}.yaml").write_text(
+        yaml.dump(spec_content, default_flow_style=False), encoding="utf-8"
+    )
+
+
+def _subscription_spec_with_events(
+    api_name: str,
+    event_types: list[str],
+) -> dict:
+    """Build a minimal subscription spec with event type enums."""
+    return {
+        "openapi": "3.0.3",
+        "info": {"title": "Test", "version": "wip"},
+        "paths": {"/subscriptions": {"post": {"responses": {"201": {}}}}},
+        "components": {
+            "schemas": {
+                "SubscriptionEventType": {
+                    "type": "string",
+                    "enum": event_types,
+                }
+            }
+        },
+    }
+
+
+def _subscription_spec_with_response(
+    response_properties: dict,
+) -> dict:
+    """Build a subscription spec with a response schema."""
+    return {
+        "openapi": "3.0.3",
+        "info": {"title": "Test", "version": "wip"},
+        "paths": {
+            "/subscriptions": {
+                "post": {
+                    "responses": {
+                        "201": {
+                            "content": {
+                                "application/json": {
+                                    "schema": {
+                                        "$ref": "#/components/schemas/Subscription"
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        },
+        "components": {
+            "schemas": {
+                "Subscription": {
+                    "type": "object",
+                    "properties": response_properties,
+                }
+            }
+        },
+    }
+
+
+# ---------------------------------------------------------------------------
+# P-014: check-subscription-filename
+# ---------------------------------------------------------------------------
+
+
+class TestCheckSubscriptionFilename:
+    def test_explicit_with_suffix_ok(self, tmp_path: Path):
+        ctx = _make_context(api_name="device-status-subscriptions")
+        assert check_subscription_filename(tmp_path, ctx) == []
+
+    def test_explicit_without_suffix_warn(self, tmp_path: Path):
+        ctx = _make_context(api_name="device-status")
+        findings = check_subscription_filename(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "warn"
+        assert "-subscriptions" in findings[0]["message"]
+
+    def test_implicit_no_suffix_ok(self, tmp_path: Path):
+        ctx = _make_context(api_name="device-status", api_pattern="implicit-subscription")
+        assert check_subscription_filename(tmp_path, ctx) == []
+
+    def test_request_response_skip(self, tmp_path: Path):
+        ctx = _make_context(api_name="device-status", api_pattern="request-response")
+        assert check_subscription_filename(tmp_path, ctx) == []
+
+
+# ---------------------------------------------------------------------------
+# P-015: check-event-type-format
+# ---------------------------------------------------------------------------
+
+
+class TestCheckEventTypeFormat:
+    def test_valid_event_types(self, tmp_path: Path):
+        api_name = "device-status-subscriptions"
+        spec = _subscription_spec_with_events(api_name, [
+            f"org.camaraproject.{api_name}.v0.status-changed",
+            f"org.camaraproject.{api_name}.v0.subscription-ended",
+        ])
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        assert check_event_type_format(tmp_path, ctx) == []
+
+    def test_wrong_api_name_error(self, tmp_path: Path):
+        api_name = "device-status-subscriptions"
+        spec = _subscription_spec_with_events(api_name, [
+            "org.camaraproject.wrong-api.v0.status-changed",
+        ])
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        findings = check_event_type_format(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert "does not match" in findings[0]["message"]
+
+    def test_missing_version_error(self, tmp_path: Path):
+        api_name = "device-status-subscriptions"
+        spec = _subscription_spec_with_events(api_name, [
+            f"org.camaraproject.{api_name}.status-changed",
+        ])
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        findings = check_event_type_format(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+
+    def test_invalid_version_format_error(self, tmp_path: Path):
+        api_name = "device-status-subscriptions"
+        spec = _subscription_spec_with_events(api_name, [
+            f"org.camaraproject.{api_name}.version1.status-changed",
+        ])
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        findings = check_event_type_format(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+
+    def test_non_subscription_skip(self, tmp_path: Path):
+        ctx = _make_context(api_pattern="request-response")
+        assert check_event_type_format(tmp_path, ctx) == []
+
+    def test_no_event_types_hint(self, tmp_path: Path):
+        api_name = "device-status-subscriptions"
+        _write_spec(tmp_path, api_name=api_name)
+        ctx = _make_context(api_name=api_name)
+        findings = check_event_type_format(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "hint"
+
+    def test_mixed_valid_invalid(self, tmp_path: Path):
+        api_name = "device-status-subscriptions"
+        spec = _subscription_spec_with_events(api_name, [
+            f"org.camaraproject.{api_name}.v0.status-changed",
+            "invalid-event-type",
+        ])
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        findings = check_event_type_format(tmp_path, ctx)
+        assert len(findings) == 1  # Only the invalid one
+        assert "invalid-event-type" in findings[0]["message"]
+
+    def test_implicit_subscription_checked(self, tmp_path: Path):
+        api_name = "device-status"
+        spec = _subscription_spec_with_events(api_name, [
+            f"org.camaraproject.{api_name}.v0.status-changed",
+        ])
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name, api_pattern="implicit-subscription")
+        assert check_event_type_format(tmp_path, ctx) == []
+
+    def test_missing_spec_file(self, tmp_path: Path):
+        ctx = _make_context()
+        assert check_event_type_format(tmp_path, ctx) == []
+
+
+# ---------------------------------------------------------------------------
+# P-016: check-sinkcredential-not-in-response
+# ---------------------------------------------------------------------------
+
+
+class TestCheckSinkCredentialNotInResponse:
+    def test_clean_response_ok(self, tmp_path: Path):
+        api_name = "device-status-subscriptions"
+        spec = _subscription_spec_with_response({
+            "id": {"type": "string"},
+            "sink": {"type": "string"},
+        })
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        assert check_sinkcredential_not_in_response(tmp_path, ctx) == []
+
+    def test_sinkcredential_in_response_error(self, tmp_path: Path):
+        api_name = "device-status-subscriptions"
+        spec = _subscription_spec_with_response({
+            "id": {"type": "string"},
+            "sink": {"type": "string"},
+            "sinkCredential": {"$ref": "#/components/schemas/SinkCredential"},
+        })
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        findings = check_sinkcredential_not_in_response(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert "sinkCredential" in findings[0]["message"]
+
+    def test_sinkcredential_in_request_only_ok(self, tmp_path: Path):
+        """sinkCredential in request schema but not in response — OK."""
+        api_name = "device-status-subscriptions"
+        spec = {
+            "openapi": "3.0.3",
+            "info": {"title": "Test", "version": "wip"},
+            "paths": {
+                "/subscriptions": {
+                    "post": {
+                        "requestBody": {
+                            "content": {
+                                "application/json": {
+                                    "schema": {"$ref": "#/components/schemas/SubscriptionRequest"}
+                                }
+                            }
+                        },
+                        "responses": {
+                            "201": {
+                                "content": {
+                                    "application/json": {
+                                        "schema": {"$ref": "#/components/schemas/Subscription"}
+                                    }
+                                }
+                            }
+                        },
+                    }
+                }
+            },
+            "components": {
+                "schemas": {
+                    "SubscriptionRequest": {
+                        "type": "object",
+                        "properties": {
+                            "sink": {"type": "string"},
+                            "sinkCredential": {"type": "object"},
+                        },
+                    },
+                    "Subscription": {
+                        "type": "object",
+                        "properties": {
+                            "id": {"type": "string"},
+                            "sink": {"type": "string"},
+                        },
+                    },
+                }
+            },
+        }
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        assert check_sinkcredential_not_in_response(tmp_path, ctx) == []
+
+    def test_non_subscription_skip(self, tmp_path: Path):
+        ctx = _make_context(api_pattern="request-response")
+        assert check_sinkcredential_not_in_response(tmp_path, ctx) == []
+
+    def test_sinkcredential_via_allof_error(self, tmp_path: Path):
+        """sinkCredential inherited via allOf is still caught."""
+        api_name = "device-status-subscriptions"
+        spec = {
+            "openapi": "3.0.3",
+            "info": {"title": "Test", "version": "wip"},
+            "paths": {
+                "/subscriptions": {
+                    "post": {
+                        "responses": {
+                            "201": {
+                                "content": {
+                                    "application/json": {
+                                        "schema": {"$ref": "#/components/schemas/Subscription"}
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            },
+            "components": {
+                "schemas": {
+                    "BaseSubscription": {
+                        "type": "object",
+                        "properties": {
+                            "sinkCredential": {"type": "object"},
+                        },
+                    },
+                    "Subscription": {
+                        "allOf": [
+                            {"$ref": "#/components/schemas/BaseSubscription"},
+                            {
+                                "type": "object",
+                                "properties": {"id": {"type": "string"}},
+                            },
+                        ]
+                    },
+                }
+            },
+        }
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        findings = check_sinkcredential_not_in_response(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+
+    def test_missing_spec_file(self, tmp_path: Path):
+        ctx = _make_context()
+        assert check_sinkcredential_not_in_response(tmp_path, ctx) == []
+
+    def test_no_subscription_paths_ok(self, tmp_path: Path):
+        """Spec has no /subscriptions path — no findings."""
+        api_name = "device-status-subscriptions"
+        spec = {"openapi": "3.0.3", "info": {"title": "Test", "version": "wip"}, "paths": {"/other": {}}}
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        assert check_sinkcredential_not_in_response(tmp_path, ctx) == []
+
+    def test_external_ref_sinkcredential_detected(self, tmp_path: Path):
+        """sinkCredential with external $ref is still detected by name."""
+        api_name = "device-status-subscriptions"
+        spec = _subscription_spec_with_response({
+            "id": {"type": "string"},
+            "sinkCredential": {
+                "$ref": "../common/CAMARA_event_common.yaml#/components/schemas/SinkCredential"
+            },
+        })
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        findings = check_sinkcredential_not_in_response(tmp_path, ctx)
+        assert len(findings) == 1

From e341b2ce8f90d1a9633e18233e914d8cd8a35a79 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 3 Apr 2026 16:14:50 +0200
Subject: [PATCH 055/157] feat(validation): add error code checks P-017, P-018

P-017 (DG-018): check-conflict-deprecated
  Warn when CONFLICT error code is used (deprecated in r4.x).

P-018 (DG-011): check-contextcode-format
  Hint when contextCode enum values don't follow SCREAMING_SNAKE_CASE
  (API_NAME.SPECIFIC_CODE), or when contextCode has no enum constraint.

Both checks use shared helpers from _spec_helpers.py.
Applicability: commonalities_release >= r4.0 (via post-filter).
11 unit tests.
---
 .../python_checks/error_code_checks.py        | 141 +++++++++++
 .../tests/test_python_checks_error_codes.py   | 225 ++++++++++++++++++
 2 files changed, 366 insertions(+)
 create mode 100644 validation/engines/python_checks/error_code_checks.py
 create mode 100644 validation/tests/test_python_checks_error_codes.py

diff --git a/validation/engines/python_checks/error_code_checks.py b/validation/engines/python_checks/error_code_checks.py
new file mode 100644
index 00000000..23bfdc3d
--- /dev/null
+++ b/validation/engines/python_checks/error_code_checks.py
@@ -0,0 +1,141 @@
+"""Error code and contextCode checks.
+
+Validates error code deprecation (CONFLICT) and contextCode naming
+conventions introduced in Commonalities r4.x.
+
+Design doc references:
+  - Design Guide section 3.2: error response table (CONFLICT deprecated)
+  - Design Guide section 3.1.3: contextCode SCREAMING_SNAKE_CASE
+"""
+
+from __future__ import annotations
+
+import re
+from pathlib import Path
+from typing import List
+
+from validation.context import ValidationContext
+
+from ._spec_helpers import find_enum_value_in_schemas, find_properties_by_name
+from ._types import load_yaml_safe, make_finding
+
+# contextCode values: API_NAME.SPECIFIC_CODE or PLAIN_CODE
+# Both parts in SCREAMING_SNAKE_CASE.
+_SCREAMING_SNAKE_RE = re.compile(
+    r"^[A-Z][A-Z0-9]*(_[A-Z0-9]+)*"
+    r"(\.[A-Z][A-Z0-9]*(_[A-Z0-9]+)*)?$"
+)
+
+
+# ---------------------------------------------------------------------------
+# P-017 (DG-018): check-conflict-deprecated
+# ---------------------------------------------------------------------------
+
+
+def check_conflict_deprecated(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Warn if the CONFLICT error code is used.
+
+    Design Guide section 3.2: "CONFLICT — Duplication of an existing
+    resource (This Error Code is DEPRECATED)".
+
+    Searches all component schema enums for the value ``"CONFLICT"``.
+    Applicability (post-filter): ``commonalities_release >= r4.0``.
+    """
+    api = context.apis[0]
+    spec = load_yaml_safe(repo_path / api.spec_file)
+    if spec is None:
+        return []
+
+    matches = find_enum_value_in_schemas(spec, "CONFLICT")
+    if not matches:
+        return []
+
+    findings: List[dict] = []
+    for schema_path, _ in matches:
+        findings.append(
+            make_finding(
+                engine_rule="check-conflict-deprecated",
+                level="warn",
+                message=(
+                    f"Error code 'CONFLICT' is deprecated in Commonalities "
+                    f"r4.x — use 'ALREADY_EXISTS' or a specific error code "
+                    f"(found in {schema_path})"
+                ),
+                path=api.spec_file,
+                line=1,
+                api_name=api.api_name,
+            )
+        )
+    return findings
+
+
+# ---------------------------------------------------------------------------
+# P-018 (DG-011): check-contextcode-format
+# ---------------------------------------------------------------------------
+
+
+def check_contextcode_format(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Validate contextCode enum values follow SCREAMING_SNAKE_CASE.
+
+    Design Guide section 3.1.3: "API-specific codes following CAMARA
+    conventions (API_NAME.SPECIFIC_CODE in SCREAMING_SNAKE_CASE)".
+
+    The contextCode field is optional. This check only fires when a
+    ``contextCode`` property is found in the spec.
+    Applicability (post-filter): ``commonalities_release >= r4.0``.
+    """
+    api = context.apis[0]
+    spec = load_yaml_safe(repo_path / api.spec_file)
+    if spec is None:
+        return []
+
+    results = find_properties_by_name(spec, "contextCode")
+    if not results:
+        return []
+
+    findings: List[dict] = []
+    for schema_name, prop_schema in results:
+        enum_values = prop_schema.get("enum")
+        if not isinstance(enum_values, list):
+            # contextCode without enum — recommend adding one
+            findings.append(
+                make_finding(
+                    engine_rule="check-contextcode-format",
+                    level="hint",
+                    message=(
+                        f"contextCode in {schema_name} has no enum "
+                        f"constraint — enum is recommended per Design "
+                        f"Guide section 3.1.3"
+                    ),
+                    path=api.spec_file,
+                    line=1,
+                    api_name=api.api_name,
+                )
+            )
+            continue
+
+        # Validate each enum value
+        for val in enum_values:
+            if not isinstance(val, str):
+                continue
+            if not _SCREAMING_SNAKE_RE.match(val):
+                findings.append(
+                    make_finding(
+                        engine_rule="check-contextcode-format",
+                        level="hint",
+                        message=(
+                            f"contextCode value '{val}' in {schema_name} "
+                            f"does not follow SCREAMING_SNAKE_CASE format "
+                            f"(expected API_NAME.SPECIFIC_CODE)"
+                        ),
+                        path=api.spec_file,
+                        line=1,
+                        api_name=api.api_name,
+                    )
+                )
+
+    return findings
diff --git a/validation/tests/test_python_checks_error_codes.py b/validation/tests/test_python_checks_error_codes.py
new file mode 100644
index 00000000..74811731
--- /dev/null
+++ b/validation/tests/test_python_checks_error_codes.py
@@ -0,0 +1,225 @@
+"""Unit tests for error code checks (P-017, P-018)."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+import yaml
+
+from validation.context import ApiContext, ValidationContext
+from validation.engines.python_checks.error_code_checks import (
+    check_conflict_deprecated,
+    check_contextcode_format,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_context(api_name: str = "quality-on-demand") -> ValidationContext:
+    api = ApiContext(
+        api_name=api_name,
+        target_api_version="1.0.0",
+        target_api_status="public",
+        target_api_maturity="stable",
+        api_pattern="request-response",
+        spec_file=f"code/API_definitions/{api_name}.yaml",
+    )
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type="main",
+        trigger_type="dispatch",
+        profile="advisory",
+        stage="enabled",
+        target_release_type=None,
+        commonalities_release="r4.1",
+        commonalities_version=None,
+        icm_release=None,
+        base_ref=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=(api,),
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+def _write_spec(
+    tmp_path: Path,
+    api_name: str = "quality-on-demand",
+    spec_content: dict | None = None,
+) -> None:
+    if spec_content is None:
+        spec_content = {"openapi": "3.0.3", "info": {"title": "Test", "version": "wip"}, "paths": {}}
+    spec_dir = tmp_path / "code" / "API_definitions"
+    spec_dir.mkdir(parents=True, exist_ok=True)
+    (spec_dir / f"{api_name}.yaml").write_text(
+        yaml.dump(spec_content, default_flow_style=False), encoding="utf-8"
+    )
+
+
+# ---------------------------------------------------------------------------
+# P-017: check-conflict-deprecated
+# ---------------------------------------------------------------------------
+
+
+class TestCheckConflictDeprecated:
+    def test_no_conflict_ok(self, tmp_path: Path):
+        spec = {
+            "openapi": "3.0.3", "info": {"title": "T", "version": "wip"}, "paths": {},
+            "components": {
+                "schemas": {
+                    "ErrorCode": {"type": "string", "enum": ["INVALID_ARGUMENT", "NOT_FOUND"]}
+                }
+            },
+        }
+        _write_spec(tmp_path, spec_content=spec)
+        assert check_conflict_deprecated(tmp_path, _make_context()) == []
+
+    def test_conflict_in_enum_warn(self, tmp_path: Path):
+        spec = {
+            "openapi": "3.0.3", "info": {"title": "T", "version": "wip"}, "paths": {},
+            "components": {
+                "schemas": {
+                    "ErrorInfo": {
+                        "type": "object",
+                        "properties": {
+                            "code": {"type": "string", "enum": ["INVALID_ARGUMENT", "CONFLICT"]},
+                        },
+                    }
+                }
+            },
+        }
+        _write_spec(tmp_path, spec_content=spec)
+        findings = check_conflict_deprecated(tmp_path, _make_context())
+        assert len(findings) == 1
+        assert findings[0]["level"] == "warn"
+        assert "deprecated" in findings[0]["message"].lower()
+
+    def test_conflict_in_top_level_enum(self, tmp_path: Path):
+        spec = {
+            "openapi": "3.0.3", "info": {"title": "T", "version": "wip"}, "paths": {},
+            "components": {
+                "schemas": {
+                    "Code409": {"type": "string", "enum": ["ABORTED", "ALREADY_EXISTS", "CONFLICT"]}
+                }
+            },
+        }
+        _write_spec(tmp_path, spec_content=spec)
+        findings = check_conflict_deprecated(tmp_path, _make_context())
+        assert len(findings) >= 1
+        assert all(f["level"] == "warn" for f in findings)
+
+    def test_missing_spec(self, tmp_path: Path):
+        assert check_conflict_deprecated(tmp_path, _make_context()) == []
+
+    def test_no_schemas(self, tmp_path: Path):
+        spec = {"openapi": "3.0.3", "info": {"title": "T", "version": "wip"}, "paths": {}}
+        _write_spec(tmp_path, spec_content=spec)
+        assert check_conflict_deprecated(tmp_path, _make_context()) == []
+
+
+# ---------------------------------------------------------------------------
+# P-018: check-contextcode-format
+# ---------------------------------------------------------------------------
+
+
+class TestCheckContextcodeFormat:
+    def test_no_contextcode_ok(self, tmp_path: Path):
+        spec = {
+            "openapi": "3.0.3", "info": {"title": "T", "version": "wip"}, "paths": {},
+            "components": {"schemas": {"Foo": {"properties": {"bar": {"type": "string"}}}}},
+        }
+        _write_spec(tmp_path, spec_content=spec)
+        assert check_contextcode_format(tmp_path, _make_context()) == []
+
+    def test_valid_screaming_snake_ok(self, tmp_path: Path):
+        spec = {
+            "openapi": "3.0.3", "info": {"title": "T", "version": "wip"}, "paths": {},
+            "components": {
+                "schemas": {
+                    "Outcome": {
+                        "properties": {
+                            "contextCode": {
+                                "type": "string",
+                                "enum": [
+                                    "COMMON.REGIONAL_PRIVACY_RESTRICTION",
+                                    "CARRIER_BILLING.PAYMENT_DENIED",
+                                    "NOT_AVAILABLE",
+                                ],
+                            }
+                        }
+                    }
+                }
+            },
+        }
+        _write_spec(tmp_path, spec_content=spec)
+        assert check_contextcode_format(tmp_path, _make_context()) == []
+
+    def test_invalid_camel_case_hint(self, tmp_path: Path):
+        spec = {
+            "openapi": "3.0.3", "info": {"title": "T", "version": "wip"}, "paths": {},
+            "components": {
+                "schemas": {
+                    "Outcome": {
+                        "properties": {
+                            "contextCode": {
+                                "type": "string",
+                                "enum": ["apiName.specificCode", "VALID_CODE"],
+                            }
+                        }
+                    }
+                }
+            },
+        }
+        _write_spec(tmp_path, spec_content=spec)
+        findings = check_contextcode_format(tmp_path, _make_context())
+        assert len(findings) == 1
+        assert findings[0]["level"] == "hint"
+        assert "apiName.specificCode" in findings[0]["message"]
+
+    def test_contextcode_without_enum_hint(self, tmp_path: Path):
+        spec = {
+            "openapi": "3.0.3", "info": {"title": "T", "version": "wip"}, "paths": {},
+            "components": {
+                "schemas": {
+                    "Outcome": {
+                        "properties": {
+                            "contextCode": {"type": "string"},
+                        }
+                    }
+                }
+            },
+        }
+        _write_spec(tmp_path, spec_content=spec)
+        findings = check_contextcode_format(tmp_path, _make_context())
+        assert len(findings) == 1
+        assert findings[0]["level"] == "hint"
+        assert "no enum" in findings[0]["message"]
+
+    def test_missing_spec(self, tmp_path: Path):
+        assert check_contextcode_format(tmp_path, _make_context()) == []
+
+    def test_mixed_valid_invalid(self, tmp_path: Path):
+        spec = {
+            "openapi": "3.0.3", "info": {"title": "T", "version": "wip"}, "paths": {},
+            "components": {
+                "schemas": {
+                    "Outcome": {
+                        "properties": {
+                            "contextCode": {
+                                "type": "string",
+                                "enum": ["VALID_CODE", "invalid-code", "ALSO.VALID"],
+                            }
+                        }
+                    }
+                }
+            },
+        }
+        _write_spec(tmp_path, spec_content=spec)
+        findings = check_contextcode_format(tmp_path, _make_context())
+        assert len(findings) == 1  # Only invalid-code
+        assert "invalid-code" in findings[0]["message"]

From 53830554b2150bc53718116322c4d7c6b121c854 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 3 Apr 2026 16:16:38 +0200
Subject: [PATCH 056/157] feat(validation): add P-019
 check-orphan-api-definitions (NEW-003)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Detects YAML files in code/API_definitions/ not listed in
release-plan.yaml. Flags each orphan as a warn-level finding
for possible naming mismatch or forgotten file.

Reuses filesystem scan pattern from existing _check_file_existence.
No applicability restriction — runs whenever release-plan.yaml exists.
6 unit tests.
---
 .../python_checks/release_plan_checks.py      | 55 ++++++++++++++++++
 .../tests/test_python_checks_release_plan.py  | 58 +++++++++++++++++++
 2 files changed, 113 insertions(+)

diff --git a/validation/engines/python_checks/release_plan_checks.py b/validation/engines/python_checks/release_plan_checks.py
index b9d40217..30dd1d84 100644
--- a/validation/engines/python_checks/release_plan_checks.py
+++ b/validation/engines/python_checks/release_plan_checks.py
@@ -274,3 +274,58 @@ def check_release_plan_semantics(
     findings.extend(_check_file_existence(release_plan, repo_path))
 
     return findings
+
+
+# ---------------------------------------------------------------------------
+# P-019 (NEW-003): Orphan API definitions
+# ---------------------------------------------------------------------------
+
+
+def check_orphan_api_definitions(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Detect YAML files in API_definitions not listed in release-plan.yaml.
+
+    Repo-level check.  Compares YAML file stems in
+    ``code/API_definitions/`` against API names declared in
+    ``release-plan.yaml``.  Files not in the release plan are flagged
+    as potential orphans or naming mismatches.
+    """
+    plan_path = repo_path / _RELEASE_PLAN_PATH
+    release_plan = load_yaml_safe(plan_path)
+    if release_plan is None:
+        return []
+
+    api_dir = repo_path / "code" / "API_definitions"
+    if not api_dir.is_dir():
+        return []
+
+    # Declared API names from release plan
+    apis = release_plan.get("apis", [])
+    declared_names = {
+        api.get("api_name")
+        for api in apis
+        if isinstance(api, dict) and api.get("api_name")
+    }
+
+    # YAML files on disk
+    existing_stems = {
+        f.stem for f in api_dir.iterdir()
+        if f.suffix == ".yaml" and f.is_file()
+    }
+
+    orphans = sorted(existing_stems - declared_names)
+
+    return [
+        make_finding(
+            engine_rule="check-orphan-api-definitions",
+            level="warn",
+            message=(
+                f"API definition file '{name}.yaml' is not listed in "
+                f"release-plan.yaml — possible orphan or naming mismatch"
+            ),
+            path=f"code/API_definitions/{name}.yaml",
+            line=1,
+        )
+        for name in orphans
+    ]
diff --git a/validation/tests/test_python_checks_release_plan.py b/validation/tests/test_python_checks_release_plan.py
index afb3af30..0342f0c7 100644
--- a/validation/tests/test_python_checks_release_plan.py
+++ b/validation/tests/test_python_checks_release_plan.py
@@ -13,6 +13,7 @@
     _check_file_existence,
     _check_release_type_consistency,
     _check_track_consistency,
+    check_orphan_api_definitions,
     check_release_plan_semantics,
 )
 
@@ -254,3 +255,60 @@ def test_collects_all_findings(self, tmp_path: Path):
         findings = check_release_plan_semantics(tmp_path, ctx)
         # meta_release missing (track) + draft in public-release (type) = 2
         assert len(findings) >= 2
+
+
+# ---------------------------------------------------------------------------
+# P-019: check-orphan-api-definitions
+# ---------------------------------------------------------------------------
+
+
+class TestCheckOrphanApiDefinitions:
+    def test_no_orphans(self, tmp_path: Path):
+        plan = _make_plan(apis=[{"api_name": "qod", "target_api_status": "alpha"}])
+        _write_release_plan(tmp_path, plan)
+        api_dir = tmp_path / "code" / "API_definitions"
+        api_dir.mkdir(parents=True)
+        (api_dir / "qod.yaml").touch()
+        findings = check_orphan_api_definitions(tmp_path, _make_context())
+        assert findings == []
+
+    def test_orphan_file_warn(self, tmp_path: Path):
+        plan = _make_plan(apis=[{"api_name": "qod", "target_api_status": "alpha"}])
+        _write_release_plan(tmp_path, plan)
+        api_dir = tmp_path / "code" / "API_definitions"
+        api_dir.mkdir(parents=True)
+        (api_dir / "qod.yaml").touch()
+        (api_dir / "old-api.yaml").touch()
+        findings = check_orphan_api_definitions(tmp_path, _make_context())
+        assert len(findings) == 1
+        assert findings[0]["level"] == "warn"
+        assert "old-api" in findings[0]["message"]
+
+    def test_multiple_orphans(self, tmp_path: Path):
+        plan = _make_plan(apis=[{"api_name": "qod", "target_api_status": "alpha"}])
+        _write_release_plan(tmp_path, plan)
+        api_dir = tmp_path / "code" / "API_definitions"
+        api_dir.mkdir(parents=True)
+        (api_dir / "qod.yaml").touch()
+        (api_dir / "orphan-a.yaml").touch()
+        (api_dir / "orphan-b.yaml").touch()
+        findings = check_orphan_api_definitions(tmp_path, _make_context())
+        assert len(findings) == 2
+
+    def test_no_release_plan(self, tmp_path: Path):
+        assert check_orphan_api_definitions(tmp_path, _make_context()) == []
+
+    def test_no_api_definitions_dir(self, tmp_path: Path):
+        plan = _make_plan(apis=[{"api_name": "qod", "target_api_status": "alpha"}])
+        _write_release_plan(tmp_path, plan)
+        assert check_orphan_api_definitions(tmp_path, _make_context()) == []
+
+    def test_non_yaml_files_ignored(self, tmp_path: Path):
+        plan = _make_plan(apis=[{"api_name": "qod", "target_api_status": "alpha"}])
+        _write_release_plan(tmp_path, plan)
+        api_dir = tmp_path / "code" / "API_definitions"
+        api_dir.mkdir(parents=True)
+        (api_dir / "qod.yaml").touch()
+        (api_dir / "README.md").touch()
+        findings = check_orphan_api_definitions(tmp_path, _make_context())
+        assert findings == []

From d184e33a7463b7192d800f1789840529dba8ca71 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 3 Apr 2026 16:22:53 +0200
Subject: [PATCH 057/157] feat(validation): register P-014..P-019 and update
 rule inventory

Register all 6 new Python checks in the check registry.
Add rule metadata entries with applicability conditions:
- P-014, P-015, P-016: api_pattern subscription conditions
- P-017, P-018: commonalities_release >= r4.0
- P-019: no restriction (runs when release-plan.yaml exists)

Update rule inventory:
- Python rules: 13 -> 19 (6 new + P-011 rewrite)
- Gap rules: 25 -> 17 (7 implemented, 1 deferred)
- Mark NEW-001 = P-013 (already implemented)
- Mark DG-095 as deferred
- Total implemented: 123

751 tests passing.
---
 validation/engines/python_checks/__init__.py  | 16 ++++-
 validation/rules/python-rules.yaml            | 58 +++++++++++++++++++
 validation/rules/rule-inventory.yaml          | 53 +++++++++--------
 .../tests/test_rule_metadata_integrity.py     |  2 +-
 4 files changed, 102 insertions(+), 27 deletions(-)

diff --git a/validation/engines/python_checks/__init__.py b/validation/engines/python_checks/__init__.py
index 2cc45e81..0acf6602 100644
--- a/validation/engines/python_checks/__init__.py
+++ b/validation/engines/python_checks/__init__.py
@@ -7,11 +7,17 @@
 from ._types import CheckDescriptor, CheckScope
 
 from .changelog_checks import check_changelog_format
+from .error_code_checks import check_conflict_deprecated, check_contextcode_format
 from .filename_checks import check_filename_kebab_case, check_filename_matches_api_name
 from .metadata_checks import check_commonalities_version
 from .readme_checks import check_readme_placeholder_removal
-from .release_plan_checks import check_release_plan_semantics
+from .release_plan_checks import check_orphan_api_definitions, check_release_plan_semantics
 from .release_review_checks import check_release_review_file_restriction
+from .subscription_checks import (
+    check_event_type_format,
+    check_sinkcredential_not_in_response,
+    check_subscription_filename,
+)
 from .test_checks import (
     check_test_directory_exists,
     check_test_file_version,
@@ -34,13 +40,19 @@
     CheckDescriptor("check-server-url-api-name", CheckScope.API, check_server_url_api_name),
     CheckDescriptor("check-test-files-exist", CheckScope.API, check_test_files_exist),
     CheckDescriptor("check-test-file-version", CheckScope.API, check_test_file_version),
+    CheckDescriptor("check-commonalities-version", CheckScope.API, check_commonalities_version),
+    CheckDescriptor("check-subscription-filename", CheckScope.API, check_subscription_filename),
+    CheckDescriptor("check-event-type-format", CheckScope.API, check_event_type_format),
+    CheckDescriptor("check-sinkcredential-not-in-response", CheckScope.API, check_sinkcredential_not_in_response),
+    CheckDescriptor("check-conflict-deprecated", CheckScope.API, check_conflict_deprecated),
+    CheckDescriptor("check-contextcode-format", CheckScope.API, check_contextcode_format),
     # --- Repo-level checks (run once) ---
     CheckDescriptor("check-test-directory-exists", CheckScope.REPO, check_test_directory_exists),
     CheckDescriptor("check-release-plan-semantics", CheckScope.REPO, check_release_plan_semantics),
     CheckDescriptor("check-changelog-format", CheckScope.REPO, check_changelog_format),
-    CheckDescriptor("check-commonalities-version", CheckScope.API, check_commonalities_version),
     CheckDescriptor("check-readme-placeholder-removal", CheckScope.REPO, check_readme_placeholder_removal),
     CheckDescriptor("check-release-review-file-restriction", CheckScope.REPO, check_release_review_file_restriction),
+    CheckDescriptor("check-orphan-api-definitions", CheckScope.REPO, check_orphan_api_definitions),
 ]
 
 __all__ = ["CHECKS", "CheckDescriptor", "CheckScope"]
diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
index 7e8fc8ec..963f160c 100644
--- a/validation/rules/python-rules.yaml
+++ b/validation/rules/python-rules.yaml
@@ -118,3 +118,61 @@
   engine_rule: check-readme-placeholder-removal
   conditional_level:
     default: warn
+
+# P-014: check-subscription-filename (DG-088)
+# Explicit subscription API names must end with '-subscriptions'.
+- id: P-014
+  engine: python
+  engine_rule: check-subscription-filename
+  applicability:
+    api_pattern: [explicit-subscription]
+  conditional_level:
+    default: warn
+
+# P-015: check-event-type-format (DG-086)
+# Event types must follow org.camaraproject....
+- id: P-015
+  engine: python
+  engine_rule: check-event-type-format
+  applicability:
+    api_pattern: [explicit-subscription, implicit-subscription]
+  conditional_level:
+    default: error
+
+# P-016: check-sinkcredential-not-in-response (DG-092)
+# sinkCredential must not appear in subscription 2xx response schemas.
+- id: P-016
+  engine: python
+  engine_rule: check-sinkcredential-not-in-response
+  applicability:
+    api_pattern: [explicit-subscription]
+  conditional_level:
+    default: error
+
+# P-017: check-conflict-deprecated (DG-018)
+# CONFLICT error code is deprecated in Commonalities r4.x.
+- id: P-017
+  engine: python
+  engine_rule: check-conflict-deprecated
+  applicability:
+    commonalities_release: ">=r4.0"
+  conditional_level:
+    default: warn
+
+# P-018: check-contextcode-format (DG-011)
+# contextCode enum values should follow SCREAMING_SNAKE_CASE.
+- id: P-018
+  engine: python
+  engine_rule: check-contextcode-format
+  applicability:
+    commonalities_release: ">=r4.0"
+  conditional_level:
+    default: hint
+
+# P-019: check-orphan-api-definitions (NEW-003)
+# YAML files in API_definitions not listed in release-plan.yaml.
+- id: P-019
+  engine: python
+  engine_rule: check-orphan-api-definitions
+  conditional_level:
+    default: warn
diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index c127364e..d317ab25 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -11,18 +11,18 @@
 #   pending     — in open PRs, not yet merged
 
 version: 1
-generated: 2026-03-26
+generated: 2026-04-03
 
 summary:
-  total_implemented: 116
-  total_gap: 25
+  total_implemented: 123
+  total_gap: 17
   total_manual: 25
   total_pending: 0
   total_tested: 0
   by_engine:
     spectral: 66
     gherkin: 25
-    python: 12
+    python: 19
     yamllint: 13
 
 # ---------------------------------------------------------------------------
@@ -77,16 +77,6 @@ gap_rules:
     priority: medium
     notes: Value check (not just presence). Spectral license-url only checks existence. Previously v0_6 (V6-006)
 
-  - audit_id: DG-028
-    description: x-camara-commonalities MUST specify valid version
-    target_engine: python
-    priority: medium
-    notes: >
-      Rewrite P-011. Presence check + value validation. On main: wip, tbd,
-      X.Y (e.g. 0.7), or X.Y.Z (e.g. 0.7.0) allowed — if X.Y/X.Y.Z then
-      must match declared commonalities_release. On release: real version
-      required, must match commonalities_release. Previously v0_6 (V6-007)
-
   - audit_id: DG-015
     description: "API-specific error: API_NAME.SPECIFIC_CODE format"
     target_engine: spectral
@@ -137,42 +127,57 @@ gap_rules:
     priority: low
     notes: api_pattern subscription only
 
-  # Python gaps (new checks needed)
+  # Python gaps — implemented in Phase 2b
   - audit_id: DG-011
     description: contextCode SCREAMING_SNAKE_CASE format (r4.x)
     target_engine: python
-    priority: low
+    status: implemented
+    rule_id: P-018
 
   - audit_id: DG-018
     description: CONFLICT error code deprecated (r4.x)
     target_engine: python
-    priority: low
+    status: implemented
+    rule_id: P-017
+
+  - audit_id: DG-028
+    description: x-camara-commonalities MUST specify valid version
+    target_engine: python
+    status: implemented
+    rule_id: P-011
+    notes: Rewrite of original P-011 (license/commonalities consistency)
 
   - audit_id: DG-086
     description: Event type format org.camaraproject validation (subscription)
     target_engine: python
-    priority: medium
+    status: implemented
+    rule_id: P-015
 
   - audit_id: DG-088
     description: Subscription API filename convention
     target_engine: python
-    priority: medium
+    status: implemented
+    rule_id: P-014
 
   - audit_id: DG-092
     description: sinkCredential not in responses
     target_engine: python
-    priority: medium
+    status: implemented
+    rule_id: P-016
 
+  # Python gaps — deferred
   - audit_id: DG-095
     description: Event version independence from API version
     target_engine: python
-    priority: low
+    status: deferred
+    notes: "No actionable static check — event version format already validated by P-015 (DG-086)"
 
   # New rules (not from audit — identified during implementation)
   - audit_id: NEW-001
     description: README.md placeholder must be removed from API_definitions/ when spec files are present
     target_engine: python
-    priority: medium
+    status: implemented
+    rule_id: P-013
 
   - audit_id: NEW-002
     description: "apiRoot variable: default and description MUST match Design Guide values"
@@ -183,8 +188,8 @@ gap_rules:
   - audit_id: NEW-003
     description: "Orphan API definitions: YAML files in code/API_definitions/ not listed in release-plan.yaml"
     target_engine: python
-    priority: low
-    notes: "Compare filenames against release-plan.yaml APIs. Also detect missing files (declared but absent)."
+    status: implemented
+    rule_id: P-019
 
 # ---------------------------------------------------------------------------
 # Fixes needed — implemented rules with incorrect behavior
diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py
index d46f5fa9..410aaf84 100644
--- a/validation/tests/test_rule_metadata_integrity.py
+++ b/validation/tests/test_rule_metadata_integrity.py
@@ -77,7 +77,7 @@ def test_expected_rule_counts(self, all_rules):
         counts = {}
         for r in all_rules:
             counts[r.engine] = counts.get(r.engine, 0) + 1
-        assert counts["python"] == 13
+        assert counts["python"] == 19
         assert counts["spectral"] == 66
         assert counts["gherkin"] == 25
         assert counts["yamllint"] == 13

From 0aa101cb7c55e80c710ffca1f250e574ed012da3 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 3 Apr 2026 18:33:58 +0200
Subject: [PATCH 058/157] chore(validation): track S-016 fix + implement
 proposed rule changes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

S-016 (schema-type-check): mark as partial — needs recursive rewrite.

Proposed changes (preliminary decisions, tbc in PR):
- S-211 (oas3-unused-component): post-filter downgrade to hint
- S-313 (string-restricted): post-filter downgrade to hint + hint text
- S-015 (schema-casing): custom JS function with CloudEvents exceptions
---
 validation/rules/rule-inventory.yaml | 45 +++++++++++++++++-----------
 1 file changed, 28 insertions(+), 17 deletions(-)

diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index c87e4445..357b2a4d 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -191,6 +191,19 @@ gap_rules:
 # ---------------------------------------------------------------------------
 
 fixes:
+  - rule_id: S-016
+    engine_rule: camara-schema-type-check
+    status: partial
+    issue: >
+      Only checks top-level schemas ($.components.schemas.*). Does not recurse
+      into properties, items, or nested schemas. Does not handle combiners
+      (allOf/anyOf/oneOf) as valid alternatives to type.
+    fix: >
+      Rewrite as custom JS function with recursive traversal. Check type
+      presence on all schema nodes, accept combiners as substitute for type,
+      mark combiner children as partial (no type required). Skip $ref nodes.
+      Also recurse into additionalProperties when it is a schema.
+
   - rule_id: P-007
     engine_rule: check-test-file-version
     status: suppressed
@@ -210,19 +223,18 @@ fixes:
 proposed_changes:
   - rule_id: S-211
     engine_rule: oas3-unused-component
-    current_level: warn
-    proposed_level: hint
+    change: Keep Spectral severity (warn), add conditional_level default=hint in metadata
     reason: >
       Spectral does not follow discriminator mappings — schemas referenced
       only via discriminator appear as unused. False positives on subscription
       event schemas (e.g. EventApiSpecific1, EventSubscriptionStarted).
       Unused schemas are cosmetic, not harmful.
-    discuss_in: Commonalities
+    status: done
 
   - rule_id: S-313
     engine_rule: "owasp:api4:2023-string-restricted"
-    current_level: warn
-    proposed_level: hint
+    change: Keep Spectral severity (warn), add conditional_level default=hint + hint text in metadata
+    hint: "Acceptable if free-form field or implementation-dependent — no fix needed."
     reason: >
       Free-text string properties (ErrorInfo.message, CloudEvent.id,
       SubscriptionId, *Description fields) cannot meaningfully have format
@@ -230,19 +242,18 @@ proposed_changes:
       messages). The resource consumption concern is already covered by
       string-limit (maxLength). string-restricted adds noise on fields
       where no actionable fix exists.
-    discuss_in: Commonalities
-
-  - engine_rule: camara-schema-casing-convention
-    rule_id: S-015
-    change: Add overrides for CloudEvents abbreviation prefixes (HTTP, JWT)
+    status: done
+
+  - rule_id: S-015
+    engine_rule: camara-schema-casing-convention
+    change: >
+      Custom JS function replaces built-in casing: pascal. Exact exception
+      list for 4 CloudEvents schema names (HTTPSettings, HTTPSubscriptionRequest,
+      HTTPSubscriptionResponse, PrivateKeyJWTCredential). r4 ruleset only.
     reason: >
-      CloudEvents convention uses uppercase protocol prefixes in schema names
-      (HTTPSettings, HTTPSubscriptionRequest, HTTPSubscriptionResponse).
-      PrivateKeyJWTCredential follows the same pattern. Renaming to HttpSettings
-      etc. would deviate from CloudEvents spec. Overrides for these 4 schemas
-      would suppress false positives while keeping PascalCase enforced for all
-      other schemas.
-    discuss_in: Commonalities
+      CloudEvents convention uses uppercase protocol prefixes in schema names.
+      Renaming would deviate from CloudEvents spec.
+    status: done
 
 # ---------------------------------------------------------------------------
 # Pending rules — in open PRs

From 19ca03f945c3b1b6194b0a1659ff3dc94f20bfa9 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 3 Apr 2026 19:47:36 +0200
Subject: [PATCH 059/157] feat(validation): implement S-211, S-313, S-015
 proposed rule changes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

S-211 (oas3-unused-component): post-filter downgrade to hint — Spectral
does not follow discriminator mappings, producing false positives.

S-313 (string-restricted): post-filter downgrade to hint — free-text
fields cannot have format/pattern without backward-compatibility risk.

S-015 (schema-casing): custom JS function replaces built-in casing:pascal
in .spectral-r4.yaml. Exact exception list for 4 CloudEvents schema names
(HTTPSettings, HTTPSubscriptionRequest/Response, PrivateKeyJWTCredential).
r3.4 and fallback rulesets unchanged.
---
 linting/config/.spectral-r4.yaml              | 12 ++++++----
 .../camara-schema-casing-convention.js        | 23 +++++++++++++++++++
 validation/rules/spectral-rules.yaml          |  6 +++++
 .../tests/test_rule_metadata_integrity.py     |  4 ++--
 4 files changed, 38 insertions(+), 7 deletions(-)
 create mode 100644 linting/config/lint_function/camara-schema-casing-convention.js

diff --git a/linting/config/.spectral-r4.yaml b/linting/config/.spectral-r4.yaml
index 8571fabb..2392de33 100644
--- a/linting/config/.spectral-r4.yaml
+++ b/linting/config/.spectral-r4.yaml
@@ -23,6 +23,7 @@ functions:
   - camara-reserved-words
   - camara-language-avoid-telco
   - camara-security-no-secrets-in-path-or-query-parameters
+  - camara-schema-casing-convention
 functionsDir: "./lint_function"
 rules:
   #  Built-in OpenAPI Specification ruleset. Each rule then can be enabled individually.
@@ -258,14 +259,15 @@ rules:
     recommended: true  # Set to true/false to enable/disable this rule
 
   camara-schema-casing-convention:
-    description: This rule checks schema should follow a specific case convention pascal case.
-    message: "{{property}} should be pascal case (UppperCamelCase)"
+    description: >
+      Schema names must be PascalCase (UpperCamelCase). CloudEvents schema names
+      (HTTPSettings, HTTPSubscriptionRequest, HTTPSubscriptionResponse,
+      PrivateKeyJWTCredential) are allowed as explicit exceptions.
+    message: "{{error}}"
     severity: warn
     given: $.components.schemas[*]~
     then:
-      function: casing
-      functionOptions:
-        type: pascal
+      function: camara-schema-casing-convention
     recommended: true  # Set to true/false to enable/disable this rule
 
   camara-parameter-casing-convention:
diff --git a/linting/config/lint_function/camara-schema-casing-convention.js b/linting/config/lint_function/camara-schema-casing-convention.js
new file mode 100644
index 00000000..a6f59667
--- /dev/null
+++ b/linting/config/lint_function/camara-schema-casing-convention.js
@@ -0,0 +1,23 @@
+// CAMARA Project - support function for Spectral linter
+// PascalCase check for schema names with exact CloudEvents exceptions.
+// 03.04.2026 - initial version
+
+// Schema names allowed to deviate from PascalCase (CloudEvents convention).
+const ALLOWED = new Set([
+  "HTTPSettings",
+  "HTTPSubscriptionRequest",
+  "HTTPSubscriptionResponse",
+  "PrivateKeyJWTCredential",
+]);
+
+// PascalCase: first char uppercase, each uppercase followed by lowercase/digit
+// or end-of-string. Matches Spectral's built-in casing: pascal behavior.
+const PASCAL = /^[A-Z](?:[a-z0-9]|[A-Z](?=[a-z0-9]|$))*$/;
+
+export default (input) => {
+  if (typeof input !== "string" || ALLOWED.has(input)) return;
+
+  if (!PASCAL.test(input)) {
+    return [{ message: `${input} should be PascalCase (UpperCamelCase)` }];
+  }
+};
diff --git a/validation/rules/spectral-rules.yaml b/validation/rules/spectral-rules.yaml
index 1a57f71e..cd448a62 100644
--- a/validation/rules/spectral-rules.yaml
+++ b/validation/rules/spectral-rules.yaml
@@ -123,6 +123,9 @@
 - id: S-211
   engine: spectral
   engine_rule: oas3-unused-component
+  hint: "Spectral does not follow discriminator mappings — verify the schema is truly unused."
+  conditional_level:
+    default: hint
 
 - id: S-212
   engine: spectral
@@ -255,6 +258,9 @@
 - id: S-313
   engine: spectral
   engine_rule: "owasp:api4:2023-string-restricted"
+  hint: "Acceptable if free-form field or implementation-dependent — no fix needed."
+  conditional_level:
+    default: hint
 
 - id: S-314
   engine: spectral
diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py
index d46f5fa9..c3c2fe75 100644
--- a/validation/tests/test_rule_metadata_integrity.py
+++ b/validation/tests/test_rule_metadata_integrity.py
@@ -306,8 +306,8 @@ def test_hints_are_exception_not_norm(self, all_rules):
         """
         with_hints = [r.id for r in all_rules if r.hint is not None]
         with_overrides = [r.id for r in all_rules if r.message_override is not None]
-        assert len(with_hints) == 7, (
-            f"Expected 7 explicit hints (update test if adding hints): "
+        assert len(with_hints) == 9, (
+            f"Expected 9 explicit hints (update test if adding hints): "
             f"{with_hints}"
         )
         assert len(with_overrides) == 0, (

From 60c15600949f5eaab6a804e1c5c6a8d4d55a4377 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 3 Apr 2026 20:58:06 +0200
Subject: [PATCH 060/157] feat(validation): rewrite S-016 schema-type-check as
 recursive function

Replace inline pattern rule with custom JS function in .spectral-r4.yaml.
Recursively checks type presence and value through properties, items,
and additionalProperties. Accepts allOf/anyOf/oneOf as valid alternatives
to type, marks combiner children as partial (no type required).

Original inline rule preserved in r3.4 and fallback rulesets.
---
 linting/config/.spectral-r4.yaml              | 12 ++--
 .../lint_function/camara-schema-type-check.js | 55 +++++++++++++++++++
 validation/rules/rule-inventory.yaml          | 14 ++---
 3 files changed, 68 insertions(+), 13 deletions(-)
 create mode 100644 linting/config/lint_function/camara-schema-type-check.js

diff --git a/linting/config/.spectral-r4.yaml b/linting/config/.spectral-r4.yaml
index 2392de33..55d0d32b 100644
--- a/linting/config/.spectral-r4.yaml
+++ b/linting/config/.spectral-r4.yaml
@@ -24,6 +24,7 @@ functions:
   - camara-language-avoid-telco
   - camara-security-no-secrets-in-path-or-query-parameters
   - camara-schema-casing-convention
+  - camara-schema-type-check
 functionsDir: "./lint_function"
 rules:
   #  Built-in OpenAPI Specification ruleset. Each rule then can be enabled individually.
@@ -282,14 +283,15 @@ rules:
     recommended: true  # Set to true/false to enable/disable this rule
 
   camara-schema-type-check:
-    message: "Invalid type in schema definition."
+    description: >
+      Every schema node must declare a valid type (string, number, integer,
+      boolean, array, object) or use a combiner (allOf, anyOf, oneOf).
+      Recursively checks properties, items, and additionalProperties.
+    message: "{{error}}"
     severity: error
     given: "$.components.schemas.*"
     then:
-      field: type
-      function: pattern
-      functionOptions:
-        match: "^(string|number|integer|boolean|array|object)$"
+      function: camara-schema-type-check
     recommended: true
 
   # ===== OWASP API Security Top 10 2023 =====
diff --git a/linting/config/lint_function/camara-schema-type-check.js b/linting/config/lint_function/camara-schema-type-check.js
new file mode 100644
index 00000000..d01d9063
--- /dev/null
+++ b/linting/config/lint_function/camara-schema-type-check.js
@@ -0,0 +1,55 @@
+// CAMARA Project - support function for Spectral linter
+// Recursive type check for schema definitions with combiner support.
+// 03.04.2026 - initial version
+
+const VALID_TYPES = new Set(["string", "number", "integer", "boolean", "array", "object"]);
+
+export default (schema, _options, context) => {
+  const errors = [];
+
+  function check(node, path, isPartial = false) {
+    if (!node || typeof node !== "object" || node.$ref) return;
+
+    const hasCombiner = node.allOf || node.anyOf || node.oneOf;
+
+    // Only require 'type' if:
+    // - not a partial schema (inside a combiner)
+    // - and no combiner is used as a substitute
+    if (!isPartial && !hasCombiner) {
+      if (!node.type) {
+        errors.push({ message: `Missing 'type' at ${path.join(".")}`, path });
+      } else if (!VALID_TYPES.has(node.type)) {
+        errors.push({ message: `Invalid type '${node.type}' at ${path.join(".")}`, path });
+      }
+    }
+
+    // Recurse into properties
+    if (node.properties) {
+      for (const [key, value] of Object.entries(node.properties)) {
+        check(value, [...path, "properties", key], isPartial);
+      }
+    }
+
+    // Recurse into array items
+    if (node.items) {
+      check(node.items, [...path, "items"], isPartial);
+    }
+
+    // Recurse into additionalProperties when it is a schema
+    if (typeof node.additionalProperties === "object" && node.additionalProperties !== null) {
+      check(node.additionalProperties, [...path, "additionalProperties"], isPartial);
+    }
+
+    // Recurse into combiners — mark children as partial
+    for (const combiner of ["allOf", "anyOf", "oneOf"]) {
+      if (node[combiner]) {
+        node[combiner].forEach((subSchema, i) => {
+          check(subSchema, [...path, combiner, i], true);
+        });
+      }
+    }
+  }
+
+  check(schema, context.path);
+  return errors;
+};
diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index 357b2a4d..2158fafd 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -193,16 +193,14 @@ gap_rules:
 fixes:
   - rule_id: S-016
     engine_rule: camara-schema-type-check
-    status: partial
+    status: done
     issue: >
-      Only checks top-level schemas ($.components.schemas.*). Does not recurse
-      into properties, items, or nested schemas. Does not handle combiners
-      (allOf/anyOf/oneOf) as valid alternatives to type.
+      Original inline rule only checked top-level schemas. Did not recurse
+      into properties, items, or nested schemas. Did not handle combiners.
     fix: >
-      Rewrite as custom JS function with recursive traversal. Check type
-      presence on all schema nodes, accept combiners as substitute for type,
-      mark combiner children as partial (no type required). Skip $ref nodes.
-      Also recurse into additionalProperties when it is a schema.
+      Replaced with custom JS function in .spectral-r4.yaml. Recursive
+      traversal, combiner-aware, validates type values. Original inline
+      rule preserved in r3.4 and fallback rulesets.
 
   - rule_id: P-007
     engine_rule: check-test-file-version

From 9a2cb43397cb37c75fe3d8fc29fc72165a375c2c Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Tue, 7 Apr 2026 17:10:06 +0200
Subject: [PATCH 061/157] feat(validation): add 18 Spectral gap rules (Phase
 2a)

Implement 17 design guide gap rules as 18 Spectral rule definitions
(S-018..S-035) in .spectral-r4.yaml, closing all remaining Spectral
gaps from the design guide audit.

Rules added:
- License: name (S-018), URL value (S-019)
- Info: no contact (S-020)
- Tags: Title Case (S-021)
- apiRoot: default (S-022), description (S-023)
- Responses: 403 required (S-024)
- Error codes: not numeric (S-025), SCREAMING_SNAKE_CASE (S-026, r4.x),
  API_NAME.CODE dot format (S-027)
- Format descriptions: date-time RFC 3339 (S-028), duration (S-029)
- Schema: required properties exist (S-030), array items description
  (S-031, r4.x)
- Subscription: specversion 1.0 (S-032), protocol HTTP (S-033),
  sink HTTPS (S-034), notification content-type (S-035)

5 new custom JS functions for checks that need recursive traversal
or conditional logic beyond built-in Spectral functions.

33 new tests validating all rules fire correctly on non-compliant
specs and stay silent on compliant ones.

786 tests pass (was 751).
---
 linting/config/.spectral-r4.yaml              | 214 +++++++
 .../camara-array-items-description.js         |  41 ++
 .../camara-error-code-dot-format.js           |  17 +
 .../camara-format-description-check.js        |  46 ++
 .../camara-notification-content-type.js       |  14 +
 .../camara-required-properties-exist.js       |  44 ++
 validation/rules/rule-inventory.yaml          |  86 +--
 validation/rules/spectral-rules.yaml          |  87 +++
 .../tests/test_rule_metadata_integrity.py     |   6 +-
 validation/tests/test_spectral_gap_rules.py   | 598 ++++++++++++++++++
 10 files changed, 1111 insertions(+), 42 deletions(-)
 create mode 100644 linting/config/lint_function/camara-array-items-description.js
 create mode 100644 linting/config/lint_function/camara-error-code-dot-format.js
 create mode 100644 linting/config/lint_function/camara-format-description-check.js
 create mode 100644 linting/config/lint_function/camara-notification-content-type.js
 create mode 100644 linting/config/lint_function/camara-required-properties-exist.js
 create mode 100644 validation/tests/test_spectral_gap_rules.py

diff --git a/linting/config/.spectral-r4.yaml b/linting/config/.spectral-r4.yaml
index 55d0d32b..9a6fc151 100644
--- a/linting/config/.spectral-r4.yaml
+++ b/linting/config/.spectral-r4.yaml
@@ -11,6 +11,9 @@
 # - 21.07.2025: Added camara-schema-type-check rule
 # - 12.01.2026: camara-discriminator-use deprecated
 # - 03.04.2026: Added OWASP API Security Top 10 2023 rules (Linting-rules.md section 5)
+# - 07.04.2026: Added Phase 2a gap rules (S-018..S-035): license, contact, tags,
+#   apiRoot, 403, error codes, subscription, format descriptions, required props,
+#   array items description, notification content-type
 
 
 # Note: @stoplight/spectral-owasp-ruleset is installed via validation/package.json.
@@ -25,6 +28,11 @@ functions:
   - camara-security-no-secrets-in-path-or-query-parameters
   - camara-schema-casing-convention
   - camara-schema-type-check
+  - camara-format-description-check
+  - camara-error-code-dot-format
+  - camara-required-properties-exist
+  - camara-array-items-description
+  - camara-notification-content-type
 functionsDir: "./lint_function"
 rules:
   #  Built-in OpenAPI Specification ruleset. Each rule then can be enabled individually.
@@ -294,6 +302,212 @@ rules:
       function: camara-schema-type-check
     recommended: true
 
+  # ===== Phase 2a Gap Rules (Design Guide + Subscription Guide) =====
+
+  # --- Group A: Simple field checks ---
+
+  camara-license-name:
+    description: "info.license.name MUST be 'Apache 2.0'."
+    message: "License name must be exactly 'Apache 2.0', got '{{value}}'."
+    severity: error
+    given: $.info.license.name
+    then:
+      function: pattern
+      functionOptions:
+        match: "^Apache 2\\.0$"
+    recommended: true
+
+  camara-license-url-value:
+    description: "info.license.url MUST be the Apache 2.0 license URL."
+    message: "License URL must be 'https://www.apache.org/licenses/LICENSE-2.0.html', got '{{value}}'."
+    severity: error
+    given: $.info.license.url
+    then:
+      function: pattern
+      functionOptions:
+        match: "^https://www\\.apache\\.org/licenses/LICENSE-2\\.0\\.html$"
+    recommended: true
+
+  camara-no-contact:
+    description: "info.contact MUST NOT be present in CAMARA API specifications."
+    message: "info.contact must not be present. CAMARA APIs do not use individual contact information."
+    severity: warn
+    given: $.info
+    then:
+      field: contact
+      function: falsy
+    recommended: true
+
+  camara-tag-name-title-case:
+    description: "Tag names SHOULD follow Title Case convention (each word capitalized, separated by spaces)."
+    message: "Tag name '{{value}}' is not Title Case. Use e.g. 'Quality On Demand' instead of 'quality-on-demand'."
+    severity: hint
+    given: $.tags[*].name
+    then:
+      function: pattern
+      functionOptions:
+        match: "^[A-Z][a-zA-Z0-9]*(\\s[A-Z][a-zA-Z0-9]*)*$"
+    recommended: true
+
+  camara-api-root-default:
+    description: "apiRoot variable default SHOULD be 'http://localhost:9091'."
+    message: "apiRoot default should be 'http://localhost:9091', got '{{value}}'."
+    severity: hint
+    given: $.servers[*].variables.apiRoot.default
+    then:
+      function: pattern
+      functionOptions:
+        match: "^http://localhost:9091$"
+    recommended: true
+
+  camara-api-root-description:
+    description: "apiRoot variable description SHOULD match the standard CAMARA text."
+    message: "apiRoot description does not match the standard CAMARA text."
+    severity: hint
+    given: $.servers[*].variables.apiRoot.description
+    then:
+      function: pattern
+      functionOptions:
+        match: "^API root, defined by the service provider, e\\.g\\. `api\\.example\\.com` or `api\\.example\\.com/somepath`$"
+    recommended: true
+
+  camara-response-403:
+    description: "All API operations MUST document a 403 Forbidden response."
+    message: "Operation is missing a 403 response definition."
+    severity: warn
+    given: "$.paths[*][get,put,post,delete,patch].responses"
+    then:
+      field: "403"
+      function: truthy
+    recommended: true
+
+  # --- Group B: Error code checks ---
+
+  camara-error-code-not-numeric:
+    description: "Error code enum values MUST NOT be purely numeric."
+    message: "Error code '{{value}}' must not be numeric. Use descriptive string codes like INVALID_ARGUMENT."
+    severity: error
+    given:
+      - "$.paths.*.*.responses.*.content.*.schema.allOf[*].properties.code.enum[*]"
+      - "$.components.responses.*.content.*.schema.allOf[*].properties.code.enum[*]"
+    then:
+      function: pattern
+      functionOptions:
+        notMatch: "^\\d+$"
+    recommended: true
+
+  camara-error-code-screaming-snake-case:
+    description: "Error code enum values MUST be SCREAMING_SNAKE_CASE, optionally with API_NAME prefix separated by dot."
+    message: "Error code '{{value}}' is not SCREAMING_SNAKE_CASE (optionally API_NAME.CODE)."
+    severity: warn
+    given:
+      - "$.paths.*.*.responses.*.content.*.schema.allOf[*].properties.code.enum[*]"
+      - "$.components.responses.*.content.*.schema.allOf[*].properties.code.enum[*]"
+    then:
+      function: pattern
+      functionOptions:
+        match: "^[A-Z][A-Z0-9]*(_[A-Z0-9]+)*(\\.[A-Z][A-Z0-9]*(_[A-Z0-9]+)*)?$"
+    recommended: true
+
+  camara-error-code-api-specific-format:
+    description: "API-specific error codes with dots MUST follow API_NAME.SPECIFIC_CODE format."
+    message: "{{error}}"
+    severity: warn
+    given:
+      - "$.paths.*.*.responses.*.content.*.schema.allOf[*].properties.code.enum[*]"
+      - "$.components.responses.*.content.*.schema.allOf[*].properties.code.enum[*]"
+    then:
+      function: camara-error-code-dot-format
+    recommended: true
+
+  # --- Group C: Subscription schema checks ---
+
+  camara-cloudevent-specversion:
+    description: "CloudEvent specversion MUST be constrained to enum ['1.0']."
+    message: "specversion must be '1.0', got '{{value}}'."
+    severity: hint
+    given: "$.components.schemas.*.properties.specversion.enum[0]"
+    then:
+      function: pattern
+      functionOptions:
+        match: "^1\\.0$"
+    recommended: true
+
+  camara-subscription-protocol-http:
+    description: "Protocol enum MUST contain only 'HTTP' (only HTTP is allowed for now)."
+    message: "Protocol enum should contain only 'HTTP'. Found '{{value}}'."
+    severity: hint
+    given: "$.components.schemas.Protocol.enum[*]"
+    then:
+      function: pattern
+      functionOptions:
+        match: "^HTTP$"
+    recommended: true
+
+  camara-subscription-sink-https:
+    description: "The 'sink' property MUST have a pattern enforcing HTTPS."
+    message: "sink property must have a pattern enforcing HTTPS (e.g. '^https:\\/\\/.+$')."
+    severity: warn
+    given: "$.components.schemas.*.properties.sink.pattern"
+    then:
+      function: pattern
+      functionOptions:
+        match: "https"
+    recommended: true
+
+  camara-notification-content-type:
+    description: "Notification callback request body MUST use 'application/cloudevents+json' content type."
+    message: "{{error}}"
+    severity: warn
+    given: "$.paths.*.*.callbacks.*.*.post.requestBody.content"
+    then:
+      function: camara-notification-content-type
+    recommended: true
+
+  # --- Group D: Custom JS function rules ---
+
+  camara-datetime-rfc3339-description:
+    description: "Properties with format 'date-time' MUST have a description mentioning RFC 3339."
+    message: "{{error}}"
+    severity: hint
+    given: "$.components.schemas.*"
+    then:
+      function: camara-format-description-check
+      functionOptions:
+        format: "date-time"
+        requiredText: "RFC\\s*3339"
+    recommended: true
+
+  camara-duration-rfc3339-description:
+    description: "Properties with format 'duration' MUST have a description mentioning RFC 3339."
+    message: "{{error}}"
+    severity: hint
+    given: "$.components.schemas.*"
+    then:
+      function: camara-format-description-check
+      functionOptions:
+        format: "duration"
+        requiredText: "RFC\\s*3339"
+    recommended: true
+
+  camara-required-properties-exist:
+    description: "Every property listed in 'required' MUST be defined in 'properties'."
+    message: "{{error}}"
+    severity: warn
+    given: "$.components.schemas.*"
+    then:
+      function: camara-required-properties-exist
+    recommended: true
+
+  camara-array-items-description:
+    description: "Array 'items' MUST have a description when defined inline (not $ref)."
+    message: "{{error}}"
+    severity: warn
+    given: "$.components.schemas.*"
+    then:
+      function: camara-array-items-description
+    recommended: true
+
   # ===== OWASP API Security Top 10 2023 =====
   # Source: Commonalities Linting-rules.md section 5
   # Severity overrides per CAMARA agreement (Commonalities #539, #548, #551, #552)
diff --git a/linting/config/lint_function/camara-array-items-description.js b/linting/config/lint_function/camara-array-items-description.js
new file mode 100644
index 00000000..51981e9e
--- /dev/null
+++ b/linting/config/lint_function/camara-array-items-description.js
@@ -0,0 +1,41 @@
+// CAMARA Project - support function for Spectral linter
+// Checks that inline array 'items' schemas have a 'description' field.
+// Items that are $ref are skipped (the target schema should have its
+// own description).
+
+export default (schema, _options, context) => {
+  const errors = [];
+
+  function check(node, path) {
+    if (!node || typeof node !== "object" || node.$ref) return;
+
+    if (node.type === "array" && node.items) {
+      if (typeof node.items === "object" && !node.items.$ref && !node.items.description) {
+        errors.push({
+          message: `Array items must have a description`,
+          path: [...path, "items"]
+        });
+      }
+    }
+
+    if (node.properties) {
+      for (const [key, value] of Object.entries(node.properties)) {
+        check(value, [...path, "properties", key]);
+      }
+    }
+    if (node.items && typeof node.items === "object" && !node.items.$ref) {
+      check(node.items, [...path, "items"]);
+    }
+    if (typeof node.additionalProperties === "object" && node.additionalProperties !== null) {
+      check(node.additionalProperties, [...path, "additionalProperties"]);
+    }
+    for (const combiner of ["allOf", "anyOf", "oneOf"]) {
+      if (node[combiner]) {
+        node[combiner].forEach((sub, i) => check(sub, [...path, combiner, i]));
+      }
+    }
+  }
+
+  check(schema, context.path);
+  return errors;
+};
diff --git a/linting/config/lint_function/camara-error-code-dot-format.js b/linting/config/lint_function/camara-error-code-dot-format.js
new file mode 100644
index 00000000..d660f6bc
--- /dev/null
+++ b/linting/config/lint_function/camara-error-code-dot-format.js
@@ -0,0 +1,17 @@
+// CAMARA Project - support function for Spectral linter
+// Validates that error codes containing a dot follow the
+// API_NAME.SPECIFIC_CODE format (both segments in SCREAMING_SNAKE_CASE).
+// Non-dot codes are silently skipped (they are common codes).
+
+const DOT_FORMAT = /^[A-Z][A-Z0-9]*(_[A-Z0-9]+)*\.[A-Z][A-Z0-9]*(_[A-Z0-9]+)*$/;
+
+export default (input) => {
+  if (typeof input !== "string") return;
+  if (!input.includes(".")) return;
+
+  if (!DOT_FORMAT.test(input)) {
+    return [{
+      message: `API-specific error code '${input}' must follow API_NAME.SPECIFIC_CODE format (SCREAMING_SNAKE_CASE on both sides of the dot)`
+    }];
+  }
+};
diff --git a/linting/config/lint_function/camara-format-description-check.js b/linting/config/lint_function/camara-format-description-check.js
new file mode 100644
index 00000000..885477b3
--- /dev/null
+++ b/linting/config/lint_function/camara-format-description-check.js
@@ -0,0 +1,46 @@
+// CAMARA Project - support function for Spectral linter
+// Checks that properties with a specific format have descriptions
+// containing required text (e.g. date-time must mention RFC 3339).
+//
+// Options:
+//   format:       the format value to match (e.g. "date-time", "duration")
+//   requiredText: regex pattern to search for in description (e.g. "RFC\\s*3339")
+
+export default (schema, options, context) => {
+  const errors = [];
+  const { format, requiredText } = options;
+  const re = new RegExp(requiredText, "i");
+
+  function check(node, path) {
+    if (!node || typeof node !== "object" || node.$ref) return;
+
+    if (node.format === format) {
+      if (!node.description || !re.test(node.description)) {
+        errors.push({
+          message: `Property with format '${format}' must have a description mentioning ${requiredText.replace(/\\\\/g, "\\")}`,
+          path
+        });
+      }
+    }
+
+    if (node.properties) {
+      for (const [key, value] of Object.entries(node.properties)) {
+        check(value, [...path, "properties", key]);
+      }
+    }
+    if (node.items && typeof node.items === "object" && !node.items.$ref) {
+      check(node.items, [...path, "items"]);
+    }
+    if (typeof node.additionalProperties === "object" && node.additionalProperties !== null) {
+      check(node.additionalProperties, [...path, "additionalProperties"]);
+    }
+    for (const combiner of ["allOf", "anyOf", "oneOf"]) {
+      if (node[combiner]) {
+        node[combiner].forEach((sub, i) => check(sub, [...path, combiner, i]));
+      }
+    }
+  }
+
+  check(schema, context.path);
+  return errors;
+};
diff --git a/linting/config/lint_function/camara-notification-content-type.js b/linting/config/lint_function/camara-notification-content-type.js
new file mode 100644
index 00000000..87b49537
--- /dev/null
+++ b/linting/config/lint_function/camara-notification-content-type.js
@@ -0,0 +1,14 @@
+// CAMARA Project - support function for Spectral linter
+// Checks that callback POST requestBody content includes the
+// 'application/cloudevents+json' media type key.
+
+export default (content) => {
+  if (!content || typeof content !== "object") return;
+
+  const keys = Object.keys(content);
+  if (!keys.includes("application/cloudevents+json")) {
+    return [{
+      message: `Notification callback content type must include 'application/cloudevents+json', found: ${keys.join(", ") || "(empty)"}`
+    }];
+  }
+};
diff --git a/linting/config/lint_function/camara-required-properties-exist.js b/linting/config/lint_function/camara-required-properties-exist.js
new file mode 100644
index 00000000..fcea4a63
--- /dev/null
+++ b/linting/config/lint_function/camara-required-properties-exist.js
@@ -0,0 +1,44 @@
+// CAMARA Project - support function for Spectral linter
+// Checks that every entry in a schema's 'required' array has a
+// corresponding key in 'properties'. Only checks nodes where both
+// 'required' and 'properties' coexist (avoids false positives on
+// allOf partial fragments that have 'required' without 'properties').
+
+export default (schema, _options, context) => {
+  const errors = [];
+
+  function check(node, path) {
+    if (!node || typeof node !== "object" || node.$ref) return;
+
+    if (Array.isArray(node.required) && node.properties) {
+      for (const name of node.required) {
+        if (!(name in node.properties)) {
+          errors.push({
+            message: `Required property '${name}' is not defined in 'properties'`,
+            path: [...path, "required"]
+          });
+        }
+      }
+    }
+
+    if (node.properties) {
+      for (const [key, value] of Object.entries(node.properties)) {
+        check(value, [...path, "properties", key]);
+      }
+    }
+    if (node.items && typeof node.items === "object" && !node.items.$ref) {
+      check(node.items, [...path, "items"]);
+    }
+    if (typeof node.additionalProperties === "object" && node.additionalProperties !== null) {
+      check(node.additionalProperties, [...path, "additionalProperties"]);
+    }
+    for (const combiner of ["allOf", "anyOf", "oneOf"]) {
+      if (node[combiner]) {
+        node[combiner].forEach((sub, i) => check(sub, [...path, combiner, i]));
+      }
+    }
+  }
+
+  check(schema, context.path);
+  return errors;
+};
diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index 3ff4ef86..7a0bc330 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -11,16 +11,16 @@
 #   pending     — in open PRs, not yet merged
 
 version: 1
-generated: 2026-04-03
+generated: 2026-04-07
 
 summary:
-  total_implemented: 123
-  total_gap: 17
+  total_implemented: 141
+  total_gap: 0
   total_manual: 25
   total_pending: 0
   total_tested: 0
   by_engine:
-    spectral: 66
+    spectral: 84
     gherkin: 25
     python: 19
     yamllint: 13
@@ -38,94 +38,102 @@ summary:
 # Source: private-dev-docs/validation-framework/reviews/commonalities-design-guide-audit.md
 
 gap_rules:
-  # Spectral gaps (new rules needed)
+  # Spectral gaps — implemented in Phase 2a
   - audit_id: DG-003
     description: date-time description RFC 3339 format
     target_engine: spectral
-    priority: low
+    status: implemented
+    rule_id: S-028
 
   - audit_id: DG-004
     description: duration description RFC 3339 format
     target_engine: spectral
-    priority: low
+    status: implemented
+    rule_id: S-029
 
   - audit_id: DG-008
     description: "Object: required properties MUST exist in properties"
     target_engine: spectral
-    priority: medium
+    status: implemented
+    rule_id: S-030
 
   - audit_id: DG-013
     description: Error code MUST NOT be numeric
     target_engine: spectral
-    priority: medium
+    status: implemented
+    rule_id: S-025
 
   - audit_id: DG-014
     description: Error code MUST be SCREAMING_SNAKE_CASE (r4.x)
     target_engine: spectral
-    priority: medium
-    notes: commonalities_release >=r4.0
-
-  - audit_id: DG-026
-    description: "info.license.name MUST be 'Apache 2.0'"
-    target_engine: spectral
-    priority: medium
-    notes: Static value check, previously v0_6 only (V6-005)
-
-  - audit_id: DG-027
-    description: info.license.url MUST be Apache License URL
-    target_engine: spectral
-    priority: medium
-    notes: Value check (not just presence). Spectral license-url only checks existence. Previously v0_6 (V6-006)
+    status: implemented
+    rule_id: S-026
 
   - audit_id: DG-015
     description: "API-specific error: API_NAME.SPECIFIC_CODE format"
     target_engine: spectral
-    priority: medium
+    status: implemented
+    rule_id: S-027
 
   - audit_id: DG-017
     description: All APIs MUST document 403 response
     target_engine: spectral
-    priority: medium
+    status: implemented
+    rule_id: S-024
+
+  - audit_id: DG-026
+    description: "info.license.name MUST be 'Apache 2.0'"
+    target_engine: spectral
+    status: implemented
+    rule_id: S-018
+
+  - audit_id: DG-027
+    description: info.license.url MUST be Apache License URL
+    target_engine: spectral
+    status: implemented
+    rule_id: S-019
 
   - audit_id: DG-032
     description: info.contact MUST be absent
     target_engine: spectral
-    priority: low
+    status: implemented
+    rule_id: S-020
 
   - audit_id: DG-041
     description: Tag names Title Case convention
     target_engine: spectral
-    priority: low
+    status: implemented
+    rule_id: S-021
 
   - audit_id: DG-058
     description: Array items MUST have description (r4.x)
     target_engine: spectral
-    priority: medium
-    notes: commonalities_release >=r4.0
+    status: implemented
+    rule_id: S-031
 
   - audit_id: DG-087
     description: "specversion MUST be '1.0' (subscription)"
     target_engine: spectral
-    priority: low
-    notes: api_pattern subscription only
+    status: implemented
+    rule_id: S-032
 
   - audit_id: DG-090
     description: "protocol MUST be 'HTTP' (subscription)"
     target_engine: spectral
-    priority: low
-    notes: api_pattern subscription only
+    status: implemented
+    rule_id: S-033
 
   - audit_id: DG-091
     description: sink MUST use HTTPS (subscription)
     target_engine: spectral
-    priority: low
-    notes: api_pattern subscription only
+    status: implemented
+    rule_id: S-034
 
   - audit_id: DG-094
     description: Notification content-type cloudevents+json (subscription)
     target_engine: spectral
-    priority: low
-    notes: api_pattern subscription only
+    status: implemented
+    rule_id: S-035
 
   # Python gaps — implemented in Phase 2b
   - audit_id: DG-011
@@ -182,8 +190,8 @@ gap_rules:
   - audit_id: NEW-002
     description: "apiRoot variable: default and description MUST match Design Guide values"
     target_engine: spectral
-    priority: low
-    notes: "Hint level. Standard values: default 'http://localhost:9091', description 'API root, defined by the service provider, e.g. `api.example.com` or `api.example.com/somepath`'"
+    status: implemented
+    rule_id: S-022, S-023
 
   - audit_id: NEW-003
     description: "Orphan API definitions: YAML files in code/API_definitions/ not listed in release-plan.yaml"
diff --git a/validation/rules/spectral-rules.yaml b/validation/rules/spectral-rules.yaml
index cd448a62..33d4343b 100644
--- a/validation/rules/spectral-rules.yaml
+++ b/validation/rules/spectral-rules.yaml
@@ -74,6 +74,93 @@
   engine: spectral
   engine_rule: camara-security-no-secrets-in-path-or-query-parameters
 
+# ===== Phase 2a gap rules (S-018+) =====
+
+- id: S-018
+  engine: spectral
+  engine_rule: camara-license-name
+
+- id: S-019
+  engine: spectral
+  engine_rule: camara-license-url-value
+
+- id: S-020
+  engine: spectral
+  engine_rule: camara-no-contact
+
+- id: S-021
+  engine: spectral
+  engine_rule: camara-tag-name-title-case
+
+- id: S-022
+  engine: spectral
+  engine_rule: camara-api-root-default
+
+- id: S-023
+  engine: spectral
+  engine_rule: camara-api-root-description
+
+- id: S-024
+  engine: spectral
+  engine_rule: camara-response-403
+  hint: "All operations must document a 403 Forbidden response (CAMARA Design Guide section 3.2)."
+
+- id: S-025
+  engine: spectral
+  engine_rule: camara-error-code-not-numeric
+
+- id: S-026
+  engine: spectral
+  engine_rule: camara-error-code-screaming-snake-case
+  applicability:
+    commonalities_release: ">=r4.0"
+
+- id: S-027
+  engine: spectral
+  engine_rule: camara-error-code-api-specific-format
+
+- id: S-028
+  engine: spectral
+  engine_rule: camara-datetime-rfc3339-description
+
+- id: S-029
+  engine: spectral
+  engine_rule: camara-duration-rfc3339-description
+
+- id: S-030
+  engine: spectral
+  engine_rule: camara-required-properties-exist
+
+- id: S-031
+  engine: spectral
+  engine_rule: camara-array-items-description
+  applicability:
+    commonalities_release: ">=r4.0"
+
+- id: S-032
+  engine: spectral
+  engine_rule: camara-cloudevent-specversion
+  applicability:
+    api_pattern: [implicit-subscription, explicit-subscription]
+
+- id: S-033
+  engine: spectral
+  engine_rule: camara-subscription-protocol-http
+  applicability:
+    api_pattern: [implicit-subscription, explicit-subscription]
+
+- id: S-034
+  engine: spectral
+  engine_rule: camara-subscription-sink-https
+  applicability:
+    api_pattern: [implicit-subscription, explicit-subscription]
+
+- id: S-035
+  engine: spectral
+  engine_rule: camara-notification-content-type
+  applicability:
+    api_pattern: [implicit-subscription, explicit-subscription]
+
 # ===== Built-in OAS rules (S-200+) =====
 
 - id: S-200
diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py
index 53b0d659..55008ac4 100644
--- a/validation/tests/test_rule_metadata_integrity.py
+++ b/validation/tests/test_rule_metadata_integrity.py
@@ -78,7 +78,7 @@ def test_expected_rule_counts(self, all_rules):
         for r in all_rules:
             counts[r.engine] = counts.get(r.engine, 0) + 1
         assert counts["python"] == 19
-        assert counts["spectral"] == 66
+        assert counts["spectral"] == 84
         assert counts["gherkin"] == 25
         assert counts["yamllint"] == 13
 
@@ -306,8 +306,8 @@ def test_hints_are_exception_not_norm(self, all_rules):
         """
         with_hints = [r.id for r in all_rules if r.hint is not None]
         with_overrides = [r.id for r in all_rules if r.message_override is not None]
-        assert len(with_hints) == 9, (
-            f"Expected 9 explicit hints (update test if adding hints): "
+        assert len(with_hints) == 10, (
+            f"Expected 10 explicit hints (update test if adding hints): "
             f"{with_hints}"
         )
         assert len(with_overrides) == 0, (
diff --git a/validation/tests/test_spectral_gap_rules.py b/validation/tests/test_spectral_gap_rules.py
new file mode 100644
index 00000000..972c43f1
--- /dev/null
+++ b/validation/tests/test_spectral_gap_rules.py
@@ -0,0 +1,598 @@
+"""Tests for Phase 2a Spectral gap rules (S-018..S-035).
+
+Tests create minimal OpenAPI YAML fixtures, run Spectral with the r4 ruleset,
+and verify that expected rules fire (or don't fire) on them.  Each test targets
+a specific rule by checking for its rule code in the Spectral JSON output.
+
+Requires: Node.js + Spectral CLI (installed via validation/package.json).
+"""
+
+from __future__ import annotations
+
+import json
+import subprocess
+import tempfile
+from pathlib import Path
+
+import pytest
+
+# ---------------------------------------------------------------------------
+# Paths & helpers
+# ---------------------------------------------------------------------------
+
+_REPO_ROOT = Path(__file__).resolve().parent.parent.parent
+_RULESET = _REPO_ROOT / "linting" / "config" / ".spectral-r4.yaml"
+_NODE_MODULES = _REPO_ROOT / "validation" / "node_modules"
+
+
+def _run_spectral(yaml_content: str) -> list[dict]:
+    """Write *yaml_content* to a temp file, lint it with Spectral, return findings."""
+    with tempfile.NamedTemporaryFile(suffix=".yaml", mode="w", delete=False) as f:
+        f.write(yaml_content)
+        f.flush()
+        tmp_path = f.name
+
+    env = {
+        "PATH": subprocess.os.environ.get("PATH", ""),
+        "NODE_PATH": str(_NODE_MODULES),
+        "HOME": subprocess.os.environ.get("HOME", ""),
+    }
+    result = subprocess.run(
+        [
+            "node",
+            str(_NODE_MODULES / ".bin" / "spectral"),
+            "lint",
+            tmp_path,
+            "-r", str(_RULESET),
+            "--format", "json",
+        ],
+        capture_output=True,
+        text=True,
+        env=env,
+        timeout=30,
+    )
+    Path(tmp_path).unlink(missing_ok=True)
+    if result.stdout.strip():
+        return json.loads(result.stdout)
+    return []
+
+
+def _codes(findings: list[dict]) -> set[str]:
+    """Extract the set of rule codes from Spectral findings."""
+    return {f["code"] for f in findings}
+
+
+def _findings_for(findings: list[dict], code: str) -> list[dict]:
+    """Filter findings to a specific rule code."""
+    return [f for f in findings if f["code"] == code]
+
+
+# ---------------------------------------------------------------------------
+# Minimal valid spec (passes all rules)
+# ---------------------------------------------------------------------------
+
+_VALID_SPEC = """\
+openapi: 3.0.3
+info:
+  title: Test API
+  description: A test API
+  version: wip
+  license:
+    name: Apache 2.0
+    url: https://www.apache.org/licenses/LICENSE-2.0.html
+  x-camara-commonalities: 0.7.0
+externalDocs:
+  description: Product documentation at CAMARA
+  url: https://github.com/camaraproject/TestAPI
+servers:
+  - url: "{apiRoot}/test-api/vwip"
+    variables:
+      apiRoot:
+        default: http://localhost:9091
+        description: "API root, defined by the service provider, e.g. `api.example.com` or `api.example.com/somepath`"
+tags:
+  - name: Test API
+security:
+  - openId:
+    - test-api:read
+paths:
+  /test:
+    get:
+      tags:
+        - Test API
+      summary: Get test
+      description: Get test description
+      operationId: getTest
+      responses:
+        "200":
+          description: OK
+        "401":
+          description: Unauthorized
+          content:
+            application/json:
+              schema:
+                allOf:
+                  - $ref: "#/components/schemas/ErrorInfo"
+                  - type: object
+                    properties:
+                      code:
+                        enum:
+                          - UNAUTHENTICATED
+        "403":
+          description: Forbidden
+          content:
+            application/json:
+              schema:
+                allOf:
+                  - $ref: "#/components/schemas/ErrorInfo"
+                  - type: object
+                    properties:
+                      code:
+                        enum:
+                          - PERMISSION_DENIED
+components:
+  securitySchemes:
+    openId:
+      type: openIdConnect
+      openIdConnectUrl: https://example.com/.well-known/openid-configuration
+  schemas:
+    ErrorInfo:
+      type: object
+      required:
+        - status
+        - code
+        - message
+      properties:
+        status:
+          type: integer
+          format: int32
+          minimum: 100
+          maximum: 599
+          description: HTTP response status code
+        code:
+          type: string
+          maxLength: 96
+          description: A human-readable code to describe the error
+        message:
+          type: string
+          maxLength: 512
+          description: A human-readable description of what the event represents
+"""
+
+
+# ---------------------------------------------------------------------------
+# Tests
+# ---------------------------------------------------------------------------
+
+
+@pytest.fixture(scope="module")
+def valid_findings():
+    """Findings from the minimal valid spec — baseline for 'no false positives'."""
+    return _run_spectral(_VALID_SPEC)
+
+
+class TestGroupA:
+    """Group A: Simple field checks."""
+
+    def test_valid_spec_no_license_findings(self, valid_findings):
+        codes = _codes(valid_findings)
+        assert "camara-license-name" not in codes
+        assert "camara-license-url-value" not in codes
+
+    def test_license_name_wrong(self):
+        spec = _VALID_SPEC.replace("name: Apache 2.0", "name: MIT")
+        findings = _run_spectral(spec)
+        assert "camara-license-name" in _codes(findings)
+
+    def test_license_url_wrong(self):
+        spec = _VALID_SPEC.replace(
+            "url: https://www.apache.org/licenses/LICENSE-2.0.html",
+            "url: https://opensource.org/licenses/MIT",
+        )
+        findings = _run_spectral(spec)
+        assert "camara-license-url-value" in _codes(findings)
+
+    def test_no_contact_passes(self, valid_findings):
+        assert "camara-no-contact" not in _codes(valid_findings)
+
+    def test_contact_present_fails(self):
+        spec = _VALID_SPEC.replace(
+            "  x-camara-commonalities: 0.7.0",
+            "  contact:\n    name: Foo\n  x-camara-commonalities: 0.7.0",
+        )
+        findings = _run_spectral(spec)
+        assert "camara-no-contact" in _codes(findings)
+
+    def test_tag_title_case_passes(self, valid_findings):
+        assert "camara-tag-name-title-case" not in _codes(valid_findings)
+
+    def test_tag_title_case_fails(self):
+        spec = _VALID_SPEC.replace("name: Test API", "name: test api")
+        findings = _run_spectral(spec)
+        assert "camara-tag-name-title-case" in _codes(findings)
+
+    def test_api_root_default_passes(self, valid_findings):
+        assert "camara-api-root-default" not in _codes(valid_findings)
+
+    def test_api_root_default_fails(self):
+        spec = _VALID_SPEC.replace(
+            "default: http://localhost:9091",
+            "default: http://localhost:8080",
+        )
+        findings = _run_spectral(spec)
+        assert "camara-api-root-default" in _codes(findings)
+
+    def test_api_root_description_passes(self, valid_findings):
+        assert "camara-api-root-description" not in _codes(valid_findings)
+
+    def test_response_403_passes(self, valid_findings):
+        assert "camara-response-403" not in _codes(valid_findings)
+
+    def test_response_403_missing(self):
+        # Remove the 403 response block
+        spec = _VALID_SPEC.replace(
+            '        "403":\n'
+            "          description: Forbidden\n"
+            "          content:\n"
+            "            application/json:\n"
+            "              schema:\n"
+            "                allOf:\n"
+            '                  - $ref: "#/components/schemas/ErrorInfo"\n'
+            "                  - type: object\n"
+            "                    properties:\n"
+            "                      code:\n"
+            "                        enum:\n"
+            "                          - PERMISSION_DENIED",
+            "",
+        )
+        findings = _run_spectral(spec)
+        assert "camara-response-403" in _codes(findings)
+
+
+class TestGroupB:
+    """Group B: Error code checks."""
+
+    def test_valid_error_codes_pass(self, valid_findings):
+        codes = _codes(valid_findings)
+        assert "camara-error-code-not-numeric" not in codes
+        assert "camara-error-code-screaming-snake-case" not in codes
+        assert "camara-error-code-api-specific-format" not in codes
+
+    def test_numeric_error_code_fails(self):
+        # Must quote the value so YAML parses it as a string, not integer
+        spec = _VALID_SPEC.replace("- UNAUTHENTICATED", '- "401"')
+        findings = _run_spectral(spec)
+        assert "camara-error-code-not-numeric" in _codes(findings)
+
+    def test_non_screaming_snake_case_fails(self):
+        spec = _VALID_SPEC.replace("- UNAUTHENTICATED", "- unauthenticated")
+        findings = _run_spectral(spec)
+        assert "camara-error-code-screaming-snake-case" in _codes(findings)
+
+    def test_api_specific_code_valid(self):
+        spec = _VALID_SPEC.replace(
+            "- PERMISSION_DENIED", "- TEST_API.PERMISSION_DENIED"
+        )
+        findings = _run_spectral(spec)
+        assert "camara-error-code-api-specific-format" not in _codes(findings)
+
+    def test_api_specific_code_bad_format(self):
+        spec = _VALID_SPEC.replace(
+            "- PERMISSION_DENIED", "- test.permission_denied"
+        )
+        findings = _run_spectral(spec)
+        assert "camara-error-code-api-specific-format" in _codes(findings)
+
+
+class TestGroupC:
+    """Group C: Subscription schema checks."""
+
+    _SUBSCRIPTION_SPEC = """\
+    openapi: 3.0.3
+    info:
+      title: Test Subscriptions
+      description: Test
+      version: wip
+      license:
+        name: Apache 2.0
+        url: https://www.apache.org/licenses/LICENSE-2.0.html
+      x-camara-commonalities: 0.7.0
+    externalDocs:
+      description: Product documentation at CAMARA
+      url: https://github.com/camaraproject/TestAPI
+    servers:
+      - url: "{apiRoot}/test-subscriptions/vwip"
+        variables:
+          apiRoot:
+            default: http://localhost:9091
+            description: "API root, defined by the service provider, e.g. `api.example.com` or `api.example.com/somepath`"
+    tags:
+      - name: Test Subscription
+    security:
+      - openId:
+        - test:read
+    paths:
+      /subscriptions:
+        post:
+          tags:
+            - Test Subscription
+          summary: Create subscription
+          description: Create a subscription
+          operationId: createSubscription
+          requestBody:
+            required: true
+            content:
+              application/json:
+                schema:
+                  $ref: "#/components/schemas/SubscriptionRequest"
+          callbacks:
+            notifications:
+              "{$request.body#/sink}":
+                post:
+                  summary: Notification callback
+                  description: Notification callback
+                  operationId: postNotification
+                  requestBody:
+                    required: true
+                    content:
+                      application/cloudevents+json:
+                        schema:
+                          $ref: "#/components/schemas/CloudEvent"
+                  responses:
+                    "204":
+                      description: No Content
+                  security:
+                    - {}
+          responses:
+            "201":
+              description: Created
+            "401":
+              description: Unauthorized
+            "403":
+              description: Forbidden
+    components:
+      securitySchemes:
+        openId:
+          type: openIdConnect
+          openIdConnectUrl: https://example.com/.well-known/openid-configuration
+      schemas:
+        Protocol:
+          type: string
+          enum:
+            - HTTP
+          description: Delivery protocol
+        SubscriptionRequest:
+          type: object
+          required:
+            - sink
+            - protocol
+          properties:
+            protocol:
+              $ref: "#/components/schemas/Protocol"
+            sink:
+              type: string
+              format: uri
+              maxLength: 2048
+              pattern: "^https:\\\\/\\\\/.+$"
+              description: The address to which events shall be delivered
+        CloudEvent:
+          type: object
+          required:
+            - id
+            - source
+            - specversion
+            - type
+            - time
+          properties:
+            id:
+              type: string
+              description: Event identifier
+              minLength: 1
+            source:
+              type: string
+              format: uri-reference
+              minLength: 1
+              description: Event source
+            type:
+              type: string
+              description: Event type
+              minLength: 1
+            specversion:
+              type: string
+              description: CloudEvents version
+              enum:
+                - "1.0"
+            datacontenttype:
+              type: string
+              description: Content type
+              enum:
+                - application/json
+            time:
+              type: string
+              format: date-time
+              description: "Timestamp. It must follow [RFC 3339](https://datatracker.ietf.org/doc/html/rfc3339#section-5.6) and must have time zone."
+            data:
+              type: object
+              description: Event payload
+    """
+
+    def test_specversion_valid(self):
+        findings = _run_spectral(self._SUBSCRIPTION_SPEC)
+        assert "camara-cloudevent-specversion" not in _codes(findings)
+
+    def test_specversion_wrong(self):
+        spec = self._SUBSCRIPTION_SPEC.replace(
+            'enum:\n                - "1.0"',
+            'enum:\n                - "2.0"',
+        )
+        findings = _run_spectral(spec)
+        assert "camara-cloudevent-specversion" in _codes(findings)
+
+    def test_protocol_http_only_passes(self):
+        findings = _run_spectral(self._SUBSCRIPTION_SPEC)
+        assert "camara-subscription-protocol-http" not in _codes(findings)
+
+    def test_protocol_non_http_fails(self):
+        spec = self._SUBSCRIPTION_SPEC.replace(
+            "enum:\n            - HTTP\n          description: Delivery protocol",
+            "enum:\n            - HTTP\n            - MQTT3\n          description: Delivery protocol",
+        )
+        findings = _run_spectral(spec)
+        assert "camara-subscription-protocol-http" in _codes(findings)
+
+    def test_sink_https_passes(self):
+        findings = _run_spectral(self._SUBSCRIPTION_SPEC)
+        assert "camara-subscription-sink-https" not in _codes(findings)
+
+    def test_notification_content_type_passes(self):
+        findings = _run_spectral(self._SUBSCRIPTION_SPEC)
+        assert "camara-notification-content-type" not in _codes(findings)
+
+    def test_notification_content_type_wrong(self):
+        spec = self._SUBSCRIPTION_SPEC.replace(
+            "application/cloudevents+json:", "application/json:"
+        )
+        findings = _run_spectral(spec)
+        assert "camara-notification-content-type" in _codes(findings)
+
+
+class TestGroupD:
+    """Group D: Custom JS function rules."""
+
+    def test_datetime_rfc3339_passes(self):
+        # 4-space indent puts schema under components.schemas (sibling of ErrorInfo)
+        spec = _VALID_SPEC + (
+            "    TimestampSchema:\n"
+            "      type: object\n"
+            "      properties:\n"
+            "        createdAt:\n"
+            "          type: string\n"
+            "          format: date-time\n"
+            '          description: "Created timestamp. It must follow [RFC 3339](https://datatracker.ietf.org/doc/html/rfc3339#section-5.6) and must have time zone."\n'
+        )
+        findings = _run_spectral(spec)
+        assert "camara-datetime-rfc3339-description" not in _codes(findings)
+
+    def test_datetime_rfc3339_fails(self):
+        spec = _VALID_SPEC + (
+            "    TimestampSchema:\n"
+            "      type: object\n"
+            "      properties:\n"
+            "        createdAt:\n"
+            "          type: string\n"
+            "          format: date-time\n"
+            '          description: "A timestamp"\n'
+        )
+        findings = _run_spectral(spec)
+        assert "camara-datetime-rfc3339-description" in _codes(findings)
+
+    def test_duration_rfc3339_fails(self):
+        spec = _VALID_SPEC + (
+            "    DurationSchema:\n"
+            "      type: object\n"
+            "      properties:\n"
+            "        maxDuration:\n"
+            "          type: string\n"
+            "          format: duration\n"
+            '          description: "How long it takes"\n'
+        )
+        findings = _run_spectral(spec)
+        assert "camara-duration-rfc3339-description" in _codes(findings)
+
+    def test_required_properties_pass(self, valid_findings):
+        assert "camara-required-properties-exist" not in _codes(valid_findings)
+
+    def test_required_properties_fail(self):
+        spec = _VALID_SPEC + (
+            "    BadSchema:\n"
+            "      type: object\n"
+            "      required:\n"
+            "        - name\n"
+            "        - age\n"
+            "        - missing_field\n"
+            "      properties:\n"
+            "        name:\n"
+            "          type: string\n"
+            "          description: Name\n"
+            "        age:\n"
+            "          type: integer\n"
+            "          format: int32\n"
+            "          minimum: 0\n"
+            "          maximum: 200\n"
+            "          description: Age\n"
+        )
+        findings = _run_spectral(spec)
+        assert "camara-required-properties-exist" in _codes(findings)
+
+    def test_required_properties_allof_no_false_positive(self):
+        """allOf fragments with required but no properties should not fire."""
+        spec = _VALID_SPEC + (
+            "    ExtendedError:\n"
+            "      allOf:\n"
+            '        - $ref: "#/components/schemas/ErrorInfo"\n'
+            "        - type: object\n"
+            "          required:\n"
+            "            - detail\n"
+            "          properties:\n"
+            "            detail:\n"
+            "              type: string\n"
+            "              description: Additional detail\n"
+        )
+        findings = _run_spectral(spec)
+        allof_findings = [
+            f for f in _findings_for(findings, "camara-required-properties-exist")
+            if "ExtendedError" in str(f.get("path", []))
+        ]
+        assert len(allof_findings) == 0
+
+    def test_array_items_description_passes(self):
+        spec = _VALID_SPEC + (
+            "    ListSchema:\n"
+            "      type: object\n"
+            "      properties:\n"
+            "        items_list:\n"
+            "          type: array\n"
+            "          description: A list\n"
+            "          items:\n"
+            "            type: string\n"
+            "            description: An item\n"
+        )
+        findings = _run_spectral(spec)
+        items_findings = [
+            f for f in _findings_for(findings, "camara-array-items-description")
+            if "ListSchema" in str(f.get("path", []))
+        ]
+        assert len(items_findings) == 0
+
+    def test_array_items_description_fails(self):
+        spec = _VALID_SPEC + (
+            "    ListSchema:\n"
+            "      type: object\n"
+            "      properties:\n"
+            "        items_list:\n"
+            "          type: array\n"
+            "          description: A list\n"
+            "          items:\n"
+            "            type: string\n"
+        )
+        findings = _run_spectral(spec)
+        assert "camara-array-items-description" in _codes(findings)
+
+    def test_array_items_ref_skipped(self):
+        """$ref items should be skipped (target schema has own description)."""
+        spec = _VALID_SPEC + (
+            "    ListSchema:\n"
+            "      type: object\n"
+            "      properties:\n"
+            "        errors:\n"
+            "          type: array\n"
+            "          description: List of errors\n"
+            "          items:\n"
+            '            $ref: "#/components/schemas/ErrorInfo"\n'
+        )
+        findings = _run_spectral(spec)
+        items_findings = [
+            f for f in _findings_for(findings, "camara-array-items-description")
+            if "ListSchema" in str(f.get("path", []))
+        ]
+        assert len(items_findings) == 0

From bd57db52937649449a0a137c613f7a6d6c032d52 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 10 Apr 2026 12:28:27 +0200
Subject: [PATCH 062/157] fix(release-automation): caller template references
 v1-rc

The caller template still pointed to @ra-v1-rc (v0.3.0 state), but
repositories that opt into the v1-rc validation framework should consume
both caller workflows from the same v1-rc tag on the validation-framework
branch. The onboarding campaign deploys this template verbatim, so the
reference must be correct in the template itself.

Changes the uses: line in release_automation/workflows/release-automation-caller.yml
from @ra-v1-rc to @v1-rc. No behavioral change to the reusable workflow
itself; both tags currently resolve to different commits on different
branches, and v1-rc is the intended target for validation-framework
consumers.

The v1-rc tag will be moved forward to include this fix after merge.
---
 release_automation/workflows/release-automation-caller.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/release_automation/workflows/release-automation-caller.yml b/release_automation/workflows/release-automation-caller.yml
index e9a8f38a..88cf724f 100644
--- a/release_automation/workflows/release-automation-caller.yml
+++ b/release_automation/workflows/release-automation-caller.yml
@@ -80,5 +80,5 @@ jobs:
        github.event.pull_request.merged == true &&
        startsWith(github.event.pull_request.base.ref, 'release-snapshot/'))
 
-    uses: camaraproject/tooling/.github/workflows/release-automation-reusable.yml@ra-v1-rc
+    uses: camaraproject/tooling/.github/workflows/release-automation-reusable.yml@v1-rc
     secrets: inherit

From 564149d2937f7dbd3deb86ae4db2f75c43912034 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sun, 12 Apr 2026 10:42:22 +0200
Subject: [PATCH 063/157] refactor(issue-manager): remove deferred snapshot
 history placeholders
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Remove SnapshotHistoryEntry, append_to_history(), and
mark_snapshot_discarded() — the snapshot history table feature was
dropped in favor of the comment trail design. Remove corresponding
tests (9 tests removed).
---
 release_automation/scripts/issue_manager.py   | 129 +-------------
 .../tests/test_issue_manager.py               | 158 +-----------------
 2 files changed, 2 insertions(+), 285 deletions(-)

diff --git a/release_automation/scripts/issue_manager.py b/release_automation/scripts/issue_manager.py
index 587f43da..57ed5751 100644
--- a/release_automation/scripts/issue_manager.py
+++ b/release_automation/scripts/issue_manager.py
@@ -2,42 +2,16 @@
 Issue manager for CAMARA release automation.
 
 This module provides functionality for managing Release Issue content,
-including updating reserved sections, maintaining snapshot history,
-and generating standardized titles.
+including updating reserved sections and generating standardized titles.
 """
 
 import re
-from dataclasses import dataclass
 from datetime import datetime, timezone
 from typing import Any, Dict, Optional
 
 from . import config
 
 
-@dataclass
-class SnapshotHistoryEntry:
-    """
-    Represents an entry in the snapshot history table.
-
-    Note: HISTORY section is deferred to backlog (not MVP).
-    This dataclass is preserved for future implementation.
-
-    Attributes:
-        snapshot_id: Unique identifier (e.g., "r4.1-abc1234")
-        status: Either "Current" or "Discarded"
-        created_at: ISO timestamp when snapshot was created
-        discarded_at: ISO timestamp when discarded (if applicable)
-        reason: Reason for discarding (if applicable)
-        release_review_branch: The release-review branch name
-    """
-    snapshot_id: str
-    status: str  # "Current" or "Discarded"
-    created_at: str
-    discarded_at: Optional[str] = None
-    reason: Optional[str] = None
-    release_review_branch: str = ""
-
-
 class IssueManager:
     """
     Manages Release Issue content - updating reserved sections
@@ -52,9 +26,6 @@ class IssueManager:
         - STATE: Current release state, timestamp, and active artifact links
         - CONFIG: Release configuration (APIs, dependencies)
         - ACTIONS: Valid actions for the current state
-
-    Note: HISTORY section has been deferred to backlog (not MVP).
-    The comment trail serves as the audit log.
     """
 
     # Pattern for matching sections (use .format(name=section_name))
@@ -113,103 +84,6 @@ def get_section_content(self, body: str, section: str) -> Optional[str]:
         match = re.search(pattern, body, flags=re.DOTALL)
         return match.group(1) if match else None
 
-    def append_to_history(self, body: str, entry: SnapshotHistoryEntry) -> str:
-        """
-        Add a new entry to the snapshot history table.
-
-        Note: HISTORY section is deferred to backlog (not MVP).
-        This method is preserved for future implementation.
-
-        The entry is inserted after the table header row.
-
-        Args:
-            body: The current issue body
-            entry: Snapshot history entry to add
-
-        Returns:
-            Updated issue body with new history row
-        """
-        # Format the new row
-        discarded = entry.discarded_at or "—"
-        reason = entry.reason or "—"
-
-        new_row = (
-            f"| `{entry.snapshot_id}` | **{entry.status}** | "
-            f"{entry.created_at} | {discarded} | {reason} | "
-            f"`{entry.release_review_branch}` |"
-        )
-
-        # Find the HISTORY section and the table header
-        # Table format:
-        # | Snapshot | Status | Created | Discarded | Reason | Review Branch |
-        # |----------|--------|---------|-----------|--------|---------------|
-        # | ... rows ... |
-
-        history_content = self.get_section_content(body, "HISTORY")
-        if history_content is None:
-            return body
-
-        # Find the header separator line (|---...|) and insert after it
-        lines = history_content.split('\n')
-        insert_index = None
-
-        for i, line in enumerate(lines):
-            # Look for the separator line (contains |---|)
-            if re.match(r'\s*\|[-|]+\|\s*$', line):
-                insert_index = i + 1
-                break
-
-        if insert_index is None:
-            # No table found, just append at the end
-            new_content = history_content.rstrip() + '\n' + new_row
-        else:
-            # Insert the new row after the separator
-            lines.insert(insert_index, new_row)
-            new_content = '\n'.join(lines)
-
-        return self.update_section(body, "HISTORY", new_content)
-
-    def mark_snapshot_discarded(
-        self,
-        body: str,
-        snapshot_id: str,
-        reason: str
-    ) -> str:
-        """
-        Update an existing snapshot entry from 'Current' to 'Discarded'.
-
-        Note: HISTORY section is deferred to backlog (not MVP).
-        This method is preserved for future implementation.
-
-        Finds the row with the matching snapshot_id and updates:
-        - Status: Current → Discarded
-        - Discarded: — → current timestamp
-        - Reason: — → provided reason
-
-        Args:
-            body: The current issue body
-            snapshot_id: The snapshot ID to update
-            reason: Reason for discarding
-
-        Returns:
-            Updated issue body with modified history row
-        """
-        timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M")
-
-        # Pattern to match the specific row
-        # | `snapshot_id` | **Current** | created_at | — | — | `branch` |
-        pattern = (
-            rf"\| `{re.escape(snapshot_id)}` \| \*\*Current\*\* \| "
-            rf"([^|]+) \| — \| — \| ([^|]+) \|"
-        )
-
-        replacement = (
-            f"| `{snapshot_id}` | Discarded | "
-            f"\\1| {timestamp} | {reason} | \\2|"
-        )
-
-        return re.sub(pattern, replacement, body)
-
     def generate_title(
         self,
         release_tag: str,
@@ -422,7 +296,6 @@ def generate_issue_body_template(
         Generate a complete issue body template for a new Release Issue.
 
         This creates the initial structure with all reserved sections.
-        The HISTORY section has been deferred to backlog (not MVP).
 
         Args:
             release_tag: Release tag (e.g., "r4.1")
diff --git a/release_automation/tests/test_issue_manager.py b/release_automation/tests/test_issue_manager.py
index 024fafa2..d3dc6403 100644
--- a/release_automation/tests/test_issue_manager.py
+++ b/release_automation/tests/test_issue_manager.py
@@ -8,43 +8,7 @@
 from datetime import datetime, timezone
 from unittest.mock import patch
 
-from release_automation.scripts.issue_manager import (
-    IssueManager,
-    SnapshotHistoryEntry,
-)
-
-
-class TestSnapshotHistoryEntry:
-    """Tests for SnapshotHistoryEntry dataclass."""
-
-    def test_create_current_entry(self):
-        """Test creating an entry for a current snapshot."""
-        entry = SnapshotHistoryEntry(
-            snapshot_id="r4.1-abc1234",
-            status="Current",
-            created_at="2026-01-30 10:00",
-            release_review_branch="release-review/r4.1-abc1234"
-        )
-
-        assert entry.snapshot_id == "r4.1-abc1234"
-        assert entry.status == "Current"
-        assert entry.discarded_at is None
-        assert entry.reason is None
-
-    def test_create_discarded_entry(self):
-        """Test creating an entry for a discarded snapshot."""
-        entry = SnapshotHistoryEntry(
-            snapshot_id="r4.1-abc1234",
-            status="Discarded",
-            created_at="2026-01-30 10:00",
-            discarded_at="2026-01-30 12:00",
-            reason="API validation failed",
-            release_review_branch="release-review/r4.1-abc1234"
-        )
-
-        assert entry.status == "Discarded"
-        assert entry.discarded_at == "2026-01-30 12:00"
-        assert entry.reason == "API validation failed"
+from release_automation.scripts.issue_manager import IssueManager
 
 
 class TestIssueManagerUpdateSection:
@@ -146,107 +110,6 @@ def test_get_nonexistent_section_returns_none(self):
         assert content is None
 
 
-class TestIssueManagerAppendToHistory:
-    """Tests for append_to_history method."""
-
-    def test_append_first_entry(self):
-        """Test appending the first entry to an empty history table."""
-        manager = IssueManager()
-
-        body = """
-| Snapshot | Status | Created | Discarded | Reason | Review Branch |
-|----------|--------|---------|-----------|--------|---------------|
-"""
-
-        entry = SnapshotHistoryEntry(
-            snapshot_id="r4.1-abc1234",
-            status="Current",
-            created_at="2026-01-30 10:00",
-            release_review_branch="release-review/r4.1-abc1234"
-        )
-
-        result = manager.append_to_history(body, entry)
-
-        assert "`r4.1-abc1234`" in result
-        assert "**Current**" in result
-        assert "2026-01-30 10:00" in result
-        assert "`release-review/r4.1-abc1234`" in result
-
-    def test_append_second_entry(self):
-        """Test appending a second entry (newest at top)."""
-        manager = IssueManager()
-
-        body = """
-| Snapshot | Status | Created | Discarded | Reason | Review Branch |
-|----------|--------|---------|-----------|--------|---------------|
-| `r4.1-abc1234` | Discarded | 2026-01-29 10:00 | 2026-01-29 12:00 | Failed | `release-review/r4.1-abc1234` |
-"""
-
-        entry = SnapshotHistoryEntry(
-            snapshot_id="r4.1-def5678",
-            status="Current",
-            created_at="2026-01-30 10:00",
-            release_review_branch="release-review/r4.1-def5678"
-        )
-
-        result = manager.append_to_history(body, entry)
-
-        # New entry should be present
-        assert "`r4.1-def5678`" in result
-        # Old entry should still be present
-        assert "`r4.1-abc1234`" in result
-        # New entry should appear before old entry
-        new_pos = result.find("r4.1-def5678")
-        old_pos = result.find("r4.1-abc1234")
-        assert new_pos < old_pos
-
-    def test_append_with_defaults_for_optional_fields(self):
-        """Test that optional fields default to em-dash."""
-        manager = IssueManager()
-
-        body = """
-| Snapshot | Status | Created | Discarded | Reason | Review Branch |
-|----------|--------|---------|-----------|--------|---------------|
-"""
-
-        entry = SnapshotHistoryEntry(
-            snapshot_id="r4.1-abc1234",
-            status="Current",
-            created_at="2026-01-30 10:00",
-            release_review_branch="release-review/r4.1-abc1234"
-            # discarded_at and reason are None
-        )
-
-        result = manager.append_to_history(body, entry)
-
-        # Should have em-dashes for discarded and reason
-        assert "| — |" in result
-
-
-class TestIssueManagerMarkSnapshotDiscarded:
-    """Tests for mark_snapshot_discarded method."""
-
-    @patch('release_automation.scripts.issue_manager.datetime')
-    def test_mark_discarded(self, mock_datetime):
-        """Test marking a snapshot as discarded."""
-        mock_datetime.now.return_value = datetime(2026, 1, 30, 12, 0, tzinfo=timezone.utc)
-
-        manager = IssueManager()
-
-        body = """| `r4.1-abc1234` | **Current** | 2026-01-30 10:00 | — | — | `release-review/r4.1-abc1234` |"""
-
-        result = manager.mark_snapshot_discarded(
-            body,
-            snapshot_id="r4.1-abc1234",
-            reason="Validation failed"
-        )
-
-        assert "Discarded" in result
-        assert "2026-01-30 12:00" in result
-        assert "Validation failed" in result
-        assert "**Current**" not in result
-
-
 class TestIssueManagerGenerateTitle:
     """Tests for generate_title method."""
 
@@ -515,25 +378,6 @@ def test_update_section_with_special_chars_in_content(self):
 
         assert new_content in result
 
-    def test_history_without_table_structure(self):
-        """Test appending to history when table structure is missing."""
-        manager = IssueManager()
-
-        body = """
-Some malformed content without table
-"""
-
-        entry = SnapshotHistoryEntry(
-            snapshot_id="r4.1-abc1234",
-            status="Current",
-            created_at="2026-01-30 10:00",
-            release_review_branch="release-review/r4.1-abc1234"
-        )
-
-        # Should not crash, just append
-        result = manager.append_to_history(body, entry)
-        assert "`r4.1-abc1234`" in result
-
     def test_get_section_with_nested_comments(self):
         """Test getting section that might have nested HTML comments."""
         manager = IssueManager()

From c01c2398e7747c82a9f49af2142c454443d9ec77 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sun, 12 Apr 2026 11:00:22 +0200
Subject: [PATCH 064/157] refactor(state-manager): simplify state derivation
 interface

Consolidate derive_state() and get_current_release_info() into a single
derive_state() method that derives release_tag from repository artifacts
instead of requiring it as input. Returns ReleaseInfoResult with the
complete picture: release_tag, state, snapshot info, and config errors.

Changes:
- derive_state() no longer takes release_tag parameter; reads
  release-plan.yaml and release-metadata.yaml to determine it
- Returns ReleaseInfoResult instead of plain ReleaseState enum
- Includes retry_draft_release support for eventual consistency
- Uses strict config validation (errors are failures, not states)
- Adds meta_release field to ReleaseInfoResult
- Removes get_current_release_info() and lenient _read_release_plan()
- Removes deprecated release_tag input from derive-release-state action
- Fixes release_type bug (was referencing undefined 'snapshot' variable)
- Updates all callers: issue_sync.py, snapshot_creator.py, action.yml
- Updates tests to use new interface (557 tests pass)
---
 .../docs/technical-architecture.md            |   2 +-
 release_automation/scripts/issue_sync.py      |  19 +-
 .../scripts/snapshot_creator.py               |   8 +-
 release_automation/scripts/state_manager.py   | 251 +++++++-----------
 release_automation/tests/test_issue_sync.py   |  22 +-
 .../tests/test_snapshot_creator.py            |  46 +++-
 .../tests/test_state_manager.py               | 198 +++++++-------
 .../derive-release-state/action.yml           |  18 +-
 8 files changed, 285 insertions(+), 279 deletions(-)

diff --git a/release_automation/docs/technical-architecture.md b/release_automation/docs/technical-architecture.md
index 5e57b5a7..1b9fe96f 100644
--- a/release_automation/docs/technical-architecture.md
+++ b/release_automation/docs/technical-architecture.md
@@ -179,7 +179,7 @@ class ReleaseState(Enum):
 
 - `ReleaseStateManager` — Derives state and reads snapshot metadata from artifacts
 - `SnapshotInfo` — Data read from `release-metadata.yaml` on the snapshot branch (snapshot ID, branches, APIs with calculated versions, dependencies)
-- `ReleaseInfoResult` — Return type from `get_current_release_info()`, includes either state data or a `ConfigurationError`
+- `ReleaseInfoResult` — Return type from `derive_state()`, includes either state data or a `ConfigurationError`
 
 ### 2.2 Version Calculator (`version_calculator.py`)
 
diff --git a/release_automation/scripts/issue_sync.py b/release_automation/scripts/issue_sync.py
index faf3d525..114224a6 100644
--- a/release_automation/scripts/issue_sync.py
+++ b/release_automation/scripts/issue_sync.py
@@ -147,11 +147,20 @@ def sync_release_issue(
         self.ensure_labels_exist()
 
         # Derive current state
-        state = state_override or self.state_manager.derive_state(
-            release_tag,
-            retry_draft_release=True,
-        )
-        context_source = "override" if state_override else "re-derived"
+        if state_override:
+            state = state_override
+            context_source = "override"
+        else:
+            release_info = self.state_manager.derive_state(
+                retry_draft_release=True,
+            )
+            if not release_info.success:
+                return SyncResult(
+                    action="none",
+                    reason=f"config_error: {release_info.config_error.message}",
+                )
+            state = release_info.state
+            context_source = "re-derived"
         print(
             f"Issue sync effective context: source={context_source}, "
             f"state={state.value}, "
diff --git a/release_automation/scripts/snapshot_creator.py b/release_automation/scripts/snapshot_creator.py
index 2c1c4863..cc813b77 100644
--- a/release_automation/scripts/snapshot_creator.py
+++ b/release_automation/scripts/snapshot_creator.py
@@ -480,7 +480,13 @@ def validate_preconditions(self, release_tag: str) -> List[str]:
         errors = []
 
         # Check current state
-        state = self.state_manager.derive_state(release_tag)
+        release_info = self.state_manager.derive_state()
+        if not release_info.success:
+            errors.append(
+                f"Configuration error: {release_info.config_error.message}"
+            )
+            return errors
+        state = release_info.state
 
         if state == ReleaseState.PUBLISHED:
             errors.append(
diff --git a/release_automation/scripts/state_manager.py b/release_automation/scripts/state_manager.py
index d2ce59d1..04cf9ec7 100644
--- a/release_automation/scripts/state_manager.py
+++ b/release_automation/scripts/state_manager.py
@@ -93,9 +93,9 @@ class ConfigurationError:
 @dataclass
 class ReleaseInfoResult:
     """
-    Result of get_current_release_info() call.
+    Result of derive_state() call.
 
-    This replaces the plain dict return type to distinguish between:
+    Distinguishes between:
     - Success: Valid state derived from repository artifacts
     - Error: Configuration problem that prevents state derivation
 
@@ -110,6 +110,7 @@ class ReleaseInfoResult:
     config_error: Optional[ConfigurationError] = None
     release_issue_number: Optional[int] = None  # GitHub issue number if found
     release_type: Optional[str] = None  # Release type if available
+    meta_release: Optional[str] = None  # Meta-release name (e.g., "Sync26")
 
     def to_dict(self) -> Dict[str, Any]:
         """
@@ -128,6 +129,7 @@ def to_dict(self) -> Dict[str, Any]:
                 "config_error_type": None,
                 "release_issue_number": self.release_issue_number,
                 "release_type": self.release_type,
+                "meta_release": self.meta_release,
             }
         else:
             return {
@@ -139,6 +141,7 @@ def to_dict(self) -> Dict[str, Any]:
                 "config_error_type": self.config_error.error_type if self.config_error else "unknown",
                 "release_issue_number": None,
                 "release_type": None,
+                "meta_release": None,
             }
 
 
@@ -162,55 +165,119 @@ def __init__(self, github_client: GitHubClient):
 
     def derive_state(
         self,
-        release_tag: str,
         retry_draft_release: bool = False
-    ) -> ReleaseState:
+    ) -> ReleaseInfoResult:
         """
-        Derive the current release state from repository artifacts.
+        Derive the current release state and tag from repository artifacts.
 
-        The derivation follows this priority order:
-        1. If tag exists → PUBLISHED
-        2. If snapshot branch exists:
+        This is the single entry point for state derivation. It determines
+        the release_tag from the authoritative source and derives the state:
+
+        1. Read and validate release-plan.yaml (config errors → failure)
+        2. If tag exists → PUBLISHED
+        3. If snapshot branch exists:
+           - Read release_tag from release-metadata.yaml on snapshot
            - If draft release exists → DRAFT_READY
            - Otherwise → SNAPSHOT_ACTIVE
-        3. If release-plan.yaml defines this release:
-           - If target_release_type is "none" → NOT_PLANNED
-           - Otherwise → PLANNED
-        4. Default → NOT_PLANNED
+        4. If release-plan.yaml target_release_type != "none" → PLANNED
+        5. Otherwise → NOT_PLANNED
 
         Args:
-            release_tag: Release tag to check (e.g., "r4.1")
-            retry_draft_release: Retry draft-release detection for eventual consistency
+            retry_draft_release: Retry draft-release detection for eventual
+                consistency (useful when called right after draft creation)
 
         Returns:
-            Current ReleaseState for the given release tag
+            ReleaseInfoResult with either:
+                - success=True: release_tag, state, snapshot_branch, source
+                - success=False: config_error with details
         """
-        # Step 1: Check if tag exists → PUBLISHED
-        if self.gh.tag_exists(release_tag):
-            return ReleaseState.PUBLISHED
+        # Step 1: Read and validate release-plan.yaml
+        plan, config_error = self._read_release_plan_with_validation()
+
+        if config_error:
+            return ReleaseInfoResult(success=False, config_error=config_error)
 
-        # Step 2: Check for snapshot branch
-        snapshot_branches = self.gh.list_branches(f"{config.SNAPSHOT_BRANCH_PREFIX}{release_tag}-*")
+        plan_release_tag = plan["repository"]["target_release_tag"]
+        plan_release_type = plan["repository"].get("target_release_type")
+        meta_release = plan["repository"].get("meta_release")
+
+        # Step 2: Check if tag exists → PUBLISHED
+        if self.gh.tag_exists(plan_release_tag):
+            return ReleaseInfoResult(
+                success=True,
+                release_tag=plan_release_tag,
+                state=ReleaseState.PUBLISHED,
+                snapshot_branch=None,
+                source="tag",
+                release_issue_number=self.find_release_issue(plan_release_tag),
+                release_type=plan_release_type,
+                meta_release=meta_release,
+            )
+
+        # Step 3: Check for snapshot branches
+        snapshot_branches = self.gh.list_branches(
+            f"{config.SNAPSHOT_BRANCH_PREFIX}{plan_release_tag}-*"
+        )
 
         if snapshot_branches:
-            # Step 3: Check for draft release
-            if self._draft_release_exists(release_tag, retry=retry_draft_release):
-                return ReleaseState.DRAFT_READY
-            return ReleaseState.SNAPSHOT_ACTIVE
+            snapshot_branch = snapshot_branches[0].name
+            metadata = self._read_release_metadata(snapshot_branch)
 
-        # Step 4: No snapshot - check release-plan.yaml for PLANNED state
-        plan = self._read_release_plan()
-        if plan:
-            target_tag = plan.get("repository", {}).get("target_release_tag")
-            release_type = plan.get("repository", {}).get("target_release_type")
+            if metadata:
+                metadata_release_tag = metadata.get(
+                    "repository", {}
+                ).get("release_tag")
+                metadata_release_type = metadata.get(
+                    "repository", {}
+                ).get("release_type")
+            else:
+                # Fall back to extracting from branch name
+                snapshot_id = snapshot_branch.replace(
+                    config.SNAPSHOT_BRANCH_PREFIX, ""
+                )
+                metadata_release_tag = (
+                    snapshot_id.split("-")[0] if "-" in snapshot_id
+                    else snapshot_id
+                )
+                metadata_release_type = None
 
-            if target_tag == release_tag:
-                if release_type and release_type.lower() != "none":
-                    return ReleaseState.PLANNED
-                elif release_type and release_type.lower() == "none":
-                    return ReleaseState.NOT_PLANNED
+            effective_tag = metadata_release_tag or plan_release_tag
 
-        return ReleaseState.NOT_PLANNED
+            # Check for draft release
+            if self._draft_release_exists(
+                effective_tag, retry=retry_draft_release
+            ):
+                state = ReleaseState.DRAFT_READY
+            else:
+                state = ReleaseState.SNAPSHOT_ACTIVE
+
+            return ReleaseInfoResult(
+                success=True,
+                release_tag=effective_tag,
+                state=state,
+                snapshot_branch=snapshot_branch,
+                source="release-metadata.yaml",
+                release_issue_number=self.find_release_issue(effective_tag),
+                release_type=metadata_release_type,
+                meta_release=meta_release,
+            )
+
+        # Step 4: No snapshot — use release-plan.yaml state
+        if plan_release_type and plan_release_type.lower() != "none":
+            state = ReleaseState.PLANNED
+        else:
+            state = ReleaseState.NOT_PLANNED
+
+        return ReleaseInfoResult(
+            success=True,
+            release_tag=plan_release_tag,
+            state=state,
+            snapshot_branch=None,
+            source="release-plan.yaml",
+            release_issue_number=self.find_release_issue(plan_release_tag),
+            release_type=plan_release_type,
+            meta_release=meta_release,
+        )
 
     def _draft_release_exists(self, release_tag: str, retry: bool = False) -> bool:
         """
@@ -363,100 +430,6 @@ def find_release_issue(self, release_tag: str) -> Optional[int]:
 
         return None
 
-    def get_current_release_info(self) -> ReleaseInfoResult:
-        """
-        Get the current release tag and state from repository artifacts.
-
-        This method determines the release_tag from the authoritative source:
-        - If a snapshot branch exists: from release-metadata.yaml on the snapshot
-        - Otherwise: from release-plan.yaml on main branch
-
-        The state is derived accordingly:
-        - PUBLISHED: if tag exists
-        - DRAFT_READY: if snapshot branch and draft release exist
-        - SNAPSHOT_ACTIVE: if snapshot branch exists
-        - PLANNED: if release-plan.yaml has target_release_type != none
-        - NOT_PLANNED: if release-plan.yaml has target_release_type == none or missing
-
-        Configuration errors are returned as error results, NOT as NOT_PLANNED state.
-
-        Returns:
-            ReleaseInfoResult with either:
-                - success=True: release_tag, state, snapshot_branch, source
-                - success=False: config_error with details
-        """
-        # First, try to read release-plan.yaml and handle configuration errors
-        plan, config_error = self._read_release_plan_with_validation()
-
-        if config_error:
-            return ReleaseInfoResult(success=False, config_error=config_error)
-
-        # At this point, plan is valid with all required fields
-        plan_release_tag = plan["repository"]["target_release_tag"]
-        plan_release_type = plan["repository"].get("target_release_type")
-
-        # Check if the planned release is already published
-        if self.gh.tag_exists(plan_release_tag):
-            return ReleaseInfoResult(
-                success=True,
-                release_tag=plan_release_tag,
-                state=ReleaseState.PUBLISHED,
-                snapshot_branch=None,
-                source="tag",
-                release_issue_number=self.find_release_issue(plan_release_tag),
-                release_type=plan_release_type
-            )
-
-        # Check for any snapshot branches for the planned release
-        snapshot_branches = self.gh.list_branches(f"{config.SNAPSHOT_BRANCH_PREFIX}{plan_release_tag}-*")
-
-        if snapshot_branches:
-            # Snapshot exists - read release_tag from release-metadata.yaml
-            snapshot_branch = snapshot_branches[0].name
-            metadata = self._read_release_metadata(snapshot_branch)
-
-            if metadata:
-                metadata_release_tag = metadata.get("repository", {}).get("release_tag")
-            else:
-                # Fall back to extracting from branch name
-                # release-snapshot/r4.1-abc1234 → r4.1
-                snapshot_id = snapshot_branch.replace(config.SNAPSHOT_BRANCH_PREFIX, "")
-                metadata_release_tag = snapshot_id.split("-")[0] if "-" in snapshot_id else snapshot_id
-
-            # Determine if draft ready
-            if self.gh.draft_release_exists(metadata_release_tag or plan_release_tag):
-                state = ReleaseState.DRAFT_READY
-            else:
-                state = ReleaseState.SNAPSHOT_ACTIVE
-
-            effective_tag = metadata_release_tag or plan_release_tag
-            return ReleaseInfoResult(
-                success=True,
-                release_tag=effective_tag,
-                state=state,
-                snapshot_branch=snapshot_branch,
-                source="release-metadata.yaml",
-                release_issue_number=self.find_release_issue(effective_tag),
-                release_type=snapshot.release_type if 'snapshot' in locals() and snapshot else None
-            )
-
-        # No snapshot - use release-plan.yaml state
-        if plan_release_type and plan_release_type.lower() != "none":
-            state = ReleaseState.PLANNED
-        else:
-            # target_release_type is "none" or missing - intentional NOT_PLANNED
-            state = ReleaseState.NOT_PLANNED
-
-        return ReleaseInfoResult(
-            success=True,
-            release_tag=plan_release_tag,
-            state=state,
-            snapshot_branch=None,
-            source="release-plan.yaml",
-            release_issue_number=self.find_release_issue(plan_release_tag),
-            release_type=plan_release_type
-        )
-
     def _read_release_plan_with_validation(
         self, ref: str = "main"
     ) -> tuple[Optional[dict], Optional[ConfigurationError]]:
@@ -524,26 +497,6 @@ def _read_release_plan_with_validation(
 
         return plan, None
 
-    def _read_release_plan(self, ref: str = "main") -> Optional[dict]:
-        """
-        Read and parse release-plan.yaml from the repository.
-
-        Args:
-            ref: Branch, tag, or commit to read from
-
-        Returns:
-            Parsed YAML content as dict, or None if file doesn't exist or is invalid
-        """
-        content = self.gh.get_file_content(config.RELEASE_PLAN_FILE, ref)
-        if not content:
-            return None
-
-        try:
-            return yaml.safe_load(content)
-        except yaml.YAMLError as e:
-            print(f"Warning: Failed to parse release-plan.yaml from {ref}: {e}")
-            return None
-
     def _read_release_metadata(self, ref: str) -> Optional[dict]:
         """
         Read and parse release-metadata.yaml from a branch.
diff --git a/release_automation/tests/test_issue_sync.py b/release_automation/tests/test_issue_sync.py
index ab5c0ccf..4ef43de4 100644
--- a/release_automation/tests/test_issue_sync.py
+++ b/release_automation/tests/test_issue_sync.py
@@ -13,7 +13,7 @@
     WORKFLOW_MARKER,
     REQUIRED_LABELS,
 )
-from release_automation.scripts.state_manager import ReleaseState
+from release_automation.scripts.state_manager import ReleaseInfoResult, ReleaseState
 
 
 class TestSyncResult:
@@ -181,7 +181,9 @@ def test_creates_issue_when_planned_and_no_issue(self):
             }
         }
 
-        state_manager.derive_state.return_value = ReleaseState.PLANNED
+        state_manager.derive_state.return_value = ReleaseInfoResult(
+            success=True, state=ReleaseState.PLANNED,
+            release_tag="r4.1", source="release-plan.yaml")
         gh.search_issues.return_value = []
         gh.create_issue.return_value = {"number": 1, "title": "Release r4.1 (RC)"}
         issue_manager.generate_title.return_value = "Release r4.1 (RC) — Sync26"
@@ -204,7 +206,9 @@ def test_always_updates_issue_in_planned_state(self):
             }
         }
 
-        state_manager.derive_state.return_value = ReleaseState.PLANNED
+        state_manager.derive_state.return_value = ReleaseInfoResult(
+            success=True, state=ReleaseState.PLANNED,
+            release_tag="r4.1", source="release-plan.yaml")
         gh.search_issues.return_value = [
             {
                 "number": 1,
@@ -234,7 +238,9 @@ def test_updates_issue_when_state_changes(self):
             }
         }
 
-        state_manager.derive_state.return_value = ReleaseState.SNAPSHOT_ACTIVE
+        state_manager.derive_state.return_value = ReleaseInfoResult(
+            success=True, state=ReleaseState.SNAPSHOT_ACTIVE,
+            release_tag="r4.1", source="release-metadata.yaml")
         gh.search_issues.return_value = [
             {
                 "number": 1,
@@ -265,7 +271,9 @@ def test_no_action_when_not_planned_and_no_issue(self):
             }
         }
 
-        state_manager.derive_state.return_value = ReleaseState.NOT_PLANNED
+        state_manager.derive_state.return_value = ReleaseInfoResult(
+            success=True, state=ReleaseState.NOT_PLANNED,
+            release_tag="r4.1", source="release-plan.yaml")
         gh.search_issues.return_value = []
 
         result = manager.sync_release_issue(release_plan)
@@ -708,7 +716,9 @@ def test_ensures_labels_before_operations(self):
         gh = MagicMock()
         state_manager = MagicMock()
         gh.get_label.return_value = None  # All labels missing
-        state_manager.derive_state.return_value = ReleaseState.NOT_PLANNED
+        state_manager.derive_state.return_value = ReleaseInfoResult(
+            success=True, state=ReleaseState.NOT_PLANNED,
+            release_tag="r4.1", source="release-plan.yaml")
         gh.search_issues.return_value = []
 
         manager = IssueSyncManager(gh, state_manager, MagicMock(), MagicMock())
diff --git a/release_automation/tests/test_snapshot_creator.py b/release_automation/tests/test_snapshot_creator.py
index 1209f6d4..f67cb7c6 100644
--- a/release_automation/tests/test_snapshot_creator.py
+++ b/release_automation/tests/test_snapshot_creator.py
@@ -22,7 +22,7 @@
     InvalidStateError,
     TransformationError,
 )
-from release_automation.scripts.state_manager import ReleaseState
+from release_automation.scripts.state_manager import ReleaseInfoResult, ReleaseState
 from release_automation.scripts.mechanical_transformer import TransformationResult
 from release_automation.scripts.git_operations import PullRequestInfo, GitOperationsError
 
@@ -89,7 +89,9 @@ def mock_metadata_generator():
 def mock_state_manager():
     """Create a mock ReleaseStateManager."""
     mgr = Mock()
-    mgr.derive_state.return_value = ReleaseState.PLANNED
+    mgr.derive_state.return_value = ReleaseInfoResult(
+        success=True, state=ReleaseState.PLANNED,
+        release_tag="r4.1", source="release-plan.yaml")
     return mgr
 
 
@@ -267,9 +269,16 @@ def test_preserves_release_tag_format(self, snapshot_creator):
 class TestValidatePreconditions:
     """Tests for precondition validation."""
 
+    def _make_result(self, state):
+        """Helper to create a ReleaseInfoResult for a given state."""
+        return ReleaseInfoResult(
+            success=True, state=state,
+            release_tag="r4.1", source="release-plan.yaml")
+
     def test_valid_planned_state(self, snapshot_creator, mock_state_manager):
         """Test validation passes for PLANNED state."""
-        mock_state_manager.derive_state.return_value = ReleaseState.PLANNED
+        mock_state_manager.derive_state.return_value = self._make_result(
+            ReleaseState.PLANNED)
 
         errors = snapshot_creator.validate_preconditions("r4.1")
 
@@ -277,7 +286,8 @@ def test_valid_planned_state(self, snapshot_creator, mock_state_manager):
 
     def test_invalid_published_state(self, snapshot_creator, mock_state_manager):
         """Test validation fails for PUBLISHED state."""
-        mock_state_manager.derive_state.return_value = ReleaseState.PUBLISHED
+        mock_state_manager.derive_state.return_value = self._make_result(
+            ReleaseState.PUBLISHED)
 
         errors = snapshot_creator.validate_preconditions("r4.1")
 
@@ -286,7 +296,8 @@ def test_invalid_published_state(self, snapshot_creator, mock_state_manager):
 
     def test_invalid_snapshot_active_state(self, snapshot_creator, mock_state_manager):
         """Test validation fails for SNAPSHOT_ACTIVE state."""
-        mock_state_manager.derive_state.return_value = ReleaseState.SNAPSHOT_ACTIVE
+        mock_state_manager.derive_state.return_value = self._make_result(
+            ReleaseState.SNAPSHOT_ACTIVE)
 
         errors = snapshot_creator.validate_preconditions("r4.1")
 
@@ -295,7 +306,8 @@ def test_invalid_snapshot_active_state(self, snapshot_creator, mock_state_manage
 
     def test_invalid_draft_ready_state(self, snapshot_creator, mock_state_manager):
         """Test validation fails for DRAFT_READY state."""
-        mock_state_manager.derive_state.return_value = ReleaseState.DRAFT_READY
+        mock_state_manager.derive_state.return_value = self._make_result(
+            ReleaseState.DRAFT_READY)
 
         errors = snapshot_creator.validate_preconditions("r4.1")
 
@@ -304,13 +316,29 @@ def test_invalid_draft_ready_state(self, snapshot_creator, mock_state_manager):
 
     def test_invalid_not_planned_state(self, snapshot_creator, mock_state_manager):
         """Test validation fails for NOT_PLANNED state."""
-        mock_state_manager.derive_state.return_value = ReleaseState.NOT_PLANNED
+        mock_state_manager.derive_state.return_value = self._make_result(
+            ReleaseState.NOT_PLANNED)
 
         errors = snapshot_creator.validate_preconditions("r4.1")
 
         assert len(errors) == 1
         assert "not planned" in errors[0]
 
+    def test_config_error_returns_error(self, snapshot_creator, mock_state_manager):
+        """Test that config errors from derive_state() are surfaced."""
+        from release_automation.scripts.state_manager import ConfigurationError
+        mock_state_manager.derive_state.return_value = ReleaseInfoResult(
+            success=False,
+            config_error=ConfigurationError(
+                error_type="missing_file",
+                message="No release-plan.yaml found",
+                file_path="release-plan.yaml"))
+
+        errors = snapshot_creator.validate_preconditions("r4.1")
+
+        assert len(errors) == 1
+        assert "Configuration error" in errors[0]
+
 
 # --- Tests for create_snapshot ---
 
@@ -374,7 +402,9 @@ def test_validation_failure_returns_early(
         sample_release_plan,
     ):
         """Test that validation failure returns early without creating snapshot."""
-        mock_state_manager.derive_state.return_value = ReleaseState.PUBLISHED
+        mock_state_manager.derive_state.return_value = ReleaseInfoResult(
+            success=True, state=ReleaseState.PUBLISHED,
+            release_tag="r4.1", source="tag")
 
         config = SnapshotConfig(release_tag="r4.1")
         result = snapshot_creator.create_snapshot(sample_release_plan, config)
diff --git a/release_automation/tests/test_state_manager.py b/release_automation/tests/test_state_manager.py
index 72427201..d64e5f83 100644
--- a/release_automation/tests/test_state_manager.py
+++ b/release_automation/tests/test_state_manager.py
@@ -40,62 +40,84 @@ def state_manager(mock_github_client):
 
 
 class TestDeriveState:
-    """Tests for derive_state method."""
+    """Tests for derive_state method.
+
+    Verifies state derivation priority:
+    1. Config validation (errors → failure)
+    2. PUBLISHED (tag exists)
+    3. DRAFT_READY (snapshot + draft release)
+    4. SNAPSHOT_ACTIVE (snapshot, no draft)
+    5. PLANNED / NOT_PLANNED (from release-plan.yaml)
+    """
+
+    VALID_PLAN = """
+repository:
+  target_release_tag: r4.1
+  target_release_type: initial
+"""
 
     def test_published_when_tag_exists(self, state_manager, mock_github_client):
         """Tag exists → PUBLISHED state."""
+        mock_github_client.get_file_content.return_value = self.VALID_PLAN
         mock_github_client.tag_exists.return_value = True
 
-        state = state_manager.derive_state("r4.1")
+        result = state_manager.derive_state()
 
-        assert state == ReleaseState.PUBLISHED
-        mock_github_client.tag_exists.assert_called_once_with("r4.1")
+        assert result.success
+        assert result.state == ReleaseState.PUBLISHED
+        assert result.release_tag == "r4.1"
+        assert result.source == "tag"
 
     def test_draft_ready_when_snapshot_and_draft_release(
         self, state_manager, mock_github_client
     ):
         """Snapshot branch + draft release → DRAFT_READY state."""
+        mock_github_client.get_file_content.return_value = self.VALID_PLAN
         mock_github_client.tag_exists.return_value = False
         mock_github_client.list_branches.return_value = [
             Branch(name="release-snapshot/r4.1-abc1234", sha="abc1234")
         ]
         mock_github_client.draft_release_exists.return_value = True
 
-        state = state_manager.derive_state("r4.1")
+        result = state_manager.derive_state()
 
-        assert state == ReleaseState.DRAFT_READY
-        mock_github_client.list_branches.assert_called_once_with(
-            "release-snapshot/r4.1-*"
-        )
-        mock_github_client.draft_release_exists.assert_called_once_with("r4.1")
+        assert result.success
+        assert result.state == ReleaseState.DRAFT_READY
+        assert result.source == "release-metadata.yaml"
 
     def test_snapshot_active_when_snapshot_no_draft(
         self, state_manager, mock_github_client
     ):
         """Snapshot branch exists, no draft release → SNAPSHOT_ACTIVE state."""
+        mock_github_client.get_file_content.return_value = self.VALID_PLAN
         mock_github_client.tag_exists.return_value = False
         mock_github_client.list_branches.return_value = [
             Branch(name="release-snapshot/r4.1-abc1234", sha="abc1234")
         ]
         mock_github_client.draft_release_exists.return_value = False
 
-        state = state_manager.derive_state("r4.1")
+        result = state_manager.derive_state()
 
-        assert state == ReleaseState.SNAPSHOT_ACTIVE
+        assert result.success
+        assert result.state == ReleaseState.SNAPSHOT_ACTIVE
+        assert result.snapshot_branch == "release-snapshot/r4.1-abc1234"
 
     @patch("release_automation.scripts.state_manager.time.sleep")
     def test_draft_ready_retries_when_enabled(
         self, mock_sleep, state_manager, mock_github_client
     ):
         """Draft release detection retries before concluding DRAFT_READY."""
+        mock_github_client.get_file_content.return_value = self.VALID_PLAN
+        mock_github_client.tag_exists.return_value = False
         mock_github_client.list_branches.return_value = [
             Branch(name="release-snapshot/r4.1-abc1234", sha="abc1234")
         ]
         mock_github_client.draft_release_exists.side_effect = [False, False, True]
 
-        state = state_manager.derive_state("r4.1", retry_draft_release=True)
+        result = state_manager.derive_state(retry_draft_release=True)
 
-        assert state == ReleaseState.DRAFT_READY
+        assert result.success
+        assert result.state == ReleaseState.DRAFT_READY
         assert mock_github_client.draft_release_exists.call_count == 3
         assert mock_sleep.call_count == 2
 
@@ -104,32 +126,34 @@ def test_snapshot_active_when_retry_exhausted(
         self, mock_sleep, state_manager, mock_github_client
     ):
         """Retry exhaustion falls back to SNAPSHOT_ACTIVE."""
+        mock_github_client.get_file_content.return_value = self.VALID_PLAN
+        mock_github_client.tag_exists.return_value = False
         mock_github_client.list_branches.return_value = [
             Branch(name="release-snapshot/r4.1-abc1234", sha="abc1234")
         ]
         mock_github_client.draft_release_exists.side_effect = [False, False, False]
 
-        state = state_manager.derive_state("r4.1", retry_draft_release=True)
+        result = state_manager.derive_state(retry_draft_release=True)
 
-        assert state == ReleaseState.SNAPSHOT_ACTIVE
+        assert result.success
+        assert result.state == ReleaseState.SNAPSHOT_ACTIVE
         assert mock_github_client.draft_release_exists.call_count == 3
         assert mock_sleep.call_count == 2
 
     def test_planned_when_release_plan_defines_release(
         self, state_manager, mock_github_client
     ):
-        """release-plan.yaml with matching target → PLANNED state."""
+        """release-plan.yaml with valid release type → PLANNED state."""
         mock_github_client.tag_exists.return_value = False
         mock_github_client.list_branches.return_value = []
-        mock_github_client.get_file_content.return_value = """
-repository:
-  target_release_tag: r4.1
-  target_release_type: initial
-"""
+        mock_github_client.get_file_content.return_value = self.VALID_PLAN
 
-        state = state_manager.derive_state("r4.1")
+        result = state_manager.derive_state()
 
-        assert state == ReleaseState.PLANNED
+        assert result.success
+        assert result.state == ReleaseState.PLANNED
+        assert result.release_tag == "r4.1"
+        assert result.source == "release-plan.yaml"
 
     def test_not_planned_when_release_type_is_none(
         self, state_manager, mock_github_client
@@ -143,59 +167,47 @@ def test_not_planned_when_release_type_is_none(
   target_release_type: none
 """
 
-        state = state_manager.derive_state("r4.1")
+        result = state_manager.derive_state()
 
-        assert state == ReleaseState.NOT_PLANNED
+        assert result.success
+        assert result.state == ReleaseState.NOT_PLANNED
+        assert result.release_tag == "r4.1"
 
-    def test_not_planned_when_tag_mismatch(self, state_manager, mock_github_client):
-        """release-plan.yaml with different tag → NOT_PLANNED state."""
+    def test_includes_meta_release(self, state_manager, mock_github_client):
+        """meta_release from release-plan.yaml is included in result."""
         mock_github_client.tag_exists.return_value = False
         mock_github_client.list_branches.return_value = []
         mock_github_client.get_file_content.return_value = """
 repository:
-  target_release_tag: r5.0
+  target_release_tag: r4.1
   target_release_type: initial
+  meta_release: Sync26
 """
 
-        state = state_manager.derive_state("r4.1")
-
-        assert state == ReleaseState.NOT_PLANNED
-
-    def test_not_planned_when_no_release_plan(self, state_manager, mock_github_client):
-        """No release-plan.yaml → NOT_PLANNED state."""
-        mock_github_client.tag_exists.return_value = False
-        mock_github_client.list_branches.return_value = []
-        mock_github_client.get_file_content.return_value = None
+        result = state_manager.derive_state()
 
-        state = state_manager.derive_state("r4.1")
-
-        assert state == ReleaseState.NOT_PLANNED
-
-    def test_not_planned_when_malformed_yaml(self, state_manager, mock_github_client):
-        """Malformed release-plan.yaml → NOT_PLANNED state."""
-        mock_github_client.tag_exists.return_value = False
-        mock_github_client.list_branches.return_value = []
-        mock_github_client.get_file_content.return_value = "{{invalid yaml::"
-
-        state = state_manager.derive_state("r4.1")
-
-        assert state == ReleaseState.NOT_PLANNED
+        assert result.success
+        assert result.meta_release == "Sync26"
 
-    def test_not_planned_when_missing_repository_section(
+    def test_snapshot_uses_metadata_release_tag(
         self, state_manager, mock_github_client
     ):
-        """release-plan.yaml without repository section → NOT_PLANNED state."""
+        """Snapshot path reads release_tag from release-metadata.yaml."""
+        mock_github_client.get_file_content.side_effect = [
+            self.VALID_PLAN,  # release-plan.yaml (validation)
+            "repository:\n  release_tag: r4.1\n  release_type: pre-release-rc",
+        ]
         mock_github_client.tag_exists.return_value = False
-        mock_github_client.list_branches.return_value = []
-        mock_github_client.get_file_content.return_value = """
-apis:
-  - name: quality-on-demand
-    version: 1.0.0
-"""
+        mock_github_client.list_branches.return_value = [
+            Branch(name="release-snapshot/r4.1-abc1234", sha="abc1234")
+        ]
+        mock_github_client.draft_release_exists.return_value = False
 
-        state = state_manager.derive_state("r4.1")
+        result = state_manager.derive_state()
 
-        assert state == ReleaseState.NOT_PLANNED
+        assert result.success
+        assert result.release_tag == "r4.1"
+        assert result.release_type == "pre-release-rc"
 
 
 class TestGetCurrentSnapshot:
@@ -295,60 +307,58 @@ def test_returns_current_snapshot_when_exists(
 class TestStateTransitions:
     """Integration tests for state transition scenarios."""
 
+    VALID_PLAN = """
+repository:
+  target_release_tag: r4.1
+  target_release_type: initial
+"""
+
     def test_full_lifecycle_happy_path(self, mock_github_client):
         """Test state transitions through the happy path."""
         manager = ReleaseStateManager(mock_github_client)
+        mock_github_client.get_file_content.return_value = self.VALID_PLAN
 
         # Initial state: PLANNED
-        mock_github_client.get_file_content.return_value = """
-repository:
-  target_release_tag: r4.1
-  target_release_type: initial
-"""
-        assert manager.derive_state("r4.1") == ReleaseState.PLANNED
+        assert manager.derive_state().state == ReleaseState.PLANNED
 
         # After /create-snapshot: SNAPSHOT_ACTIVE
         mock_github_client.list_branches.return_value = [
             Branch(name="release-snapshot/r4.1-abc1234", sha="abc1234")
         ]
-        assert manager.derive_state("r4.1") == ReleaseState.SNAPSHOT_ACTIVE
+        assert manager.derive_state().state == ReleaseState.SNAPSHOT_ACTIVE
 
         # After PR merge creates draft: DRAFT_READY
         mock_github_client.draft_release_exists.return_value = True
-        assert manager.derive_state("r4.1") == ReleaseState.DRAFT_READY
+        assert manager.derive_state().state == ReleaseState.DRAFT_READY
 
         # After release published: PUBLISHED
         mock_github_client.tag_exists.return_value = True
-        assert manager.derive_state("r4.1") == ReleaseState.PUBLISHED
+        assert manager.derive_state().state == ReleaseState.PUBLISHED
 
     def test_discard_and_retry_path(self, mock_github_client):
         """Test state transitions through discard and retry path."""
         manager = ReleaseStateManager(mock_github_client)
+        mock_github_client.get_file_content.return_value = self.VALID_PLAN
 
         # Start with SNAPSHOT_ACTIVE
-        mock_github_client.get_file_content.return_value = """
-repository:
-  target_release_tag: r4.1
-  target_release_type: initial
-"""
         mock_github_client.list_branches.return_value = [
             Branch(name="release-snapshot/r4.1-abc1234", sha="abc1234")
         ]
-        assert manager.derive_state("r4.1") == ReleaseState.SNAPSHOT_ACTIVE
+        assert manager.derive_state().state == ReleaseState.SNAPSHOT_ACTIVE
 
         # After /discard-snapshot: back to PLANNED
         mock_github_client.list_branches.return_value = []
-        assert manager.derive_state("r4.1") == ReleaseState.PLANNED
+        assert manager.derive_state().state == ReleaseState.PLANNED
 
         # New /create-snapshot: SNAPSHOT_ACTIVE again
         mock_github_client.list_branches.return_value = [
             Branch(name="release-snapshot/r4.1-def5678", sha="def5678")
         ]
-        assert manager.derive_state("r4.1") == ReleaseState.SNAPSHOT_ACTIVE
+        assert manager.derive_state().state == ReleaseState.SNAPSHOT_ACTIVE
 
 
-class TestGetCurrentReleaseInfoErrors:
-    """Tests for get_current_release_info() configuration error handling.
+class TestDeriveStateErrors:
+    """Tests for derive_state() configuration error handling.
 
     Configuration errors should return error results, not NOT_PLANNED state.
     """
@@ -357,7 +367,7 @@ def test_returns_error_when_file_missing(self, state_manager, mock_github_client
         """Missing release-plan.yaml returns config error, not NOT_PLANNED."""
         mock_github_client.get_file_content.return_value = None
 
-        result = state_manager.get_current_release_info()
+        result = state_manager.derive_state()
 
         assert not result.success
         assert result.config_error is not None
@@ -370,7 +380,7 @@ def test_returns_error_when_yaml_malformed(self, state_manager, mock_github_clie
         """Malformed YAML returns config error, not NOT_PLANNED."""
         mock_github_client.get_file_content.return_value = "{{invalid yaml:: missing"
 
-        result = state_manager.get_current_release_info()
+        result = state_manager.derive_state()
 
         assert not result.success
         assert result.config_error is not None
@@ -382,7 +392,7 @@ def test_returns_error_when_yaml_empty(self, state_manager, mock_github_client):
         """Empty YAML (null) returns config error."""
         mock_github_client.get_file_content.return_value = ""
 
-        result = state_manager.get_current_release_info()
+        result = state_manager.derive_state()
 
         assert not result.success
         assert result.config_error is not None
@@ -396,7 +406,7 @@ def test_returns_error_when_repository_section_missing(
 apis:
   - api_name: quality-on-demand
 """
-        result = state_manager.get_current_release_info()
+        result = state_manager.derive_state()
 
         assert not result.success
         assert result.config_error is not None
@@ -411,7 +421,7 @@ def test_returns_error_when_target_tag_missing(
 repository:
   target_release_type: initial
 """
-        result = state_manager.get_current_release_info()
+        result = state_manager.derive_state()
 
         assert not result.success
         assert result.config_error is not None
@@ -430,7 +440,7 @@ def test_returns_not_planned_for_intentional_none(
         mock_github_client.list_branches.return_value = []
         mock_github_client.tag_exists.return_value = False
 
-        result = state_manager.get_current_release_info()
+        result = state_manager.derive_state()
 
         assert result.success
         assert result.state == ReleaseState.NOT_PLANNED
@@ -448,7 +458,7 @@ def test_returns_not_planned_when_release_type_missing(
         mock_github_client.list_branches.return_value = []
         mock_github_client.tag_exists.return_value = False
 
-        result = state_manager.get_current_release_info()
+        result = state_manager.derive_state()
 
         assert result.success
         assert result.state == ReleaseState.NOT_PLANNED
@@ -466,7 +476,7 @@ def test_returns_planned_for_valid_config(
         mock_github_client.list_branches.return_value = []
         mock_github_client.tag_exists.return_value = False
 
-        result = state_manager.get_current_release_info()
+        result = state_manager.derive_state()
 
         assert result.success
         assert result.state == ReleaseState.PLANNED
@@ -477,7 +487,7 @@ def test_to_dict_on_error_result(self, state_manager, mock_github_client):
         """to_dict() returns proper structure for error results."""
         mock_github_client.get_file_content.return_value = None
 
-        result = state_manager.get_current_release_info()
+        result = state_manager.derive_state()
         result_dict = result.to_dict()
 
         assert result_dict["release_tag"] is None
@@ -496,7 +506,7 @@ def test_to_dict_on_success_result(self, state_manager, mock_github_client):
         mock_github_client.tag_exists.return_value = False
         mock_github_client.search_issues.return_value = []
 
-        result = state_manager.get_current_release_info()
+        result = state_manager.derive_state()
         result_dict = result.to_dict()
 
         assert result_dict["release_tag"] == "r4.1"
@@ -595,8 +605,8 @@ def test_handles_none_title(self, state_manager, mock_github_client):
         assert result is None
 
 
-class TestGetCurrentReleaseInfoWithIssue:
-    """Tests for get_current_release_info including release_issue_number."""
+class TestDeriveStateWithIssue:
+    """Tests for derive_state including release_issue_number."""
 
     def test_includes_issue_number_when_found(self, state_manager, mock_github_client):
         """Includes release issue number when issue exists."""
@@ -615,7 +625,7 @@ def test_includes_issue_number_when_found(self, state_manager, mock_github_clien
             }
         ]
 
-        result = state_manager.get_current_release_info()
+        result = state_manager.derive_state()
 
         assert result.success
         assert result.release_issue_number == 123
@@ -633,7 +643,7 @@ def test_issue_number_is_none_when_not_found(
         mock_github_client.tag_exists.return_value = False
         mock_github_client.search_issues.return_value = []
 
-        result = state_manager.get_current_release_info()
+        result = state_manager.derive_state()
 
         assert result.success
         assert result.release_issue_number is None
diff --git a/shared-actions/derive-release-state/action.yml b/shared-actions/derive-release-state/action.yml
index 2e36000e..f8d7dd8f 100644
--- a/shared-actions/derive-release-state/action.yml
+++ b/shared-actions/derive-release-state/action.yml
@@ -10,12 +10,6 @@ description: |
   - SNAPSHOT_ACTIVE/DRAFT_READY: from release-metadata.yaml on snapshot branch
   - PUBLISHED: from the git tag
 
-inputs:
-  release_tag:
-    description: "Release tag to check - DEPRECATED: leave empty to auto-derive from repository artifacts"
-    required: false
-    default: ""
-
 outputs:
   release_tag:
     description: "Release tag from authoritative source (release-plan.yaml or release-metadata.yaml)"
@@ -90,7 +84,6 @@ runs:
       env:
         GITHUB_TOKEN: ${{ github.token }}
         GITHUB_SERVER_URL: ${{ github.server_url }}
-        RELEASE_TAG_INPUT: ${{ inputs.release_tag }}
         REPO: ${{ github.repository }}
         SCRIPTS_PATH: ${{ github.action_path }}/../../release_automation/scripts
       run: |
@@ -112,14 +105,13 @@ runs:
         # Initialize clients
         repo = os.environ['REPO']
         token = os.environ.get('GITHUB_TOKEN')
-        release_tag_input = os.environ.get('RELEASE_TAG_INPUT', '').strip()
         server_url = os.environ.get('GITHUB_SERVER_URL', 'https://github.com')
 
         gh = GitHubClient(repo=repo, token=token)
         manager = ReleaseStateManager(github_client=gh)
 
-        # Get release info from repository artifacts (authoritative source)
-        release_info = manager.get_current_release_info()
+        # Derive state from repository artifacts (authoritative source)
+        release_info = manager.derive_state()
         output_file = os.environ['GITHUB_OUTPUT']
 
         # Handle configuration errors
@@ -172,11 +164,7 @@ runs:
         if snapshot and snapshot.src_commit_sha:
             src_commit_sha_short = snapshot.src_commit_sha[:7]
             
-        # Read meta_release from release-plan.yaml (raw cycle name, e.g., "Sync26")
-        meta_release = ""
-        plan = manager._read_release_plan()
-        if plan:
-            meta_release = plan.get("repository", {}).get("meta_release", "") or ""
+        meta_release = release_info.meta_release or ""
 
         # Write outputs to GITHUB_OUTPUT
         with open(output_file, 'a') as f:

From 9f6613d53c4e9faf20c159e053a5b8a314fd0480 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sun, 12 Apr 2026 11:51:13 +0200
Subject: [PATCH 065/157] fix(derive-state): read release plan for PLANNED
 state output context

The no-snapshot branch in action.yml referenced an undefined `plan`
variable after the consolidation of derive_state(). Re-read the plan
via _read_release_plan_with_validation() to provide API and dependency
data in workflow outputs for the PLANNED state.
---
 shared-actions/derive-release-state/action.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/shared-actions/derive-release-state/action.yml b/shared-actions/derive-release-state/action.yml
index f8d7dd8f..abca19e8 100644
--- a/shared-actions/derive-release-state/action.yml
+++ b/shared-actions/derive-release-state/action.yml
@@ -193,11 +193,12 @@ runs:
                 f.write(f"commonalities_release={snapshot.commonalities_release}\n")
                 f.write(f"identity_consent_management_release={snapshot.identity_consent_management_release}\n")
             else:
-                # No snapshot — use plan data (already read above) for PLANNED state context
+                # No snapshot — read plan data for PLANNED state context
                 plan_apis = []
                 plan_commonalities = ""
                 plan_icm = ""
                 plan_rtype = release_info.release_type or ""
+                plan, _ = manager._read_release_plan_with_validation()
                 if plan:
                     for api in plan.get("apis", []):
                         plan_apis.append({

From 2f96a0a433e97a38fa67c88c2d22b43f930aabf5 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sun, 12 Apr 2026 13:41:18 +0200
Subject: [PATCH 066/157] fix(validation): use file output for Spectral to
 avoid 64KB stdout truncation

Spectral (Node.js) can silently truncate piped stdout at 64KB due to a
known Node.js flushing issue, causing the adapter to report 0 findings
on large repos. Switch from capture_output to Spectral's --output flag
which writes JSON directly to a temp file, bypassing the pipe entirely.

Also adds tests for temp file cleanup and >64KB output handling.
---
 validation/engines/spectral_adapter.py    | 106 +++++++++++++---------
 validation/tests/test_spectral_adapter.py |  94 +++++++++++++++----
 2 files changed, 142 insertions(+), 58 deletions(-)

diff --git a/validation/engines/spectral_adapter.py b/validation/engines/spectral_adapter.py
index fe46969e..e1af2023 100644
--- a/validation/engines/spectral_adapter.py
+++ b/validation/engines/spectral_adapter.py
@@ -13,7 +13,9 @@
 
 import json
 import logging
+import os
 import subprocess
+import tempfile
 from dataclasses import dataclass
 from pathlib import Path, PurePosixPath
 from typing import List, Optional
@@ -292,10 +294,13 @@ def run_spectral(
 ) -> SpectralResult:
     """Invoke Spectral CLI and capture structured output.
 
-    Uses ``--format json`` for machine-readable output.  The default
-    ``--fail-severity error`` means exit 0 for warnings-only and exit 1
-    when errors are present — both are normal operation with valid JSON
-    on stdout.
+    Uses ``--format json`` for machine-readable output.  Output is written
+    to a temporary file via Spectral's ``--output`` flag to avoid Node.js
+    stdout pipe truncation on large result sets (>64 KB).
+
+    The default ``--fail-severity error`` means exit 0 for warnings-only
+    and exit 1 when errors are present — both are normal operation with
+    valid JSON in the output file.
 
     Args:
         ruleset_path: Path to the Spectral ruleset file.
@@ -307,48 +312,67 @@ def run_spectral(
     Returns:
         :class:`SpectralResult` with parsed findings and status.
     """
-    cmd = [
-        "spectral",
-        "lint",
-        "--format", "json",
-        "--quiet",
-        "--ruleset", str(ruleset_path),
-        *spec_patterns,
-    ]
-
+    # Create a temp file for Spectral JSON output.  Placed in cwd to stay
+    # on the same filesystem.  delete=False so we control cleanup.
+    fd, output_path = tempfile.mkstemp(suffix=".json", dir=str(cwd))
+    output_file = Path(output_path)
     try:
-        result = subprocess.run(
-            cmd,
-            capture_output=True,
-            text=True,
-            cwd=str(cwd),
-            timeout=300,
-        )
-    except FileNotFoundError:
-        return SpectralResult(
-            findings=[],
-            success=False,
-            error_message="Spectral CLI not found — is @stoplight/spectral-cli installed?",
-        )
-    except subprocess.TimeoutExpired:
+        # Close the fd immediately — Spectral will open the file by name.
+        os.close(fd)
+
+        cmd = [
+            "spectral",
+            "lint",
+            "--format", "json",
+            "--quiet",
+            "--output", str(output_file),
+            "--ruleset", str(ruleset_path),
+            *spec_patterns,
+        ]
+
+        try:
+            result = subprocess.run(
+                cmd,
+                capture_output=True,
+                text=True,
+                cwd=str(cwd),
+                timeout=300,
+            )
+        except FileNotFoundError:
+            return SpectralResult(
+                findings=[],
+                success=False,
+                error_message="Spectral CLI not found — is @stoplight/spectral-cli installed?",
+            )
+        except subprocess.TimeoutExpired:
+            return SpectralResult(
+                findings=[],
+                success=False,
+                error_message="Spectral timed out after 300 seconds",
+            )
+
+        # Exit 0 or 1: normal operation (findings may or may not exist).
+        if result.returncode in (0, 1):
+            if output_file.exists() and output_file.stat().st_size > 0:
+                json_text = output_file.read_text(encoding="utf-8")
+            else:
+                logger.warning(
+                    "Spectral output file is empty or missing (exit %d)",
+                    result.returncode,
+                )
+                json_text = ""
+            findings = parse_spectral_output(json_text, repo_root=str(cwd))
+            return SpectralResult(findings=findings, success=True)
+
+        # Exit 2+: Spectral runtime error.
+        stderr = result.stderr.strip() if result.stderr else "unknown error"
         return SpectralResult(
             findings=[],
             success=False,
-            error_message="Spectral timed out after 300 seconds",
+            error_message=f"Spectral exited with code {result.returncode}: {stderr}",
         )
-
-    # Exit 0 or 1: normal operation (findings may or may not exist).
-    if result.returncode in (0, 1):
-        findings = parse_spectral_output(result.stdout, repo_root=str(cwd))
-        return SpectralResult(findings=findings, success=True)
-
-    # Exit 2+: Spectral runtime error.
-    stderr = result.stderr.strip() if result.stderr else "unknown error"
-    return SpectralResult(
-        findings=[],
-        success=False,
-        error_message=f"Spectral exited with code {result.returncode}: {stderr}",
-    )
+    finally:
+        output_file.unlink(missing_ok=True)
 
 
 def _make_error_finding(message: str) -> dict:
diff --git a/validation/tests/test_spectral_adapter.py b/validation/tests/test_spectral_adapter.py
index e18c6656..b826f6b0 100644
--- a/validation/tests/test_spectral_adapter.py
+++ b/validation/tests/test_spectral_adapter.py
@@ -401,12 +401,30 @@ def test_external_file_findings_downgraded_to_hint(self):
 # ---------------------------------------------------------------------------
 
 
+def _spectral_side_effect(
+    json_content: str,
+    returncode: int = 0,
+    stderr: str = "",
+):
+    """Create a subprocess.run side_effect that writes JSON to the --output file.
+
+    Simulates Spectral's behaviour: it writes results to the file specified
+    by ``--output`` and exits with the given return code.
+    """
+    def side_effect(cmd, **kwargs):
+        output_idx = cmd.index("--output")
+        output_path = Path(cmd[output_idx + 1])
+        output_path.write_text(json_content, encoding="utf-8")
+        return subprocess.CompletedProcess(
+            args=cmd, returncode=returncode, stdout="", stderr=stderr,
+        )
+    return side_effect
+
+
 class TestRunSpectral:
     @patch("validation.engines.spectral_adapter.subprocess.run")
     def test_exit_0_no_findings(self, mock_run, tmp_path):
-        mock_run.return_value = subprocess.CompletedProcess(
-            args=[], returncode=0, stdout="[]", stderr="",
-        )
+        mock_run.side_effect = _spectral_side_effect("[]", returncode=0)
         result = run_spectral(
             tmp_path / ".spectral.yaml", ["*.yaml"], cwd=tmp_path,
         )
@@ -416,11 +434,8 @@ def test_exit_0_no_findings(self, mock_run, tmp_path):
 
     @patch("validation.engines.spectral_adapter.subprocess.run")
     def test_exit_1_with_findings(self, mock_run, tmp_path):
-        mock_run.return_value = subprocess.CompletedProcess(
-            args=[],
-            returncode=1,
-            stdout=json.dumps([SAMPLE_SPECTRAL_FINDING]),
-            stderr="",
+        mock_run.side_effect = _spectral_side_effect(
+            json.dumps([SAMPLE_SPECTRAL_FINDING]), returncode=1,
         )
         result = run_spectral(
             tmp_path / ".spectral.yaml", ["*.yaml"], cwd=tmp_path,
@@ -431,8 +446,8 @@ def test_exit_1_with_findings(self, mock_run, tmp_path):
 
     @patch("validation.engines.spectral_adapter.subprocess.run")
     def test_exit_2_runtime_error(self, mock_run, tmp_path):
-        mock_run.return_value = subprocess.CompletedProcess(
-            args=[], returncode=2, stdout="", stderr="Error: invalid ruleset",
+        mock_run.side_effect = _spectral_side_effect(
+            "", returncode=2, stderr="Error: invalid ruleset",
         )
         result = run_spectral(
             tmp_path / ".spectral.yaml", ["*.yaml"], cwd=tmp_path,
@@ -465,9 +480,8 @@ def test_findings_paths_normalised_by_cwd(self, mock_run, tmp_path):
             **SAMPLE_SPECTRAL_FINDING,
             "source": f"{tmp_path}/code/API_definitions/quality-on-demand.yaml",
         }
-        mock_run.return_value = subprocess.CompletedProcess(
-            args=[], returncode=1,
-            stdout=json.dumps([abs_finding]), stderr="",
+        mock_run.side_effect = _spectral_side_effect(
+            json.dumps([abs_finding]), returncode=1,
         )
         result = run_spectral(
             tmp_path / ".spectral.yaml", ["*.yaml"], cwd=tmp_path,
@@ -476,20 +490,66 @@ def test_findings_paths_normalised_by_cwd(self, mock_run, tmp_path):
         assert result.findings[0]["path"] == "code/API_definitions/quality-on-demand.yaml"
 
     @patch("validation.engines.spectral_adapter.subprocess.run")
-    def test_command_includes_ruleset_and_patterns(self, mock_run, tmp_path):
-        mock_run.return_value = subprocess.CompletedProcess(
-            args=[], returncode=0, stdout="[]", stderr="",
-        )
+    def test_command_includes_output_flag_and_patterns(self, mock_run, tmp_path):
+        mock_run.side_effect = _spectral_side_effect("[]", returncode=0)
         ruleset = tmp_path / ".spectral-r4.yaml"
         run_spectral(ruleset, ["code/API_definitions/*.yaml"], cwd=tmp_path)
         call_args = mock_run.call_args
         cmd = call_args[0][0]
         assert "--ruleset" in cmd
         assert "--quiet" in cmd
+        assert "--output" in cmd
         assert str(ruleset) in cmd
         assert "code/API_definitions/*.yaml" in cmd
         assert call_args[1]["cwd"] == str(tmp_path)
 
+    @patch("validation.engines.spectral_adapter.subprocess.run")
+    def test_temp_file_cleaned_up_on_success(self, mock_run, tmp_path):
+        """Temp output file is removed after successful invocation."""
+        mock_run.side_effect = _spectral_side_effect("[]", returncode=0)
+        run_spectral(tmp_path / ".spectral.yaml", ["*.yaml"], cwd=tmp_path)
+        # No leftover .json files in the working directory.
+        remaining = list(tmp_path.glob("*.json"))
+        assert remaining == []
+
+    @patch("validation.engines.spectral_adapter.subprocess.run")
+    def test_temp_file_cleaned_up_on_error(self, mock_run, tmp_path):
+        """Temp output file is removed even when Spectral fails."""
+        mock_run.side_effect = _spectral_side_effect(
+            "", returncode=2, stderr="boom",
+        )
+        run_spectral(tmp_path / ".spectral.yaml", ["*.yaml"], cwd=tmp_path)
+        remaining = list(tmp_path.glob("*.json"))
+        assert remaining == []
+
+    @patch("validation.engines.spectral_adapter.subprocess.run")
+    def test_temp_file_cleaned_up_on_timeout(self, mock_run, tmp_path):
+        """Temp output file is removed when Spectral times out."""
+        mock_run.side_effect = subprocess.TimeoutExpired(cmd="spectral", timeout=300)
+        run_spectral(tmp_path / ".spectral.yaml", ["*.yaml"], cwd=tmp_path)
+        remaining = list(tmp_path.glob("*.json"))
+        assert remaining == []
+
+    @patch("validation.engines.spectral_adapter.subprocess.run")
+    def test_large_output_over_64kb(self, mock_run, tmp_path):
+        """Output larger than 64 KB is correctly read from file (the original bug)."""
+        # Generate >64 KB of JSON findings.
+        findings_data = []
+        for i in range(200):
+            findings_data.append({
+                **SAMPLE_SPECTRAL_FINDING,
+                "message": f"Finding {i}: {'x' * 300}",
+            })
+        large_json = json.dumps(findings_data)
+        assert len(large_json) > 65536, "Test data must exceed 64 KB"
+
+        mock_run.side_effect = _spectral_side_effect(large_json, returncode=1)
+        result = run_spectral(
+            tmp_path / ".spectral.yaml", ["*.yaml"], cwd=tmp_path,
+        )
+        assert result.success is True
+        assert len(result.findings) == 200
+
 
 # ---------------------------------------------------------------------------
 # TestRunSpectralEngine

From c100d7e84cc6659bfaffeb7383c5a6a8ecd1ea3d Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sun, 12 Apr 2026 13:41:31 +0200
Subject: [PATCH 067/157] fix(workflows): migrate app-id to client-id for
 create-github-app-token v3

Rename deprecated app-id parameter to client-id across all 12 call sites
in release-automation-reusable.yml and validation.yml. Also rename org
variables from RELEASE_APP_ID/VALIDATION_APP_ID to
RELEASE_APP_CLIENT_ID/VALIDATION_APP_CLIENT_ID to reflect the new
semantics (Client ID != App ID).

Requires org variable updates: set RELEASE_APP_CLIENT_ID and
VALIDATION_APP_CLIENT_ID to the respective GitHub App Client IDs.
---
 .../workflows/release-automation-reusable.yml | 44 +++++++++----------
 .github/workflows/validation.yml              |  4 +-
 2 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index 82b92b84..ba91325c 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -422,10 +422,10 @@ jobs:
 
       - name: Generate App Token
         id: app-token
-        if: steps.decide.outputs.action == 'post_comment' && vars.RELEASE_APP_ID != ''
+        if: steps.decide.outputs.action == 'post_comment' && vars.RELEASE_APP_CLIENT_ID != ''
         uses: actions/create-github-app-token@v3
         with:
-          app-id: ${{ vars.RELEASE_APP_ID }}
+          client-id: ${{ vars.RELEASE_APP_CLIENT_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
 
       - name: Checkout tooling (for slash command)
@@ -798,10 +798,10 @@ jobs:
     steps:
       - name: Generate App Token
         id: app-token
-        if: vars.RELEASE_APP_ID != ''
+        if: vars.RELEASE_APP_CLIENT_ID != ''
         uses: actions/create-github-app-token@v3
         with:
-          app-id: ${{ vars.RELEASE_APP_ID }}
+          client-id: ${{ vars.RELEASE_APP_CLIENT_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
 
       - name: Checkout tooling
@@ -922,10 +922,10 @@ jobs:
     steps:
       - name: Generate App Token
         id: app-token
-        if: vars.RELEASE_APP_ID != ''
+        if: vars.RELEASE_APP_CLIENT_ID != ''
         uses: actions/create-github-app-token@v3
         with:
-          app-id: ${{ vars.RELEASE_APP_ID }}
+          client-id: ${{ vars.RELEASE_APP_CLIENT_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
 
       - name: Discard Snapshot
@@ -1032,10 +1032,10 @@ jobs:
     steps:
       - name: Generate App Token
         id: app-token
-        if: vars.RELEASE_APP_ID != ''
+        if: vars.RELEASE_APP_CLIENT_ID != ''
         uses: actions/create-github-app-token@v3
         with:
-          app-id: ${{ vars.RELEASE_APP_ID }}
+          client-id: ${{ vars.RELEASE_APP_CLIENT_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
 
       - name: Delete Draft Release
@@ -1160,10 +1160,10 @@ jobs:
     steps:
       - name: Generate App Token
         id: app-token
-        if: vars.RELEASE_APP_ID != ''
+        if: vars.RELEASE_APP_CLIENT_ID != ''
         uses: actions/create-github-app-token@v3
         with:
-          app-id: ${{ vars.RELEASE_APP_ID }}
+          client-id: ${{ vars.RELEASE_APP_CLIENT_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
 
       - name: Checkout tooling
@@ -1239,10 +1239,10 @@ jobs:
 
       - name: Generate App Token
         id: app-token
-        if: vars.RELEASE_APP_ID != ''
+        if: vars.RELEASE_APP_CLIENT_ID != ''
         uses: actions/create-github-app-token@v3
         with:
-          app-id: ${{ vars.RELEASE_APP_ID }}
+          client-id: ${{ vars.RELEASE_APP_CLIENT_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
 
       - name: Execute Publish Flow
@@ -1377,10 +1377,10 @@ jobs:
 
       - name: Generate App Token
         id: app-token
-        if: vars.RELEASE_APP_ID != ''
+        if: vars.RELEASE_APP_CLIENT_ID != ''
         uses: actions/create-github-app-token@v3
         with:
-          app-id: ${{ vars.RELEASE_APP_ID }}
+          client-id: ${{ vars.RELEASE_APP_CLIENT_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
 
       - name: Copy README release info from tag
@@ -1596,10 +1596,10 @@ jobs:
     steps:
       - name: Generate App Token
         id: app-token
-        if: vars.RELEASE_APP_ID != ''
+        if: vars.RELEASE_APP_CLIENT_ID != ''
         uses: actions/create-github-app-token@v3
         with:
-          app-id: ${{ vars.RELEASE_APP_ID }}
+          client-id: ${{ vars.RELEASE_APP_CLIENT_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
 
       - name: Extract CHANGELOG release notes
@@ -1837,10 +1837,10 @@ jobs:
     steps:
       - name: Generate App Token
         id: app-token
-        if: vars.RELEASE_APP_ID != ''
+        if: vars.RELEASE_APP_CLIENT_ID != ''
         uses: actions/create-github-app-token@v3
         with:
-          app-id: ${{ vars.RELEASE_APP_ID }}
+          client-id: ${{ vars.RELEASE_APP_CLIENT_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
 
       - name: Checkout tooling
@@ -1946,10 +1946,10 @@ jobs:
     steps:
       - name: Generate App Token
         id: app-token
-        if: vars.RELEASE_APP_ID != ''
+        if: vars.RELEASE_APP_CLIENT_ID != ''
         uses: actions/create-github-app-token@v3
         with:
-          app-id: ${{ vars.RELEASE_APP_ID }}
+          client-id: ${{ vars.RELEASE_APP_CLIENT_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
 
       - name: Checkout tooling
@@ -2117,10 +2117,10 @@ jobs:
     steps:
       - name: Generate App Token
         id: app-token
-        if: vars.RELEASE_APP_ID != ''
+        if: vars.RELEASE_APP_CLIENT_ID != ''
         uses: actions/create-github-app-token@v3
         with:
-          app-id: ${{ vars.RELEASE_APP_ID }}
+          client-id: ${{ vars.RELEASE_APP_CLIENT_ID }}
           private-key: ${{ secrets.RELEASE_APP_PRIVATE_KEY }}
 
       - name: Checkout tooling
diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index d1d3f2d1..878921d3 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -182,11 +182,11 @@ jobs:
         if: >-
           always() && steps.validation.outcome == 'success'
           && github.event_name == 'pull_request'
-          && vars.VALIDATION_APP_ID != ''
+          && vars.VALIDATION_APP_CLIENT_ID != ''
         continue-on-error: true
         uses: actions/create-github-app-token@v3
         with:
-          app-id: ${{ vars.VALIDATION_APP_ID }}
+          client-id: ${{ vars.VALIDATION_APP_CLIENT_ID }}
           private-key: ${{ secrets.VALIDATION_APP_PRIVATE_KEY }}
 
       # ── Step 9: Resolve write access ───────────────────────────

From 88b877ff8067de50158df870b63302b2d7dbfad9 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 10 Apr 2026 22:52:41 +0200
Subject: [PATCH 068/157] feat(validation): P-015 warn on implicit-subscription
 pattern
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add postfilter override so P-015 (check-event-type-format) stays error
on explicit-subscription APIs and downgrades to warn on implicit-
subscription APIs. The current detector only finds event-type enums
inside schemas whose name contains "eventtype". r4.1-era implicit-
subscription APIs typically inline the enum at
CloudEvent.properties.type.enum with no named EventType schema, which
the detector cannot see. The r4.2 migration path replaces inline
CloudEvent with a $ref to CAMARA_event_common.yaml plus a named
ApiEventType schema, at which point the rule detects correctly.

Adds a hint pointing to the implicit-subscription API template tracked
in camaraproject/Commonalities#608.

Metadata-only change — no detector code change. api_pattern is already
a supported condition field in the postfilter. Bumps the expected hint
count from 10 to 11 and adds a targeted regression test for the P-015
override shape.
---
 validation/rules/python-rules.yaml            | 18 +++++++++++++
 .../tests/test_rule_metadata_integrity.py     | 25 +++++++++++++++++--
 2 files changed, 41 insertions(+), 2 deletions(-)

diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
index 963f160c..7196f0d7 100644
--- a/validation/rules/python-rules.yaml
+++ b/validation/rules/python-rules.yaml
@@ -131,6 +131,14 @@
 
 # P-015: check-event-type-format (DG-086)
 # Event types must follow org.camaraproject....
+#
+# Level is conditional on api_pattern:
+#   - explicit-subscription: error (named EventType schema expected today)
+#   - implicit-subscription: warn (r4.1-era specs often inline the enum at
+#     CloudEvent.properties.type.enum, which the detector cannot see; the
+#     r4.2 migration path replaces inline CloudEvent with a $ref to
+#     CAMARA_event_common.yaml and a named ApiEventType schema, at which
+#     point this rule detects the event type correctly).
 - id: P-015
   engine: python
   engine_rule: check-event-type-format
@@ -138,6 +146,16 @@
     api_pattern: [explicit-subscription, implicit-subscription]
   conditional_level:
     default: error
+    overrides:
+      - condition:
+          api_pattern: [implicit-subscription]
+        level: warn
+  hint: >-
+    Define a named event type schema (e.g. ApiEventType) that constrains
+    the CloudEvent `type` value via allOf, rather than inlining the enum
+    directly in CloudEvent.properties.type.enum. See the implicit-events
+    API template in Commonalities artifacts/api-templates/ (tracked in
+    camaraproject/Commonalities#608).
 
 # P-016: check-sinkcredential-not-in-response (DG-092)
 # sinkCredential must not appear in subscription 2xx response schemas.
diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py
index 55008ac4..019e2c60 100644
--- a/validation/tests/test_rule_metadata_integrity.py
+++ b/validation/tests/test_rule_metadata_integrity.py
@@ -306,11 +306,32 @@ def test_hints_are_exception_not_norm(self, all_rules):
         """
         with_hints = [r.id for r in all_rules if r.hint is not None]
         with_overrides = [r.id for r in all_rules if r.message_override is not None]
-        assert len(with_hints) == 10, (
-            f"Expected 10 explicit hints (update test if adding hints): "
+        assert len(with_hints) == 11, (
+            f"Expected 11 explicit hints (update test if adding hints): "
             f"{with_hints}"
         )
         assert len(with_overrides) == 0, (
             f"Expected 0 message overrides (update test if adding overrides): "
             f"{with_overrides}"
         )
+
+    def test_p015_conditional_on_api_pattern(self, rule_index):
+        """P-015 stays error on explicit-subscription, warn on implicit.
+
+        Implicit-subscription APIs using the r4.1-era inline CloudEvent
+        pattern (enum at CloudEvent.properties.type.enum) cannot be
+        detected by the check, so the rule downgrades to warn until the
+        r4.2 migration to $ref + named ApiEventType schema is complete.
+        """
+        rule = rule_index[("python", "check-event-type-format")]
+        assert rule.id == "P-015"
+        assert rule.conditional_level is not None
+        assert rule.conditional_level.default == "error"
+        overrides = rule.conditional_level.overrides
+        assert len(overrides) == 1
+        assert overrides[0].condition == {
+            "api_pattern": ["implicit-subscription"],
+        }
+        assert overrides[0].level == "warn"
+        assert rule.hint is not None
+        assert "Commonalities#608" in rule.hint

From 65a0fece6830315d717f23c9a2493b492f2a05fd Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sun, 12 Apr 2026 16:10:10 +0200
Subject: [PATCH 069/157] fix(validation): drop all sourceless Spectral phantom
 findings

Broaden the phantom finding filter to drop any Spectral finding with an
empty source path, not just owasp:api4:2023-string-restricted at line
0:0. Spectral's $ref resolution can produce findings on internally
resolved copies with non-zero line numbers and from any rule, all
lacking a source file path. These duplicate real findings on the actual
source files.

Observed on ReleaseTest PR #77: 3 S-313 findings with empty path and
lines 233/321/359 from resolved $ref copies passed the old filter.
---
 validation/engines/spectral_adapter.py    | 26 +++++++++++------------
 validation/tests/test_spectral_adapter.py | 25 +++++++++++++++++-----
 2 files changed, 32 insertions(+), 19 deletions(-)

diff --git a/validation/engines/spectral_adapter.py b/validation/engines/spectral_adapter.py
index e1af2023..11958ae0 100644
--- a/validation/engines/spectral_adapter.py
+++ b/validation/engines/spectral_adapter.py
@@ -253,20 +253,18 @@ def parse_spectral_output(
     findings = []
     for item in data:
         try:
-            # The OWASP string-restricted rule uses a deep recursive JSONPath
-            # that can traverse Spectral's internally-resolved $ref copies,
-            # producing phantom findings with no source file and range 0:0.
-            # Drop these — they duplicate real findings on the actual source.
-            if (
-                item.get("code") == "owasp:api4:2023-string-restricted"
-                and not item.get("source")
-            ):
-                start = item.get("range", {}).get("start", {})
-                if start.get("line", 0) == 0 and start.get("character", 0) == 0:
-                    logger.debug(
-                        "Dropping phantom string-restricted finding (resolved $ref)"
-                    )
-                    continue
+            # Spectral's $ref resolution can produce phantom findings with
+            # no source file — the rule fires on internally-resolved copies
+            # rather than actual source files.  These duplicate real findings
+            # that have proper source paths.  Drop any finding without a
+            # source, regardless of line number.
+            if not item.get("source"):
+                logger.debug(
+                    "Dropping phantom finding without source file: %s line %s",
+                    item.get("code", "?"),
+                    item.get("range", {}).get("start", {}).get("line", "?"),
+                )
+                continue
             findings.append(normalize_finding(item, repo_root=repo_root))
         except (KeyError, TypeError) as exc:
             logger.warning("Skipping malformed Spectral finding: %s", exc)
diff --git a/validation/tests/test_spectral_adapter.py b/validation/tests/test_spectral_adapter.py
index b826f6b0..0d975c05 100644
--- a/validation/tests/test_spectral_adapter.py
+++ b/validation/tests/test_spectral_adapter.py
@@ -351,8 +351,8 @@ def test_repo_root_normalises_paths(self):
         findings = parse_spectral_output(raw, repo_root="/runner/work")
         assert findings[0]["path"] == "code/API_definitions/quality-on-demand.yaml"
 
-    def test_string_restricted_phantom_dropped(self):
-        """Phantom string-restricted findings (no source, range 0:0) are dropped."""
+    def test_sourceless_phantom_dropped(self):
+        """Phantom findings without a source file are dropped regardless of rule."""
         phantom = {
             "code": "owasp:api4:2023-string-restricted",
             "message": "Schema of type string should specify a format.",
@@ -367,8 +367,23 @@ def test_string_restricted_phantom_dropped(self):
         assert len(findings) == 1
         assert findings[0]["engine_rule"] == "camara-parameter-casing-convention"
 
-    def test_other_rule_sourceless_not_dropped(self):
-        """Sourceless findings from other rules are kept (only string-restricted filtered)."""
+    def test_sourceless_nonzero_line_also_dropped(self):
+        """Sourceless findings with non-zero lines are still dropped (resolved $ref copies)."""
+        phantom = {
+            "code": "owasp:api4:2023-string-restricted",
+            "message": "Schema of type string should specify a format.",
+            "severity": 1,
+            "source": "",
+            "path": ["components", "schemas", "Foo", "properties", "bar"],
+            "range": {"start": {"line": 233, "character": 14},
+                      "end": {"line": 233, "character": 40}},
+        }
+        raw = json.dumps([phantom])
+        findings = parse_spectral_output(raw)
+        assert len(findings) == 0
+
+    def test_sourceless_other_rule_also_dropped(self):
+        """Sourceless findings from any rule are dropped — not just string-restricted."""
         other = {
             "code": "owasp:api4:2023-string-limit",
             "message": "Schema of type string must specify maxLength.",
@@ -380,7 +395,7 @@ def test_other_rule_sourceless_not_dropped(self):
         }
         raw = json.dumps([other])
         findings = parse_spectral_output(raw)
-        assert len(findings) == 1
+        assert len(findings) == 0
 
     def test_external_file_findings_downgraded_to_hint(self):
         """Findings from common schemas (followed via $ref) become hints."""

From 55d22d62f14b07cae44c1c69e33e4c60e9f4598a Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sun, 12 Apr 2026 16:37:13 +0200
Subject: [PATCH 070/157] Revert "fix(validation): drop all sourceless Spectral
 phantom findings"

This reverts commit 65a0fece6830315d717f23c9a2493b492f2a05fd.
---
 validation/engines/spectral_adapter.py    | 26 ++++++++++++-----------
 validation/tests/test_spectral_adapter.py | 25 +++++-----------------
 2 files changed, 19 insertions(+), 32 deletions(-)

diff --git a/validation/engines/spectral_adapter.py b/validation/engines/spectral_adapter.py
index 11958ae0..e1af2023 100644
--- a/validation/engines/spectral_adapter.py
+++ b/validation/engines/spectral_adapter.py
@@ -253,18 +253,20 @@ def parse_spectral_output(
     findings = []
     for item in data:
         try:
-            # Spectral's $ref resolution can produce phantom findings with
-            # no source file — the rule fires on internally-resolved copies
-            # rather than actual source files.  These duplicate real findings
-            # that have proper source paths.  Drop any finding without a
-            # source, regardless of line number.
-            if not item.get("source"):
-                logger.debug(
-                    "Dropping phantom finding without source file: %s line %s",
-                    item.get("code", "?"),
-                    item.get("range", {}).get("start", {}).get("line", "?"),
-                )
-                continue
+            # The OWASP string-restricted rule uses a deep recursive JSONPath
+            # that can traverse Spectral's internally-resolved $ref copies,
+            # producing phantom findings with no source file and range 0:0.
+            # Drop these — they duplicate real findings on the actual source.
+            if (
+                item.get("code") == "owasp:api4:2023-string-restricted"
+                and not item.get("source")
+            ):
+                start = item.get("range", {}).get("start", {})
+                if start.get("line", 0) == 0 and start.get("character", 0) == 0:
+                    logger.debug(
+                        "Dropping phantom string-restricted finding (resolved $ref)"
+                    )
+                    continue
             findings.append(normalize_finding(item, repo_root=repo_root))
         except (KeyError, TypeError) as exc:
             logger.warning("Skipping malformed Spectral finding: %s", exc)
diff --git a/validation/tests/test_spectral_adapter.py b/validation/tests/test_spectral_adapter.py
index 0d975c05..b826f6b0 100644
--- a/validation/tests/test_spectral_adapter.py
+++ b/validation/tests/test_spectral_adapter.py
@@ -351,8 +351,8 @@ def test_repo_root_normalises_paths(self):
         findings = parse_spectral_output(raw, repo_root="/runner/work")
         assert findings[0]["path"] == "code/API_definitions/quality-on-demand.yaml"
 
-    def test_sourceless_phantom_dropped(self):
-        """Phantom findings without a source file are dropped regardless of rule."""
+    def test_string_restricted_phantom_dropped(self):
+        """Phantom string-restricted findings (no source, range 0:0) are dropped."""
         phantom = {
             "code": "owasp:api4:2023-string-restricted",
             "message": "Schema of type string should specify a format.",
@@ -367,23 +367,8 @@ def test_sourceless_phantom_dropped(self):
         assert len(findings) == 1
         assert findings[0]["engine_rule"] == "camara-parameter-casing-convention"
 
-    def test_sourceless_nonzero_line_also_dropped(self):
-        """Sourceless findings with non-zero lines are still dropped (resolved $ref copies)."""
-        phantom = {
-            "code": "owasp:api4:2023-string-restricted",
-            "message": "Schema of type string should specify a format.",
-            "severity": 1,
-            "source": "",
-            "path": ["components", "schemas", "Foo", "properties", "bar"],
-            "range": {"start": {"line": 233, "character": 14},
-                      "end": {"line": 233, "character": 40}},
-        }
-        raw = json.dumps([phantom])
-        findings = parse_spectral_output(raw)
-        assert len(findings) == 0
-
-    def test_sourceless_other_rule_also_dropped(self):
-        """Sourceless findings from any rule are dropped — not just string-restricted."""
+    def test_other_rule_sourceless_not_dropped(self):
+        """Sourceless findings from other rules are kept (only string-restricted filtered)."""
         other = {
             "code": "owasp:api4:2023-string-limit",
             "message": "Schema of type string must specify maxLength.",
@@ -395,7 +380,7 @@ def test_sourceless_other_rule_also_dropped(self):
         }
         raw = json.dumps([other])
         findings = parse_spectral_output(raw)
-        assert len(findings) == 0
+        assert len(findings) == 1
 
     def test_external_file_findings_downgraded_to_hint(self):
         """Findings from common schemas (followed via $ref) become hints."""

From 894f4940349c8f04fd99036279758a12dbae6c6f Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sun, 12 Apr 2026 19:30:27 +0200
Subject: [PATCH 071/157] fix(validation): invoke Spectral per-file to fix $ref
 source attribution

Spectral's DocumentInventory cache causes source attribution loss when
multiple input files share external $ref targets (spectral#2640). Switch
from a single glob invocation to per-file invocation so each file gets
a fresh document cache.

Also adds cross-file deduplication for findings from shared schemas
(e.g. code/common/) that appear once per invocation, and removes the
now-unnecessary phantom filter for sourceless string-restricted findings.
---
 validation/engines/spectral_adapter.py    |  96 ++++++---
 validation/tests/test_spectral_adapter.py | 225 +++++++++++++++++-----
 2 files changed, 243 insertions(+), 78 deletions(-)

diff --git a/validation/engines/spectral_adapter.py b/validation/engines/spectral_adapter.py
index e1af2023..dfa86d8f 100644
--- a/validation/engines/spectral_adapter.py
+++ b/validation/engines/spectral_adapter.py
@@ -11,6 +11,7 @@
 
 from __future__ import annotations
 
+import glob as glob_mod
 import json
 import logging
 import os
@@ -253,20 +254,6 @@ def parse_spectral_output(
     findings = []
     for item in data:
         try:
-            # The OWASP string-restricted rule uses a deep recursive JSONPath
-            # that can traverse Spectral's internally-resolved $ref copies,
-            # producing phantom findings with no source file and range 0:0.
-            # Drop these — they duplicate real findings on the actual source.
-            if (
-                item.get("code") == "owasp:api4:2023-string-restricted"
-                and not item.get("source")
-            ):
-                start = item.get("range", {}).get("start", {})
-                if start.get("line", 0) == 0 and start.get("character", 0) == 0:
-                    logger.debug(
-                        "Dropping phantom string-restricted finding (resolved $ref)"
-                    )
-                    continue
             findings.append(normalize_finding(item, repo_root=repo_root))
         except (KeyError, TypeError) as exc:
             logger.warning("Skipping malformed Spectral finding: %s", exc)
@@ -388,6 +375,38 @@ def _make_error_finding(message: str) -> dict:
     }
 
 
+def _resolve_spec_files(patterns: List[str], cwd: Path) -> List[str]:
+    """Resolve glob patterns to individual file paths (relative to *cwd*).
+
+    Returns a sorted, deduplicated list of relative POSIX-style paths.
+    """
+    files: List[str] = []
+    for pattern in patterns:
+        matched = sorted(glob_mod.glob(str(cwd / pattern)))
+        for abspath in matched:
+            rel = str(PurePosixPath(Path(abspath).relative_to(cwd)))
+            if rel not in files:
+                files.append(rel)
+    return files
+
+
+def _deduplicate_findings(findings: List[dict]) -> List[dict]:
+    """Drop duplicate findings from per-file Spectral runs.
+
+    When the same external schema is resolved independently by multiple
+    input files, identical findings appear once per invocation.  Keep
+    only the first occurrence based on ``(path, line, engine_rule)``.
+    """
+    seen: set[tuple] = set()
+    result: List[dict] = []
+    for f in findings:
+        key = (f.get("path", ""), f.get("line", 0), f.get("engine_rule", ""))
+        if key not in seen:
+            seen.add(key)
+            result.append(f)
+    return result
+
+
 def run_spectral_engine(
     repo_path: Path,
     config_dir: Path,
@@ -396,10 +415,19 @@ def run_spectral_engine(
 ) -> List[dict]:
     """Top-level entry point for the orchestrator.
 
-    Selects the appropriate ruleset, invokes Spectral, and returns a list
-    of findings conforming to the common findings model.  On adapter-level
-    errors (Spectral not installed, runtime error) a single error finding
-    is returned instead of raising.
+    Selects the appropriate ruleset, invokes Spectral **per file**, and
+    returns a deduplicated list of findings conforming to the common
+    findings model.
+
+    Per-file invocation works around a Spectral document-inventory caching
+    bug (`stoplightio/spectral#2640
+    `_) that causes
+    source attribution loss when multiple input files share external
+    ``$ref`` targets.
+
+    On adapter-level errors (Spectral not installed, runtime error) an
+    error finding is emitted for the affected file and processing
+    continues with the remaining files.
 
     Args:
         repo_path: Root of the repository being validated.
@@ -417,11 +445,29 @@ def run_spectral_engine(
     ruleset = select_ruleset_path(commonalities_release, config_dir)
     logger.info("Using Spectral ruleset: %s", ruleset)
 
-    result = run_spectral(ruleset, spec_patterns, cwd=repo_path)
-
-    if not result.success:
-        logger.error("Spectral engine error: %s", result.error_message)
-        return [_make_error_finding(result.error_message)]
+    spec_files = _resolve_spec_files(spec_patterns, repo_path)
+    if not spec_files:
+        logger.warning("No spec files matched patterns: %s", spec_patterns)
+        return []
 
-    logger.info("Spectral produced %d finding(s)", len(result.findings))
-    return result.findings
+    all_findings: List[dict] = []
+    for spec_file in spec_files:
+        result = run_spectral(ruleset, [spec_file], cwd=repo_path)
+        if not result.success:
+            logger.error("Spectral error on %s: %s", spec_file, result.error_message)
+            all_findings.append(_make_error_finding(
+                f"{result.error_message} ({spec_file})"
+            ))
+            continue
+        logger.info("Spectral: %s — %d finding(s)", spec_file, len(result.findings))
+        all_findings.extend(result.findings)
+
+    deduped = _deduplicate_findings(all_findings)
+    if len(deduped) < len(all_findings):
+        logger.info(
+            "Spectral dedup: %d → %d finding(s) (dropped %d cross-file duplicates)",
+            len(all_findings), len(deduped), len(all_findings) - len(deduped),
+        )
+    logger.info("Spectral produced %d finding(s) across %d file(s)",
+                len(deduped), len(spec_files))
+    return deduped
diff --git a/validation/tests/test_spectral_adapter.py b/validation/tests/test_spectral_adapter.py
index b826f6b0..81d60bda 100644
--- a/validation/tests/test_spectral_adapter.py
+++ b/validation/tests/test_spectral_adapter.py
@@ -13,7 +13,9 @@
     DEFAULT_RULESET,
     ENGINE_NAME,
     SpectralResult,
+    _deduplicate_findings,
     _normalize_path,
+    _resolve_spec_files,
     derive_api_name,
     map_severity,
     normalize_finding,
@@ -351,9 +353,10 @@ def test_repo_root_normalises_paths(self):
         findings = parse_spectral_output(raw, repo_root="/runner/work")
         assert findings[0]["path"] == "code/API_definitions/quality-on-demand.yaml"
 
-    def test_string_restricted_phantom_dropped(self):
-        """Phantom string-restricted findings (no source, range 0:0) are dropped."""
-        phantom = {
+    def test_sourceless_findings_pass_through(self):
+        """Sourceless findings are not filtered — per-file invocation
+        avoids the shared-cache bug that caused them (spectral#2640)."""
+        sourceless = {
             "code": "owasp:api4:2023-string-restricted",
             "message": "Schema of type string should specify a format.",
             "severity": 1,
@@ -362,25 +365,9 @@ def test_string_restricted_phantom_dropped(self):
             "range": {"start": {"line": 0, "character": 0},
                       "end": {"line": 0, "character": 0}},
         }
-        raw = json.dumps([SAMPLE_SPECTRAL_FINDING, phantom])
+        raw = json.dumps([SAMPLE_SPECTRAL_FINDING, sourceless])
         findings = parse_spectral_output(raw)
-        assert len(findings) == 1
-        assert findings[0]["engine_rule"] == "camara-parameter-casing-convention"
-
-    def test_other_rule_sourceless_not_dropped(self):
-        """Sourceless findings from other rules are kept (only string-restricted filtered)."""
-        other = {
-            "code": "owasp:api4:2023-string-limit",
-            "message": "Schema of type string must specify maxLength.",
-            "severity": 1,
-            "source": "",
-            "path": ["components", "schemas", "Foo", "properties", "bar"],
-            "range": {"start": {"line": 0, "character": 0},
-                      "end": {"line": 0, "character": 0}},
-        }
-        raw = json.dumps([other])
-        findings = parse_spectral_output(raw)
-        assert len(findings) == 1
+        assert len(findings) == 2
 
     def test_external_file_findings_downgraded_to_hint(self):
         """Findings from common schemas (followed via $ref) become hints."""
@@ -556,56 +543,188 @@ def test_large_output_over_64kb(self, mock_run, tmp_path):
 # ---------------------------------------------------------------------------
 
 
+class TestResolveSpecFiles:
+    def test_glob_resolves_to_individual_files(self, tmp_path):
+        api_dir = tmp_path / "code" / "API_definitions"
+        api_dir.mkdir(parents=True)
+        (api_dir / "alpha.yaml").touch()
+        (api_dir / "beta.yaml").touch()
+
+        files = _resolve_spec_files(["code/API_definitions/*.yaml"], tmp_path)
+        assert files == [
+            "code/API_definitions/alpha.yaml",
+            "code/API_definitions/beta.yaml",
+        ]
+
+    def test_no_matches_returns_empty(self, tmp_path):
+        assert _resolve_spec_files(["nonexistent/*.yaml"], tmp_path) == []
+
+    def test_deduplicates_overlapping_patterns(self, tmp_path):
+        api_dir = tmp_path / "code" / "API_definitions"
+        api_dir.mkdir(parents=True)
+        (api_dir / "api.yaml").touch()
+
+        files = _resolve_spec_files(
+            ["code/API_definitions/*.yaml", "code/API_definitions/api.yaml"],
+            tmp_path,
+        )
+        assert files == ["code/API_definitions/api.yaml"]
+
+    def test_multiple_patterns(self, tmp_path):
+        api_dir = tmp_path / "code" / "API_definitions"
+        bundled_dir = tmp_path / "bundled"
+        api_dir.mkdir(parents=True)
+        bundled_dir.mkdir()
+        (api_dir / "api.yaml").touch()
+        (bundled_dir / "bundled.yaml").touch()
+
+        files = _resolve_spec_files(
+            ["code/API_definitions/*.yaml", "bundled/*.yaml"], tmp_path,
+        )
+        assert "code/API_definitions/api.yaml" in files
+        assert "bundled/bundled.yaml" in files
+
+
+# ---------------------------------------------------------------------------
+# TestDeduplicateFindings
+# ---------------------------------------------------------------------------
+
+
+class TestDeduplicateFindings:
+    def test_identical_findings_deduped(self):
+        f1 = {"path": "common.yaml", "line": 72, "engine_rule": "rule-a",
+               "level": "hint", "message": "msg"}
+        f2 = {"path": "common.yaml", "line": 72, "engine_rule": "rule-a",
+               "level": "hint", "message": "msg"}
+        assert len(_deduplicate_findings([f1, f2])) == 1
+
+    def test_different_lines_kept(self):
+        f1 = {"path": "common.yaml", "line": 72, "engine_rule": "rule-a"}
+        f2 = {"path": "common.yaml", "line": 76, "engine_rule": "rule-a"}
+        assert len(_deduplicate_findings([f1, f2])) == 2
+
+    def test_different_rules_kept(self):
+        f1 = {"path": "common.yaml", "line": 72, "engine_rule": "rule-a"}
+        f2 = {"path": "common.yaml", "line": 72, "engine_rule": "rule-b"}
+        assert len(_deduplicate_findings([f1, f2])) == 2
+
+    def test_different_files_kept(self):
+        f1 = {"path": "api-a.yaml", "line": 10, "engine_rule": "rule-a"}
+        f2 = {"path": "api-b.yaml", "line": 10, "engine_rule": "rule-a"}
+        assert len(_deduplicate_findings([f1, f2])) == 2
+
+    def test_preserves_order(self):
+        findings = [
+            {"path": "b.yaml", "line": 1, "engine_rule": "r1"},
+            {"path": "a.yaml", "line": 1, "engine_rule": "r1"},
+            {"path": "b.yaml", "line": 1, "engine_rule": "r1"},  # dup
+        ]
+        result = _deduplicate_findings(findings)
+        assert len(result) == 2
+        assert result[0]["path"] == "b.yaml"
+        assert result[1]["path"] == "a.yaml"
+
+    def test_empty_list(self):
+        assert _deduplicate_findings([]) == []
+
+
+# ---------------------------------------------------------------------------
+# TestRunSpectralEngine
+# ---------------------------------------------------------------------------
+
+
 class TestRunSpectralEngine:
-    @patch("validation.engines.spectral_adapter.run_spectral")
-    def test_normal_execution(self, mock_run, tmp_path):
-        findings = [{"engine": "spectral", "engine_rule": "r1", "level": "warn",
-                      "message": "m", "path": "f.yaml", "line": 1}]
-        mock_run.return_value = SpectralResult(findings=findings, success=True)
+    def _make_spec_files(self, tmp_path, names):
+        """Create spec files and return the tmp_path for use as repo_path."""
+        api_dir = tmp_path / "code" / "API_definitions"
+        api_dir.mkdir(parents=True)
+        for name in names:
+            (api_dir / name).touch()
         (tmp_path / ".spectral.yaml").touch()
+        return tmp_path
+
+    @patch("validation.engines.spectral_adapter.run_spectral")
+    def test_invokes_spectral_per_file(self, mock_run, tmp_path):
+        """Each spec file gets its own Spectral invocation."""
+        repo = self._make_spec_files(tmp_path, ["alpha.yaml", "beta.yaml"])
+        mock_run.return_value = SpectralResult(findings=[], success=True)
+
+        run_spectral_engine(repo, repo)
+        assert mock_run.call_count == 2
+        # Each call gets a single-element list.
+        calls = [c[0][1] for c in mock_run.call_args_list]
+        assert ["code/API_definitions/alpha.yaml"] in calls
+        assert ["code/API_definitions/beta.yaml"] in calls
 
-        result = run_spectral_engine(tmp_path, tmp_path, commonalities_release="r4.1")
-        assert result == findings
+    @patch("validation.engines.spectral_adapter.run_spectral")
+    def test_merges_findings_across_files(self, mock_run, tmp_path):
+        repo = self._make_spec_files(tmp_path, ["a.yaml", "b.yaml"])
+
+        def per_file(ruleset, patterns, cwd):
+            name = patterns[0].split("/")[-1]
+            return SpectralResult(
+                findings=[{"engine": "spectral", "engine_rule": "r1",
+                           "level": "warn", "message": name,
+                           "path": patterns[0], "line": 1}],
+                success=True,
+            )
+        mock_run.side_effect = per_file
+
+        result = run_spectral_engine(repo, repo)
+        assert len(result) == 2
 
     @patch("validation.engines.spectral_adapter.run_spectral")
-    def test_spectral_error_returns_error_finding(self, mock_run, tmp_path):
+    def test_deduplicates_common_file_findings(self, mock_run, tmp_path):
+        """Findings from shared code/common/ schemas are deduped across files."""
+        repo = self._make_spec_files(tmp_path, ["a.yaml", "b.yaml"])
+        common_finding = {"engine": "spectral", "engine_rule": "owasp-rule",
+                          "level": "hint", "message": "msg",
+                          "path": "code/common/CAMARA_common.yaml", "line": 72}
+
         mock_run.return_value = SpectralResult(
-            findings=[], success=False, error_message="CLI not found",
+            findings=[common_finding], success=True,
         )
-        (tmp_path / ".spectral.yaml").touch()
 
-        result = run_spectral_engine(tmp_path, tmp_path)
+        result = run_spectral_engine(repo, repo)
+        # Same finding from two files → kept once.
         assert len(result) == 1
-        assert result[0]["level"] == "error"
-        assert result[0]["engine_rule"] == "spectral-execution-error"
-        assert "CLI not found" in result[0]["message"]
 
     @patch("validation.engines.spectral_adapter.run_spectral")
-    def test_default_spec_patterns(self, mock_run, tmp_path):
-        mock_run.return_value = SpectralResult(findings=[], success=True)
-        (tmp_path / ".spectral.yaml").touch()
-
-        run_spectral_engine(tmp_path, tmp_path)
-        call_args = mock_run.call_args
-        assert call_args[0][1] == ["code/API_definitions/*.yaml"]
+    def test_error_on_one_file_continues_others(self, mock_run, tmp_path):
+        repo = self._make_spec_files(tmp_path, ["good.yaml", "bad.yaml"])
+        good_finding = {"engine": "spectral", "engine_rule": "r1",
+                        "level": "warn", "message": "m",
+                        "path": "code/API_definitions/good.yaml", "line": 1}
+
+        def per_file(ruleset, patterns, cwd):
+            if "bad.yaml" in patterns[0]:
+                return SpectralResult(findings=[], success=False,
+                                      error_message="CLI not found")
+            return SpectralResult(findings=[good_finding], success=True)
+        mock_run.side_effect = per_file
+
+        result = run_spectral_engine(repo, repo)
+        # One real finding + one error finding for the bad file.
+        assert len(result) == 2
+        error_findings = [f for f in result if f["level"] == "error"]
+        assert len(error_findings) == 1
+        assert "bad.yaml" in error_findings[0]["message"]
 
     @patch("validation.engines.spectral_adapter.run_spectral")
-    def test_custom_spec_patterns(self, mock_run, tmp_path):
-        mock_run.return_value = SpectralResult(findings=[], success=True)
+    def test_no_matching_files_returns_empty(self, mock_run, tmp_path):
         (tmp_path / ".spectral.yaml").touch()
-
-        custom = ["bundled/*.yaml"]
-        run_spectral_engine(tmp_path, tmp_path, spec_patterns=custom)
-        call_args = mock_run.call_args
-        assert call_args[0][1] == custom
+        # No spec files created.
+        result = run_spectral_engine(tmp_path, tmp_path)
+        assert result == []
+        mock_run.assert_not_called()
 
     @patch("validation.engines.spectral_adapter.run_spectral")
     def test_ruleset_selection_uses_commonalities(self, mock_run, tmp_path):
         """Verifies that the correct ruleset is selected and passed."""
-        mock_run.return_value = SpectralResult(findings=[], success=True)
+        repo = self._make_spec_files(tmp_path, ["api.yaml"])
         r4 = tmp_path / ".spectral-r4.yaml"
         r4.touch()
+        mock_run.return_value = SpectralResult(findings=[], success=True)
 
-        run_spectral_engine(tmp_path, tmp_path, commonalities_release="r4.2")
-        call_args = mock_run.call_args
-        assert call_args[0][0] == r4
+        run_spectral_engine(repo, repo, commonalities_release="r4.2")
+        assert mock_run.call_args[0][0] == r4

From 4f5f46488d01a1f124803b9da18bbac28a2c8e71 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Mon, 13 Apr 2026 12:17:18 +0200
Subject: [PATCH 072/157] feat(validation): add P-020 inline CloudEvent warning
 and P-006 hint text
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

P-020 (check-cloudevent-via-ref) — new Python check that warns when a
subscription API defines components.schemas.CloudEvent inline (top-level
properties present) instead of consuming it via $ref to
CAMARA_event_common.yaml. Detection ignores the $ref-only and allOf-with-
$ref migration forms (no top-level properties). Applies to explicit- and
implicit-subscription APIs.

P-006 (check-test-files-exist) — strengthen severity model so the
lifecycle expectation is visible on every release type:
  - default (alpha, wip, non-rc): hint
  - initial (0.x) + rc/public:    warn
  - stable  (>=1.x) + rc/public:  error
Static hint text added explaining the maturity-vs-release-type matrix.

Tests: 7 new TestCheckCloudEventViaRef cases. Rule integrity counters
bumped (python: 19 -> 20, hints: 11 -> 13). Rule inventory tracks P-020
as NEW-004.
---
 validation/engines/python_checks/__init__.py  |   2 +
 .../python_checks/subscription_checks.py      |  59 ++++++++++
 validation/rules/python-rules.yaml            |  35 ++++++
 validation/rules/rule-inventory.yaml          |  10 +-
 .../tests/test_python_checks_subscription.py  | 104 +++++++++++++++++-
 .../tests/test_rule_metadata_integrity.py     |   6 +-
 6 files changed, 210 insertions(+), 6 deletions(-)

diff --git a/validation/engines/python_checks/__init__.py b/validation/engines/python_checks/__init__.py
index 0acf6602..dc4344eb 100644
--- a/validation/engines/python_checks/__init__.py
+++ b/validation/engines/python_checks/__init__.py
@@ -14,6 +14,7 @@
 from .release_plan_checks import check_orphan_api_definitions, check_release_plan_semantics
 from .release_review_checks import check_release_review_file_restriction
 from .subscription_checks import (
+    check_cloudevent_via_ref,
     check_event_type_format,
     check_sinkcredential_not_in_response,
     check_subscription_filename,
@@ -44,6 +45,7 @@
     CheckDescriptor("check-subscription-filename", CheckScope.API, check_subscription_filename),
     CheckDescriptor("check-event-type-format", CheckScope.API, check_event_type_format),
     CheckDescriptor("check-sinkcredential-not-in-response", CheckScope.API, check_sinkcredential_not_in_response),
+    CheckDescriptor("check-cloudevent-via-ref", CheckScope.API, check_cloudevent_via_ref),
     CheckDescriptor("check-conflict-deprecated", CheckScope.API, check_conflict_deprecated),
     CheckDescriptor("check-contextcode-format", CheckScope.API, check_contextcode_format),
     # --- Repo-level checks (run once) ---
diff --git a/validation/engines/python_checks/subscription_checks.py b/validation/engines/python_checks/subscription_checks.py
index fbc30788..66795839 100644
--- a/validation/engines/python_checks/subscription_checks.py
+++ b/validation/engines/python_checks/subscription_checks.py
@@ -220,3 +220,62 @@ def check_sinkcredential_not_in_response(
             )
 
     return findings
+
+
+# ---------------------------------------------------------------------------
+# P-020: check-cloudevent-via-ref
+# ---------------------------------------------------------------------------
+
+
+def check_cloudevent_via_ref(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Warn when CloudEvent is defined inline instead of via $ref.
+
+    Subscription APIs should consume the shared CloudEvent schema from
+    CAMARA_event_common.yaml via ``$ref`` (or ``allOf`` + ``$ref``) rather
+    than maintaining a local inline copy. Inline copies drift from the
+    Commonalities source and block bundling-based reuse.
+
+    Detection: the rule fires when ``components.schemas.CloudEvent`` is
+    present and has a top-level ``properties`` key. The ``$ref``-only
+    form and the ``allOf: [{$ref: ...}]`` migration form have no
+    top-level ``properties`` and are not flagged.
+    """
+    api = context.apis[0]
+
+    if api.api_pattern not in ("explicit-subscription", "implicit-subscription"):
+        return []
+
+    if api.spec_file.endswith("CAMARA_event_common.yaml"):
+        return []
+
+    spec = load_yaml_safe(repo_path / api.spec_file)
+    if spec is None:
+        return []
+
+    schemas = spec.get("components", {}).get("schemas", {})
+    if not isinstance(schemas, dict):
+        return []
+
+    cloudevent = schemas.get("CloudEvent")
+    if not isinstance(cloudevent, dict):
+        return []
+
+    if "properties" not in cloudevent:
+        return []
+
+    return [
+        make_finding(
+            engine_rule="check-cloudevent-via-ref",
+            level="warn",
+            message=(
+                f"CloudEvent is defined inline in {api.spec_file}. "
+                f"Consume the shared schema from CAMARA_event_common.yaml "
+                f"via $ref instead of maintaining a local copy."
+            ),
+            path=api.spec_file,
+            line=1,
+            api_name=api.api_name,
+        )
+    ]
diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
index 7196f0d7..cba25cf3 100644
--- a/validation/rules/python-rules.yaml
+++ b/validation/rules/python-rules.yaml
@@ -41,6 +41,11 @@
     default: error
 
 # P-006: check-test-files-exist
+#
+# Severity is a function of API maturity and release type:
+#   - default (alpha, wip, any non-rc release): hint
+#   - initial (0.x) + rc/public release: warn
+#   - stable  (>=1.x) + rc/public release: error
 - id: P-006
   engine: python
   engine_rule: check-test-files-exist
@@ -50,7 +55,17 @@
       - condition:
           target_api_maturity: [stable]
           target_release_type: [pre-release-rc, public-release]
+        level: error
+      - condition:
+          target_api_maturity: [initial]
+          target_release_type: [pre-release-rc, public-release]
         level: warn
+  hint: >-
+    Test files are optional for alpha releases but expected before the
+    first release candidate. On stable (>=1.x) APIs they are required
+    at rc and public release; on initial (0.x) APIs they are strongly
+    recommended. See CAMARA Testing Guidelines for the expected file
+    layout.
 
 # P-007: check-test-file-version
 # Parses the Feature line of .feature files to extract the version
@@ -194,3 +209,23 @@
   engine_rule: check-orphan-api-definitions
   conditional_level:
     default: warn
+
+# P-020: check-cloudevent-via-ref
+# Subscription APIs should consume CloudEvent via $ref to
+# CAMARA_event_common.yaml rather than maintaining a local inline copy.
+# Detection: components.schemas.CloudEvent exists with top-level
+# `properties`. The $ref-only and `allOf: [{$ref: ...}]` forms have no
+# top-level `properties` and are not flagged.
+- id: P-020
+  engine: python
+  engine_rule: check-cloudevent-via-ref
+  applicability:
+    api_pattern: [explicit-subscription, implicit-subscription]
+  conditional_level:
+    default: warn
+  hint: >-
+    Replace the local CloudEvent schema with
+    `$ref: './CAMARA_event_common.yaml#/components/schemas/CloudEvent'`,
+    or an allOf combining the $ref with an API-specific ApiEventType
+    schema. See implicit-events API template in Commonalities
+    artifacts/api-templates/.
diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index 7a0bc330..2957c14a 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -14,7 +14,7 @@ version: 1
 generated: 2026-04-07
 
 summary:
-  total_implemented: 141
+  total_implemented: 142
   total_gap: 0
   total_manual: 25
   total_pending: 0
@@ -22,7 +22,7 @@ summary:
   by_engine:
     spectral: 84
     gherkin: 25
-    python: 19
+    python: 20
     yamllint: 13
 
 # ---------------------------------------------------------------------------
@@ -199,6 +199,12 @@ gap_rules:
     status: implemented
     rule_id: P-019
 
+  - audit_id: NEW-004
+    description: "CloudEvent schema should be consumed via $ref to CAMARA_event_common.yaml, not maintained as a local inline copy"
+    target_engine: python
+    status: implemented
+    rule_id: P-020
+
 # ---------------------------------------------------------------------------
 # Fixes needed — implemented rules with incorrect behavior
 # ---------------------------------------------------------------------------
diff --git a/validation/tests/test_python_checks_subscription.py b/validation/tests/test_python_checks_subscription.py
index 5e2eb82b..f835954a 100644
--- a/validation/tests/test_python_checks_subscription.py
+++ b/validation/tests/test_python_checks_subscription.py
@@ -1,4 +1,4 @@
-"""Unit tests for subscription checks (P-014, P-015, P-016)."""
+"""Unit tests for subscription checks (P-014, P-015, P-016, P-020)."""
 
 from __future__ import annotations
 
@@ -8,6 +8,7 @@
 
 from validation.context import ApiContext, ValidationContext
 from validation.engines.python_checks.subscription_checks import (
+    check_cloudevent_via_ref,
     check_event_type_format,
     check_sinkcredential_not_in_response,
     check_subscription_filename,
@@ -392,3 +393,104 @@ def test_external_ref_sinkcredential_detected(self, tmp_path: Path):
         ctx = _make_context(api_name=api_name)
         findings = check_sinkcredential_not_in_response(tmp_path, ctx)
         assert len(findings) == 1
+
+
+# ---------------------------------------------------------------------------
+# P-020: check-cloudevent-via-ref
+# ---------------------------------------------------------------------------
+
+
+def _spec_with_cloudevent(cloudevent_schema: dict) -> dict:
+    """Build a minimal subscription spec with a CloudEvent schema."""
+    return {
+        "openapi": "3.0.3",
+        "info": {"title": "Test", "version": "wip"},
+        "paths": {"/subscriptions": {"post": {"responses": {"201": {}}}}},
+        "components": {"schemas": {"CloudEvent": cloudevent_schema}},
+    }
+
+
+class TestCheckCloudEventViaRef:
+    def test_inline_cloudevent_warns(self, tmp_path: Path):
+        api_name = "device-status-subscriptions"
+        spec = _spec_with_cloudevent({
+            "type": "object",
+            "required": ["id", "type"],
+            "properties": {
+                "id": {"type": "string"},
+                "type": {
+                    "type": "string",
+                    "enum": [f"org.camaraproject.{api_name}.v0.status-changed"],
+                },
+            },
+        })
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        findings = check_cloudevent_via_ref(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "warn"
+        assert findings[0]["engine_rule"] == "check-cloudevent-via-ref"
+        assert "inline" in findings[0]["message"]
+        assert findings[0]["api_name"] == api_name
+
+    def test_ref_only_cloudevent_ok(self, tmp_path: Path):
+        api_name = "device-status-subscriptions"
+        spec = _spec_with_cloudevent({
+            "$ref": "./CAMARA_event_common.yaml#/components/schemas/CloudEvent",
+        })
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        assert check_cloudevent_via_ref(tmp_path, ctx) == []
+
+    def test_allof_with_ref_ok(self, tmp_path: Path):
+        """allOf + $ref migration form has no top-level properties — no finding."""
+        api_name = "device-status-subscriptions"
+        spec = _spec_with_cloudevent({
+            "allOf": [
+                {"$ref": "./CAMARA_event_common.yaml#/components/schemas/CloudEvent"},
+            ],
+        })
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        assert check_cloudevent_via_ref(tmp_path, ctx) == []
+
+    def test_no_cloudevent_schema_ok(self, tmp_path: Path):
+        api_name = "device-status-subscriptions"
+        spec = {
+            "openapi": "3.0.3",
+            "info": {"title": "Test", "version": "wip"},
+            "paths": {"/subscriptions": {"post": {"responses": {"201": {}}}}},
+            "components": {"schemas": {"OtherSchema": {"type": "object"}}},
+        }
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name)
+        assert check_cloudevent_via_ref(tmp_path, ctx) == []
+
+    def test_implicit_subscription_inline_warns(self, tmp_path: Path):
+        api_name = "device-status"
+        spec = _spec_with_cloudevent({
+            "type": "object",
+            "properties": {
+                "type": {"type": "string", "enum": ["foo"]},
+            },
+        })
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name, api_pattern="implicit-subscription")
+        findings = check_cloudevent_via_ref(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "warn"
+
+    def test_request_response_skip(self, tmp_path: Path):
+        """Request-response APIs do not define CloudEvent — skip even if inline."""
+        api_name = "device-status"
+        spec = _spec_with_cloudevent({
+            "type": "object",
+            "properties": {"type": {"type": "string"}},
+        })
+        _write_spec(tmp_path, api_name=api_name, spec_content=spec)
+        ctx = _make_context(api_name=api_name, api_pattern="request-response")
+        assert check_cloudevent_via_ref(tmp_path, ctx) == []
+
+    def test_missing_spec_file(self, tmp_path: Path):
+        ctx = _make_context()
+        assert check_cloudevent_via_ref(tmp_path, ctx) == []
diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py
index 019e2c60..e159618b 100644
--- a/validation/tests/test_rule_metadata_integrity.py
+++ b/validation/tests/test_rule_metadata_integrity.py
@@ -77,7 +77,7 @@ def test_expected_rule_counts(self, all_rules):
         counts = {}
         for r in all_rules:
             counts[r.engine] = counts.get(r.engine, 0) + 1
-        assert counts["python"] == 19
+        assert counts["python"] == 20
         assert counts["spectral"] == 84
         assert counts["gherkin"] == 25
         assert counts["yamllint"] == 13
@@ -306,8 +306,8 @@ def test_hints_are_exception_not_norm(self, all_rules):
         """
         with_hints = [r.id for r in all_rules if r.hint is not None]
         with_overrides = [r.id for r in all_rules if r.message_override is not None]
-        assert len(with_hints) == 11, (
-            f"Expected 11 explicit hints (update test if adding hints): "
+        assert len(with_hints) == 13, (
+            f"Expected 13 explicit hints (update test if adding hints): "
             f"{with_hints}"
         )
         assert len(with_overrides) == 0, (

From 52fed7c1b74ab7c2dcc0dbd74ddf32c27e03b061 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Mon, 13 Apr 2026 12:36:39 +0200
Subject: [PATCH 073/157] feat(validation): default stage advisory for
 progressive rollout

Flip the central validation-config.yaml default stage from disabled to
advisory. Repos with explicit overrides under repositories: are
unaffected (ReleaseTest stays at enabled). Repos that currently fall
through to the default can now be exercised via workflow_dispatch;
pull_request events remain skipped at the stage gate.
---
 validation/config/validation-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/validation/config/validation-config.yaml b/validation/config/validation-config.yaml
index f47fb572..ede0f59f 100644
--- a/validation/config/validation-config.yaml
+++ b/validation/config/validation-config.yaml
@@ -5,7 +5,7 @@
 version: 1
 
 defaults:
-  stage: disabled
+  stage: advisory
 
 fork_owners:
   - hdamker

From 0fa745b31c9255cd6a52461c1d6b317f47155167 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Mon, 13 Apr 2026 23:54:01 +0200
Subject: [PATCH 074/157] Add regression runner and baseline-pilot
 infrastructure

Introduce a script-based regression runner that dispatches the validation
framework against regression/* branches of a test repository, downloads
findings, and diffs them against a committed regression-expected.yaml
fixture on each branch. This closes the Phase 3 gap documented in
WS07: rule and engine changes can now be verified against stable spec
snapshots.

New files:
- validation/schemas/regression-expected-schema.yaml: JSON Schema for the
  fixture format. Uses (rule_id, path, level) match keys with "at least N"
  count semantics. Falls back to (engine/engine_rule, path, level) for
  findings without a framework rule_id. Exact/subset match modes.
- validation/scripts/regression_runner.py: single-module CLI runner with
  GitHub I/O via gh subprocess, dispatch/poll/download orchestration,
  pure-logic diff and capture functions. Handles annotated-tag dereference
  for tooling_ref, dispatch->run-id disambiguation via timestamp + headSha,
  and base64-decoded content fetch from the GitHub contents API.
- validation/scripts/README.md: usage, prerequisites, exit codes, the
  v1-rc pinning constraint and recapture flow.
- validation/tests/test_regression_runner.py: 24 pure-logic unit tests
  (loader/schema/normalize/diff/capture/markdown, no gh calls).

Updated:
- validation/rules/rule-inventory.yaml: populate tested_rules and bump
  total_tested from 0 to 5 for P-006, S-211, S-313, S-314, S-316, all
  pinned by the new regression/r4.1-main-baseline branch on ReleaseTest.

End-to-end verified on camaraproject/ReleaseTest:
- Capture run 24368275621 (success in ~50s) produced 27 findings in 9
  unique match keys, pinned to v1-rc commit b4c1c3e.
- Verify run 24368472366 (success) PASSed the runner against the
  committed fixture with 27/27 matched, 0 missing, 0 unexpected.
- Four perturbation scenarios (delete expected entry, inflate count,
  wrong summary, baseline sanity) all produced the expected
  PASS/FAIL outcomes via direct diff_findings calls.

Note: the caller workflow hardcodes @v1-rc and does not forward
workflow_dispatch inputs, so OIDC inside the reusable locks to whatever
commit v1-rc currently points at. Fixtures are implicitly pinned to that
ref; recapture when v1-rc moves.
---
 validation/rules/rule-inventory.yaml          |  19 +-
 .../schemas/regression-expected-schema.yaml   | 127 +++
 validation/scripts/README.md                  |  98 ++
 validation/scripts/regression_runner.py       | 883 ++++++++++++++++++
 validation/tests/test_regression_runner.py    | 448 +++++++++
 5 files changed, 1569 insertions(+), 6 deletions(-)
 create mode 100644 validation/schemas/regression-expected-schema.yaml
 create mode 100644 validation/scripts/README.md
 create mode 100644 validation/scripts/regression_runner.py
 create mode 100644 validation/tests/test_regression_runner.py

diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index 2957c14a..cc24583c 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -18,7 +18,7 @@ summary:
   total_gap: 0
   total_manual: 25
   total_pending: 0
-  total_tested: 0
+  total_tested: 5
   by_engine:
     spectral: 84
     gherkin: 25
@@ -285,12 +285,19 @@ pending_rules:
 # Source: private-dev-docs/validation-framework/reviews/testing-guidelines-audit.md
 
 # ---------------------------------------------------------------------------
-# Tested rules — verified via regression branches (Phase 1b)
+# Tested rules — verified via regression branches (WS07 Phase 3)
 # ---------------------------------------------------------------------------
-# Updated as regression branches verify rules.
-# Format: rule_id: regression_branch (or list of branches)
-
-tested_rules: {}
+# Updated as regression branches verify rules. Each rule lists the branches
+# where its expected behaviour is pinned by a regression-expected.yaml
+# fixture. Populated by scripts/regression_runner.py runs (capture mode).
+# Format: rule_id: [branch, ...]
+
+tested_rules:
+  P-006: [regression/r4.1-main-baseline]
+  S-211: [regression/r4.1-main-baseline]
+  S-313: [regression/r4.1-main-baseline]
+  S-314: [regression/r4.1-main-baseline]
+  S-316: [regression/r4.1-main-baseline]
 
 # ---------------------------------------------------------------------------
 # Manual rules — require human judgment
diff --git a/validation/schemas/regression-expected-schema.yaml b/validation/schemas/regression-expected-schema.yaml
new file mode 100644
index 00000000..10352463
--- /dev/null
+++ b/validation/schemas/regression-expected-schema.yaml
@@ -0,0 +1,127 @@
+$schema: "http://json-schema.org/draft-07/schema#"
+title: CAMARA Validation Regression Expected Findings
+description: |
+  Schema for regression-expected.yaml fixtures committed to regression/* branches
+  of test repositories. Each fixture declares the expected validation findings for
+  a specific themed branch; the regression runner dispatches the validation
+  framework against the branch, downloads the findings, and diffs them against
+  this file.
+
+  Match semantics are set-based on (rule_id, path, level) tuples, with per-key
+  "at least N" count thresholds. Line numbers and finding messages are NOT part
+  of the match key - they drift as specs evolve.
+
+type: object
+additionalProperties: false
+required:
+  - schema_version
+  - branch
+  - findings
+
+properties:
+  schema_version:
+    type: integer
+    enum: [1]
+    description: Schema version for forward compatibility.
+
+  branch:
+    type: string
+    description: >
+      The regression branch this fixture pins. Informational; the runner
+      resolves the branch from its filter arguments, not from this field.
+
+  description:
+    type: string
+    description: Human-readable summary of what this branch tests.
+
+  captured_at:
+    type: string
+    format: date-time
+    description: ISO-8601 timestamp when the fixture was last captured.
+
+  captured_from_run:
+    type: string
+    description: URL of the workflow run that produced this fixture.
+
+  tooling_ref:
+    type: string
+    description: >
+      40-character SHA of the tooling commit that produced the expected findings.
+      For dispatches from API repositories, this is the SHA that v1-rc pointed
+      at during capture. If v1-rc moves, the fixture must be recaptured.
+    pattern: "^[0-9a-f]{40}$"
+
+  match_mode:
+    type: string
+    enum: [exact, subset]
+    default: exact
+    description: >
+      exact  - any actual finding whose match key is not in `findings` causes
+               FAIL. Count surpluses also cause FAIL.
+      subset - extra actual findings are allowed; only missing expected findings
+               cause FAIL. Use as an escape hatch for flaky rules.
+
+  summary:
+    type: object
+    additionalProperties: false
+    description: >
+      Expected aggregate counts across all findings. Compared against
+      summary.json.counts before per-finding diffing. A mismatch here causes
+      FAIL even if no per-finding mismatch exists.
+    properties:
+      errors:
+        type: integer
+        minimum: 0
+      warnings:
+        type: integer
+        minimum: 0
+      hints:
+        type: integer
+        minimum: 0
+
+  findings:
+    type: array
+    description: >
+      Expected findings. Each entry identifies a unique (rule_id, path, level)
+      tuple (or (engine/engine_rule, path, level) fallback). Duplicate match
+      keys within a file are rejected - collapse them into a single entry with
+      `count`.
+    items:
+      type: object
+      additionalProperties: false
+      required:
+        - path
+        - level
+      properties:
+        rule_id:
+          type: string
+          pattern: "^[A-Z]-[0-9]{3}$"
+          description: Framework rule ID (e.g. S-042, P-015).
+        engine:
+          type: string
+          enum: [spectral, yamllint, gherkin, python]
+          description: >
+            Validation engine that produced the finding. Required only when
+            rule_id is absent.
+        engine_rule:
+          type: string
+          description: >
+            Native rule identifier within the engine. Required only when
+            rule_id is absent.
+        path:
+          type: string
+          description: Repository-relative file path (exact match, no globs).
+        level:
+          type: string
+          enum: [error, warn, hint]
+        count:
+          type: integer
+          minimum: 1
+          default: 1
+          description: >
+            Minimum number of matching findings required. "At least N" semantics
+            in both exact and subset modes. A spec fix that removes one of three
+            duplicate hints is not a regression.
+      oneOf:
+        - required: [rule_id]
+        - required: [engine, engine_rule]
diff --git a/validation/scripts/README.md b/validation/scripts/README.md
new file mode 100644
index 00000000..aa428069
--- /dev/null
+++ b/validation/scripts/README.md
@@ -0,0 +1,98 @@
+# Validation Framework — Scripts
+
+CLI entry points for the validation framework. Callable both from reusable
+workflow steps and from a developer workstation.
+
+## `validate-release-plan.py`
+
+Validates `release-plan.yaml` files against the JSON schema and semantic rules.
+Called by `pr_validation` via `shared-actions/validate-release-plan`. Do not
+modify its CLI or exit codes without updating that action.
+
+```
+python3 validate-release-plan.py  [--check-files]
+```
+
+## `regression_runner.py`
+
+Dispatches the validation framework against `regression/*` branches of a test
+repository, downloads findings, and diffs them against the committed
+`.regression/regression-expected.yaml` fixture on each branch.
+
+### Prerequisites
+
+- Python 3.11+ with `pyyaml` and `jsonschema`
+- `gh` CLI installed and authenticated (`gh auth status` must be green)
+- The test repo must have the Validation Framework caller workflow installed
+  (`.github/workflows/camara-validation.yml`)
+- Each `regression/*` branch must contain `.regression/regression-expected.yaml`
+  conforming to `validation/schemas/regression-expected-schema.yaml`
+
+### Run
+
+```
+python3 validation/scripts/regression_runner.py \
+    --repo camaraproject/ReleaseTest \
+    [--branch-filter 'regression/r4.1-*'] \
+    [--workflow-file camara-validation.yml] \
+    [--poll-interval 15] [--poll-timeout 1800] \
+    [--summary-file regression-summary.md]
+```
+
+Exit codes:
+
+| Code | Meaning |
+|---|---|
+| 0 | all branches PASS |
+| 1 | one or more branches FAIL (diff mismatch) |
+| 2 | infrastructure failure (gh error, timeout, missing artifact, schema invalid) |
+
+### Capture a new fixture
+
+```
+python3 validation/scripts/regression_runner.py \
+    --repo camaraproject/ReleaseTest \
+    --capture regression/r4.1-main-baseline \
+    --out /tmp/expected.yaml \
+    [--capture-description "baseline"]
+```
+
+Review the generated file, commit it to the branch at
+`.regression/regression-expected.yaml`, then re-run the runner without
+`--capture` to verify PASS.
+
+### Fixture match semantics
+
+- Match key is `(rule_id, path, level)` — or `(engine/engine_rule, path, level)`
+  when the framework has no `rule_id` for the rule.
+- Line numbers and messages are **not** part of the match key.
+- `count` means "at least N" in both `exact` and `subset` modes.
+- `match_mode: exact` (default) fails on unexpected extra findings;
+  `match_mode: subset` allows extras and only fails on missing expected findings.
+- The optional top-level `summary` block is checked against
+  `summary.json.counts` before per-finding diffing; any mismatch there is a
+  separate failure axis.
+
+### Tooling ref pinning (known constraint)
+
+The caller workflow hardcodes `uses: camaraproject/tooling/.github/workflows/validation.yml@v1-rc`
+and does not forward `workflow_dispatch` inputs to the reusable. OIDC
+resolution inside the reusable therefore locks to whatever commit `v1-rc`
+currently points at — a local `gh workflow run` cannot override this.
+
+Fixtures are implicitly pinned to that ref. Record the current `v1-rc` SHA in
+each branch's `REGRESSION.md` (`gh api repos/camaraproject/tooling/git/refs/tags/v1-rc --jq '.object.sha'`).
+If `v1-rc` moves, recapture the fixtures.
+
+### Troubleshooting
+
+- **`gh CLI not found`** — install from  and run
+  `gh auth login`.
+- **`timed out waiting for dispatched run to appear`** — GitHub Actions
+  backlog; retry after a minute, or raise `--poll-timeout`.
+- **`findings.json not found in downloaded artifact`** — the workflow run
+  probably failed before the output step. Check the run URL printed in
+  the log.
+- **Capture-then-verify fails on immediate re-run** — the validation output
+  is non-deterministic for this branch. Treat as a framework bug, not a
+  runner bug; stop and investigate.
diff --git a/validation/scripts/regression_runner.py b/validation/scripts/regression_runner.py
new file mode 100644
index 00000000..4866d475
--- /dev/null
+++ b/validation/scripts/regression_runner.py
@@ -0,0 +1,883 @@
+#!/usr/bin/env python3
+"""
+CAMARA Validation Framework — Regression Runner
+
+Dispatches the Validation Framework against regression/* branches of a test
+repository, downloads findings, and diffs them against a committed
+regression-expected.yaml fixture on each branch.
+
+Usage:
+    python3 regression_runner.py --repo camaraproject/ReleaseTest \\
+        [--branch-filter 'regression/r4.1-*'] \\
+        [--workflow-file camara-validation.yml] \\
+        [--poll-interval 15] [--poll-timeout 1800]
+
+    # Capture an expected-findings fixture from a fresh run:
+    python3 regression_runner.py --repo camaraproject/ReleaseTest \\
+        --capture regression/r4.1-main-baseline --out /tmp/expected.yaml
+
+Exit codes:
+    0  all branches PASS (or capture succeeded)
+    1  one or more branches FAIL (diff mismatch)
+    2  infrastructure failure (gh error, timeout, missing artifact, invalid schema)
+
+Design reference: private-dev-docs/validation-framework/session-logs/
+  (initial session — WS07 Phase 3 regression infrastructure)
+"""
+
+from __future__ import annotations
+
+import argparse
+import base64
+import fnmatch
+import json
+import logging
+import re
+import subprocess
+import sys
+import tempfile
+import time
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any, Iterable
+
+try:
+    import yaml
+except ImportError:
+    print("Error: pyyaml package is required. Install with: pip install pyyaml")
+    sys.exit(2)
+
+try:
+    import jsonschema
+    from jsonschema import Draft7Validator
+except ImportError:
+    print("Error: jsonschema package is required. Install with: pip install jsonschema")
+    sys.exit(2)
+
+
+logger = logging.getLogger("regression_runner")
+
+
+# ---------------------------------------------------------------------------
+# Types and errors
+# ---------------------------------------------------------------------------
+
+
+class InfrastructureError(RuntimeError):
+    """Raised for gh errors, missing artifacts, schema failures, or timeouts.
+
+    Distinct from a failed diff (which is a regression, not infrastructure).
+    Infrastructure errors map to exit code 2; diff failures map to exit 1.
+    """
+
+
+# Match key = (rule_key, path, level). rule_key is the framework rule_id
+# when present, otherwise f"{engine}/{engine_rule}". Level is the
+# post-filter level string ("error", "warn", "hint").
+MatchKey = tuple[str, str, str]
+
+
+@dataclass
+class DiffReport:
+    branch: str
+    match_mode: str
+    matched: int
+    missing: list[dict[str, Any]] = field(default_factory=list)
+    unexpected: list[dict[str, Any]] = field(default_factory=list)
+    summary_mismatch: str | None = None
+
+    @property
+    def passed(self) -> bool:
+        return (
+            not self.missing
+            and not self.unexpected
+            and self.summary_mismatch is None
+        )
+
+
+# ---------------------------------------------------------------------------
+# Paths
+# ---------------------------------------------------------------------------
+
+
+def _repo_root() -> Path:
+    return Path(__file__).resolve().parents[2]
+
+
+def _schema_path() -> Path:
+    return _repo_root() / "validation" / "schemas" / "regression-expected-schema.yaml"
+
+
+# ---------------------------------------------------------------------------
+# Pure logic — loader, normalize, diff, capture
+# ---------------------------------------------------------------------------
+
+
+def load_expected(source: str | Path) -> dict[str, Any]:
+    """Load and schema-validate a regression-expected.yaml fixture.
+
+    Accepts a Path (file on disk) or a raw YAML string. Raises
+    InfrastructureError on schema violations so that the runner maps to
+    exit code 2 rather than treating a malformed fixture as a regression.
+    """
+    if isinstance(source, Path):
+        text = source.read_text(encoding="utf-8")
+        origin = str(source)
+    else:
+        text = source
+        origin = ""
+
+    try:
+        data = yaml.safe_load(text)
+    except yaml.YAMLError as exc:
+        raise InfrastructureError(f"{origin}: YAML parse error: {exc}") from exc
+
+    if not isinstance(data, dict):
+        raise InfrastructureError(f"{origin}: expected a YAML mapping at the root")
+
+    schema_path = _schema_path()
+    if not schema_path.exists():
+        raise InfrastructureError(f"Schema file not found: {schema_path}")
+    schema = yaml.safe_load(schema_path.read_text(encoding="utf-8"))
+
+    validator = Draft7Validator(schema)
+    errors = sorted(validator.iter_errors(data), key=lambda e: list(e.absolute_path))
+    if errors:
+        lines = [f"{origin}: schema validation failed:"]
+        for err in errors:
+            path = ".".join(str(p) for p in err.absolute_path) or ""
+            lines.append(f"  at {path}: {err.message}")
+        raise InfrastructureError("\n".join(lines))
+
+    # Reject duplicate match keys within the fixture — they must be
+    # collapsed into one entry with `count`.
+    seen: dict[MatchKey, int] = {}
+    for idx, item in enumerate(data.get("findings", [])):
+        key = _expected_key(item)
+        if key in seen:
+            raise InfrastructureError(
+                f"{origin}: duplicate finding entry at index {idx} "
+                f"(match key already at index {seen[key]}). "
+                f"Collapse into one entry with count."
+            )
+        seen[key] = idx
+
+    return data
+
+
+def _expected_key(entry: dict[str, Any]) -> MatchKey:
+    """Compute the match key for an expected-finding entry."""
+    if "rule_id" in entry:
+        rule_key = entry["rule_id"]
+    else:
+        rule_key = f"{entry['engine']}/{entry['engine_rule']}"
+    return (rule_key, entry["path"], entry["level"])
+
+
+def normalize_finding(finding: dict[str, Any]) -> MatchKey:
+    """Compute the match key for an actual finding dict from findings.json.
+
+    Deliberately ignores `line`, `column`, `message`, `api_name`, `hint`,
+    and any engine-specific extras. Uses `rule_id` when present, falling
+    back to `engine/engine_rule` otherwise.
+    """
+    rule_id = finding.get("rule_id")
+    if rule_id:
+        rule_key = rule_id
+    else:
+        engine = finding.get("engine", "?")
+        engine_rule = finding.get("engine_rule", "?")
+        rule_key = f"{engine}/{engine_rule}"
+    return (rule_key, finding.get("path", ""), finding.get("level", ""))
+
+
+def _index_expected(findings: list[dict[str, Any]]) -> dict[MatchKey, int]:
+    counts: dict[MatchKey, int] = {}
+    for entry in findings:
+        counts[_expected_key(entry)] = entry.get("count", 1)
+    return counts
+
+
+def _index_actual(findings: list[dict[str, Any]]) -> dict[MatchKey, int]:
+    counts: dict[MatchKey, int] = {}
+    for finding in findings:
+        key = normalize_finding(finding)
+        counts[key] = counts.get(key, 0) + 1
+    return counts
+
+
+def _check_summary(
+    expected: dict[str, Any] | None,
+    actual_summary: dict[str, Any] | None,
+) -> str | None:
+    if expected is None:
+        return None
+    if actual_summary is None:
+        return "expected `summary` block but no summary.json was found"
+    counts = actual_summary.get("counts", {})
+    mismatches: list[str] = []
+    for key in ("errors", "warnings", "hints"):
+        if key not in expected:
+            continue
+        want = expected[key]
+        have = counts.get(key, 0)
+        if want != have:
+            mismatches.append(f"{key}: expected={want} actual={have}")
+    if mismatches:
+        return "; ".join(mismatches)
+    return None
+
+
+def diff_findings(
+    expected: dict[str, Any],
+    actual: list[dict[str, Any]],
+    actual_summary: dict[str, Any] | None = None,
+) -> DiffReport:
+    """Diff actual findings against an expected fixture.
+
+    Match key is (rule_id_or_engine_rule, path, level). `count` is minimum
+    required — surpluses are only a failure in `exact` match_mode. Line
+    numbers and messages are deliberately ignored.
+    """
+    mode = expected.get("match_mode", "exact")
+    expected_counts = _index_expected(expected.get("findings", []))
+    actual_counts = _index_actual(actual)
+
+    missing: list[dict[str, Any]] = []
+    unexpected: list[dict[str, Any]] = []
+    matched = 0
+
+    for key, need in expected_counts.items():
+        have = actual_counts.get(key, 0)
+        matched += min(need, have)
+        if have < need:
+            missing.append(
+                {
+                    "rule": key[0],
+                    "path": key[1],
+                    "level": key[2],
+                    "expected": need,
+                    "actual": have,
+                }
+            )
+
+    if mode == "exact":
+        for key, have in actual_counts.items():
+            need = expected_counts.get(key, 0)
+            if have > need:
+                unexpected.append(
+                    {
+                        "rule": key[0],
+                        "path": key[1],
+                        "level": key[2],
+                        "expected": need,
+                        "actual": have,
+                    }
+                )
+
+    summary_mismatch = _check_summary(expected.get("summary"), actual_summary)
+
+    return DiffReport(
+        branch=expected.get("branch", ""),
+        match_mode=mode,
+        matched=matched,
+        missing=missing,
+        unexpected=unexpected,
+        summary_mismatch=summary_mismatch,
+    )
+
+
+def capture_to_yaml(
+    actual: list[dict[str, Any]],
+    *,
+    branch: str,
+    run_url: str | None,
+    tooling_ref: str | None,
+    description: str | None = None,
+) -> str:
+    """Group actual findings into a regression-expected.yaml document.
+
+    Collapses duplicate match keys into a single entry with `count`. Emits
+    deterministic ordering (sorted by rule_key, path, level) so that
+    repeated captures produce identical output for clean diffs.
+    """
+    counts: dict[MatchKey, int] = {}
+    for finding in actual:
+        key = normalize_finding(finding)
+        counts[key] = counts.get(key, 0) + 1
+
+    # Aggregate counts (matches summary.json["counts"] shape used by the
+    # VF output pipeline).
+    errors = sum(1 for f in actual if f.get("level") == "error")
+    warnings = sum(1 for f in actual if f.get("level") == "warn")
+    hints = sum(1 for f in actual if f.get("level") == "hint")
+
+    findings_entries: list[dict[str, Any]] = []
+    for (rule_key, path, level), count in sorted(counts.items()):
+        entry: dict[str, Any] = {}
+        if re.match(r"^[A-Z]-[0-9]{3}$", rule_key):
+            entry["rule_id"] = rule_key
+        else:
+            engine, _, engine_rule = rule_key.partition("/")
+            entry["engine"] = engine
+            entry["engine_rule"] = engine_rule
+        entry["path"] = path
+        entry["level"] = level
+        if count > 1:
+            entry["count"] = count
+        findings_entries.append(entry)
+
+    doc: dict[str, Any] = {
+        "schema_version": 1,
+        "branch": branch,
+    }
+    if description:
+        doc["description"] = description
+    doc["captured_at"] = (
+        datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+    )
+    if run_url:
+        doc["captured_from_run"] = run_url
+    if tooling_ref:
+        doc["tooling_ref"] = tooling_ref
+    doc["summary"] = {
+        "errors": errors,
+        "warnings": warnings,
+        "hints": hints,
+    }
+    doc["match_mode"] = "exact"
+    doc["findings"] = findings_entries
+
+    return yaml.safe_dump(doc, sort_keys=False, default_flow_style=False)
+
+
+# ---------------------------------------------------------------------------
+# Reporting
+# ---------------------------------------------------------------------------
+
+
+def render_markdown(reports: dict[str, DiffReport]) -> str:
+    """Render a per-branch PASS/FAIL summary as markdown."""
+    total = len(reports)
+    passed = sum(1 for r in reports.values() if r.passed)
+    lines: list[str] = []
+    lines.append(f"## Regression Runner — {passed}/{total} branches PASS")
+    lines.append("")
+    lines.append("| Branch | Result | Matched | Missing | Unexpected | Summary |")
+    lines.append("|---|---|---:|---:|---:|---|")
+    for branch, report in sorted(reports.items()):
+        status = "PASS" if report.passed else "FAIL"
+        summary_note = report.summary_mismatch or "-"
+        lines.append(
+            f"| `{branch}` | {status} | {report.matched} | "
+            f"{len(report.missing)} | {len(report.unexpected)} | {summary_note} |"
+        )
+    for branch, report in sorted(reports.items()):
+        if report.passed:
+            continue
+        lines.append("")
+        lines.append(f"### `{branch}` — diff detail")
+        if report.summary_mismatch:
+            lines.append(f"- **summary mismatch**: {report.summary_mismatch}")
+        for entry in report.missing:
+            lines.append(
+                f"- **missing** `{entry['rule']}` at `{entry['path']}` "
+                f"({entry['level']}): expected {entry['expected']}, actual {entry['actual']}"
+            )
+        for entry in report.unexpected:
+            lines.append(
+                f"- **unexpected** `{entry['rule']}` at `{entry['path']}` "
+                f"({entry['level']}): expected {entry['expected']}, actual {entry['actual']}"
+            )
+    return "\n".join(lines) + "\n"
+
+
+# ---------------------------------------------------------------------------
+# GitHub I/O (via gh CLI subprocess)
+# ---------------------------------------------------------------------------
+
+
+def gh(args: list[str], *, parse_json: bool = False) -> Any:
+    """Run `gh ` and return stdout (optionally JSON-parsed).
+
+    Raises InfrastructureError on non-zero exit. Stderr is captured and
+    included in the exception message for diagnosis.
+    """
+    cmd = ["gh", *args]
+    logger.debug("gh call: %s", " ".join(cmd))
+    try:
+        result = subprocess.run(
+            cmd, capture_output=True, text=True, check=True
+        )
+    except FileNotFoundError as exc:
+        raise InfrastructureError(
+            "gh CLI not found — install https://cli.github.com and run `gh auth login`"
+        ) from exc
+    except subprocess.CalledProcessError as exc:
+        raise InfrastructureError(
+            f"gh {' '.join(args)}: exit {exc.returncode}\n"
+            f"stderr: {exc.stderr.strip()}"
+        ) from exc
+    if parse_json:
+        try:
+            return json.loads(result.stdout)
+        except json.JSONDecodeError as exc:
+            raise InfrastructureError(
+                f"gh {' '.join(args)}: could not parse stdout as JSON: {exc}"
+            ) from exc
+    return result.stdout
+
+
+def list_regression_branches(repo: str, pattern: str) -> list[str]:
+    """Return branch names on *repo* matching *pattern* (fnmatch glob)."""
+    branches = gh(
+        ["api", f"repos/{repo}/branches", "--paginate", "--jq", ".[].name"]
+    )
+    names = [line.strip() for line in branches.splitlines() if line.strip()]
+    return sorted(name for name in names if fnmatch.fnmatch(name, pattern))
+
+
+def fetch_expected(repo: str, branch: str) -> dict[str, Any]:
+    """Fetch and validate `.regression/regression-expected.yaml` from *branch*."""
+    path = ".regression/regression-expected.yaml"
+    try:
+        payload = gh(
+            [
+                "api",
+                f"repos/{repo}/contents/{path}",
+                "-H", "Accept: application/vnd.github+json",
+                "--jq", ".content",
+                "-X", "GET",
+                "-f", f"ref={branch}",
+            ]
+        )
+    except InfrastructureError as exc:
+        raise InfrastructureError(
+            f"{repo}@{branch}: could not fetch {path} — {exc}"
+        ) from exc
+    content_b64 = payload.strip().replace("\n", "")
+    try:
+        text = base64.b64decode(content_b64).decode("utf-8")
+    except Exception as exc:  # noqa: BLE001 — any decode error is infra
+        raise InfrastructureError(
+            f"{repo}@{branch}: could not base64-decode {path}: {exc}"
+        ) from exc
+    return load_expected(text)
+
+
+def _resolve_tooling_ref(repo: str, tag: str) -> str:
+    """Dereference *tag* on *repo* to the underlying commit SHA.
+
+    Handles both lightweight tags (object.type == "commit") and annotated
+    tags (object.type == "tag", requiring one more dereference through
+    git/tags/{sha}).
+    """
+    ref = gh(
+        [
+            "api", f"repos/{repo}/git/refs/tags/{tag}",
+            "--jq", "[.object.type, .object.sha] | @tsv",
+        ]
+    ).strip()
+    if not ref or "\t" not in ref:
+        raise InfrastructureError(f"{repo}@{tag}: unexpected refs response: {ref!r}")
+    obj_type, obj_sha = ref.split("\t", 1)
+    if obj_type == "commit":
+        return obj_sha
+    if obj_type == "tag":
+        commit_sha = gh(
+            [
+                "api", f"repos/{repo}/git/tags/{obj_sha}",
+                "--jq", ".object.sha",
+            ]
+        ).strip()
+        if not re.match(r"^[0-9a-f]{40}$", commit_sha):
+            raise InfrastructureError(
+                f"{repo}@{tag}: dereferenced commit sha invalid: {commit_sha!r}"
+            )
+        return commit_sha
+    raise InfrastructureError(f"{repo}@{tag}: unsupported object type {obj_type!r}")
+
+
+def branch_tip_sha(repo: str, branch: str) -> str:
+    """Return the current tip SHA of *branch* on *repo*."""
+    data = gh(
+        ["api", f"repos/{repo}/branches/{branch}", "--jq", ".commit.sha"]
+    )
+    sha = data.strip()
+    if not re.match(r"^[0-9a-f]{40}$", sha):
+        raise InfrastructureError(
+            f"{repo}@{branch}: unexpected branch tip response: {sha!r}"
+        )
+    return sha
+
+
+def _iso_to_dt(stamp: str) -> datetime:
+    return datetime.strptime(stamp, "%Y-%m-%dT%H:%M:%SZ").replace(
+        tzinfo=timezone.utc
+    )
+
+
+def dispatch_validation(
+    repo: str,
+    branch: str,
+    *,
+    workflow_file: str,
+    startup_attempts: int = 15,
+    startup_interval: float = 2.0,
+) -> str:
+    """Dispatch *workflow_file* on *branch* of *repo* and return the run ID.
+
+    GitHub's `workflow run` endpoint does not return the created run ID, so
+    we record a UTC marker, call dispatch, then poll `gh run list` for a new
+    workflow_dispatch run whose `createdAt` is >= marker and whose `headSha`
+    matches the branch tip. Raises InfrastructureError on timeout.
+    """
+    sha = branch_tip_sha(repo, branch)
+    marker = datetime.now(timezone.utc).replace(microsecond=0)
+    gh(
+        [
+            "workflow", "run", workflow_file,
+            "--repo", repo,
+            "--ref", branch,
+        ]
+    )
+    logger.info("dispatched %s on %s@%s; polling for run id", workflow_file, repo, branch)
+
+    for _ in range(startup_attempts):
+        time.sleep(startup_interval)
+        runs = gh(
+            [
+                "run", "list",
+                "--repo", repo,
+                "--workflow", workflow_file,
+                "--branch", branch,
+                "--event", "workflow_dispatch",
+                "--json", "databaseId,createdAt,headSha,status,conclusion",
+                "--limit", "10",
+            ],
+            parse_json=True,
+        )
+        for run in runs:
+            try:
+                created = _iso_to_dt(run["createdAt"])
+            except (KeyError, ValueError):
+                continue
+            if created >= marker and run.get("headSha") == sha:
+                run_id = str(run["databaseId"])
+                logger.info("found dispatched run id=%s", run_id)
+                return run_id
+
+    raise InfrastructureError(
+        f"{repo}@{branch}: timed out waiting for dispatched run to appear "
+        f"(polled {startup_attempts} times)"
+    )
+
+
+def poll_run(
+    repo: str,
+    run_id: str,
+    *,
+    interval: int,
+    timeout: int,
+) -> str:
+    """Wait until *run_id* completes; return its conclusion string.
+
+    Raises InfrastructureError on timeout. A conclusion of "success" is the
+    only value that guarantees artifacts are ready; other conclusions still
+    produce a result and are returned for the caller to decide.
+    """
+    deadline = time.monotonic() + timeout
+    while True:
+        data = gh(
+            [
+                "run", "view", run_id,
+                "--repo", repo,
+                "--json", "status,conclusion",
+            ],
+            parse_json=True,
+        )
+        status = data.get("status")
+        conclusion = data.get("conclusion") or ""
+        logger.debug("run %s status=%s conclusion=%s", run_id, status, conclusion)
+        if status == "completed":
+            return conclusion
+        if time.monotonic() >= deadline:
+            raise InfrastructureError(
+                f"run {run_id} did not complete within {timeout}s "
+                f"(last status={status})"
+            )
+        time.sleep(interval)
+
+
+def download_findings(
+    repo: str,
+    run_id: str,
+    workdir: Path,
+    artifact_name: str = "validation-diagnostics",
+) -> tuple[list[dict[str, Any]], dict[str, Any] | None]:
+    """Download the validation-diagnostics artifact and load findings + summary.
+
+    Returns (findings_list, summary_dict_or_None). Raises InfrastructureError
+    if the artifact is missing or findings.json is not parseable.
+    """
+    workdir.mkdir(parents=True, exist_ok=True)
+    gh(
+        [
+            "run", "download", run_id,
+            "--repo", repo,
+            "--name", artifact_name,
+            "--dir", str(workdir),
+        ]
+    )
+    findings_path = workdir / "findings.json"
+    if not findings_path.exists():
+        # gh run download strips the artifact name from the path if --name is
+        # passed; but some versions preserve it. Check both.
+        nested = workdir / artifact_name / "findings.json"
+        if nested.exists():
+            findings_path = nested
+    if not findings_path.exists():
+        raise InfrastructureError(
+            f"findings.json not found in downloaded artifact at {workdir}"
+        )
+    try:
+        findings = json.loads(findings_path.read_text(encoding="utf-8"))
+    except json.JSONDecodeError as exc:
+        raise InfrastructureError(
+            f"findings.json is not valid JSON: {exc}"
+        ) from exc
+    if not isinstance(findings, list):
+        raise InfrastructureError(
+            f"findings.json root is not a list (got {type(findings).__name__})"
+        )
+
+    summary_path = findings_path.parent / "summary.json"
+    summary: dict[str, Any] | None = None
+    if summary_path.exists():
+        try:
+            summary = json.loads(summary_path.read_text(encoding="utf-8"))
+        except json.JSONDecodeError:
+            summary = None
+    return findings, summary
+
+
+# ---------------------------------------------------------------------------
+# Orchestration
+# ---------------------------------------------------------------------------
+
+
+def run_branch(
+    repo: str,
+    branch: str,
+    *,
+    workflow_file: str,
+    poll_interval: int,
+    poll_timeout: int,
+) -> DiffReport:
+    """Full per-branch check: fetch expected, dispatch, poll, download, diff."""
+    logger.info("[%s] fetching expected fixture", branch)
+    expected = fetch_expected(repo, branch)
+
+    logger.info("[%s] dispatching validation workflow", branch)
+    run_id = dispatch_validation(repo, branch, workflow_file=workflow_file)
+
+    logger.info("[%s] polling run %s", branch, run_id)
+    conclusion = poll_run(repo, run_id, interval=poll_interval, timeout=poll_timeout)
+    if conclusion not in {"success", "failure", "neutral"}:
+        raise InfrastructureError(
+            f"[{branch}] unexpected run conclusion: {conclusion}"
+        )
+
+    with tempfile.TemporaryDirectory(prefix="vf-regression-") as td:
+        workdir = Path(td)
+        logger.info("[%s] downloading diagnostics into %s", branch, workdir)
+        actual, summary = download_findings(repo, run_id, workdir)
+
+    report = diff_findings(expected, actual, actual_summary=summary)
+    report.branch = branch
+    return report
+
+
+def capture_branch(
+    repo: str,
+    branch: str,
+    *,
+    out_path: Path,
+    workflow_file: str,
+    poll_interval: int,
+    poll_timeout: int,
+    description: str | None,
+) -> Path:
+    """Dispatch the VF, download findings, and write a fresh expected fixture.
+
+    Writes to *out_path*; the caller commits it to the branch after review.
+    """
+    logger.info("[%s] CAPTURE: dispatching workflow", branch)
+    run_id = dispatch_validation(repo, branch, workflow_file=workflow_file)
+    logger.info("[%s] CAPTURE: polling run %s", branch, run_id)
+    conclusion = poll_run(repo, run_id, interval=poll_interval, timeout=poll_timeout)
+    logger.info("[%s] CAPTURE: run completed (%s)", branch, conclusion)
+
+    with tempfile.TemporaryDirectory(prefix="vf-capture-") as td:
+        workdir = Path(td)
+        actual, _summary = download_findings(repo, run_id, workdir)
+
+    # Resolve the tooling_ref the run used. Best-effort: dereference the
+    # current v1-rc tag to the underlying commit SHA. v1-rc is annotated, so
+    # the ref returns a tag object that must be dereferenced once more.
+    tooling_ref: str | None
+    try:
+        tooling_ref = _resolve_tooling_ref("camaraproject/tooling", "v1-rc")
+    except InfrastructureError:
+        tooling_ref = None
+
+    run_url = f"https://github.com/{repo}/actions/runs/{run_id}"
+
+    text = capture_to_yaml(
+        actual,
+        branch=branch,
+        run_url=run_url,
+        tooling_ref=tooling_ref,
+        description=description,
+    )
+    out_path.parent.mkdir(parents=True, exist_ok=True)
+    out_path.write_text(text, encoding="utf-8")
+    logger.info("[%s] CAPTURE: wrote %d findings to %s", branch, len(actual), out_path)
+    return out_path
+
+
+# ---------------------------------------------------------------------------
+# CLI
+# ---------------------------------------------------------------------------
+
+
+def _build_argparser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(
+        prog="regression_runner.py",
+        description=__doc__,
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+    )
+    parser.add_argument(
+        "--repo",
+        required=True,
+        help="owner/repo of the test repository (e.g. camaraproject/ReleaseTest)",
+    )
+    parser.add_argument(
+        "--branch-filter",
+        default="regression/*",
+        help="fnmatch glob over branch names (default: %(default)s)",
+    )
+    parser.add_argument(
+        "--workflow-file",
+        default="camara-validation.yml",
+        help="caller workflow filename in the test repo (default: %(default)s)",
+    )
+    parser.add_argument(
+        "--poll-interval",
+        type=int,
+        default=15,
+        help="seconds between run status polls (default: %(default)s)",
+    )
+    parser.add_argument(
+        "--poll-timeout",
+        type=int,
+        default=1800,
+        help="max seconds to wait for a run to complete (default: %(default)s)",
+    )
+    parser.add_argument(
+        "--summary-file",
+        type=Path,
+        help="write a markdown summary report to this path",
+    )
+    parser.add_argument(
+        "--capture",
+        metavar="BRANCH",
+        help="CAPTURE MODE: dispatch against BRANCH and write a fresh "
+             "regression-expected.yaml to --out (skips diff/reporting)",
+    )
+    parser.add_argument(
+        "--out",
+        type=Path,
+        help="output path for --capture mode",
+    )
+    parser.add_argument(
+        "--capture-description",
+        help="description field for captured fixture (optional)",
+    )
+    parser.add_argument(
+        "-v", "--verbose",
+        action="store_true",
+        help="verbose logging",
+    )
+    return parser
+
+
+def _setup_logging(verbose: bool) -> None:
+    logging.basicConfig(
+        level=logging.DEBUG if verbose else logging.INFO,
+        format="%(asctime)s %(levelname)-5s %(message)s",
+        datefmt="%H:%M:%S",
+    )
+
+
+def main(argv: list[str] | None = None) -> int:
+    args = _build_argparser().parse_args(argv)
+    _setup_logging(args.verbose)
+
+    try:
+        if args.capture:
+            if not args.out:
+                print("error: --capture requires --out", file=sys.stderr)
+                return 2
+            capture_branch(
+                args.repo,
+                args.capture,
+                out_path=args.out,
+                workflow_file=args.workflow_file,
+                poll_interval=args.poll_interval,
+                poll_timeout=args.poll_timeout,
+                description=args.capture_description,
+            )
+            print(f"CAPTURE OK: wrote {args.out}")
+            return 0
+
+        branches = list_regression_branches(args.repo, args.branch_filter)
+        if not branches:
+            print(
+                f"No branches on {args.repo} match filter "
+                f"{args.branch_filter!r}",
+                file=sys.stderr,
+            )
+            return 2
+        logger.info("matched %d branch(es): %s", len(branches), ", ".join(branches))
+
+        reports: dict[str, DiffReport] = {}
+        for branch in branches:
+            report = run_branch(
+                args.repo,
+                branch,
+                workflow_file=args.workflow_file,
+                poll_interval=args.poll_interval,
+                poll_timeout=args.poll_timeout,
+            )
+            reports[branch] = report
+
+    except InfrastructureError as exc:
+        print(f"INFRA: {exc}", file=sys.stderr)
+        return 2
+
+    markdown = render_markdown(reports)
+    print(markdown)
+    if args.summary_file:
+        args.summary_file.write_text(markdown, encoding="utf-8")
+
+    passed = all(r.passed for r in reports.values())
+    total = len(reports)
+    passing = sum(1 for r in reports.values() if r.passed)
+    print(f"{'PASS' if passed else 'FAIL'}: {passing}/{total} branches", file=sys.stderr)
+    return 0 if passed else 1
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/validation/tests/test_regression_runner.py b/validation/tests/test_regression_runner.py
new file mode 100644
index 00000000..6681a427
--- /dev/null
+++ b/validation/tests/test_regression_runner.py
@@ -0,0 +1,448 @@
+"""Unit tests for validation.scripts.regression_runner.
+
+Covers pure-logic functions only: loader + schema validation, match-key
+normalization, diff semantics (exact/subset, counts, summary mismatch),
+capture→load round-trip, and markdown rendering. GitHub I/O helpers are
+verified manually during integration.
+"""
+
+from __future__ import annotations
+
+import importlib.util
+import sys
+from pathlib import Path
+
+import pytest
+import yaml
+
+# validation/scripts/ is not a package — load the module directly.
+_ROOT = Path(__file__).resolve().parents[2]
+_MODULE_PATH = _ROOT / "validation" / "scripts" / "regression_runner.py"
+_spec = importlib.util.spec_from_file_location("regression_runner", _MODULE_PATH)
+assert _spec is not None and _spec.loader is not None
+regression_runner = importlib.util.module_from_spec(_spec)
+sys.modules["regression_runner"] = regression_runner
+_spec.loader.exec_module(regression_runner)
+
+
+InfrastructureError = regression_runner.InfrastructureError
+DiffReport = regression_runner.DiffReport
+load_expected = regression_runner.load_expected
+normalize_finding = regression_runner.normalize_finding
+diff_findings = regression_runner.diff_findings
+capture_to_yaml = regression_runner.capture_to_yaml
+render_markdown = regression_runner.render_markdown
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _make_finding(
+    *,
+    rule_id: str | None = "S-042",
+    engine: str = "spectral",
+    engine_rule: str = "operation-tag-defined",
+    path: str = "code/API_definitions/sample-service.yaml",
+    line: int = 12,
+    level: str = "hint",
+    message: str = "Operation tag is not defined",
+) -> dict:
+    finding: dict = {
+        "engine": engine,
+        "engine_rule": engine_rule,
+        "level": level,
+        "message": message,
+        "path": path,
+        "line": line,
+    }
+    if rule_id is not None:
+        finding["rule_id"] = rule_id
+    return finding
+
+
+def _valid_fixture() -> dict:
+    return {
+        "schema_version": 1,
+        "branch": "regression/r4.1-main-baseline",
+        "description": "baseline",
+        "summary": {"errors": 0, "warnings": 0, "hints": 2},
+        "match_mode": "exact",
+        "findings": [
+            {
+                "rule_id": "S-042",
+                "path": "code/API_definitions/sample-service.yaml",
+                "level": "hint",
+                "count": 2,
+            },
+            {
+                "engine": "spectral",
+                "engine_rule": "oas3-api-servers",
+                "path": "code/API_definitions/sample-service.yaml",
+                "level": "hint",
+            },
+        ],
+    }
+
+
+def _dump(doc: dict) -> str:
+    return yaml.safe_dump(doc, sort_keys=False)
+
+
+# ---------------------------------------------------------------------------
+# load_expected
+# ---------------------------------------------------------------------------
+
+
+class TestLoadExpected:
+    def test_happy_path(self) -> None:
+        data = load_expected(_dump(_valid_fixture()))
+        assert data["branch"] == "regression/r4.1-main-baseline"
+        assert len(data["findings"]) == 2
+
+    def test_missing_schema_version(self) -> None:
+        fixture = _valid_fixture()
+        del fixture["schema_version"]
+        with pytest.raises(InfrastructureError, match="schema_version"):
+            load_expected(_dump(fixture))
+
+    def test_invalid_rule_id_pattern(self) -> None:
+        fixture = _valid_fixture()
+        fixture["findings"][0]["rule_id"] = "foo-123"
+        with pytest.raises(InfrastructureError, match="foo-123"):
+            load_expected(_dump(fixture))
+
+    def test_neither_rule_id_nor_engine_rule(self) -> None:
+        fixture = _valid_fixture()
+        fixture["findings"] = [
+            {
+                "path": "some.yaml",
+                "level": "error",
+            }
+        ]
+        with pytest.raises(InfrastructureError):
+            load_expected(_dump(fixture))
+
+    def test_duplicate_match_key_rejected(self) -> None:
+        fixture = _valid_fixture()
+        fixture["findings"] = [
+            {
+                "rule_id": "S-042",
+                "path": "x.yaml",
+                "level": "hint",
+            },
+            {
+                "rule_id": "S-042",
+                "path": "x.yaml",
+                "level": "hint",
+                "count": 2,
+            },
+        ]
+        with pytest.raises(InfrastructureError, match="duplicate"):
+            load_expected(_dump(fixture))
+
+    def test_invalid_yaml_root(self) -> None:
+        with pytest.raises(InfrastructureError, match="YAML mapping"):
+            load_expected("- not-a-mapping\n")
+
+    def test_invalid_match_mode(self) -> None:
+        fixture = _valid_fixture()
+        fixture["match_mode"] = "wibble"
+        with pytest.raises(InfrastructureError):
+            load_expected(_dump(fixture))
+
+
+# ---------------------------------------------------------------------------
+# normalize_finding
+# ---------------------------------------------------------------------------
+
+
+class TestNormalizeFinding:
+    def test_strips_line_column_message(self) -> None:
+        a = _make_finding(line=10, message="x")
+        b = _make_finding(line=200, message="completely different message")
+        assert normalize_finding(a) == normalize_finding(b)
+
+    def test_uses_rule_id_when_present(self) -> None:
+        f = _make_finding(rule_id="P-007")
+        key = normalize_finding(f)
+        assert key[0] == "P-007"
+
+    def test_engine_rule_fallback_when_rule_id_absent(self) -> None:
+        f = _make_finding(rule_id=None, engine="python", engine_rule="my-check")
+        key = normalize_finding(f)
+        assert key[0] == "python/my-check"
+
+
+# ---------------------------------------------------------------------------
+# diff_findings
+# ---------------------------------------------------------------------------
+
+
+class TestDiffFindings:
+    def test_zero_vs_zero(self) -> None:
+        expected = load_expected(_dump({
+            "schema_version": 1,
+            "branch": "regression/empty",
+            "match_mode": "exact",
+            "findings": [],
+        }))
+        report = diff_findings(expected, [])
+        assert report.passed
+        assert report.matched == 0
+
+    def test_baseline_clean_match(self) -> None:
+        fixture = _valid_fixture()
+        # Three actual hints total: S-042 ×2 + oas3-api-servers ×1
+        fixture["summary"] = {"errors": 0, "warnings": 0, "hints": 3}
+        expected = load_expected(_dump(fixture))
+        actual = [
+            _make_finding(rule_id="S-042", line=10),
+            _make_finding(rule_id="S-042", line=20),
+            _make_finding(
+                rule_id=None, engine="spectral", engine_rule="oas3-api-servers"
+            ),
+        ]
+        report = diff_findings(
+            expected,
+            actual,
+            actual_summary={
+                "counts": {
+                    "errors": 0, "warnings": 0, "hints": 3,
+                    "total": 3, "blocking": 0,
+                }
+            },
+        )
+        assert report.passed
+        assert report.matched == 3
+        assert not report.missing
+        assert not report.unexpected
+        assert report.summary_mismatch is None
+
+    def test_missing_finding(self) -> None:
+        expected = load_expected(_dump({
+            "schema_version": 1,
+            "branch": "regression/x",
+            "match_mode": "exact",
+            "findings": [
+                {"rule_id": "S-042", "path": "a.yaml", "level": "hint"},
+            ],
+        }))
+        report = diff_findings(expected, [])
+        assert not report.passed
+        assert len(report.missing) == 1
+        assert report.missing[0]["rule"] == "S-042"
+        assert report.missing[0]["expected"] == 1
+        assert report.missing[0]["actual"] == 0
+
+    def test_unexpected_extra_exact_mode(self) -> None:
+        expected = load_expected(_dump({
+            "schema_version": 1,
+            "branch": "regression/x",
+            "match_mode": "exact",
+            "findings": [],
+        }))
+        actual = [_make_finding(rule_id="S-042")]
+        report = diff_findings(expected, actual)
+        assert not report.passed
+        assert len(report.unexpected) == 1
+        assert report.unexpected[0]["rule"] == "S-042"
+
+    def test_unexpected_extra_subset_mode(self) -> None:
+        expected = load_expected(_dump({
+            "schema_version": 1,
+            "branch": "regression/x",
+            "match_mode": "subset",
+            "findings": [],
+        }))
+        actual = [_make_finding(rule_id="S-042")]
+        report = diff_findings(expected, actual)
+        assert report.passed
+        assert not report.unexpected
+
+    def test_count_shortfall(self) -> None:
+        expected = load_expected(_dump({
+            "schema_version": 1,
+            "branch": "regression/x",
+            "match_mode": "exact",
+            "findings": [
+                {"rule_id": "S-042", "path": "a.yaml", "level": "hint", "count": 3},
+            ],
+        }))
+        actual = [
+            _make_finding(rule_id="S-042", path="a.yaml"),
+            _make_finding(rule_id="S-042", path="a.yaml"),
+        ]
+        report = diff_findings(expected, actual)
+        assert not report.passed
+        assert len(report.missing) == 1
+        assert report.missing[0]["expected"] == 3
+        assert report.missing[0]["actual"] == 2
+
+    def test_count_surplus_exact_mode(self) -> None:
+        expected = load_expected(_dump({
+            "schema_version": 1,
+            "branch": "regression/x",
+            "match_mode": "exact",
+            "findings": [
+                {"rule_id": "S-042", "path": "a.yaml", "level": "hint", "count": 2},
+            ],
+        }))
+        actual = [
+            _make_finding(rule_id="S-042", path="a.yaml"),
+            _make_finding(rule_id="S-042", path="a.yaml"),
+            _make_finding(rule_id="S-042", path="a.yaml"),
+        ]
+        report = diff_findings(expected, actual)
+        assert not report.passed
+        assert len(report.unexpected) == 1
+        assert report.unexpected[0]["expected"] == 2
+        assert report.unexpected[0]["actual"] == 3
+
+    def test_count_surplus_subset_mode(self) -> None:
+        expected = load_expected(_dump({
+            "schema_version": 1,
+            "branch": "regression/x",
+            "match_mode": "subset",
+            "findings": [
+                {"rule_id": "S-042", "path": "a.yaml", "level": "hint", "count": 2},
+            ],
+        }))
+        actual = [
+            _make_finding(rule_id="S-042", path="a.yaml"),
+            _make_finding(rule_id="S-042", path="a.yaml"),
+            _make_finding(rule_id="S-042", path="a.yaml"),
+        ]
+        report = diff_findings(expected, actual)
+        assert report.passed
+        assert report.matched == 2  # min(expected=2, actual=3)
+
+    def test_summary_mismatch_on_counts(self) -> None:
+        expected = load_expected(_dump({
+            "schema_version": 1,
+            "branch": "regression/x",
+            "match_mode": "exact",
+            "summary": {"errors": 0, "warnings": 0, "hints": 0},
+            "findings": [],
+        }))
+        report = diff_findings(expected, [], actual_summary={
+            "counts": {"errors": 1, "warnings": 0, "hints": 0, "total": 1, "blocking": 1}
+        })
+        assert not report.passed
+        assert report.summary_mismatch is not None
+        assert "errors" in report.summary_mismatch
+
+    def test_scrambled_order_still_matches(self) -> None:
+        expected = load_expected(_dump({
+            "schema_version": 1,
+            "branch": "regression/x",
+            "match_mode": "exact",
+            "findings": [
+                {"rule_id": "S-042", "path": "a.yaml", "level": "hint"},
+                {"rule_id": "P-007", "path": "b.yaml", "level": "warn"},
+            ],
+        }))
+        actual = [
+            _make_finding(rule_id="P-007", path="b.yaml", level="warn"),
+            _make_finding(rule_id="S-042", path="a.yaml", level="hint"),
+        ]
+        report = diff_findings(expected, actual)
+        assert report.passed
+
+
+# ---------------------------------------------------------------------------
+# capture_to_yaml
+# ---------------------------------------------------------------------------
+
+
+class TestCaptureToYaml:
+    def test_roundtrip(self) -> None:
+        actual = [
+            _make_finding(rule_id="S-042", path="a.yaml", level="hint"),
+            _make_finding(rule_id="S-042", path="a.yaml", level="hint"),
+            _make_finding(rule_id=None, engine="python",
+                          engine_rule="check-x", path="b.yaml", level="warn"),
+        ]
+        text = capture_to_yaml(
+            actual,
+            branch="regression/r4.1-main-baseline",
+            run_url="https://github.com/camaraproject/ReleaseTest/actions/runs/1",
+            tooling_ref="b4c1c3e0000000000000000000000000000000b4",
+            description="baseline",
+        )
+        # Round-trips through the loader + schema
+        data = load_expected(text)
+        assert data["branch"] == "regression/r4.1-main-baseline"
+        assert data["summary"]["warnings"] == 1
+        assert data["summary"]["hints"] == 2
+        # Duplicates collapsed into count
+        rule_042 = next(f for f in data["findings"] if f.get("rule_id") == "S-042")
+        assert rule_042["count"] == 2
+        # Engine-rule entry uses engine+engine_rule fields, not rule_id
+        python_entry = next(
+            f for f in data["findings"]
+            if f.get("engine") == "python"
+        )
+        assert python_entry["engine_rule"] == "check-x"
+        assert "rule_id" not in python_entry
+
+    def test_deterministic_output(self) -> None:
+        actual = [
+            _make_finding(rule_id="S-099", path="z.yaml", level="hint"),
+            _make_finding(rule_id="S-001", path="a.yaml", level="hint"),
+        ]
+        text1 = capture_to_yaml(
+            actual, branch="regression/x", run_url=None, tooling_ref=None,
+        )
+        text2 = capture_to_yaml(
+            list(reversed(actual)), branch="regression/x", run_url=None, tooling_ref=None,
+        )
+        # captured_at differs by timestamp but findings ordering should match
+        doc1 = yaml.safe_load(text1)
+        doc2 = yaml.safe_load(text2)
+        assert doc1["findings"] == doc2["findings"]
+        # Sorted by rule_key
+        assert doc1["findings"][0]["rule_id"] == "S-001"
+        assert doc1["findings"][1]["rule_id"] == "S-099"
+
+
+# ---------------------------------------------------------------------------
+# render_markdown
+# ---------------------------------------------------------------------------
+
+
+class TestRenderMarkdown:
+    def test_mixed_pass_fail(self) -> None:
+        pass_report = DiffReport(
+            branch="regression/clean", match_mode="exact", matched=5,
+        )
+        fail_report = DiffReport(
+            branch="regression/broken",
+            match_mode="exact",
+            matched=1,
+            missing=[{"rule": "S-042", "path": "a.yaml", "level": "hint",
+                      "expected": 2, "actual": 1}],
+            unexpected=[{"rule": "S-099", "path": "z.yaml", "level": "warn",
+                         "expected": 0, "actual": 1}],
+        )
+        text = render_markdown({
+            "regression/clean": pass_report,
+            "regression/broken": fail_report,
+        })
+        assert "1/2 branches PASS" in text
+        assert "`regression/clean` | PASS" in text
+        assert "`regression/broken` | FAIL" in text
+        assert "missing" in text
+        assert "unexpected" in text
+        assert "S-042" in text
+        assert "S-099" in text
+
+    def test_all_pass_no_detail_section(self) -> None:
+        report = DiffReport(
+            branch="regression/clean", match_mode="exact", matched=27,
+        )
+        text = render_markdown({"regression/clean": report})
+        assert "1/1 branches PASS" in text
+        assert "PASS" in text
+        assert "diff detail" not in text

From dab3e9ffd47a2c4afffa410d0db84e3c78b07dc1 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Tue, 14 Apr 2026 09:35:24 +0200
Subject: [PATCH 075/157] Read tooling_ref from context.json instead of
 querying v1-rc
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The previous capture path queried camaraproject/tooling for the current
v1-rc tag SHA and embedded that as the fixture's tooling_ref. That's
wrong for the ReleaseTest canary, whose caller workflow targets
@validation-framework rather than @v1-rc — runs there pin to whatever
HEAD that branch points at, which can advance independently of v1-rc.

The orchestrator already records the actually-resolved tooling SHA in
its ValidationContext (context.json field tooling_ref), so download the
context alongside findings and summary, and read the SHA from there.
That's correct regardless of which ref the caller targets and removes
the need for a separate v1-rc API round-trip and annotated-tag
dereference.

download_findings() now returns a 3-tuple (findings, summary, context)
to surface the context dict to capture_branch. run_branch ignores the
context (it doesn't need it).

README updated to spell out the two cases (dark repos pinned to v1-rc
vs ReleaseTest pinned to validation-framework HEAD) and the fixture
maintenance flow on the canary.

Verified by recapturing against regression/r4.1-main-baseline:
[run 24386656531](https://github.com/camaraproject/ReleaseTest/actions/runs/24386656531).
The new fixture is byte-identical to the previous one except for
captured_at and captured_from_run; the tooling_ref still resolves to
b4c1c3e9b3f958b3df4d330ae61af19ab6eff22b because v1-rc and
validation-framework HEAD currently coincide.

Tests still green: 24/24 regression_runner unit tests, 832/832 full
validation suite.
---
 validation/scripts/README.md            | 43 ++++++++++---
 validation/scripts/regression_runner.py | 83 +++++++++----------------
 2 files changed, 63 insertions(+), 63 deletions(-)

diff --git a/validation/scripts/README.md b/validation/scripts/README.md
index aa428069..f0d01a9d 100644
--- a/validation/scripts/README.md
+++ b/validation/scripts/README.md
@@ -73,16 +73,39 @@ Review the generated file, commit it to the branch at
   `summary.json.counts` before per-finding diffing; any mismatch there is a
   separate failure axis.
 
-### Tooling ref pinning (known constraint)
-
-The caller workflow hardcodes `uses: camaraproject/tooling/.github/workflows/validation.yml@v1-rc`
-and does not forward `workflow_dispatch` inputs to the reusable. OIDC
-resolution inside the reusable therefore locks to whatever commit `v1-rc`
-currently points at — a local `gh workflow run` cannot override this.
-
-Fixtures are implicitly pinned to that ref. Record the current `v1-rc` SHA in
-each branch's `REGRESSION.md` (`gh api repos/camaraproject/tooling/git/refs/tags/v1-rc --jq '.object.sha'`).
-If `v1-rc` moves, recapture the fixtures.
+### Tooling ref the run actually used
+
+Each test repo's caller workflow hardcodes the tooling ref it consumes, and
+does not forward `workflow_dispatch` inputs. A local `gh workflow run` can't
+override which tooling SHA runs server-side; OIDC inside the reusable
+inherits `job_workflow_ref` from the caller's hardcoded reference.
+
+Two cases in the wild:
+
+- **Dark / production API repos** — caller targets
+  `camaraproject/tooling/.github/workflows/validation.yml@v1-rc`. Each run
+  pins to whatever commit `v1-rc` currently points at; the SHA only changes
+  when the tag is moved (a deliberate, repo-wide release event).
+- **`camaraproject/ReleaseTest` (canary)** — caller targets
+  `...@validation-framework`. Each run pins to the current HEAD of the
+  `validation-framework` branch. Every push to that branch can change what
+  the runner sees here, *before* `v1-rc` is moved for the rest of the org.
+  This is the intentional canary surface for changes under development.
+
+The runner records the **actually used** SHA into the captured fixture by
+reading `tooling_ref` from `context.json` in the diagnostics artifact. That
+field comes from the orchestrator's own resolved context, so it's correct
+regardless of which ref the caller targeted.
+
+Implications for fixture maintenance:
+
+- For ReleaseTest fixtures: any merge to `validation-framework` that changes
+  findings against the same specs will produce a FAIL on the next runner
+  invocation. That's the canary working as designed. Triage the diff:
+  - Intended rule/engine change → recapture the fixture (`--capture`),
+    review the new findings, commit.
+  - Unintended regression → fix the code on `validation-framework`, re-run.
+- For dark-repo fixtures (none today): only stale after `v1-rc` moves.
 
 ### Troubleshooting
 
diff --git a/validation/scripts/regression_runner.py b/validation/scripts/regression_runner.py
index 4866d475..21be5b0f 100644
--- a/validation/scripts/regression_runner.py
+++ b/validation/scripts/regression_runner.py
@@ -466,39 +466,6 @@ def fetch_expected(repo: str, branch: str) -> dict[str, Any]:
     return load_expected(text)
 
 
-def _resolve_tooling_ref(repo: str, tag: str) -> str:
-    """Dereference *tag* on *repo* to the underlying commit SHA.
-
-    Handles both lightweight tags (object.type == "commit") and annotated
-    tags (object.type == "tag", requiring one more dereference through
-    git/tags/{sha}).
-    """
-    ref = gh(
-        [
-            "api", f"repos/{repo}/git/refs/tags/{tag}",
-            "--jq", "[.object.type, .object.sha] | @tsv",
-        ]
-    ).strip()
-    if not ref or "\t" not in ref:
-        raise InfrastructureError(f"{repo}@{tag}: unexpected refs response: {ref!r}")
-    obj_type, obj_sha = ref.split("\t", 1)
-    if obj_type == "commit":
-        return obj_sha
-    if obj_type == "tag":
-        commit_sha = gh(
-            [
-                "api", f"repos/{repo}/git/tags/{obj_sha}",
-                "--jq", ".object.sha",
-            ]
-        ).strip()
-        if not re.match(r"^[0-9a-f]{40}$", commit_sha):
-            raise InfrastructureError(
-                f"{repo}@{tag}: dereferenced commit sha invalid: {commit_sha!r}"
-            )
-        return commit_sha
-    raise InfrastructureError(f"{repo}@{tag}: unsupported object type {obj_type!r}")
-
-
 def branch_tip_sha(repo: str, branch: str) -> str:
     """Return the current tip SHA of *branch* on *repo*."""
     data = gh(
@@ -615,11 +582,13 @@ def download_findings(
     run_id: str,
     workdir: Path,
     artifact_name: str = "validation-diagnostics",
-) -> tuple[list[dict[str, Any]], dict[str, Any] | None]:
-    """Download the validation-diagnostics artifact and load findings + summary.
+) -> tuple[list[dict[str, Any]], dict[str, Any] | None, dict[str, Any] | None]:
+    """Download the validation-diagnostics artifact and load findings, summary, context.
 
-    Returns (findings_list, summary_dict_or_None). Raises InfrastructureError
-    if the artifact is missing or findings.json is not parseable.
+    Returns (findings_list, summary_dict_or_None, context_dict_or_None).
+    Raises InfrastructureError if the artifact is missing or findings.json is
+    not parseable. Summary and context are best-effort: if they fail to load,
+    the corresponding return value is None.
     """
     workdir.mkdir(parents=True, exist_ok=True)
     gh(
@@ -652,14 +621,19 @@ def download_findings(
             f"findings.json root is not a list (got {type(findings).__name__})"
         )
 
-    summary_path = findings_path.parent / "summary.json"
-    summary: dict[str, Any] | None = None
-    if summary_path.exists():
+    def _load_optional(name: str) -> dict[str, Any] | None:
+        path = findings_path.parent / name
+        if not path.exists():
+            return None
         try:
-            summary = json.loads(summary_path.read_text(encoding="utf-8"))
+            data = json.loads(path.read_text(encoding="utf-8"))
         except json.JSONDecodeError:
-            summary = None
-    return findings, summary
+            return None
+        return data if isinstance(data, dict) else None
+
+    summary = _load_optional("summary.json")
+    context = _load_optional("context.json")
+    return findings, summary, context
 
 
 # ---------------------------------------------------------------------------
@@ -692,7 +666,7 @@ def run_branch(
     with tempfile.TemporaryDirectory(prefix="vf-regression-") as td:
         workdir = Path(td)
         logger.info("[%s] downloading diagnostics into %s", branch, workdir)
-        actual, summary = download_findings(repo, run_id, workdir)
+        actual, summary, _context = download_findings(repo, run_id, workdir)
 
     report = diff_findings(expected, actual, actual_summary=summary)
     report.branch = branch
@@ -721,15 +695,18 @@ def capture_branch(
 
     with tempfile.TemporaryDirectory(prefix="vf-capture-") as td:
         workdir = Path(td)
-        actual, _summary = download_findings(repo, run_id, workdir)
-
-    # Resolve the tooling_ref the run used. Best-effort: dereference the
-    # current v1-rc tag to the underlying commit SHA. v1-rc is annotated, so
-    # the ref returns a tag object that must be dereferenced once more.
-    tooling_ref: str | None
-    try:
-        tooling_ref = _resolve_tooling_ref("camaraproject/tooling", "v1-rc")
-    except InfrastructureError:
+        actual, _summary, context = download_findings(repo, run_id, workdir)
+
+    # The actually-used tooling SHA comes from the validation context
+    # written by the orchestrator. This is the canonical answer and works
+    # regardless of which ref the caller targets (@v1-rc on dark repos,
+    # @validation-framework HEAD on the ReleaseTest canary, etc.).
+    tooling_ref: str | None = (context or {}).get("tooling_ref") or None
+    if tooling_ref and not re.match(r"^[0-9a-f]{40}$", tooling_ref):
+        logger.warning(
+            "context.json tooling_ref is not a 40-char SHA: %r — omitting from fixture",
+            tooling_ref,
+        )
         tooling_ref = None
 
     run_url = f"https://github.com/{repo}/actions/runs/{run_id}"

From a9194881a3db5ef15d2fd09876ee61005b343fd8 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Tue, 14 Apr 2026 10:12:37 +0200
Subject: [PATCH 076/157] Add validation/docs/ and a regression testing manual

The runner's README.md was the only place that documented the regression
framework, and it had grown into a mix of motivation, design rationale,
and CLI reference. None of those things belong in the same document.

Introduce validation/docs/ as the home for longer-form, audience-targeted
documentation about the validation framework, starting with a manual for
the regression testing infrastructure. The manual covers:

- Why the framework exists (problem statement, two failure modes the
  unit tests don't catch)
- The canary metaphor and what we achieve
- Both flavours of regression branches (known-good baselines and
  known-bad targeted branches), framed as "expected verdicts" rather
  than "spec quality"
- The ReleaseTest @validation-framework vs production @v1-rc distinction
- Component layout and the fixture format
- Match-key semantics, including the deliberate exclusions
- Day-to-day workflows: verify, recapture, add a new regression branch,
  update tested_rules
- Sharp edges and known limitations
- Cross-references to the schema, runner, rule inventory, and the
  upstream tracking issue (RM#483)

Slim validation/scripts/README.md down to a CLI reference: prerequisites,
verify and capture invocations, exit codes, troubleshooting. The
substantive material moves to the manual; the README links to it
prominently at the top of its regression_runner.py section.

validation/docs/ is positioned to grow more audience-targeted documents
over time (rule-developer guide, framework architecture overview, etc.)
without bloating the per-script README files.
---
 validation/docs/regression-testing.md | 311 ++++++++++++++++++++++++++
 validation/scripts/README.md          |  98 +++-----
 2 files changed, 343 insertions(+), 66 deletions(-)
 create mode 100644 validation/docs/regression-testing.md

diff --git a/validation/docs/regression-testing.md b/validation/docs/regression-testing.md
new file mode 100644
index 00000000..5c3a25ae
--- /dev/null
+++ b/validation/docs/regression-testing.md
@@ -0,0 +1,311 @@
+# Regression Testing for the Validation Framework
+
+A safety net for evolving the validation framework with confidence.
+
+## Why it exists
+
+The Validation Framework will be the gate that decides whether a CAMARA API
+release proceeds. As new rules land and existing ones evolve, two failure
+modes have to stay out of the framework:
+
+1. A rule starts firing where it shouldn't. Codeowners drown in noise and
+   lose trust in the tool.
+2. A rule stops firing where it should. Codeowners ship a defect and nobody
+   noticed.
+
+Unit tests catch implementation bugs in individual checks. They don't catch
+the cumulative behaviour of the whole framework against real API specs.
+Regression testing closes that gap.
+
+## What it does
+
+Think of a canary in a coal mine.
+
+A curated set of CAMARA-style API specs lives on regression branches in
+`camaraproject/ReleaseTest`. Two flavours, both useful:
+
+- **Known-good baselines** — clean specs paired with the (small) set of
+  advisory findings the framework legitimately produces against them.
+  These verify that "clean stays clean": no new false positives creep in
+  as rules evolve.
+- **Known-bad targeted branches** — specs containing intentional defects
+  paired with the specific findings those defects must trigger. These
+  verify that "broken stays broken": rules don't silently stop catching
+  the things they were written to catch.
+
+What's frozen on each branch is not the quality of the spec but the
+**expected verdict** the framework should deliver about it. Each branch
+ships with a `regression-expected.yaml` fixture that lists exactly which
+findings the framework should produce.
+
+A runner script dispatches the validation framework against each regression
+branch, downloads the findings, and diffs them against the committed
+expectation. PASS means the framework's behaviour is unchanged from the
+last fixture capture. FAIL means something changed — and the diff shows
+exactly which rules now report differently and against which files.
+
+## Why ReleaseTest is special
+
+Most CAMARA repositories use the **stable** version of the framework — a
+tag called `v1-rc`. That tag only moves when the framework team
+deliberately rolls out a new version to all repositories.
+
+`camaraproject/ReleaseTest` is different on purpose. Its caller workflow
+targets `validation.yml@validation-framework` — that is, the **HEAD of the
+development branch**, not the stable tag. Every push to the
+`validation-framework` branch is exercised against ReleaseTest's regression
+fixtures **before** `v1-rc` is moved for the rest of the org. If a change
+accidentally breaks something, the canary catches it minutes after the
+push, in isolation, before any production API repository sees the change.
+
+## What we achieve
+
+- **Confidence to evolve rules.** Rule developers can refactor or add
+  checks without fearing they'll silently break something elsewhere — the
+  canary tells them within minutes if they did.
+- **A safety net before each framework release.** Before the `v1-rc` tag
+  is moved to a new commit, the canary is green. If it isn't, the release
+  doesn't go out.
+- **Living evidence of which rules are tested.** The framework has 142
+  rules. The rule inventory records which of them are pinned by a
+  regression branch. Adding more themed branches grows that number and
+  gives a measurable picture of test coverage.
+- **An authoritative answer when codeowners ask "did anything change?"**
+  Either the canary is unchanged (no behaviour change) or it's changed
+  and we can point at exactly which rules now report differently.
+
+## What it is *not*
+
+- Not a test of the **APIs themselves**. It doesn't tell us whether
+  QualityOnDemand or Device Location are correct. It tests whether the
+  validation framework judges them correctly.
+- Not user-facing. Codeowners never see this. It's a developer tool for
+  the framework team, like a smoke alarm that only the firefighters check.
+- Not a replacement for the manual review work that goes into release
+  PRs. It complements that — humans review release content; the canary
+  makes sure the tools they rely on haven't drifted.
+
+## How it works concretely
+
+### Components
+
+```
+camaraproject/tooling                       camaraproject/ReleaseTest
+─────────────────────                       ────────────────────────
+validation/                                 main
+├── docs/                                   ├── code/API_definitions/...
+│   └── regression-testing.md   ◄── this    │
+├── schemas/                                regression/r4.1-main-baseline
+│   └── regression-expected-schema.yaml     ├── code/API_definitions/...   (frozen)
+├── scripts/                                └── .regression/
+│   ├── regression_runner.py                    ├── REGRESSION.md          (purpose)
+│   └── README.md (CLI reference)               └── regression-expected.yaml (fixture)
+└── rules/
+    └── rule-inventory.yaml (tested_rules)  regression/...
+```
+
+### The fixture format
+
+Each regression branch has a `.regression/regression-expected.yaml` file
+that conforms to
+[validation/schemas/regression-expected-schema.yaml](../schemas/regression-expected-schema.yaml).
+It records:
+
+- `branch` — the regression branch name (informational)
+- `description` — what this branch tests
+- `captured_at`, `captured_from_run`, `tooling_ref` — provenance: when the
+  fixture was generated, which run produced it, and the validation
+  framework SHA in effect at the time
+- `summary` — expected aggregate counts (errors / warnings / hints), used
+  as a fast sanity check
+- `match_mode` — `exact` (default) rejects unexpected findings; `subset`
+  allows extras
+- `findings[]` — the expected list, where each entry is a unique
+  `(rule_id, path, level)` tuple with an optional `count` (default 1)
+
+### The match key — what counts as "the same finding"
+
+Two findings are considered the same if they share the same
+`(rule_id, path, level)` tuple. For findings that don't have a framework
+`rule_id` (raw engine rules without metadata), the runner falls back to
+`(engine/engine_rule, path, level)`.
+
+Three things are deliberately **excluded** from the match key:
+
+- **Line numbers.** Source maps shift as bundled output evolves; pinning
+  on a line number turns every cosmetic source change into a "regression".
+- **Messages.** Phrasing improves over time without changing the substance
+  of the check.
+- **Counts above the expected minimum.** A `count: N` entry means "at least
+  N", not "exactly N". A spec fix that removes one of three duplicate hints
+  is a desired change, not a regression. (This rule applies even in
+  `exact` match mode; `exact` only restricts what extra `(rule, path,
+  level)` keys are allowed, not how many times each one fires.)
+
+### The runner
+
+[validation/scripts/regression_runner.py](../scripts/regression_runner.py)
+is a single-file Python CLI that talks to GitHub via the `gh` CLI. Two
+modes:
+
+**Verify mode** (default): for each matching regression branch, the runner
+
+1. Fetches `regression-expected.yaml` from the branch via the GitHub
+   contents API
+2. Dispatches the validation workflow on the branch via
+   `gh workflow run camara-validation.yml --ref `
+3. Polls for the new run to appear (using a UTC timestamp marker plus the
+   branch tip SHA to disambiguate from concurrent runs) and waits for it
+   to complete
+4. Downloads the `validation-diagnostics` artifact, reads
+   `findings.json`, `summary.json`, and `context.json`
+5. Diffs actual findings against the expected fixture and reports
+6. Exits 0 (all PASS), 1 (one or more FAIL), or 2 (infrastructure
+   failure)
+
+**Capture mode** (`--capture  --out `): the runner runs
+steps 2–4 above, then groups the actual findings into a
+`regression-expected.yaml` document, and writes it to the requested
+output path. The reviewer then commits that file to the branch at
+`.regression/regression-expected.yaml`.
+
+The same dispatch / download / diff code path serves both modes — there
+is one set of bugs, not two.
+
+### How `tooling_ref` is recorded
+
+Every validation run writes its resolved tooling SHA into `context.json`
+in the diagnostics artifact (the orchestrator already does this for the
+workflow summary). The runner reads it directly from there, so the value
+in the fixture is the SHA the run actually used, regardless of which ref
+the caller targets. On ReleaseTest that's the `validation-framework` HEAD
+at run time; on dark / production repos it would be whatever `v1-rc`
+points at.
+
+## Day-to-day usage
+
+### Verify all canary branches
+
+```
+python3 validation/scripts/regression_runner.py \
+    --repo camaraproject/ReleaseTest \
+    --branch-filter 'regression/*'
+```
+
+Expected output for a clean run:
+
+```
+## Regression Runner — N/N branches PASS
+
+| Branch | Result | Matched | Missing | Unexpected | Summary |
+|---|---|---:|---:|---:|---|
+| `regression/r4.1-main-baseline` | PASS | 27 | 0 | 0 | - |
+PASS: 1/1 branches
+```
+
+Exit code 0. CLI flags and exit-code reference live in
+[validation/scripts/README.md](../scripts/README.md).
+
+### When a regression fires
+
+The runner exits 1 and prints a per-branch diff. Three classes of failure:
+
+- **Missing**: a finding in the fixture didn't appear in the actual run.
+  Either the rule was deleted, or its conditions changed and it no longer
+  fires on that file. If intentional → recapture. If not → fix the
+  framework before merging the change to `validation-framework`.
+- **Unexpected**: a finding appeared that wasn't in the fixture. Either a
+  new rule was added (or activated) and is now firing, or a rule's
+  conditions changed and it now fires where it didn't before. Same
+  triage: intended → recapture; unintended → fix.
+- **Summary mismatch**: the aggregate counts in `summary.json` don't
+  match the fixture's `summary` block. This usually shows up alongside
+  one of the other failures and confirms the cause.
+
+### Recapturing a fixture
+
+When a change is intentional, refresh the fixture:
+
+```
+python3 validation/scripts/regression_runner.py \
+    --repo camaraproject/ReleaseTest \
+    --capture regression/r4.1-main-baseline \
+    --out /tmp/expected.yaml \
+    --capture-description "baseline - ReleaseTest main, unmodified"
+```
+
+Review `/tmp/expected.yaml` against the previous version, commit it to
+the branch at `.regression/regression-expected.yaml`, and re-run the
+runner without `--capture` to confirm PASS. The fixture's `tooling_ref`
+field will reflect the current `validation-framework` HEAD.
+
+### Adding a new regression branch
+
+1. Branch from `camaraproject/ReleaseTest@main` with a descriptive name
+   under the `regression/` namespace
+   (e.g. `regression/r4.1-broken-info-block`).
+2. Make whatever spec edits the branch is meant to test. For a baseline
+   branch, leave specs unmodified.
+3. Write a short `REGRESSION.md` at `.regression/REGRESSION.md`
+   explaining what this branch is for, what it expects, and the
+   caller-workflow context if it's not the canary default.
+4. Push the branch.
+5. Run the runner in `--capture` mode to seed
+   `.regression/regression-expected.yaml`.
+6. Review, commit, push, and verify with the runner in default mode.
+7. Update [validation/rules/rule-inventory.yaml](../rules/rule-inventory.yaml):
+   add the new branch to the `tested_rules` entries for whichever rules
+   it pins, and bump `summary.total_tested` to the new unique-rule count.
+
+### Updating `tested_rules`
+
+The `tested_rules` mapping in `rule-inventory.yaml` records which rules
+are pinned by which regression branches:
+
+```yaml
+tested_rules:
+  P-006: [regression/r4.1-main-baseline]
+  S-211: [regression/r4.1-main-baseline]
+  S-313: [regression/r4.1-main-baseline]
+  S-314: [regression/r4.1-main-baseline]
+  S-316: [regression/r4.1-main-baseline]
+```
+
+Always list-valued for uniformity when a rule is covered by multiple
+branches. Treat the field as proof, not aspiration: bump it after the
+runner reports PASS against the new fixture, not before.
+
+## Sharp edges and known limitations
+
+- **Tooling-ref pinning is set by the caller workflow.** A local
+  `gh workflow run` cannot override which tooling SHA runs server-side.
+  ReleaseTest pins to `validation-framework` HEAD by design (canary).
+  Production API repos pin to `@v1-rc`. There is currently no way to
+  test an un-published developer SHA against the runner — that's a
+  separate piece of design work.
+- **Dispatch → run-id race.** `gh workflow run` does not return a run
+  ID. The runner records a UTC timestamp before dispatch and polls
+  `gh run list` for a `workflow_dispatch` run with a matching branch
+  tip SHA and a `createdAt` after the marker. Reliable in practice but
+  worth knowing if you need to debug a dispatch that "vanished".
+- **Findings ordering is not stable.** The post-filter emits findings in
+  whatever order the engines produced them. The diff is set-based on
+  the match key, so ordering is irrelevant — but if you eyeball
+  `findings.json` and `regression-expected.yaml` side by side, expect
+  them not to line up linearly.
+- **Capture-then-verify must be deterministic.** If the runner captures
+  a fixture and then immediately fails verification on a re-run against
+  the same SHA, the framework's output is non-deterministic on that
+  branch. That's a framework bug, not a runner bug — stop and
+  investigate before adding the branch.
+
+## Related references
+
+- [validation/scripts/README.md](../scripts/README.md) — runner CLI
+  reference, exit codes, troubleshooting
+- [validation/schemas/regression-expected-schema.yaml](../schemas/regression-expected-schema.yaml)
+  — JSON Schema for the fixture format
+- [validation/rules/rule-inventory.yaml](../rules/rule-inventory.yaml)
+  — rule registry with `tested_rules` coverage
+- Upstream tracking issue: [camaraproject/ReleaseManagement#483](https://github.com/camaraproject/ReleaseManagement/issues/483)
+- Umbrella validation framework issue: [camaraproject/ReleaseManagement#448](https://github.com/camaraproject/ReleaseManagement/issues/448)
diff --git a/validation/scripts/README.md b/validation/scripts/README.md
index f0d01a9d..14013019 100644
--- a/validation/scripts/README.md
+++ b/validation/scripts/README.md
@@ -19,16 +19,23 @@ Dispatches the validation framework against `regression/*` branches of a test
 repository, downloads findings, and diffs them against the committed
 `.regression/regression-expected.yaml` fixture on each branch.
 
+For motivation, the canary model, the fixture format, and day-to-day
+workflows (capture, verify, recapture, adding new branches), see the manual:
+**[../docs/regression-testing.md](../docs/regression-testing.md)**.
+
+This file is the CLI reference only.
+
 ### Prerequisites
 
 - Python 3.11+ with `pyyaml` and `jsonschema`
 - `gh` CLI installed and authenticated (`gh auth status` must be green)
-- The test repo must have the Validation Framework caller workflow installed
-  (`.github/workflows/camara-validation.yml`)
-- Each `regression/*` branch must contain `.regression/regression-expected.yaml`
-  conforming to `validation/schemas/regression-expected-schema.yaml`
+- The test repo must have the validation framework caller workflow installed
+  at `.github/workflows/camara-validation.yml`
+- For verify mode: each `regression/*` branch must contain
+  `.regression/regression-expected.yaml` conforming to
+  [../schemas/regression-expected-schema.yaml](../schemas/regression-expected-schema.yaml)
 
-### Run
+### Verify mode
 
 ```
 python3 validation/scripts/regression_runner.py \
@@ -36,76 +43,34 @@ python3 validation/scripts/regression_runner.py \
     [--branch-filter 'regression/r4.1-*'] \
     [--workflow-file camara-validation.yml] \
     [--poll-interval 15] [--poll-timeout 1800] \
-    [--summary-file regression-summary.md]
+    [--summary-file regression-summary.md] \
+    [-v|--verbose]
 ```
 
-Exit codes:
-
-| Code | Meaning |
-|---|---|
-| 0 | all branches PASS |
-| 1 | one or more branches FAIL (diff mismatch) |
-| 2 | infrastructure failure (gh error, timeout, missing artifact, schema invalid) |
+Default `--branch-filter` is `regression/*`. Default `--workflow-file` is
+`camara-validation.yml`.
 
-### Capture a new fixture
+### Capture mode
 
 ```
 python3 validation/scripts/regression_runner.py \
     --repo camaraproject/ReleaseTest \
     --capture regression/r4.1-main-baseline \
     --out /tmp/expected.yaml \
-    [--capture-description "baseline"]
+    [--capture-description "baseline - ReleaseTest main, unmodified"]
 ```
 
-Review the generated file, commit it to the branch at
-`.regression/regression-expected.yaml`, then re-run the runner without
-`--capture` to verify PASS.
-
-### Fixture match semantics
-
-- Match key is `(rule_id, path, level)` — or `(engine/engine_rule, path, level)`
-  when the framework has no `rule_id` for the rule.
-- Line numbers and messages are **not** part of the match key.
-- `count` means "at least N" in both `exact` and `subset` modes.
-- `match_mode: exact` (default) fails on unexpected extra findings;
-  `match_mode: subset` allows extras and only fails on missing expected findings.
-- The optional top-level `summary` block is checked against
-  `summary.json.counts` before per-finding diffing; any mismatch there is a
-  separate failure axis.
-
-### Tooling ref the run actually used
-
-Each test repo's caller workflow hardcodes the tooling ref it consumes, and
-does not forward `workflow_dispatch` inputs. A local `gh workflow run` can't
-override which tooling SHA runs server-side; OIDC inside the reusable
-inherits `job_workflow_ref` from the caller's hardcoded reference.
-
-Two cases in the wild:
-
-- **Dark / production API repos** — caller targets
-  `camaraproject/tooling/.github/workflows/validation.yml@v1-rc`. Each run
-  pins to whatever commit `v1-rc` currently points at; the SHA only changes
-  when the tag is moved (a deliberate, repo-wide release event).
-- **`camaraproject/ReleaseTest` (canary)** — caller targets
-  `...@validation-framework`. Each run pins to the current HEAD of the
-  `validation-framework` branch. Every push to that branch can change what
-  the runner sees here, *before* `v1-rc` is moved for the rest of the org.
-  This is the intentional canary surface for changes under development.
-
-The runner records the **actually used** SHA into the captured fixture by
-reading `tooling_ref` from `context.json` in the diagnostics artifact. That
-field comes from the orchestrator's own resolved context, so it's correct
-regardless of which ref the caller targeted.
-
-Implications for fixture maintenance:
-
-- For ReleaseTest fixtures: any merge to `validation-framework` that changes
-  findings against the same specs will produce a FAIL on the next runner
-  invocation. That's the canary working as designed. Triage the diff:
-  - Intended rule/engine change → recapture the fixture (`--capture`),
-    review the new findings, commit.
-  - Unintended regression → fix the code on `validation-framework`, re-run.
-- For dark-repo fixtures (none today): only stale after `v1-rc` moves.
+Writes a fresh `regression-expected.yaml` to `--out`. Review, commit to the
+branch at `.regression/regression-expected.yaml`, and re-run in verify mode
+to confirm PASS. See the manual for the full add-a-new-branch flow.
+
+### Exit codes
+
+| Code | Meaning |
+|---|---|
+| 0 | All branches PASS (or capture succeeded) |
+| 1 | One or more branches FAIL (diff mismatch) |
+| 2 | Infrastructure failure (gh error, timeout, missing artifact, schema invalid) |
 
 ### Troubleshooting
 
@@ -117,5 +82,6 @@ Implications for fixture maintenance:
   probably failed before the output step. Check the run URL printed in
   the log.
 - **Capture-then-verify fails on immediate re-run** — the validation output
-  is non-deterministic for this branch. Treat as a framework bug, not a
-  runner bug; stop and investigate.
+  is non-deterministic for this branch. That's a framework bug, not a
+  runner bug; stop and investigate. See the "Sharp edges" section of the
+  manual.

From bd827018fb735dc3d7f1e422694a6fada9ce44f1 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 14 Apr 2026 11:27:29 +0000
Subject: [PATCH 077/157] chore(deps): bump tj-actions/changed-files from
 47.0.2 to 47.0.5

Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 47.0.2 to 47.0.5.
- [Release notes](https://github.com/tj-actions/changed-files/releases)
- [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md)
- [Commits](https://github.com/tj-actions/changed-files/compare/8cba46e29c11878d930bca7870bb54394d3e8b21...22103cc46bda19c2b464ffe86db46df6922fd323)

---
updated-dependencies:
- dependency-name: tj-actions/changed-files
  dependency-version: 47.0.5
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] 
---
 .github/workflows/pr_validation.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/pr_validation.yml b/.github/workflows/pr_validation.yml
index a8796ba9..4858c6b4 100644
--- a/.github/workflows/pr_validation.yml
+++ b/.github/workflows/pr_validation.yml
@@ -44,7 +44,7 @@ jobs:
 
       - name: Detect changed files
         id: changes
-        uses: tj-actions/changed-files@8cba46e29c11878d930bca7870bb54394d3e8b21 # v47.0.2
+        uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
         with:
           files_yaml: |
             release_plan:

From 982f23961ea8bc953338d8c0acea7afee4617e3b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 14 Apr 2026 11:27:33 +0000
Subject: [PATCH 078/157] chore(deps): bump actions/upload-artifact from 6 to 7

Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 6 to 7.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/v6...v7)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-version: '7'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] 
---
 .github/workflows/api-review-reusable.yml | 4 ++--
 .github/workflows/pr_validation.yml       | 2 +-
 .github/workflows/validation.yml          | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/api-review-reusable.yml b/.github/workflows/api-review-reusable.yml
index d69e53da..628832e4 100644
--- a/.github/workflows/api-review-reusable.yml
+++ b/.github/workflows/api-review-reusable.yml
@@ -301,7 +301,7 @@ jobs:
           fi
 
       - name: Upload Detailed Report
-        uses: actions/upload-artifact@v6
+        uses: actions/upload-artifact@v7
         id: upload-detailed-report
         if: always() && steps.find-report.outputs.report_path != ''
         with:
@@ -398,7 +398,7 @@ jobs:
           fi
 
       - name: Upload Summary
-        uses: actions/upload-artifact@v6
+        uses: actions/upload-artifact@v7
         if: always()
         with:
           name: api-review-summary-${{ inputs.repo_name }}-${{ inputs.issue_number != '0' && format('comment{0}', inputs.issue_number) || 'manual' }}-${{ inputs.commonalities_version }}
diff --git a/.github/workflows/pr_validation.yml b/.github/workflows/pr_validation.yml
index a8796ba9..311875e0 100644
--- a/.github/workflows/pr_validation.yml
+++ b/.github/workflows/pr_validation.yml
@@ -210,7 +210,7 @@ jobs:
 
       - name: Archive reports
         if: (success() || failure()) && steps.ml.outcome != 'skipped'
-        uses: actions/upload-artifact@v6
+        uses: actions/upload-artifact@v7
         with:
           name: MegaLinter reports
           include-hidden-files: "true"
diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 878921d3..f4d6926b 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -355,7 +355,7 @@ jobs:
       # ── Step 14: Upload diagnostics ────────────────────────────────
       - name: Upload diagnostics
         if: always() && steps.validation.outcome == 'success'
-        uses: actions/upload-artifact@v6
+        uses: actions/upload-artifact@v7
         with:
           name: validation-diagnostics
           path: validation-output/diagnostics/
@@ -400,7 +400,7 @@ jobs:
       # ── Step 16: Upload bundled specs ──────────────────────────────
       - name: Upload bundled specs
         if: always() && steps.validation.outcome == 'success'
-        uses: actions/upload-artifact@v6
+        uses: actions/upload-artifact@v7
         with:
           name: validation-bundled-specs
           path: validation-output/bundled/

From 0104f3b947fccaef15249a1aea5216239f9d13cb Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 14 Apr 2026 11:27:37 +0000
Subject: [PATCH 079/157] chore(deps): bump actions/github-script from 8 to 9

Bumps [actions/github-script](https://github.com/actions/github-script) from 8 to 9.
- [Release notes](https://github.com/actions/github-script/releases)
- [Commits](https://github.com/actions/github-script/compare/v8...v9)

---
updated-dependencies:
- dependency-name: actions/github-script
  dependency-version: '9'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] 
---
 .github/workflows/pr_validation.yml           |  2 +-
 .../workflows/release-automation-reusable.yml | 28 +++++++++----------
 .github/workflows/validation.yml              |  8 +++---
 3 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/.github/workflows/pr_validation.yml b/.github/workflows/pr_validation.yml
index a8796ba9..609ca7ec 100644
--- a/.github/workflows/pr_validation.yml
+++ b/.github/workflows/pr_validation.yml
@@ -86,7 +86,7 @@ jobs:
 
       - name: release-plan.yaml validation result
         if: always() && steps.exclusivity.outcome != 'failure'
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           script: |
             const changed = '${{ steps.changes.outputs.release_plan_any_changed }}' === 'true';
diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index ba91325c..231fcac0 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -64,7 +64,7 @@ jobs:
     steps:
       - name: Detect Trigger Type
         id: detect
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           script: |
             const eventName = context.eventName;
@@ -253,7 +253,7 @@ jobs:
       - name: Resolve Tooling Checkout Ref
         id: resolve-tooling-ref
         if: steps.detect.outputs.should_continue == 'true'
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         env:
           TOOLING_REF_OVERRIDE: ${{ inputs.tooling_ref_override }}
         with:
@@ -324,7 +324,7 @@ jobs:
         id: ack
         if: steps.detect.outputs.trigger_type == 'slash_command'
         continue-on-error: true
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           github-token: ${{ github.token }}
           script: |
@@ -590,7 +590,7 @@ jobs:
     steps:
       - name: Validate Command
         id: validate
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           script: |
             const user = '${{ needs.check-trigger.outputs.user }}';
@@ -930,7 +930,7 @@ jobs:
 
       - name: Discard Snapshot
         id: discard
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           github-token: ${{ steps.app-token.outputs.token || github.token }}
           script: |
@@ -978,7 +978,7 @@ jobs:
       - name: Cleanup review branch
         id: cleanup
         if: steps.discard.outputs.success == 'true'
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           github-token: ${{ steps.app-token.outputs.token || github.token }}
           script: |
@@ -1040,7 +1040,7 @@ jobs:
 
       - name: Delete Draft Release
         id: delete
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           github-token: ${{ steps.app-token.outputs.token || github.token }}
           script: |
@@ -1109,7 +1109,7 @@ jobs:
       - name: Cleanup review branch
         id: cleanup
         if: steps.delete.outputs.success == 'true'
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           github-token: ${{ steps.app-token.outputs.token || github.token }}
           script: |
@@ -1387,7 +1387,7 @@ jobs:
         id: readme
         env:
           RELEASE_TAG: ${{ needs.derive-state.outputs.release_tag }}
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           github-token: ${{ steps.app-token.outputs.token || github.token }}
           script: |
@@ -1459,7 +1459,7 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ steps.app-token.outputs.token || github.token }}
           RELEASE_TAG: ${{ needs.derive-state.outputs.release_tag }}
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           script: |
             const fs = require('fs');
@@ -1604,7 +1604,7 @@ jobs:
 
       - name: Extract CHANGELOG release notes
         id: changelog
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           github-token: ${{ steps.app-token.outputs.token || github.token }}
           script: |
@@ -1736,7 +1736,7 @@ jobs:
 
       - name: Create Draft Release
         id: create-draft
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         env:
           RELEASE_NOTES: ${{ steps.changelog.outputs.release_notes }}
         with:
@@ -1856,7 +1856,7 @@ jobs:
 
       - name: Handle Issue Event
         id: handle
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           github-token: ${{ steps.app-token.outputs.token || github.token }}
           script: |
@@ -2350,7 +2350,7 @@ jobs:
           SYNC_PR_URL: ${{ needs.create-sync-pr.outputs.sync_pr_url }}
           PUBLISH_WARNINGS: ${{ needs.publish-release.outputs.error_message }}
           SYNC_STATUS: ${{ needs.create-sync-pr.outputs.sync_status }}
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           github-token: ${{ steps.app-token.outputs.token || github.token }}
           script: |
diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 878921d3..93c3ef6d 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -57,7 +57,7 @@ jobs:
       # ── Step 2: Resolve tooling ref (OIDC → override → fallback) ──
       - name: Resolve tooling ref
         id: resolve-ref
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         env:
           TOOLING_REF_OVERRIDE: ${{ inputs.tooling_ref_override }}
         with:
@@ -201,7 +201,7 @@ jobs:
         if: >-
           always() && steps.validation.outcome == 'success'
           && github.event_name == 'pull_request'
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           script: |
             const mintOutcome = '${{ steps.mint-token.outcome }}';
@@ -237,7 +237,7 @@ jobs:
           always() && steps.validation.outcome == 'success'
           && github.event_name == 'pull_request'
           && steps.write-access.outputs.has_write == 'true'
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           github-token: ${{ steps.mint-token.outputs.token || github.token }}
           script: |
@@ -329,7 +329,7 @@ jobs:
           always() && steps.validation.outcome == 'success'
           && github.event_name == 'pull_request'
           && steps.write-access.outputs.has_write != 'true'
-        uses: actions/github-script@v8
+        uses: actions/github-script@v9
         with:
           script: |
             const fs = require('fs');

From d8263c129017a746bbdb88e5c2ae5a54a8e7192f Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 15 Apr 2026 07:50:07 +0200
Subject: [PATCH 080/157] Pin nine rules via
 regression/r4.1-broken-spec-api-metadata
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

First broken-spec regression branch on camaraproject/ReleaseTest pins
S-018, S-019, S-020, S-021, S-022, S-023, S-024, S-201, and S-210 — all
triggered by surgical edits to info/license/servers/tags/403/trailing-
slash on sample-service.yaml. Captured fixture lives at
.regression/regression-expected.yaml on the branch (36 findings: 27
baseline + 9 new, summary 2 errors / 4 warnings / 30 hints).

Updates:
- validation/rules/rule-inventory.yaml: add nine new entries to
  tested_rules; bump summary.total_tested from 5 to 14.
- validation/docs/regression-testing.md: add "The broken-spec branch
  plan" section covering the seven-theme roadmap, inherited baseline
  findings, and the minor-bump-rebase-and-rename / major-bump-preserve
  lifecycle. Update "Adding a new regression branch" with the
  `rX.Y-broken-spec-` naming convention.

832/832 validation tests pass.

Tracked upstream under ReleaseManagement#483.
---
 validation/docs/regression-testing.md | 92 +++++++++++++++++++++++++--
 validation/rules/rule-inventory.yaml  | 11 +++-
 2 files changed, 97 insertions(+), 6 deletions(-)

diff --git a/validation/docs/regression-testing.md b/validation/docs/regression-testing.md
index 5c3a25ae..90a6d95d 100644
--- a/validation/docs/regression-testing.md
+++ b/validation/docs/regression-testing.md
@@ -242,13 +242,21 @@ field will reflect the current `validation-framework` HEAD.
 ### Adding a new regression branch
 
 1. Branch from `camaraproject/ReleaseTest@main` with a descriptive name
-   under the `regression/` namespace
-   (e.g. `regression/r4.1-broken-info-block`).
+   under the `regression/` namespace. Naming convention:
+   - **Baseline branches**: `regression/rX.Y-main-baseline`
+   - **Broken-spec branches**: `regression/rX.Y-broken-spec-`
+
+   The `rX.Y` prefix records the Commonalities minor release the branch
+   was captured against. See [The broken-spec branch plan](#the-broken-spec-branch-plan)
+   for the target theme set.
 2. Make whatever spec edits the branch is meant to test. For a baseline
-   branch, leave specs unmodified.
+   branch, leave specs unmodified. For a broken-spec branch, keep edits
+   surgical — one theme per branch — and avoid cascades into rules the
+   branch is not meant to test.
 3. Write a short `REGRESSION.md` at `.regression/REGRESSION.md`
-   explaining what this branch is for, what it expects, and the
-   caller-workflow context if it's not the canary default.
+   explaining what this branch is for, what it expects (edit-to-rule
+   mapping for broken-spec branches), and the caller-workflow context
+   if it's not the canary default.
 4. Push the branch.
 5. Run the runner in `--capture` mode to seed
    `.regression/regression-expected.yaml`.
@@ -275,6 +283,80 @@ Always list-valued for uniformity when a rule is covered by multiple
 branches. Treat the field as proof, not aspiration: bump it after the
 runner reports PASS against the new fixture, not before.
 
+## The broken-spec branch plan
+
+Broken-spec branches are organised by **theme**, not by individual rule.
+Each branch contains a small set of surgical edits to one or two spec
+files on `camaraproject/ReleaseTest` that together trigger a coherent
+group of rules. One branch = one workflow run — grouping by theme keeps
+the canary dispatch budget small while still pinning every rule that
+can reasonably be exercised from the spec side.
+
+### Target themes
+
+The r4.1 rule set partitions cleanly into seven themes (plus an optional
+eighth for test-file quality). The table records the current plan; each
+theme becomes one `regression/r4.1-broken-spec-` branch.
+
+| # | Branch | Theme / target files | Rules covered | Rebase risk on minor bump |
+|---|---|---|---|---|
+| 1 | `regression/r4.1-broken-spec-api-metadata` | `sample-service.yaml` — `info`, `servers`, `tags` block | S-018, S-019, S-020, S-021, S-022, S-023, S-024, S-201, S-210 | LOW |
+| 2 | `regression/r4.1-broken-spec-yaml-fundamentals` | `sample-service.yaml` YAML-level defects + `openapi:` version + schema type | Y-001…Y-013, S-005, S-016 | LOW |
+| 3 | `regression/r4.1-broken-spec-error-handling` | `sample-service.yaml` — error responses + error codes | S-025, S-026, S-027, S-221, S-307, S-318 | LOW |
+| 4 | `regression/r4.1-broken-spec-descriptions` | `sample-service.yaml` — descriptions on operations / parameters / properties / responses / array items | S-006, S-009, S-011, S-013, S-014, S-028, S-029, S-031, S-215, S-216, S-223 | MEDIUM |
+| 5 | `regression/r4.1-broken-spec-schema-constraints` | `sample-service.yaml` components (not common files — avoid baseline collision) | S-012, S-017, S-030, S-300, S-303, S-308, S-309, S-310, S-311, S-312 | MEDIUM |
+| 6 | `regression/r4.1-broken-spec-routing` | `sample-service.yaml` — paths, operationIds, HTTP methods, servers | S-002, S-003, S-007, S-008, S-010, S-204, S-214, S-217, S-218, S-220, S-222, S-224, S-225, S-226, S-227, S-301, S-306 | HIGH |
+| 7 | `regression/r4.1-broken-spec-subscriptions` | `sample-service-subscriptions.yaml` + `sample-implicit-events.yaml` — CloudEvent / Protocol / sink / notifications + Python subscription checks | S-032, S-033, S-034, S-035, P-014, P-015, P-016, P-020 | HIGH |
+| 8 (optional) | `regression/r4.1-broken-spec-test-files` | `code/Test_definitions/*.feature` — filename / version / gherkin defects | P-001, P-002, P-003, P-004, P-005, P-007, P-008, selected G-* | LOW |
+
+Rules **not** covered by any broken-spec branch:
+
+- **Owned by the baseline fixture**: P-006, S-211, S-313, S-314, S-316.
+  Broken-spec branches inherit these when captured, but do not own the
+  pinning — they would double-count.
+- **Un-triggerable via spec edits**: P-009, P-010, P-011, P-012, P-013,
+  P-019 (release-plan / PR-context / fixture-dependent).
+- **Deprecated, OAS-3.1-only, or low-signal**: S-001, S-004, S-015,
+  S-205, S-206, S-208, S-209, S-228, S-302, S-304, S-305, S-315, S-317,
+  S-319.
+- **Manual-only (not machine-checkable)**: the 25 `TG-*` rules from the
+  testing guidelines audit.
+
+### Inherited baseline findings
+
+Broken-spec branches are cut from `main`, so every captured fixture
+contains the full baseline finding set **plus** the new findings the
+broken edits trigger. A broken-spec branch fixture is a complete
+snapshot of its branch's output, not a delta. The runner's `exact`
+match mode evaluates both halves together.
+
+When designing a new broken-spec branch, pick edits whose new match keys
+(`(rule_id, path, level)`) do **not** collide with baseline keys — the
+baseline branch already pins those. If an edit would have collided, move
+it to a different file or pick a different rule.
+
+### Lifecycle across Commonalities versions
+
+The `rX.Y` prefix records the Commonalities minor release the branch
+was captured against. Two separate lifecycles apply:
+
+- **Minor bump** (e.g. r4.1 → r4.2): rebase each broken-spec branch onto
+  the updated ReleaseTest `main`, rename the prefix (`r4.1-broken-spec-*`
+  → `r4.2-broken-spec-*`), recapture the fixture, force-push. Delete the
+  old `r4.1-*` branch. Rationale: r4.2 is the current surface, and the
+  broken-spec predicate ("info.description missing", "license.name
+  wrong", etc.) is preserved by rebase for the LOW-risk themes. MEDIUM
+  and HIGH risk themes may need the edits re-applied manually after the
+  rebase — treat them as rewrites, not pure rebases.
+- **Major bump** (e.g. r4.3 → r5.1): **keep** the last `r4.x-broken-spec-*`
+  set as permanent regression coverage for the previous major, and
+  create a fresh `r5.1-broken-spec-*` set from `r5.1` main. Breaking
+  Commonalities changes can invalidate old predicates; the previous
+  major stays frozen so long as it's still supported.
+
+The same model applies to `regression/rX.Y-main-baseline` — rebase +
+rename on minor bumps, preserve across majors.
+
 ## Sharp edges and known limitations
 
 - **Tooling-ref pinning is set by the caller workflow.** A local
diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index cc24583c..a7d58c72 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -18,7 +18,7 @@ summary:
   total_gap: 0
   total_manual: 25
   total_pending: 0
-  total_tested: 5
+  total_tested: 14
   by_engine:
     spectral: 84
     gherkin: 25
@@ -294,6 +294,15 @@ pending_rules:
 
 tested_rules:
   P-006: [regression/r4.1-main-baseline]
+  S-018: [regression/r4.1-broken-spec-api-metadata]
+  S-019: [regression/r4.1-broken-spec-api-metadata]
+  S-020: [regression/r4.1-broken-spec-api-metadata]
+  S-021: [regression/r4.1-broken-spec-api-metadata]
+  S-022: [regression/r4.1-broken-spec-api-metadata]
+  S-023: [regression/r4.1-broken-spec-api-metadata]
+  S-024: [regression/r4.1-broken-spec-api-metadata]
+  S-201: [regression/r4.1-broken-spec-api-metadata]
+  S-210: [regression/r4.1-broken-spec-api-metadata]
   S-211: [regression/r4.1-main-baseline]
   S-313: [regression/r4.1-main-baseline]
   S-314: [regression/r4.1-main-baseline]

From 0b8cf8e1316a8228e2fa5133e270949e191c5be9 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 15 Apr 2026 08:26:07 +0200
Subject: [PATCH 081/157] Add regression-runner.yml CI workflow
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Auto-dispatches validation/scripts/regression_runner.py against
camaraproject/ReleaseTest on every push to validation-framework
that touches validation/, shared-actions/, or the workflow itself,
plus workflow_dispatch for manual runs. Turns the WS07 Phase 3
regression canary from manual into automatic.

Workflow:
- Triggers: push to validation-framework + workflow_dispatch, with
  paths filter on validation/** + shared-actions/** + the workflow
  file itself.
- Concurrency: ref-scoped with cancel-in-progress, so back-to-back
  pushes don't queue stale runs.
- Permissions: top-level GITHUB_TOKEN limited to contents:read for
  the initial checkout. All cross-repo access (dispatching
  camara-validation.yml on ReleaseTest, downloading diagnostics) uses
  a short-lived camara-validation GitHub App token minted with
  owner: camaraproject, repositories: ReleaseTest. The runner picks
  it up via GH_TOKEN env, overriding the default gh CLI auth.
- Output: --summary-file writes a markdown report; the workflow
  appends it to $GITHUB_STEP_SUMMARY and uploads it as a
  regression-runner-summary artifact (30-day retention).

validation/docs/regression-testing.md gains a short "Automatic runs on
validation-framework" subsection pointing at the new workflow.

Pre-merge verification: YAML parse OK, runner --help contract
unchanged, 832/832 validation unit tests pass. First real exercise
is the post-merge push — org secrets are not available to the fork
branch, so pre-merge dispatch cannot be tested from hdamker/tooling.

Tracked upstream under ReleaseManagement#483.
---
 .github/workflows/regression-runner.yml | 91 +++++++++++++++++++++++++
 validation/docs/regression-testing.md   | 20 ++++++
 2 files changed, 111 insertions(+)
 create mode 100644 .github/workflows/regression-runner.yml

diff --git a/.github/workflows/regression-runner.yml b/.github/workflows/regression-runner.yml
new file mode 100644
index 00000000..dce3c195
--- /dev/null
+++ b/.github/workflows/regression-runner.yml
@@ -0,0 +1,91 @@
+# CAMARA Validation Framework — Regression Runner (canary)
+#
+# Auto-dispatches validation/scripts/regression_runner.py against
+# camaraproject/ReleaseTest on every push to validation-framework, so
+# that "broken stays broken" and "clean stays clean" are verified
+# automatically before v1-rc is moved for the rest of the org.
+#
+# The runner dispatches camara-validation.yml on each branch matching
+# regression/* on ReleaseTest, waits for it to complete, downloads the
+# diagnostics artifact, and diffs the findings against each branch's
+# committed .regression/regression-expected.yaml fixture.
+#
+# Cross-repo access is provided by a short-lived camara-validation
+# GitHub App installation token scoped to camaraproject/ReleaseTest.
+# The default GITHUB_TOKEN is only used for the initial checkout.
+#
+# See validation/docs/regression-testing.md for the full picture.
+
+name: Regression Runner
+
+on:
+  push:
+    branches: [validation-framework]
+    paths:
+      - 'validation/**'
+      - 'shared-actions/**'
+      - '.github/workflows/regression-runner.yml'
+  workflow_dispatch:
+
+concurrency:
+  group: regression-runner-${{ github.ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  regression:
+    name: Regression canary
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout tooling
+        uses: actions/checkout@v6
+        with:
+          persist-credentials: false
+
+      - name: Setup Python
+        uses: actions/setup-python@v6
+        with:
+          python-version: "3.11"
+
+      - name: Install runner dependencies
+        run: |
+          pip install --upgrade pip
+          pip install pyyaml jsonschema
+
+      - name: Mint validation app token
+        id: app-token
+        uses: actions/create-github-app-token@v3
+        with:
+          client-id: ${{ vars.VALIDATION_APP_CLIENT_ID }}
+          private-key: ${{ secrets.VALIDATION_APP_PRIVATE_KEY }}
+          owner: camaraproject
+          repositories: ReleaseTest
+
+      - name: Run regression runner
+        env:
+          GH_TOKEN: ${{ steps.app-token.outputs.token }}
+        run: |
+          python3 validation/scripts/regression_runner.py \
+            --repo camaraproject/ReleaseTest \
+            --branch-filter 'regression/*' \
+            --summary-file regression-summary.md
+
+      - name: Publish summary
+        if: always()
+        run: |
+          if [ -f regression-summary.md ]; then
+            cat regression-summary.md >> "$GITHUB_STEP_SUMMARY"
+          else
+            echo "::warning::regression-summary.md not produced (runner likely failed before writing)"
+          fi
+
+      - name: Upload summary artifact
+        if: always()
+        uses: actions/upload-artifact@v7
+        with:
+          name: regression-runner-summary
+          path: regression-summary.md
+          if-no-files-found: warn
+          retention-days: 30
diff --git a/validation/docs/regression-testing.md b/validation/docs/regression-testing.md
index 90a6d95d..44a01bc2 100644
--- a/validation/docs/regression-testing.md
+++ b/validation/docs/regression-testing.md
@@ -184,6 +184,26 @@ points at.
 
 ## Day-to-day usage
 
+### Automatic runs on `validation-framework`
+
+The regression runner fires automatically on every push to
+`validation-framework` that touches `validation/**`, `shared-actions/**`,
+or the workflow itself. The workflow lives at
+[.github/workflows/regression-runner.yml](../../.github/workflows/regression-runner.yml)
+on this same branch (so it only exists where it matters and does not
+run on `main`). Manual dispatch is available via the Actions UI for
+fix-then-verify cycles.
+
+Cross-repo access to ReleaseTest is provided by a short-lived
+`camara-validation` GitHub App installation token minted with
+`owner: camaraproject, repositories: ReleaseTest`. There is no
+persisted PAT.
+
+Results surface in three places: (1) the workflow's own pass/fail
+status in the Actions tab on `camaraproject/tooling`, (2) the markdown
+summary on the run's summary page, and (3) the `regression-runner-summary`
+artifact attached to each run (30-day retention).
+
 ### Verify all canary branches
 
 ```

From 71cb2cb49ca149d66dd7585deb9b09a54703c475 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 15 Apr 2026 14:18:11 +0200
Subject: [PATCH 082/157] Fix T1b feature-line regex and add Scope Issue
 template stub

T1b (test_def_api_version) matches both comma- and space-separated
Feature lines. The previous pattern required "Feature: , vwip"
and silently missed "Feature:  vwip", leaving placeholders in
transformed snapshots (observed on NetworkInsights r1.1). The new
pattern (Feature: .*?) vwip\b anchors on the single space both
conventions share.

The Release Issue template drops the Release Highlights block in
favour of a Scope Issue stub in the editable section so maintainers
can attach a link after issue creation. The Wiki release tracker that
used to hold the Scope Issue link has been dropped
(ReleaseManagement#409).

Tests: four new TestFeatureLineReplacement cases (comma, space,
already-versioned, no-vwip). 561/561 release_automation tests pass.
---
 .../config/transformations.yaml               | 12 +--
 .../issue_bodies/release_issue.mustache       |  4 +-
 .../tests/test_issue_manager.py               |  6 +-
 .../tests/test_mechanical_transformer.py      | 82 +++++++++++++++++++
 4 files changed, 94 insertions(+), 10 deletions(-)

diff --git a/release_automation/config/transformations.yaml b/release_automation/config/transformations.yaml
index e5597acd..89823737 100644
--- a/release_automation/config/transformations.yaml
+++ b/release_automation/config/transformations.yaml
@@ -49,14 +49,16 @@ transformations:
     replacement: "/{url_version}"
 
   # T1b: Test definition API version in Feature line
-  # Replaces "vwip" with "v{api_version}" (e.g., "v1.1.0") in Feature declarations
-  # Handles variations: "CAMARA/Camara", "Operation/Operation:", comments before Feature
+  # Replaces "vwip" with "v{api_version}" (e.g., "v1.1.0") in Feature declarations.
+  # Matches both the comma-separated form ("Feature: , vwip - …") and the
+  # space-only form ("Feature:  vwip - …"); the space before "vwip" is the
+  # single common anchor across CAMARA test-file conventions.
   - name: test_def_api_version
-    description: Replace vwip in test definition Feature line
+    description: Replace vwip in test definition Feature line (comma- or space-separated)
     type: regex
     file_pattern: "code/Test_definitions/*.feature"
-    pattern: "(Feature: [^,]+, )vwip"
-    replacement: "\\g<1>v{api_version}"
+    pattern: "(Feature: .*?) vwip\\b"
+    replacement: "\\g<1> v{api_version}"
 
   # T3: Commonalities reference in x-camara-commonalities
   - name: commonalities_ref
diff --git a/release_automation/templates/issue_bodies/release_issue.mustache b/release_automation/templates/issue_bodies/release_issue.mustache
index 8d0cd326..2e992665 100644
--- a/release_automation/templates/issue_bodies/release_issue.mustache
+++ b/release_automation/templates/issue_bodies/release_issue.mustache
@@ -1,9 +1,7 @@
 
 
 
-### Release Highlights
-
-_Add release highlights here before creating snapshot._
+**Scope Issue:** _Link to the Scope Issue tracking your target release — fill in after issue creation._
 
 ### Preparing the release content
 
diff --git a/release_automation/tests/test_issue_manager.py b/release_automation/tests/test_issue_manager.py
index d3dc6403..f869b01f 100644
--- a/release_automation/tests/test_issue_manager.py
+++ b/release_automation/tests/test_issue_manager.py
@@ -357,8 +357,10 @@ def test_generate_template_without_meta_release(self):
 
         # No redundant heading
         assert "## Release:" not in body
-        # Heading levels should be ###
-        assert "### Release Highlights" in body
+        # Scope Issue stub sits above the automation-managed markers
+        assert "**Scope Issue:**" in body
+        # Remaining section headings should stay at ###
+        assert "### Preparing the release content" in body
         assert "### Release Status" in body
 
 
diff --git a/release_automation/tests/test_mechanical_transformer.py b/release_automation/tests/test_mechanical_transformer.py
index 8f41f8c9..37c907fb 100644
--- a/release_automation/tests/test_mechanical_transformer.py
+++ b/release_automation/tests/test_mechanical_transformer.py
@@ -355,6 +355,88 @@ def test_no_match_returns_empty_changes(self, transformer, context):
             os.unlink(temp_path)
 
 
+class TestFeatureLineReplacement:
+    """T1b: regression coverage for `test_def_api_version` across Feature-line shapes.
+
+    CAMARA test files use two conventions in the wild:
+    - `Feature: , vwip - ` (comma-separated, majority)
+    - `Feature:  vwip - ` (space-only)
+
+    The earlier regex `(Feature: [^,]+, )vwip` silently missed the second form
+    and shipped unreplaced placeholders in snapshot branches (observed on
+    NetworkInsights r1.1). The new pattern `(Feature: .*?) vwip\\b` anchors on
+    the single space before `vwip` which is common to both forms.
+    """
+
+    T1B_RULE = TransformationRule(
+        name="test_def_api_version",
+        description="Replace vwip in test definition Feature line",
+        type=TransformationType.REGEX,
+        file_pattern="code/Test_definitions/*.feature",
+        pattern=r"(Feature: .*?) vwip\b",
+        replacement=r"\g<1> v{api_version}",
+    )
+
+    def _run(self, transformer, context, content):
+        with tempfile.TemporaryDirectory() as tmpdir:
+            feature_path = os.path.join(tmpdir, "quality-on-demand-createSession.feature")
+            with open(feature_path, "w") as f:
+                f.write(content)
+
+            result = transformer._apply_regex(feature_path, self.T1B_RULE, context)
+
+            with open(feature_path, "r") as f:
+                transformed = f.read()
+
+            return result, transformed, feature_path
+
+    def test_comma_separated_form(self, transformer, context):
+        """Majority form: `Feature: , vwip - `."""
+        result, content, feature_path = self._run(
+            transformer,
+            context,
+            "Feature: CAMARA Quality On Demand, vwip - Operation createSession\n",
+        )
+        assert result.success
+        assert feature_path in result.files_modified
+        assert content == (
+            "Feature: CAMARA Quality On Demand, v3.2.0-rc.2 - Operation createSession\n"
+        )
+
+    def test_space_only_form(self, transformer, context):
+        """Blocking bug: `Feature:  vwip - ` (no comma).
+
+        Observed on NetworkInsights r1.1: both .feature files used this form
+        and the earlier regex silently left `vwip` in place.
+        """
+        result, content, feature_path = self._run(
+            transformer,
+            context,
+            "Feature: CAMARA Quality On Demand vwip - Operation createSession\n",
+        )
+        assert result.success
+        assert feature_path in result.files_modified
+        assert content == (
+            "Feature: CAMARA Quality On Demand v3.2.0-rc.2 - Operation createSession\n"
+        )
+
+    def test_already_versioned_unchanged(self, transformer, context):
+        """A Feature line that already carries a real version must not match."""
+        original = "Feature: CAMARA Quality On Demand, v0.3.0 - Operation createSession\n"
+        result, content, feature_path = self._run(transformer, context, original)
+        assert result.success
+        assert feature_path not in result.files_modified
+        assert content == original
+
+    def test_no_vwip_unchanged(self, transformer, context):
+        """A Feature line with no `vwip` anywhere is a no-op."""
+        original = "Feature: CAMARA Quality On Demand - Operation createSession\n"
+        result, content, feature_path = self._run(transformer, context, original)
+        assert result.success
+        assert feature_path not in result.files_modified
+        assert content == original
+
+
 class TestYamlPathTransformation:
     """Tests for YAML path transformations."""
 

From 6c5a08384ccb05633a20cfbc9f8a87b666d9147b Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 15 Apr 2026 16:39:28 +0200
Subject: [PATCH 083/157] Add post-filter suppress_schema_paths and apply to
 S-313
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reusable per-rule JSON-path allowlist in the post-filter. Findings
matching an entry (exact or prefix with dot boundary) are dropped
before applicability/severity processing. The Spectral adapter
extracts Spectral's JSONPath into finding.schema_path.

S-313 uses it to silence ten known-unactionable hints on
CAMARA_common.yaml and CAMARA_event_common.yaml. Upstream OWASP rule
is untouched — Spectral still produces the full finding set.
---
 validation/engines/spectral_adapter.py       |  13 ++
 validation/postfilter/engine.py              |  26 +++
 validation/postfilter/metadata_loader.py     |  14 ++
 validation/rules/spectral-rules.yaml         |  21 +++
 validation/schemas/findings-schema.yaml      |   9 +
 validation/schemas/rule-metadata-schema.yaml |  17 ++
 validation/tests/test_postfilter_engine.py   | 173 +++++++++++++++++++
 validation/tests/test_postfilter_metadata.py |  41 +++++
 validation/tests/test_spectral_adapter.py    |  32 ++++
 9 files changed, 346 insertions(+)

diff --git a/validation/engines/spectral_adapter.py b/validation/engines/spectral_adapter.py
index dfa86d8f..0b28752b 100644
--- a/validation/engines/spectral_adapter.py
+++ b/validation/engines/spectral_adapter.py
@@ -180,6 +180,11 @@ def normalize_finding(raw: dict, repo_root: Optional[str] = None) -> dict:
     Critical field mapping:
     - ``raw["source"]`` -> ``finding["path"]`` (file path, NOT ``raw["path"]``
       which is the JSONPath within the document).
+    - ``raw["path"]`` (JSONPath array) -> ``finding["schema_path"]``
+      (dot-joined string).  Spectral emits this as the canonical path
+      within the source file, already source-mapped for $ref-followed
+      findings.  Consumed by the post-filter ``suppress_schema_paths``
+      mechanism.
     - ``raw["range"]["start"]["line"]`` is 0-indexed; add 1 for the framework.
     - ``raw["range"]["start"]["character"]`` is 0-indexed; add 1.
 
@@ -207,12 +212,20 @@ def normalize_finding(raw: dict, repo_root: Optional[str] = None) -> dict:
 
     level = "hint" if from_external else map_severity(raw.get("severity", 1))
 
+    raw_schema_path = raw.get("path")
+    schema_path: Optional[str]
+    if isinstance(raw_schema_path, list) and raw_schema_path:
+        schema_path = ".".join(str(segment) for segment in raw_schema_path)
+    else:
+        schema_path = None
+
     finding: dict = {
         "engine": ENGINE_NAME,
         "engine_rule": raw.get("code", "unknown"),
         "level": level,
         "message": raw.get("message", ""),
         "path": source,
+        "schema_path": schema_path,
         "line": line,
         "api_name": derive_api_name(source),
     }
diff --git a/validation/postfilter/engine.py b/validation/postfilter/engine.py
index 584eb4a4..6325b5c6 100644
--- a/validation/postfilter/engine.py
+++ b/validation/postfilter/engine.py
@@ -60,6 +60,27 @@ def _is_engine_error_finding(finding: dict) -> bool:
     return finding.get("engine_rule", "").endswith("-execution-error")
 
 
+def _is_suppressed_by_schema_path(finding: dict, rule: RuleMetadata) -> bool:
+    """Return ``True`` when a finding's ``schema_path`` matches an entry in
+    ``rule.suppress_schema_paths``.
+
+    A match is an exact path equality OR a prefix with a dot boundary, so
+    ``components.schemas.ErrorInfo`` does NOT false-match a sibling like
+    ``components.schemas.ErrorInfoExtended``.  Findings without a
+    ``schema_path`` field (e.g. from yamllint or gherkin) can never be
+    suppressed by this mechanism and fall through unchanged.
+    """
+    if not rule.suppress_schema_paths:
+        return False
+    schema_path = finding.get("schema_path")
+    if not isinstance(schema_path, str) or not schema_path:
+        return False
+    for entry in rule.suppress_schema_paths:
+        if schema_path == entry or schema_path.startswith(entry + "."):
+            return True
+    return False
+
+
 def _resolve_api_context(
     finding: dict,
     context: ValidationContext,
@@ -233,6 +254,11 @@ def run_post_filter(
             # Step 3: Mapped rule
             api_ctx = _resolve_api_context(finding, context)
 
+            # Per-path suppression — drop findings on known-unactionable
+            # locations before any further processing.
+            if _is_suppressed_by_schema_path(finding, rule):
+                continue
+
             # Applicability check — remove if not applicable
             if not is_applicable(rule.applicability, context, api_ctx):
                 continue
diff --git a/validation/postfilter/metadata_loader.py b/validation/postfilter/metadata_loader.py
index 297718fa..05516c51 100644
--- a/validation/postfilter/metadata_loader.py
+++ b/validation/postfilter/metadata_loader.py
@@ -76,6 +76,10 @@ class RuleMetadata:
         applicability: Condition dict — omitted fields are unconstrained.
         conditional_level: Severity specification, or ``None`` to preserve
             engine-reported severity (identity mapping).
+        suppress_schema_paths: Tuple of JSON paths within a source document
+            to suppress for this rule.  A finding is dropped entirely when
+            its ``schema_path`` equals an entry here or starts with an
+            entry followed by a dot.  Empty tuple means no suppression.
     """
 
     id: str
@@ -86,6 +90,7 @@ class RuleMetadata:
     hint: Optional[str]
     applicability: dict
     conditional_level: Optional[ConditionalLevel]
+    suppress_schema_paths: Tuple[str, ...] = ()
 
 
 # ---------------------------------------------------------------------------
@@ -141,6 +146,14 @@ def parse_rule_metadata(raw: dict) -> RuleMetadata:
     raw_cl = raw.get("conditional_level")
     conditional_level = _parse_conditional_level(raw_cl) if raw_cl is not None else None
 
+    # Optional suppress_schema_paths — empty tuple means no suppression
+    raw_suppress = raw.get("suppress_schema_paths") or []
+    if not isinstance(raw_suppress, list):
+        raise ValueError("suppress_schema_paths must be a list of strings")
+    suppress_schema_paths: Tuple[str, ...] = tuple(
+        entry for entry in raw_suppress if isinstance(entry, str) and entry
+    )
+
     return RuleMetadata(
         id=raw["id"],
         name=raw.get("name", raw["engine_rule"]),
@@ -150,6 +163,7 @@ def parse_rule_metadata(raw: dict) -> RuleMetadata:
         hint=raw.get("hint"),
         applicability=raw.get("applicability", {}),
         conditional_level=conditional_level,
+        suppress_schema_paths=suppress_schema_paths,
     )
 
 
diff --git a/validation/rules/spectral-rules.yaml b/validation/rules/spectral-rules.yaml
index 33d4343b..ade4a0bc 100644
--- a/validation/rules/spectral-rules.yaml
+++ b/validation/rules/spectral-rules.yaml
@@ -348,6 +348,27 @@
   hint: "Acceptable if free-form field or implementation-dependent — no fix needed."
   conditional_level:
     default: hint
+  # Known-unactionable locations in the Commonalities common library.
+  # These string fields are already length-constrained via maxLength (S-312
+  # is satisfied); no sensible format/pattern/enum/const value exists for
+  # the free-text and identifier fields below.  The rule's own hint already
+  # flags this as "Acceptable ... no fix needed" — the allowlist removes
+  # the repeated noise. Entries are specific fields, not whole components,
+  # so any new unconstrained string added to a common schema still surfaces
+  # and can be evaluated on its own merits.
+  suppress_schema_paths:
+    # CAMARA_common.yaml
+    - components.schemas.ErrorInfo.properties.code
+    - components.schemas.ErrorInfo.properties.message
+    - components.schemas.NetworkAccessIdentifier
+    # CAMARA_event_common.yaml
+    - components.schemas.CloudEvent.properties.id
+    - components.schemas.CloudEvent.properties.type
+    - components.schemas.SubscriptionId
+    - components.schemas.HTTPSettings.properties.headers.additionalProperties
+    - components.schemas.SubscriptionStarted.properties.initiationDescription
+    - components.schemas.SubscriptionUpdated.properties.updateDescription
+    - components.schemas.SubscriptionEnded.properties.terminationDescription
 
 - id: S-314
   engine: spectral
diff --git a/validation/schemas/findings-schema.yaml b/validation/schemas/findings-schema.yaml
index 5c2824b4..9612059c 100644
--- a/validation/schemas/findings-schema.yaml
+++ b/validation/schemas/findings-schema.yaml
@@ -56,6 +56,15 @@ properties:
       File path relative to the repository root
       (e.g. "code/API_definitions/quality-on-demand.yaml").
 
+  schema_path:
+    type: ["string", "null"]
+    description: >
+      Dot-joined JSONPath of the offending node within the source document,
+      when the engine provides it (e.g.
+      "components.schemas.ErrorInfo.properties.code").  Optional — absent
+      for engines that only report file+line locations (yamllint, gherkin).
+      Used by post-filter suppress_schema_paths to drop specific findings.
+
   line:
     type: integer
     minimum: 1
diff --git a/validation/schemas/rule-metadata-schema.yaml b/validation/schemas/rule-metadata-schema.yaml
index 8c49e308..8e8c2721 100644
--- a/validation/schemas/rule-metadata-schema.yaml
+++ b/validation/schemas/rule-metadata-schema.yaml
@@ -110,6 +110,23 @@ properties:
 
     additionalProperties: false
 
+  suppress_schema_paths:
+    type: array
+    description: >
+      Post-filter allowlist of JSON paths within a source document to
+      suppress for this rule.  A finding is dropped entirely when its
+      ``schema_path`` field equals an entry here or starts with the entry
+      followed by a dot (prefix-with-boundary match).  Intended for rules
+      that fire on known-unactionable locations — e.g. free-text fields in
+      Commonalities common schemas where no sensible ``pattern``/``format``
+      exists.  Entries should be specific field paths, not whole
+      components, so that any new unconstrained field surfaces instead of
+      being silently masked.  Optional — omit for rules that do not need
+      per-path suppression.
+    items:
+      type: string
+      minLength: 1
+
   conditional_level:
     type: object
     description: >
diff --git a/validation/tests/test_postfilter_engine.py b/validation/tests/test_postfilter_engine.py
index d9e1b14e..6045d751 100644
--- a/validation/tests/test_postfilter_engine.py
+++ b/validation/tests/test_postfilter_engine.py
@@ -11,10 +11,12 @@
 from validation.postfilter.engine import (
     PostFilterResult,
     _is_engine_error_finding,
+    _is_suppressed_by_schema_path,
     _resolve_api_context,
     compute_overall_result,
     run_post_filter,
 )
+from validation.postfilter.metadata_loader import RuleMetadata
 
 
 # ---------------------------------------------------------------------------
@@ -573,3 +575,174 @@ def test_message_override_with_hint(self, tmp_path: Path):
         f = result.findings[0]
         assert f["message"] == "Better description."
         assert f["hint"] == "Fix by doing X."
+
+
+# ---------------------------------------------------------------------------
+# TestSuppressSchemaPaths — per-path suppression (unit + integration)
+# ---------------------------------------------------------------------------
+
+
+def _rule_with_suppress(
+    *paths: str,
+    id: str = "S-313",
+    engine: str = "spectral",
+    engine_rule: str = "owasp:api4:2023-string-restricted",
+) -> RuleMetadata:
+    """Build a minimal RuleMetadata with suppress_schema_paths set."""
+    return RuleMetadata(
+        id=id,
+        name=engine_rule,
+        engine=engine,
+        engine_rule=engine_rule,
+        message_override=None,
+        hint=None,
+        applicability={},
+        conditional_level=None,
+        suppress_schema_paths=tuple(paths),
+    )
+
+
+class TestIsSuppressedBySchemaPath:
+    """Unit tests for the schema-path allowlist matcher."""
+
+    def test_no_allowlist_never_suppresses(self):
+        """A rule without suppress_schema_paths never suppresses anything."""
+        rule = _rule_with_suppress()  # empty allowlist
+        f = _make_finding()
+        f["schema_path"] = "components.schemas.ErrorInfo.properties.code"
+        assert _is_suppressed_by_schema_path(f, rule) is False
+
+    def test_exact_match_suppresses(self):
+        rule = _rule_with_suppress("components.schemas.ErrorInfo.properties.code")
+        f = _make_finding()
+        f["schema_path"] = "components.schemas.ErrorInfo.properties.code"
+        assert _is_suppressed_by_schema_path(f, rule) is True
+
+    def test_prefix_with_dot_boundary_suppresses(self):
+        """Subtree match: an entry for a container path also suppresses its descendants."""
+        rule = _rule_with_suppress("components.schemas.ErrorInfo")
+        f = _make_finding()
+        f["schema_path"] = "components.schemas.ErrorInfo.properties.code"
+        assert _is_suppressed_by_schema_path(f, rule) is True
+
+    def test_lookalike_prefix_does_not_suppress(self):
+        """`ErrorInfo` entry must NOT false-match `ErrorInfoExtended`."""
+        rule = _rule_with_suppress("components.schemas.ErrorInfo")
+        f = _make_finding()
+        f["schema_path"] = "components.schemas.ErrorInfoExtended.properties.code"
+        assert _is_suppressed_by_schema_path(f, rule) is False
+
+    def test_unrelated_path_does_not_suppress(self):
+        rule = _rule_with_suppress("components.schemas.ErrorInfo.properties.code")
+        f = _make_finding()
+        f["schema_path"] = "components.schemas.MySchema.properties.name"
+        assert _is_suppressed_by_schema_path(f, rule) is False
+
+    def test_missing_schema_path_falls_through(self):
+        """Findings without schema_path (e.g. yamllint) can never be suppressed."""
+        rule = _rule_with_suppress("components.schemas.ErrorInfo.properties.code")
+        f = _make_finding()  # no schema_path
+        assert _is_suppressed_by_schema_path(f, rule) is False
+
+    def test_empty_schema_path_falls_through(self):
+        rule = _rule_with_suppress("components.schemas.ErrorInfo.properties.code")
+        f = _make_finding()
+        f["schema_path"] = ""
+        assert _is_suppressed_by_schema_path(f, rule) is False
+
+    def test_first_matching_entry_wins(self):
+        """Multiple entries — only one needs to match."""
+        rule = _rule_with_suppress(
+            "components.schemas.Foo",
+            "components.schemas.ErrorInfo.properties.code",
+            "components.schemas.Bar",
+        )
+        f = _make_finding()
+        f["schema_path"] = "components.schemas.ErrorInfo.properties.code"
+        assert _is_suppressed_by_schema_path(f, rule) is True
+
+
+class TestRunPostFilterSuppression:
+    """Integration: suppressed findings are dropped entirely from the pipeline."""
+
+    def test_suppressed_finding_dropped(self, tmp_path: Path):
+        """A matching schema_path causes the finding to be dropped, not just downgraded."""
+        _write_rules(tmp_path, [{
+            "id": "S-313",
+            "engine": "spectral",
+            "engine_rule": "owasp:api4:2023-string-restricted",
+            "conditional_level": {"default": "hint"},
+            "suppress_schema_paths": [
+                "components.schemas.ErrorInfo.properties.code",
+                "components.schemas.ErrorInfo.properties.message",
+            ],
+        }])
+        ctx = _make_context()
+        finding = _make_finding(
+            engine_rule="owasp:api4:2023-string-restricted",
+            path="code/common/CAMARA_common.yaml",
+            level="hint",
+        )
+        finding["schema_path"] = "components.schemas.ErrorInfo.properties.code"
+        result = run_post_filter([finding], ctx, tmp_path)
+        assert result.findings == []
+        assert result.result == "pass"
+
+    def test_non_suppressed_finding_kept(self, tmp_path: Path):
+        """A non-matching schema_path passes through normally."""
+        _write_rules(tmp_path, [{
+            "id": "S-313",
+            "engine": "spectral",
+            "engine_rule": "owasp:api4:2023-string-restricted",
+            "conditional_level": {"default": "hint"},
+            "suppress_schema_paths": [
+                "components.schemas.ErrorInfo.properties.code",
+            ],
+        }])
+        ctx = _make_context()
+        finding = _make_finding(
+            engine_rule="owasp:api4:2023-string-restricted",
+            path="code/API_definitions/sample-service.yaml",
+            level="hint",
+        )
+        finding["schema_path"] = "components.schemas.MyApiSchema.properties.name"
+        result = run_post_filter([finding], ctx, tmp_path)
+        assert len(result.findings) == 1
+        assert result.findings[0]["rule_id"] == "S-313"
+
+    def test_mixed_findings(self, tmp_path: Path):
+        """A batch of findings — suppressed entries drop, others pass through."""
+        _write_rules(tmp_path, [{
+            "id": "S-313",
+            "engine": "spectral",
+            "engine_rule": "owasp:api4:2023-string-restricted",
+            "conditional_level": {"default": "hint"},
+            "suppress_schema_paths": [
+                "components.schemas.ErrorInfo.properties.code",
+                "components.schemas.ErrorInfo.properties.message",
+                "components.schemas.NetworkAccessIdentifier",
+            ],
+        }])
+        ctx = _make_context()
+        findings = []
+        for schema_path in (
+            "components.schemas.ErrorInfo.properties.code",
+            "components.schemas.ErrorInfo.properties.message",
+            "components.schemas.NetworkAccessIdentifier",
+            "components.schemas.MyApiSchema.properties.name",
+            "components.schemas.MyOtherSchema.properties.id",
+        ):
+            f = _make_finding(
+                engine_rule="owasp:api4:2023-string-restricted",
+                level="hint",
+            )
+            f["schema_path"] = schema_path
+            findings.append(f)
+
+        result = run_post_filter(findings, ctx, tmp_path)
+        # 3 common-library entries suppressed, 2 API-specific kept
+        kept_paths = [f["schema_path"] for f in result.findings]
+        assert kept_paths == [
+            "components.schemas.MyApiSchema.properties.name",
+            "components.schemas.MyOtherSchema.properties.id",
+        ]
diff --git a/validation/tests/test_postfilter_metadata.py b/validation/tests/test_postfilter_metadata.py
index 96a2799d..fa076d19 100644
--- a/validation/tests/test_postfilter_metadata.py
+++ b/validation/tests/test_postfilter_metadata.py
@@ -98,6 +98,47 @@ def test_optional_fields_default_to_none(self):
         rule = parse_rule_metadata(raw)
         assert rule.message_override is None
         assert rule.hint is None
+        assert rule.suppress_schema_paths == ()
+
+    def test_suppress_schema_paths_parsed(self):
+        """suppress_schema_paths is an optional list that becomes a tuple."""
+        raw = _minimal_rule_dict(
+            suppress_schema_paths=[
+                "components.schemas.ErrorInfo.properties.code",
+                "components.schemas.NetworkAccessIdentifier",
+            ]
+        )
+        rule = parse_rule_metadata(raw)
+        assert rule.suppress_schema_paths == (
+            "components.schemas.ErrorInfo.properties.code",
+            "components.schemas.NetworkAccessIdentifier",
+        )
+
+    def test_suppress_schema_paths_empty_list(self):
+        raw = _minimal_rule_dict(suppress_schema_paths=[])
+        rule = parse_rule_metadata(raw)
+        assert rule.suppress_schema_paths == ()
+
+    def test_suppress_schema_paths_invalid_type_raises(self):
+        raw = _minimal_rule_dict(suppress_schema_paths="not-a-list")
+        with pytest.raises(ValueError, match="suppress_schema_paths"):
+            parse_rule_metadata(raw)
+
+    def test_suppress_schema_paths_drops_non_string_entries(self):
+        """Non-string entries are silently dropped (defensive)."""
+        raw = _minimal_rule_dict(
+            suppress_schema_paths=[
+                "components.schemas.Valid",
+                123,  # not a string
+                "",   # empty
+                "components.schemas.AlsoValid",
+            ]
+        )
+        rule = parse_rule_metadata(raw)
+        assert rule.suppress_schema_paths == (
+            "components.schemas.Valid",
+            "components.schemas.AlsoValid",
+        )
 
     def test_explicit_message_override(self):
         raw = _minimal_rule_dict(message_override="Better message.")
diff --git a/validation/tests/test_spectral_adapter.py b/validation/tests/test_spectral_adapter.py
index 81d60bda..78eebb45 100644
--- a/validation/tests/test_spectral_adapter.py
+++ b/validation/tests/test_spectral_adapter.py
@@ -248,6 +248,38 @@ def test_rule_id_and_hint_not_set(self):
         assert "rule_id" not in finding
         assert "hint" not in finding
 
+    def test_schema_path_dot_joined(self):
+        """Spectral's JSONPath array is dot-joined into finding['schema_path']."""
+        finding = normalize_finding(SAMPLE_SPECTRAL_FINDING)
+        # raw path is ["paths", "/qualityOnDemand", "post"]
+        assert finding["schema_path"] == "paths./qualityOnDemand.post"
+
+    def test_schema_path_mixed_string_and_int_segments(self):
+        """JSONPath segments can include array indices — cast to str."""
+        raw = {
+            **SAMPLE_SPECTRAL_FINDING,
+            "path": ["components", "schemas", "Foo", "allOf", 0, "properties", "bar"],
+        }
+        finding = normalize_finding(raw)
+        assert finding["schema_path"] == "components.schemas.Foo.allOf.0.properties.bar"
+
+    def test_schema_path_none_when_empty(self):
+        """An empty JSONPath list yields schema_path=None."""
+        raw = {**SAMPLE_SPECTRAL_FINDING, "path": []}
+        finding = normalize_finding(raw)
+        assert finding["schema_path"] is None
+
+    def test_schema_path_none_when_missing(self):
+        raw = {
+            "code": "some-rule",
+            "message": "msg",
+            "severity": 1,
+            "source": "code/API_definitions/api.yaml",
+            "range": {"start": {"line": 0, "character": 0}},
+        }
+        finding = normalize_finding(raw)
+        assert finding["schema_path"] is None
+
     def test_absolute_path_normalised_with_repo_root(self):
         raw = {
             **SAMPLE_SPECTRAL_FINDING,

From 111363a1398a927001e5e245af9728985e7b3e47 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Thu, 16 Apr 2026 10:09:06 +0200
Subject: [PATCH 084/157] Pin 15 rules via
 regression/r4.1-broken-spec-yaml-fundamentals

Add Y-001..Y-013, S-005, S-016 to tested_rules (total_tested 14 -> 29).
Branch 2 of the 7-branch broken-spec roadmap (RM#483).
---
 validation/rules/rule-inventory.yaml | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index a7d58c72..cb561f84 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -18,7 +18,7 @@ summary:
   total_gap: 0
   total_manual: 25
   total_pending: 0
-  total_tested: 14
+  total_tested: 29
   by_engine:
     spectral: 84
     gherkin: 25
@@ -294,6 +294,8 @@ pending_rules:
 
 tested_rules:
   P-006: [regression/r4.1-main-baseline]
+  S-005: [regression/r4.1-broken-spec-yaml-fundamentals]
+  S-016: [regression/r4.1-broken-spec-yaml-fundamentals]
   S-018: [regression/r4.1-broken-spec-api-metadata]
   S-019: [regression/r4.1-broken-spec-api-metadata]
   S-020: [regression/r4.1-broken-spec-api-metadata]
@@ -307,6 +309,19 @@ tested_rules:
   S-313: [regression/r4.1-main-baseline]
   S-314: [regression/r4.1-main-baseline]
   S-316: [regression/r4.1-main-baseline]
+  Y-001: [regression/r4.1-broken-spec-yaml-fundamentals]
+  Y-002: [regression/r4.1-broken-spec-yaml-fundamentals]
+  Y-003: [regression/r4.1-broken-spec-yaml-fundamentals]
+  Y-004: [regression/r4.1-broken-spec-yaml-fundamentals]
+  Y-005: [regression/r4.1-broken-spec-yaml-fundamentals]
+  Y-006: [regression/r4.1-broken-spec-yaml-fundamentals]
+  Y-007: [regression/r4.1-broken-spec-yaml-fundamentals]
+  Y-008: [regression/r4.1-broken-spec-yaml-fundamentals]
+  Y-009: [regression/r4.1-broken-spec-yaml-fundamentals]
+  Y-010: [regression/r4.1-broken-spec-yaml-fundamentals]
+  Y-011: [regression/r4.1-broken-spec-yaml-fundamentals]
+  Y-012: [regression/r4.1-broken-spec-yaml-fundamentals]
+  Y-013: [regression/r4.1-broken-spec-yaml-fundamentals]
 
 # ---------------------------------------------------------------------------
 # Manual rules — require human judgment

From 3e8cb07ddfb7070586ae0d98e1483f735d3cfa66 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Thu, 16 Apr 2026 13:15:53 +0200
Subject: [PATCH 085/157] Pin 6 rules via
 regression/r4.1-broken-spec-error-handling

Add S-025, S-026, S-027, S-221, S-307, S-318 to tested_rules.
Update total_tested from 29 to 35.
---
 validation/rules/rule-inventory.yaml | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index cb561f84..6d97f889 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -18,7 +18,7 @@ summary:
   total_gap: 0
   total_manual: 25
   total_pending: 0
-  total_tested: 29
+  total_tested: 35
   by_engine:
     spectral: 84
     gherkin: 25
@@ -303,12 +303,18 @@ tested_rules:
   S-022: [regression/r4.1-broken-spec-api-metadata]
   S-023: [regression/r4.1-broken-spec-api-metadata]
   S-024: [regression/r4.1-broken-spec-api-metadata]
+  S-025: [regression/r4.1-broken-spec-error-handling]
+  S-026: [regression/r4.1-broken-spec-error-handling]
+  S-027: [regression/r4.1-broken-spec-error-handling]
   S-201: [regression/r4.1-broken-spec-api-metadata]
   S-210: [regression/r4.1-broken-spec-api-metadata]
   S-211: [regression/r4.1-main-baseline]
+  S-221: [regression/r4.1-broken-spec-error-handling]
+  S-307: [regression/r4.1-broken-spec-error-handling]
   S-313: [regression/r4.1-main-baseline]
   S-314: [regression/r4.1-main-baseline]
   S-316: [regression/r4.1-main-baseline]
+  S-318: [regression/r4.1-broken-spec-error-handling]
   Y-001: [regression/r4.1-broken-spec-yaml-fundamentals]
   Y-002: [regression/r4.1-broken-spec-yaml-fundamentals]
   Y-003: [regression/r4.1-broken-spec-yaml-fundamentals]

From df74df5e4b80384371e021363b4688174f9cc59b Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Thu, 16 Apr 2026 13:25:03 +0200
Subject: [PATCH 086/157] =?UTF-8?q?Fix=20stale=20docstrings:=20per-API=20?=
 =?UTF-8?q?=E2=86=92=20engine=20summary=20table?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

workflow_summary.py and formatting.py docstrings still referenced
"per-API summary table" but the implementation uses per-engine tables.
---
 validation/output/formatting.py       | 2 +-
 validation/output/workflow_summary.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/validation/output/formatting.py b/validation/output/formatting.py
index 36de87d5..2e5b7eaa 100644
--- a/validation/output/formatting.py
+++ b/validation/output/formatting.py
@@ -6,7 +6,7 @@
 
 Design doc references:
   - Section 9.2: finding grouping and priority ordering
-  - Section 9.3: per-API summary table
+  - Section 9.3: engine summary table
 """
 
 from __future__ import annotations
diff --git a/validation/output/workflow_summary.py b/validation/output/workflow_summary.py
index 29d11a84..ebd82661 100644
--- a/validation/output/workflow_summary.py
+++ b/validation/output/workflow_summary.py
@@ -1,7 +1,7 @@
 """Workflow summary generation for ``$GITHUB_STEP_SUMMARY``.
 
-Produces a Markdown string with header, per-API summary table, findings
-tables grouped by severity level, engine status table, and footer.
+Produces a Markdown string with header, engine summary table, findings
+tables grouped by severity level, and footer.
 Implements 900 KB truncation with priority ordering (errors are never
 truncated).
 

From a7e2bdf75be81e50a2419d8c588bb9caf857fc6f Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 17 Apr 2026 07:10:35 +0200
Subject: [PATCH 087/157] fix: replace Scope Issue stub with optional scope
 placeholder in Release Issue template

Replace the prescriptive "Scope Issue" bold line with a softer optional
placeholder that suggests linking a scope issue, milestone, or listing
issues/PRs. Follows consensus from RM#409 discussion.

Fixes camaraproject/ReleaseManagement#409
---
 .../templates/issue_bodies/release_issue.mustache             | 2 +-
 release_automation/tests/test_issue_manager.py                | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/release_automation/templates/issue_bodies/release_issue.mustache b/release_automation/templates/issue_bodies/release_issue.mustache
index 2e992665..9a0fcf3b 100644
--- a/release_automation/templates/issue_bodies/release_issue.mustache
+++ b/release_automation/templates/issue_bodies/release_issue.mustache
@@ -1,7 +1,7 @@
 
 
 
-**Scope Issue:** _Link to the Scope Issue tracking your target release — fill in after issue creation._
+_Optional: use this space to describe the (planned) content scope of this release — for example by linking a scope issue or milestone, or listing the issues/PRs to be resolved before the release._
 
 ### Preparing the release content
 
diff --git a/release_automation/tests/test_issue_manager.py b/release_automation/tests/test_issue_manager.py
index f869b01f..6181784a 100644
--- a/release_automation/tests/test_issue_manager.py
+++ b/release_automation/tests/test_issue_manager.py
@@ -357,8 +357,8 @@ def test_generate_template_without_meta_release(self):
 
         # No redundant heading
         assert "## Release:" not in body
-        # Scope Issue stub sits above the automation-managed markers
-        assert "**Scope Issue:**" in body
+        # Optional scope placeholder sits above the automation-managed markers
+        assert "_Optional: use this space to describe" in body
         # Remaining section headings should stay at ###
         assert "### Preparing the release content" in body
         assert "### Release Status" in body

From a7491ddaa5a0f99d8a4c6833a6afdd52908fff5b Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 17 Apr 2026 07:27:12 +0200
Subject: [PATCH 088/157] Add P-021 check-common-cache-sync + tooling_lib
 shared package
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

P-021 (DEC-027/030) validates that code/common/ files match the sync
manifest written by the RA sync-common handler. The check warns on
missing directory, missing manifest, tag mismatch, and file integrity
issues. Post-filter escalates to error for release-automation triggers.

Core sync logic lives in tooling_lib/cache_sync.py (DEC-031) — shared
between VF (P-021 wrapper) and RA (derive-state, Session B). The
tooling_lib/ package is the first shared Python library between VF and
RA, setting a pattern for future shared utilities.

New files:
- tooling_lib/ — shared package with cache_sync.py (SyncStatus,
  SourceStatus, check_sync_status, git_blob_sha) + 28 unit tests
- validation/schemas/sync-manifest-schema.yaml — JSON Schema for
  code/common/.sync-manifest.yaml (multi-source format)
- validation/engines/python_checks/common_cache_checks.py — P-021 VF
  wrapper converting SyncStatus to findings + 10 unit tests
- Rule metadata in python-rules.yaml with applicability
  (release_plan_changed: false, commonalities_release: >=r4.2) and
  conditional severity (warn default, error on release-automation)

870/870 tests pass.
---
 tooling_lib/__init__.py                       |   9 +
 tooling_lib/cache_sync.py                     | 204 ++++++++++
 tooling_lib/tests/__init__.py                 |   0
 tooling_lib/tests/test_cache_sync.py          | 359 ++++++++++++++++++
 validation/engines/python_checks/__init__.py  |   2 +
 .../python_checks/common_cache_checks.py      | 130 +++++++
 validation/rules/python-rules.yaml            |  21 +
 validation/rules/rule-inventory.yaml          |  10 +-
 validation/schemas/sync-manifest-schema.yaml  |  42 ++
 .../tests/test_python_checks_common_cache.py  | 199 ++++++++++
 .../tests/test_rule_metadata_integrity.py     |   6 +-
 11 files changed, 977 insertions(+), 5 deletions(-)
 create mode 100644 tooling_lib/__init__.py
 create mode 100644 tooling_lib/cache_sync.py
 create mode 100644 tooling_lib/tests/__init__.py
 create mode 100644 tooling_lib/tests/test_cache_sync.py
 create mode 100644 validation/engines/python_checks/common_cache_checks.py
 create mode 100644 validation/schemas/sync-manifest-schema.yaml
 create mode 100644 validation/tests/test_python_checks_common_cache.py

diff --git a/tooling_lib/__init__.py b/tooling_lib/__init__.py
new file mode 100644
index 00000000..c09f8bb0
--- /dev/null
+++ b/tooling_lib/__init__.py
@@ -0,0 +1,9 @@
+"""Shared Python library for camaraproject/tooling.
+
+Hosts code reused across ``validation/`` (VF) and
+``release_automation/`` (RA).  Both packages import from here via
+``from tooling_lib. import ...``.
+
+Modules:
+    cache_sync  -- Common-file cache sync status checking (DEC-030/031).
+"""
diff --git a/tooling_lib/cache_sync.py b/tooling_lib/cache_sync.py
new file mode 100644
index 00000000..5fcec288
--- /dev/null
+++ b/tooling_lib/cache_sync.py
@@ -0,0 +1,204 @@
+"""Common-file cache sync status checking.
+
+Verifies that ``code/common/`` files match the expected content declared
+in ``.sync-manifest.yaml``.  The manifest is written by the
+camara-release-automation sync-common handler and records the source
+repository, release tag, and git blob SHA-1 for each synced file.
+
+This module is intentionally VF- and RA-independent: it uses only the
+Python standard library plus ``pyyaml``.  Both the validation framework
+(P-021 check) and release automation (derive-state ``out_of_sync``
+signal) import from here.
+
+See DEC-030 (manifest-based validation) and DEC-031 (tooling_lib).
+"""
+
+from __future__ import annotations
+
+import hashlib
+import logging
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple
+
+import yaml
+
+__all__ = [
+    "SyncStatus",
+    "SourceStatus",
+    "check_sync_status",
+    "git_blob_sha",
+    "MANIFEST_FILENAME",
+]
+
+logger = logging.getLogger(__name__)
+
+MANIFEST_FILENAME = ".sync-manifest.yaml"
+"""Name of the sync manifest inside ``code/common/``."""
+
+COMMON_DIR = "code/common"
+"""Repo-relative path to the common-file cache directory."""
+
+
+# ---------------------------------------------------------------------------
+# Git blob SHA-1
+# ---------------------------------------------------------------------------
+
+
+def git_blob_sha(content: bytes) -> str:
+    """Compute the git blob SHA-1 for *content*.
+
+    Produces the same 40-character hex digest as ``git hash-object`` and
+    the GitHub Contents API ``.sha`` field::
+
+        sha1("blob {length}\\0" + content)
+    """
+    header = f"blob {len(content)}\0".encode("ascii")
+    return hashlib.sha1(header + content).hexdigest()
+
+
+# ---------------------------------------------------------------------------
+# Data model
+# ---------------------------------------------------------------------------
+
+
+@dataclass
+class SourceStatus:
+    """Sync status for a single source repository entry in the manifest."""
+
+    repository: str
+    tag_mismatch: Optional[Tuple[str, str]] = None  # (expected, actual)
+    missing_files: List[str] = field(default_factory=list)
+    modified_files: List[str] = field(default_factory=list)
+
+    @property
+    def in_sync(self) -> bool:
+        return (
+            self.tag_mismatch is None
+            and not self.missing_files
+            and not self.modified_files
+        )
+
+
+@dataclass
+class SyncStatus:
+    """Overall sync status for ``code/common/``."""
+
+    no_common_dir: bool = False
+    no_manifest: bool = False
+    sources: List[SourceStatus] = field(default_factory=list)
+
+    @property
+    def in_sync(self) -> bool:
+        if self.no_common_dir or self.no_manifest:
+            return False
+        return all(s.in_sync for s in self.sources)
+
+
+# ---------------------------------------------------------------------------
+# Manifest loading
+# ---------------------------------------------------------------------------
+
+
+def _load_manifest(manifest_path: Path) -> Optional[dict]:
+    """Load and basic-validate the sync manifest.
+
+    Returns ``None`` if the file does not exist, is not valid YAML, or
+    does not contain the expected top-level structure.
+    """
+    if not manifest_path.is_file():
+        return None
+    try:
+        data = yaml.safe_load(manifest_path.read_text(encoding="utf-8"))
+    except Exception:
+        logger.warning("Failed to parse %s", manifest_path)
+        return None
+    if not isinstance(data, dict) or "sources" not in data:
+        logger.warning("Manifest %s missing 'sources' key", manifest_path)
+        return None
+    if not isinstance(data["sources"], list):
+        logger.warning("Manifest %s 'sources' is not a list", manifest_path)
+        return None
+    return data
+
+
+# ---------------------------------------------------------------------------
+# Core check
+# ---------------------------------------------------------------------------
+
+
+def check_sync_status(
+    repo_path: Path,
+    expected_releases: Dict[str, str],
+) -> SyncStatus:
+    """Check whether ``code/common/`` files match the sync manifest.
+
+    Parameters
+    ----------
+    repo_path:
+        Root of the API repository checkout.
+    expected_releases:
+        Map of source repository name to expected release tag, e.g.
+        ``{"Commonalities": "r4.2"}``.  Built by the caller from
+        ``release-plan.yaml`` dependencies.
+
+    Returns
+    -------
+    SyncStatus
+        Structured result.  Callers convert this to their own output
+        format (VF findings or RA error strings).
+    """
+    common_dir = repo_path / COMMON_DIR
+
+    if not common_dir.is_dir():
+        return SyncStatus(no_common_dir=True)
+
+    manifest_path = common_dir / MANIFEST_FILENAME
+    manifest = _load_manifest(manifest_path)
+    if manifest is None:
+        return SyncStatus(no_manifest=True)
+
+    # Index manifest sources by repository name for O(1) lookup.
+    manifest_sources: Dict[str, dict] = {}
+    for entry in manifest["sources"]:
+        if isinstance(entry, dict) and "repository" in entry:
+            manifest_sources[entry["repository"]] = entry
+
+    # Check each expected dependency against the manifest.
+    source_statuses: List[SourceStatus] = []
+    for repo_name, expected_tag in sorted(expected_releases.items()):
+        entry = manifest_sources.get(repo_name)
+        if entry is None:
+            # Source expected but not in manifest — treat as missing manifest
+            # for this specific source.  Produces a tag_mismatch with
+            # actual=None signalling "not present".
+            source_statuses.append(
+                SourceStatus(
+                    repository=repo_name,
+                    tag_mismatch=(expected_tag, ""),
+                )
+            )
+            continue
+
+        status = SourceStatus(repository=repo_name)
+
+        # --- Tag match ---
+        actual_tag = entry.get("release", "")
+        if actual_tag != expected_tag:
+            status.tag_mismatch = (expected_tag, actual_tag)
+
+        # --- File integrity ---
+        files = entry.get("files", {})
+        if isinstance(files, dict):
+            for filename, expected_sha in sorted(files.items()):
+                file_path = common_dir / filename
+                if not file_path.is_file():
+                    status.missing_files.append(filename)
+                    continue
+                actual_sha = git_blob_sha(file_path.read_bytes())
+                if actual_sha != expected_sha:
+                    status.modified_files.append(filename)
+
+        source_statuses.append(status)
+
+    return SyncStatus(sources=source_statuses)
diff --git a/tooling_lib/tests/__init__.py b/tooling_lib/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tooling_lib/tests/test_cache_sync.py b/tooling_lib/tests/test_cache_sync.py
new file mode 100644
index 00000000..a210a68d
--- /dev/null
+++ b/tooling_lib/tests/test_cache_sync.py
@@ -0,0 +1,359 @@
+"""Unit tests for tooling_lib.cache_sync."""
+
+from __future__ import annotations
+
+import hashlib
+from pathlib import Path
+from typing import Dict
+
+import yaml
+
+from tooling_lib.cache_sync import (
+    COMMON_DIR,
+    MANIFEST_FILENAME,
+    SourceStatus,
+    SyncStatus,
+    check_sync_status,
+    git_blob_sha,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _blob_sha(content: bytes) -> str:
+    """Reference implementation for expected SHA values in tests."""
+    header = f"blob {len(content)}\0".encode("ascii")
+    return hashlib.sha1(header + content).hexdigest()
+
+
+def _write_manifest(
+    tmp_path: Path,
+    sources: list,
+) -> None:
+    """Write a .sync-manifest.yaml into code/common/."""
+    common_dir = tmp_path / COMMON_DIR
+    common_dir.mkdir(parents=True, exist_ok=True)
+    manifest = {"sources": sources}
+    (common_dir / MANIFEST_FILENAME).write_text(
+        yaml.dump(manifest, default_flow_style=False), encoding="utf-8"
+    )
+
+
+def _write_common_file(tmp_path: Path, filename: str, content: str) -> str:
+    """Write a file into code/common/ and return its git blob SHA."""
+    common_dir = tmp_path / COMMON_DIR
+    common_dir.mkdir(parents=True, exist_ok=True)
+    data = content.encode("utf-8")
+    (common_dir / filename).write_text(content, encoding="utf-8")
+    return _blob_sha(data)
+
+
+def _make_source(
+    repo: str = "Commonalities",
+    release: str = "r4.2",
+    files: Dict[str, str] | None = None,
+) -> dict:
+    """Build a manifest source entry."""
+    return {
+        "repository": repo,
+        "release": release,
+        "files": files or {},
+    }
+
+
+# ---------------------------------------------------------------------------
+# git_blob_sha
+# ---------------------------------------------------------------------------
+
+
+class TestGitBlobSha:
+
+    def test_known_content(self):
+        content = b"hello world"
+        expected = hashlib.sha1(b"blob 11\0hello world").hexdigest()
+        assert git_blob_sha(content) == expected
+
+    def test_empty_content(self):
+        expected = hashlib.sha1(b"blob 0\0").hexdigest()
+        assert git_blob_sha(b"") == expected
+
+    def test_binary_content(self):
+        content = bytes(range(256))
+        header = f"blob {len(content)}\0".encode("ascii")
+        expected = hashlib.sha1(header + content).hexdigest()
+        assert git_blob_sha(content) == expected
+
+
+# ---------------------------------------------------------------------------
+# check_sync_status — structural checks
+# ---------------------------------------------------------------------------
+
+
+class TestSyncStatusStructural:
+
+    def test_no_common_dir(self, tmp_path: Path):
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        assert status.no_common_dir is True
+        assert status.in_sync is False
+
+    def test_no_manifest(self, tmp_path: Path):
+        (tmp_path / COMMON_DIR).mkdir(parents=True)
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        assert status.no_manifest is True
+        assert status.in_sync is False
+
+    def test_invalid_yaml_manifest(self, tmp_path: Path):
+        common_dir = tmp_path / COMMON_DIR
+        common_dir.mkdir(parents=True)
+        (common_dir / MANIFEST_FILENAME).write_text(
+            ": invalid: yaml: [", encoding="utf-8"
+        )
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        assert status.no_manifest is True
+
+    def test_manifest_missing_sources_key(self, tmp_path: Path):
+        common_dir = tmp_path / COMMON_DIR
+        common_dir.mkdir(parents=True)
+        (common_dir / MANIFEST_FILENAME).write_text(
+            yaml.dump({"version": 1}), encoding="utf-8"
+        )
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        assert status.no_manifest is True
+
+    def test_manifest_sources_not_a_list(self, tmp_path: Path):
+        common_dir = tmp_path / COMMON_DIR
+        common_dir.mkdir(parents=True)
+        (common_dir / MANIFEST_FILENAME).write_text(
+            yaml.dump({"sources": "not-a-list"}), encoding="utf-8"
+        )
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        assert status.no_manifest is True
+
+
+# ---------------------------------------------------------------------------
+# check_sync_status — tag matching
+# ---------------------------------------------------------------------------
+
+
+class TestSyncStatusTagMatch:
+
+    def test_tag_matches(self, tmp_path: Path):
+        sha = _write_common_file(tmp_path, "CAMARA_common.yaml", "content")
+        _write_manifest(
+            tmp_path,
+            [_make_source(files={"CAMARA_common.yaml": sha})],
+        )
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        assert status.in_sync is True
+        assert len(status.sources) == 1
+        assert status.sources[0].tag_mismatch is None
+
+    def test_tag_mismatch(self, tmp_path: Path):
+        sha = _write_common_file(tmp_path, "CAMARA_common.yaml", "content")
+        _write_manifest(
+            tmp_path,
+            [_make_source(release="r4.1", files={"CAMARA_common.yaml": sha})],
+        )
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        assert status.in_sync is False
+        assert status.sources[0].tag_mismatch == ("r4.2", "r4.1")
+
+    def test_source_not_in_manifest(self, tmp_path: Path):
+        _write_manifest(tmp_path, [_make_source(repo="OtherRepo")])
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        assert status.in_sync is False
+        assert len(status.sources) == 1
+        assert status.sources[0].tag_mismatch == ("r4.2", "")
+
+
+# ---------------------------------------------------------------------------
+# check_sync_status — file integrity
+# ---------------------------------------------------------------------------
+
+
+class TestSyncStatusFileIntegrity:
+
+    def test_file_matches(self, tmp_path: Path):
+        sha = _write_common_file(tmp_path, "CAMARA_common.yaml", "data")
+        _write_manifest(
+            tmp_path,
+            [_make_source(files={"CAMARA_common.yaml": sha})],
+        )
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        assert status.in_sync is True
+        assert status.sources[0].missing_files == []
+        assert status.sources[0].modified_files == []
+
+    def test_file_missing(self, tmp_path: Path):
+        (tmp_path / COMMON_DIR).mkdir(parents=True, exist_ok=True)
+        _write_manifest(
+            tmp_path,
+            [_make_source(files={"CAMARA_common.yaml": "a" * 40})],
+        )
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        assert status.in_sync is False
+        assert status.sources[0].missing_files == ["CAMARA_common.yaml"]
+
+    def test_file_modified(self, tmp_path: Path):
+        original_sha = _blob_sha(b"original content")
+        _write_common_file(tmp_path, "CAMARA_common.yaml", "modified content")
+        _write_manifest(
+            tmp_path,
+            [_make_source(files={"CAMARA_common.yaml": original_sha})],
+        )
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        assert status.in_sync is False
+        assert status.sources[0].modified_files == ["CAMARA_common.yaml"]
+
+    def test_multiple_files_mixed(self, tmp_path: Path):
+        sha_ok = _write_common_file(tmp_path, "CAMARA_common.yaml", "ok")
+        sha_wrong = _blob_sha(b"expected")
+        _write_common_file(tmp_path, "CAMARA_event_common.yaml", "actual")
+        _write_manifest(
+            tmp_path,
+            [
+                _make_source(
+                    files={
+                        "CAMARA_common.yaml": sha_ok,
+                        "CAMARA_event_common.yaml": sha_wrong,
+                        "CAMARA_missing.yaml": "b" * 40,
+                    }
+                )
+            ],
+        )
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        assert status.in_sync is False
+        src = status.sources[0]
+        assert src.missing_files == ["CAMARA_missing.yaml"]
+        assert src.modified_files == ["CAMARA_event_common.yaml"]
+
+    def test_empty_files_dict(self, tmp_path: Path):
+        _write_manifest(tmp_path, [_make_source(files={})])
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        # Tag matches, no files to check → in sync.
+        assert status.in_sync is True
+
+
+# ---------------------------------------------------------------------------
+# check_sync_status — multi-source / extra files
+# ---------------------------------------------------------------------------
+
+
+class TestSyncStatusMultiSource:
+
+    def test_extra_local_files_ignored(self, tmp_path: Path):
+        sha = _write_common_file(tmp_path, "CAMARA_common.yaml", "content")
+        _write_common_file(tmp_path, "local_extra.yaml", "extra stuff")
+        _write_manifest(
+            tmp_path,
+            [_make_source(files={"CAMARA_common.yaml": sha})],
+        )
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        assert status.in_sync is True
+
+    def test_multiple_sources(self, tmp_path: Path):
+        sha1 = _write_common_file(tmp_path, "CAMARA_common.yaml", "c1")
+        sha2 = _write_common_file(tmp_path, "QoS_common.yaml", "q1")
+        _write_manifest(
+            tmp_path,
+            [
+                _make_source(
+                    repo="Commonalities",
+                    release="r4.2",
+                    files={"CAMARA_common.yaml": sha1},
+                ),
+                _make_source(
+                    repo="QoSProfiles",
+                    release="r1.1",
+                    files={"QoS_common.yaml": sha2},
+                ),
+            ],
+        )
+        status = check_sync_status(
+            tmp_path,
+            {"Commonalities": "r4.2", "QoSProfiles": "r1.1"},
+        )
+        assert status.in_sync is True
+        assert len(status.sources) == 2
+
+    def test_manifest_source_not_in_expected_skipped(self, tmp_path: Path):
+        sha = _write_common_file(tmp_path, "CAMARA_common.yaml", "c1")
+        _write_manifest(
+            tmp_path,
+            [
+                _make_source(
+                    repo="Commonalities",
+                    release="r4.2",
+                    files={"CAMARA_common.yaml": sha},
+                ),
+                _make_source(
+                    repo="UnexpectedRepo",
+                    release="r1.1",
+                    files={"other.yaml": "c" * 40},
+                ),
+            ],
+        )
+        # Only checking Commonalities — UnexpectedRepo is skipped.
+        status = check_sync_status(tmp_path, {"Commonalities": "r4.2"})
+        assert status.in_sync is True
+        assert len(status.sources) == 1
+        assert status.sources[0].repository == "Commonalities"
+
+
+# ---------------------------------------------------------------------------
+# SourceStatus.in_sync property
+# ---------------------------------------------------------------------------
+
+
+class TestSourceStatusProperty:
+
+    def test_in_sync_when_clean(self):
+        s = SourceStatus(repository="X")
+        assert s.in_sync is True
+
+    def test_not_in_sync_tag_mismatch(self):
+        s = SourceStatus(repository="X", tag_mismatch=("r4.2", "r4.1"))
+        assert s.in_sync is False
+
+    def test_not_in_sync_missing_files(self):
+        s = SourceStatus(repository="X", missing_files=["a.yaml"])
+        assert s.in_sync is False
+
+    def test_not_in_sync_modified_files(self):
+        s = SourceStatus(repository="X", modified_files=["a.yaml"])
+        assert s.in_sync is False
+
+
+# ---------------------------------------------------------------------------
+# SyncStatus.in_sync property
+# ---------------------------------------------------------------------------
+
+
+class TestSyncStatusProperty:
+
+    def test_in_sync_all_sources_ok(self):
+        s = SyncStatus(sources=[SourceStatus(repository="X")])
+        assert s.in_sync is True
+
+    def test_not_in_sync_no_common_dir(self):
+        s = SyncStatus(no_common_dir=True)
+        assert s.in_sync is False
+
+    def test_not_in_sync_no_manifest(self):
+        s = SyncStatus(no_manifest=True)
+        assert s.in_sync is False
+
+    def test_not_in_sync_source_problem(self):
+        s = SyncStatus(
+            sources=[
+                SourceStatus(repository="X", tag_mismatch=("a", "b")),
+            ]
+        )
+        assert s.in_sync is False
+
+    def test_empty_sources_is_in_sync(self):
+        s = SyncStatus(sources=[])
+        assert s.in_sync is True
diff --git a/validation/engines/python_checks/__init__.py b/validation/engines/python_checks/__init__.py
index dc4344eb..b4b5ad7f 100644
--- a/validation/engines/python_checks/__init__.py
+++ b/validation/engines/python_checks/__init__.py
@@ -11,6 +11,7 @@
 from .filename_checks import check_filename_kebab_case, check_filename_matches_api_name
 from .metadata_checks import check_commonalities_version
 from .readme_checks import check_readme_placeholder_removal
+from .common_cache_checks import check_common_cache_sync
 from .release_plan_checks import check_orphan_api_definitions, check_release_plan_semantics
 from .release_review_checks import check_release_review_file_restriction
 from .subscription_checks import (
@@ -55,6 +56,7 @@
     CheckDescriptor("check-readme-placeholder-removal", CheckScope.REPO, check_readme_placeholder_removal),
     CheckDescriptor("check-release-review-file-restriction", CheckScope.REPO, check_release_review_file_restriction),
     CheckDescriptor("check-orphan-api-definitions", CheckScope.REPO, check_orphan_api_definitions),
+    CheckDescriptor("check-common-cache-sync", CheckScope.REPO, check_common_cache_sync),
 ]
 
 __all__ = ["CHECKS", "CheckDescriptor", "CheckScope"]
diff --git a/validation/engines/python_checks/common_cache_checks.py b/validation/engines/python_checks/common_cache_checks.py
new file mode 100644
index 00000000..6fb11937
--- /dev/null
+++ b/validation/engines/python_checks/common_cache_checks.py
@@ -0,0 +1,130 @@
+"""Common-file cache sync check (P-021).
+
+Wrapper around :func:`tooling_lib.cache_sync.check_sync_status` that
+converts the structured :class:`~tooling_lib.cache_sync.SyncStatus`
+into VF findings.
+
+DEC-027 (RA-integrated sync), DEC-030 (manifest-based validation).
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import List
+
+from tooling_lib.cache_sync import COMMON_DIR, SyncStatus, check_sync_status
+from validation.context import ValidationContext
+
+from ._types import make_finding
+
+_ENGINE_RULE = "check-common-cache-sync"
+
+
+def check_common_cache_sync(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Verify ``code/common/`` files match the sync manifest.
+
+    Repo-level check — runs once, not per-API.
+
+    Builds the expected-releases dict from *context* and delegates to
+    :func:`~tooling_lib.cache_sync.check_sync_status`.  Returns an
+    empty list when no expected releases can be determined (e.g. no
+    ``release-plan.yaml``).
+    """
+    expected = _build_expected_releases(context)
+    if not expected:
+        return []
+
+    status = check_sync_status(repo_path, expected)
+    return _status_to_findings(status)
+
+
+# ------------------------------------------------------------------
+# Internals
+# ------------------------------------------------------------------
+
+
+def _build_expected_releases(context: ValidationContext) -> dict:
+    """Derive expected source-repo releases from the validation context."""
+    expected: dict = {}
+    if context.commonalities_release:
+        expected["Commonalities"] = context.commonalities_release
+    return expected
+
+
+def _status_to_findings(status: SyncStatus) -> List[dict]:
+    """Convert a *SyncStatus* into a list of VF findings."""
+    findings: List[dict] = []
+
+    if status.no_common_dir:
+        findings.append(
+            make_finding(
+                engine_rule=_ENGINE_RULE,
+                level="warn",
+                message=(
+                    f"{COMMON_DIR}/ directory is missing — required for "
+                    f"repos declaring a commonalities_release dependency"
+                ),
+                path=COMMON_DIR,
+            )
+        )
+        return findings
+
+    if status.no_manifest:
+        findings.append(
+            make_finding(
+                engine_rule=_ENGINE_RULE,
+                level="warn",
+                message=(
+                    f"Sync manifest ({COMMON_DIR}/.sync-manifest.yaml) is "
+                    f"missing — common files must be managed by the sync "
+                    f"mechanism"
+                ),
+                path=f"{COMMON_DIR}/.sync-manifest.yaml",
+            )
+        )
+        return findings
+
+    for src in status.sources:
+        if src.tag_mismatch:
+            expected, actual = src.tag_mismatch
+            findings.append(
+                make_finding(
+                    engine_rule=_ENGINE_RULE,
+                    level="warn",
+                    message=(
+                        f"{src.repository}: dependency declares {expected} "
+                        f"but common files synced from {actual}"
+                    ),
+                    path=f"{COMMON_DIR}/.sync-manifest.yaml",
+                )
+            )
+
+        for filename in src.missing_files:
+            findings.append(
+                make_finding(
+                    engine_rule=_ENGINE_RULE,
+                    level="warn",
+                    message=(
+                        f"{src.repository}: expected file '{filename}' is "
+                        f"missing from {COMMON_DIR}/"
+                    ),
+                    path=f"{COMMON_DIR}/{filename}",
+                )
+            )
+
+        for filename in src.modified_files:
+            findings.append(
+                make_finding(
+                    engine_rule=_ENGINE_RULE,
+                    level="warn",
+                    message=(
+                        f"{src.repository}: '{filename}' has been modified "
+                        f"since last sync"
+                    ),
+                    path=f"{COMMON_DIR}/{filename}",
+                )
+            )
+
+    return findings
diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
index cba25cf3..a7c53ea0 100644
--- a/validation/rules/python-rules.yaml
+++ b/validation/rules/python-rules.yaml
@@ -229,3 +229,24 @@
     or an allOf combining the $ref with an API-specific ApiEventType
     schema. See implicit-events API template in Commonalities
     artifacts/api-templates/.
+
+# P-021: check-common-cache-sync (DEC-027/030)
+# Safety check: code/common/ files must match the sync manifest written
+# by the RA sync-common handler.  Suppressed on bump PRs via the
+# three-step consistency model (DEC-029).  Version-gated to repos
+# adopting $ref consumption (>=r4.2).
+- id: P-021
+  engine: python
+  engine_rule: check-common-cache-sync
+  applicability:
+    release_plan_changed: false
+    commonalities_release: ">=r4.2"
+  conditional_level:
+    default: warn
+    overrides:
+      - condition:
+          trigger_types: [release-automation]
+        level: error
+  hint: >-
+    Merge the auto-created sync PR or trigger the release automation
+    workflow manually (workflow_dispatch) to update code/common/ files.
diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index 6d97f889..0b99c9d3 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -14,7 +14,7 @@ version: 1
 generated: 2026-04-07
 
 summary:
-  total_implemented: 142
+  total_implemented: 143
   total_gap: 0
   total_manual: 25
   total_pending: 0
@@ -22,7 +22,7 @@ summary:
   by_engine:
     spectral: 84
     gherkin: 25
-    python: 20
+    python: 21
     yamllint: 13
 
 # ---------------------------------------------------------------------------
@@ -205,6 +205,12 @@ gap_rules:
     status: implemented
     rule_id: P-020
 
+  - audit_id: DEC-027
+    description: "Common file cache sync safety check — code/common/ must match sync manifest from Commonalities artifacts"
+    target_engine: python
+    status: implemented
+    rule_id: P-021
+
 # ---------------------------------------------------------------------------
 # Fixes needed — implemented rules with incorrect behavior
 # ---------------------------------------------------------------------------
diff --git a/validation/schemas/sync-manifest-schema.yaml b/validation/schemas/sync-manifest-schema.yaml
new file mode 100644
index 00000000..316a78f7
--- /dev/null
+++ b/validation/schemas/sync-manifest-schema.yaml
@@ -0,0 +1,42 @@
+$schema: "https://json-schema.org/draft/2020-12/schema"
+title: Common File Sync Manifest
+description: >-
+  Written by the camara-release-automation sync-common handler into
+  code/common/.sync-manifest.yaml.  Records the source repository,
+  release tag, and git blob SHA-1 hashes for each synced file.
+  Read by VF P-021 (check-common-cache-sync) and RA derive-state
+  (out_of_sync signal).  See DEC-030.
+type: object
+required:
+  - sources
+additionalProperties: false
+properties:
+  sources:
+    type: array
+    minItems: 1
+    items:
+      type: object
+      required:
+        - repository
+        - release
+        - files
+      additionalProperties: false
+      properties:
+        repository:
+          type: string
+          description: >-
+            Source repository name (e.g. "Commonalities").
+          minLength: 1
+        release:
+          type: string
+          description: >-
+            Release tag the files were synced from (e.g. "r4.2").
+          pattern: "^r[1-9]\\d*\\.[1-9]\\d*$"
+        files:
+          type: object
+          description: >-
+            Map of filename to git blob SHA-1 hash (40-char hex).
+          minProperties: 1
+          additionalProperties:
+            type: string
+            pattern: "^[0-9a-f]{40}$"
diff --git a/validation/tests/test_python_checks_common_cache.py b/validation/tests/test_python_checks_common_cache.py
new file mode 100644
index 00000000..ffbccb61
--- /dev/null
+++ b/validation/tests/test_python_checks_common_cache.py
@@ -0,0 +1,199 @@
+"""Unit tests for validation.engines.python_checks.common_cache_checks (P-021).
+
+The core sync logic is tested exhaustively in
+``tooling_lib/tests/test_cache_sync.py``.  These tests verify the VF
+wrapper: context-to-expected-releases mapping and SyncStatus-to-findings
+conversion.
+"""
+
+from __future__ import annotations
+
+import hashlib
+from pathlib import Path
+from typing import Optional
+
+import yaml
+
+from validation.context import ValidationContext
+from validation.context.context_builder import ApiContext
+from validation.engines.python_checks.common_cache_checks import (
+    check_common_cache_sync,
+)
+
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+
+
+def _blob_sha(content: bytes) -> str:
+    header = f"blob {len(content)}\0".encode("ascii")
+    return hashlib.sha1(header + content).hexdigest()
+
+
+def _make_context(
+    commonalities_release: Optional[str] = None,
+) -> ValidationContext:
+    return ValidationContext(
+        repository="TestRepo",
+        branch_type="main",
+        trigger_type="dispatch",
+        profile="advisory",
+        stage="enabled",
+        target_release_type=None,
+        commonalities_release=commonalities_release,
+        commonalities_version=None,
+        icm_release=None,
+        base_ref=None,
+        is_release_review_pr=False,
+        release_plan_changed=None,
+        pr_number=None,
+        apis=(),
+        workflow_run_url="",
+        tooling_ref="",
+    )
+
+
+def _write_manifest(tmp_path: Path, sources: list) -> None:
+    common_dir = tmp_path / "code" / "common"
+    common_dir.mkdir(parents=True, exist_ok=True)
+    manifest = {"sources": sources}
+    (common_dir / ".sync-manifest.yaml").write_text(
+        yaml.dump(manifest, default_flow_style=False), encoding="utf-8"
+    )
+
+
+def _write_common_file(tmp_path: Path, filename: str, content: str) -> str:
+    common_dir = tmp_path / "code" / "common"
+    common_dir.mkdir(parents=True, exist_ok=True)
+    data = content.encode("utf-8")
+    (common_dir / filename).write_text(content, encoding="utf-8")
+    return _blob_sha(data)
+
+
+# ---------------------------------------------------------------------------
+# Tests — context-to-expected mapping
+# ---------------------------------------------------------------------------
+
+
+class TestContextMapping:
+
+    def test_no_commonalities_release_returns_empty(self, tmp_path: Path):
+        ctx = _make_context(commonalities_release=None)
+        assert check_common_cache_sync(tmp_path, ctx) == []
+
+    def test_commonalities_release_populates_expected(self, tmp_path: Path):
+        """When commonalities_release is set, the check runs."""
+        ctx = _make_context(commonalities_release="r4.2")
+        # No code/common/ dir → should produce a finding.
+        findings = check_common_cache_sync(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "directory" in findings[0]["message"].lower()
+
+
+# ---------------------------------------------------------------------------
+# Tests — SyncStatus-to-findings conversion
+# ---------------------------------------------------------------------------
+
+
+class TestFindingsConversion:
+
+    def test_no_common_dir(self, tmp_path: Path):
+        ctx = _make_context(commonalities_release="r4.2")
+        findings = check_common_cache_sync(tmp_path, ctx)
+        assert len(findings) == 1
+        f = findings[0]
+        assert f["engine_rule"] == "check-common-cache-sync"
+        assert f["level"] == "warn"
+        assert "directory" in f["message"].lower()
+        assert f["path"] == "code/common"
+
+    def test_no_manifest(self, tmp_path: Path):
+        (tmp_path / "code" / "common").mkdir(parents=True)
+        ctx = _make_context(commonalities_release="r4.2")
+        findings = check_common_cache_sync(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "manifest" in findings[0]["message"].lower()
+        assert findings[0]["path"] == "code/common/.sync-manifest.yaml"
+
+    def test_all_in_sync(self, tmp_path: Path):
+        sha = _write_common_file(tmp_path, "CAMARA_common.yaml", "ok")
+        _write_manifest(
+            tmp_path,
+            [
+                {
+                    "repository": "Commonalities",
+                    "release": "r4.2",
+                    "files": {"CAMARA_common.yaml": sha},
+                }
+            ],
+        )
+        ctx = _make_context(commonalities_release="r4.2")
+        assert check_common_cache_sync(tmp_path, ctx) == []
+
+    def test_tag_mismatch_finding(self, tmp_path: Path):
+        sha = _write_common_file(tmp_path, "CAMARA_common.yaml", "data")
+        _write_manifest(
+            tmp_path,
+            [
+                {
+                    "repository": "Commonalities",
+                    "release": "r4.1",
+                    "files": {"CAMARA_common.yaml": sha},
+                }
+            ],
+        )
+        ctx = _make_context(commonalities_release="r4.2")
+        findings = check_common_cache_sync(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "r4.2" in findings[0]["message"]
+        assert "r4.1" in findings[0]["message"]
+
+    def test_missing_file_finding(self, tmp_path: Path):
+        (tmp_path / "code" / "common").mkdir(parents=True, exist_ok=True)
+        _write_manifest(
+            tmp_path,
+            [
+                {
+                    "repository": "Commonalities",
+                    "release": "r4.2",
+                    "files": {"CAMARA_common.yaml": "a" * 40},
+                }
+            ],
+        )
+        ctx = _make_context(commonalities_release="r4.2")
+        findings = check_common_cache_sync(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "missing" in findings[0]["message"].lower()
+        assert findings[0]["path"] == "code/common/CAMARA_common.yaml"
+
+    def test_modified_file_finding(self, tmp_path: Path):
+        original_sha = _blob_sha(b"original")
+        _write_common_file(tmp_path, "CAMARA_common.yaml", "modified")
+        _write_manifest(
+            tmp_path,
+            [
+                {
+                    "repository": "Commonalities",
+                    "release": "r4.2",
+                    "files": {"CAMARA_common.yaml": original_sha},
+                }
+            ],
+        )
+        ctx = _make_context(commonalities_release="r4.2")
+        findings = check_common_cache_sync(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "modified" in findings[0]["message"].lower()
+
+    def test_all_findings_are_warn(self, tmp_path: Path):
+        """Every finding from P-021 is 'warn' (post-filter handles escalation)."""
+        ctx = _make_context(commonalities_release="r4.2")
+        findings = check_common_cache_sync(tmp_path, ctx)
+        assert all(f["level"] == "warn" for f in findings)
+
+    def test_all_findings_have_engine_rule(self, tmp_path: Path):
+        ctx = _make_context(commonalities_release="r4.2")
+        findings = check_common_cache_sync(tmp_path, ctx)
+        assert all(
+            f["engine_rule"] == "check-common-cache-sync" for f in findings
+        )
diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py
index e159618b..323ec36f 100644
--- a/validation/tests/test_rule_metadata_integrity.py
+++ b/validation/tests/test_rule_metadata_integrity.py
@@ -77,7 +77,7 @@ def test_expected_rule_counts(self, all_rules):
         counts = {}
         for r in all_rules:
             counts[r.engine] = counts.get(r.engine, 0) + 1
-        assert counts["python"] == 20
+        assert counts["python"] == 21
         assert counts["spectral"] == 84
         assert counts["gherkin"] == 25
         assert counts["yamllint"] == 13
@@ -306,8 +306,8 @@ def test_hints_are_exception_not_norm(self, all_rules):
         """
         with_hints = [r.id for r in all_rules if r.hint is not None]
         with_overrides = [r.id for r in all_rules if r.message_override is not None]
-        assert len(with_hints) == 13, (
-            f"Expected 13 explicit hints (update test if adding hints): "
+        assert len(with_hints) == 14, (
+            f"Expected 14 explicit hints (update test if adding hints): "
             f"{with_hints}"
         )
         assert len(with_overrides) == 0, (

From 56b755b0dc640d4b918ea207ae38a7909dd4e410 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 17 Apr 2026 08:28:15 +0200
Subject: [PATCH 089/157] fix: add tooling_lib to sparse checkout in validation
 workflow

The sparse checkout in validation.yml did not include the new
tooling_lib/ package introduced in #181, causing ModuleNotFoundError
for P-021's import of tooling_lib.cache_sync in CI.

Follow-up fix for #181.
---
 .github/workflows/validation.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 96c508b7..2121db5e 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -136,6 +136,7 @@ jobs:
             linting/config
             validation
             shared-actions
+            tooling_lib
           path: .tooling
 
       # ── Step 4: Setup Python ───────────────────────────────────────

From 0f9074c9eba8b4b5af213c4da3145beb259bd453 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 17 Apr 2026 08:52:31 +0200
Subject: [PATCH 090/157] fix: expand regression runner path triggers to cover
 validation.yml and tooling_lib

The regression runner only triggered on changes under validation/,
shared-actions/, and its own workflow file. Changes to the reusable
validation workflow or the shared tooling_lib package were missed,
as demonstrated when #182 merged without triggering a canary run.
---
 .github/workflows/regression-runner.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/workflows/regression-runner.yml b/.github/workflows/regression-runner.yml
index dce3c195..a89e0e78 100644
--- a/.github/workflows/regression-runner.yml
+++ b/.github/workflows/regression-runner.yml
@@ -24,6 +24,8 @@ on:
     paths:
       - 'validation/**'
       - 'shared-actions/**'
+      - 'tooling_lib/**'
+      - '.github/workflows/validation.yml'
       - '.github/workflows/regression-runner.yml'
   workflow_dispatch:
 

From 7747e08fb14b721edbefb36a10a46c284c4ca7b7 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 17 Apr 2026 10:41:01 +0200
Subject: [PATCH 091/157] feat: add RA sync-common handler for common file
 cache sync

Add cache sync detection to derive-state, /create-snapshot blocking
in validate-command, and sync PR creation embedded in update-issue.

- derive-state: detect stale common file cache via tooling_lib, output
  common_cache_status (stale/in_sync/""), common_cache_details, and
  common_sync_pr_url
- validate-command: block /create-snapshot when cache is stale with
  descriptive error message referencing sync PR or dispatch guidance
- update-issue: embedded sync steps fetch Commonalities files at the
  declared tag, write .sync-manifest.yaml, and create/update a sync PR
  on sync-common/{tag} branch (with continue-on-error)
- issue_manager: config section shows staleness warning with sync PR link
- BotContext + workflow_context: new common_cache_* fields and flags
- sync-release-issue action: accept and thread cache status inputs
- New bot template: common_cache_stale_warning for SNAPSHOT_ACTIVE/
  DRAFT_READY drift warning

1438 tests pass (596 RA + tooling_lib, 842 validation).
---
 .../workflows/release-automation-reusable.yml | 359 ++++++++++++++++++
 release_automation/scripts/bot_context.py     |  17 +
 release_automation/scripts/issue_manager.py   |  24 +-
 release_automation/scripts/issue_sync.py      |  17 +-
 .../scripts/workflow_context.py               |   4 +
 .../common_cache_stale_warning.md             |  10 +
 release_automation/tests/test_bot_context.py  |  39 ++
 .../tests/test_issue_manager.py               |  77 ++++
 .../tests/test_template_context_contract.py   |   7 +-
 shared-actions/sync-release-issue/action.yml  |  21 +
 10 files changed, 570 insertions(+), 5 deletions(-)
 create mode 100644 release_automation/templates/bot_messages/common_cache_stale_warning.md

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index 231fcac0..1aa2d1e4 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -378,6 +378,9 @@ jobs:
       release_plan_url: ${{ steps.state.outputs.release_plan_url }}
       src_commit_sha_short: ${{ steps.state.outputs.src_commit_sha_short }}
       meta_release: ${{ steps.state.outputs.meta_release }}
+      # Common file cache sync outputs      common_cache_status: ${{ steps.cache-sync.outputs.common_cache_status }}
+      common_cache_details: ${{ steps.cache-sync.outputs.common_cache_details }}
+      common_sync_pr_url: ${{ steps.find-sync-pr.outputs.common_sync_pr_url }}
     steps:
       - name: Checkout tooling
         uses: actions/checkout@v6
@@ -388,12 +391,115 @@ jobs:
           sparse-checkout: |
             release_automation/scripts
             shared-actions/derive-release-state
+            tooling_lib
 
       - name: Derive Release State
         id: state
         uses: ./_tooling/shared-actions/derive-release-state
         # No release_tag input - derived from repository artifacts (release-plan.yaml or release-metadata.yaml)
 
+      # ── Common file cache sync detection ──────────────────────────────
+      - name: Checkout API repo (sparse, for cache sync check)
+        if: |
+          steps.state.outputs.config_error == '' &&
+          steps.state.outputs.commonalities_release != ''
+        uses: actions/checkout@v6
+        with:
+          path: _api_repo
+          sparse-checkout: |
+            code/common
+          sparse-checkout-cone-mode: true
+
+      - name: Check common cache sync
+        id: cache-sync
+        if: |
+          steps.state.outputs.config_error == '' &&
+          steps.state.outputs.commonalities_release != ''
+        shell: python
+        env:
+          API_REPO_PATH: ${{ github.workspace }}/_api_repo
+          COMMONALITIES_RELEASE: ${{ steps.state.outputs.commonalities_release }}
+          TOOLING_PATH: ${{ github.workspace }}/_tooling
+        run: |
+          import os, sys
+          from pathlib import Path
+
+          sys.path.insert(0, os.environ['TOOLING_PATH'])
+
+          comm_release = os.environ['COMMONALITIES_RELEASE']
+          api_repo = Path(os.environ['API_REPO_PATH'])
+          output_file = os.environ['GITHUB_OUTPUT']
+
+          # Version gate: only check for r4.2+
+          import re
+          m = re.match(r'^r(\d+)\.(\d+)$', comm_release)
+          if not m:
+              print(f"Cannot parse commonalities_release '{comm_release}', skipping cache check")
+              with open(output_file, 'a') as f:
+                  f.write("common_cache_status=\n")
+                  f.write("common_cache_details=\n")
+              sys.exit(0)
+
+          major, minor = int(m.group(1)), int(m.group(2))
+          if major < 4 or (major == 4 and minor < 2):
+              print(f"commonalities_release {comm_release} < r4.2, skipping cache check")
+              with open(output_file, 'a') as f:
+                  f.write("common_cache_status=\n")
+                  f.write("common_cache_details=\n")
+              sys.exit(0)
+
+          from tooling_lib.cache_sync import check_sync_status
+
+          expected = {"Commonalities": comm_release}
+          status = check_sync_status(api_repo, expected)
+
+          if status.in_sync:
+              print(f"Common file cache is in sync with {comm_release}")
+              with open(output_file, 'a') as f:
+                  f.write("common_cache_status=in_sync\n")
+                  f.write("common_cache_details=\n")
+              sys.exit(0)
+
+          # Build descriptive details
+          details_parts = []
+          if status.no_common_dir:
+              details_parts.append("code/common/ directory is missing")
+          elif status.no_manifest:
+              details_parts.append("code/common/.sync-manifest.yaml is missing")
+          else:
+              for src in status.sources:
+                  if src.tag_mismatch:
+                      expected_tag, actual_tag = src.tag_mismatch
+                      details_parts.append(
+                          f"{src.repository}: expected {expected_tag}, manifest has {actual_tag}"
+                      )
+                  for fname in src.missing_files:
+                      details_parts.append(f"{fname} missing from code/common/")
+                  for fname in src.modified_files:
+                      details_parts.append(f"{fname} modified since last sync")
+
+          details = "; ".join(details_parts) if details_parts else "cache mismatch detected"
+          print(f"Common file cache is stale: {details}")
+
+          with open(output_file, 'a') as f:
+              f.write("common_cache_status=stale\n")
+              f.write(f"common_cache_details={details}\n")
+
+      - name: Find existing sync PR
+        id: find-sync-pr
+        if: steps.cache-sync.outputs.common_cache_status == 'stale'
+        env:
+          GH_TOKEN: ${{ github.token }}
+        shell: bash
+        run: |
+          SYNC_PR_URL=$(gh pr list --head "sync-common/" --state open --json url --jq '.[0].url' 2>/dev/null || true)
+          echo "common_sync_pr_url=$SYNC_PR_URL" >> "$GITHUB_OUTPUT"
+          if [ -n "$SYNC_PR_URL" ]; then
+            echo "Found existing sync PR: $SYNC_PR_URL"
+          else
+            echo "No existing sync PR found"
+          fi
+
   # ─────────────────────────────────────────────────────────────────────────────
   # Phase 2b: Handle configuration errors (if derive-state detected any)
   # ─────────────────────────────────────────────────────────────────────────────
@@ -530,6 +636,8 @@ jobs:
           CTX_APIS_JSON: ${{ needs.derive-state.outputs.apis_json }}
           CTX_COMMONALITIES_RELEASE: ${{ needs.derive-state.outputs.commonalities_release }}
           CTX_IDENTITY_CONSENT_MANAGEMENT_RELEASE: ${{ needs.derive-state.outputs.identity_consent_management_release }}
+          CTX_COMMON_CACHE_STATUS: ${{ needs.derive-state.outputs.common_cache_status }}
+          CTX_COMMON_CACHE_DETAILS: ${{ needs.derive-state.outputs.common_cache_details }}
           CTX_TRIGGER_PR_NUMBER: ${{ needs.check-trigger.outputs.trigger_pr_number }}
           CTX_TRIGGER_PR_URL: ${{ needs.check-trigger.outputs.trigger_pr_url }}
           GITHUB_TOKEN: ${{ github.token }}
@@ -622,6 +730,21 @@ jobs:
               return;
             }
 
+            // Block /create-snapshot when common file cache is stale            if (command === 'create-snapshot') {
+              const cacheStatus = '${{ needs.derive-state.outputs.common_cache_status }}';
+              const cacheDetails = '${{ needs.derive-state.outputs.common_cache_details }}';
+              const syncPrUrl = '${{ needs.derive-state.outputs.common_sync_pr_url }}';
+              if (cacheStatus === 'stale') {
+                const prHint = syncPrUrl
+                  ? ` Merge the sync PR: ${syncPrUrl}`
+                  : ' Run workflow_dispatch to trigger sync, or close and reopen the Release Issue.';
+                core.setOutput('allowed', 'false');
+                core.setOutput('error_message',
+                  `Common file cache is stale: ${cacheDetails}.${prHint}`);
+                return;
+              }
+            }
+
             // Check user permission (must be write or higher)
             console.log('Checking user permission...');
             let userPermission;
@@ -1963,6 +2086,218 @@ jobs:
             release_automation/templates
             shared-actions/sync-release-issue
             shared-actions/post-bot-comment
+            tooling_lib
+
+      # ── Common file cache sync ─────────────────────────────────────────
+      # When cache is stale, fetch Commonalities files and create/update a
+      # sync PR before the Release Issue update. Uses continue-on-error so
+      # sync failures don't prevent the issue update.
+
+      - name: Checkout API repo (for common sync)
+        if: needs.derive-state.outputs.common_cache_status == 'stale'
+        uses: actions/checkout@v6
+        with:
+          path: _api_repo
+          fetch-depth: 1
+          token: ${{ steps.app-token.outputs.token || github.token }}
+
+      - name: Fetch Commonalities files and write manifest
+        id: sync-common
+        if: needs.derive-state.outputs.common_cache_status == 'stale'
+        continue-on-error: true
+        env:
+          GH_TOKEN: ${{ steps.app-token.outputs.token || github.token }}
+          COMMONALITIES_RELEASE: ${{ needs.derive-state.outputs.commonalities_release }}
+          TOOLING_PATH: ${{ github.workspace }}/_tooling
+        shell: python
+        run: |
+          import base64, hashlib, json, os, subprocess, sys
+          from pathlib import Path
+
+          sys.path.insert(0, os.environ['TOOLING_PATH'])
+
+          comm_release = os.environ['COMMONALITIES_RELEASE']
+          api_repo = Path('_api_repo')
+          common_dir = api_repo / 'code' / 'common'
+          common_dir.mkdir(parents=True, exist_ok=True)
+          output_file = os.environ['GITHUB_OUTPUT']
+
+          source_repo = 'camaraproject/Commonalities'
+          source_path = 'artifacts/common'
+
+          # List files in Commonalities artifacts/common at tag
+          try:
+              raw = subprocess.check_output([
+                  'gh', 'api',
+                  f'repos/{source_repo}/contents/{source_path}',
+                  '-q', '.[].name',
+                  '-H', 'Accept: application/vnd.github+json',
+                  '--paginate',
+              ], env={**os.environ, 'GH_TOKEN': os.environ['GH_TOKEN']},
+                  text=True, stderr=subprocess.PIPE)
+              files = [f.strip() for f in raw.strip().split('\n') if f.strip() and f.strip().endswith('.yaml')]
+          except subprocess.CalledProcessError as e:
+              print(f"::error::Could not list files from {source_repo}/{source_path} at {comm_release}: {e.stderr}")
+              with open(output_file, 'a') as f:
+                  f.write("synced=false\n")
+              sys.exit(0)
+
+          if not files:
+              print(f"::error::No YAML files found in {source_repo}/{source_path} at {comm_release}")
+              with open(output_file, 'a') as f:
+                  f.write("synced=false\n")
+              sys.exit(0)
+
+          # Download each file and compute git blob SHA-1
+          from tooling_lib.cache_sync import git_blob_sha
+
+          manifest_files = {}
+          for filename in files:
+              try:
+                  content_b64 = subprocess.check_output([
+                      'gh', 'api',
+                      f'repos/{source_repo}/contents/{source_path}/{filename}?ref={comm_release}',
+                      '-q', '.content',
+                  ], env={**os.environ, 'GH_TOKEN': os.environ['GH_TOKEN']},
+                      text=True, stderr=subprocess.PIPE).strip()
+
+                  raw_bytes = base64.b64decode(content_b64)
+                  dest = common_dir / filename
+                  dest.write_bytes(raw_bytes)
+
+                  sha = git_blob_sha(raw_bytes)
+                  manifest_files[filename] = sha
+                  print(f"Synced: {filename} ({sha[:12]})")
+              except Exception as e:
+                  print(f"::warning::Failed to sync {filename}: {e}")
+
+          if not manifest_files:
+              print("::error::No files synced successfully")
+              with open(output_file, 'a') as f:
+                  f.write("synced=false\n")
+              sys.exit(0)
+
+          # Write .sync-manifest.yaml per sync-manifest-schema.yaml
+          import yaml
+
+          manifest = {
+              'sources': [{
+                  'repository': 'Commonalities',
+                  'release': comm_release,
+                  'files': manifest_files,
+              }]
+          }
+          manifest_path = common_dir / '.sync-manifest.yaml'
+          manifest_path.write_text(
+              yaml.dump(manifest, default_flow_style=False, sort_keys=False),
+              encoding='utf-8'
+          )
+          print(f"Wrote manifest: {manifest_path}")
+
+          with open(output_file, 'a') as f:
+              f.write("synced=true\n")
+              f.write(f"files_count={len(manifest_files)}\n")
+
+      - name: Resolve Bot Identity (for common sync)
+        id: sync-bot-identity
+        if: needs.derive-state.outputs.common_cache_status == 'stale' && steps.sync-common.outputs.synced == 'true'
+        env:
+          GH_TOKEN: ${{ steps.app-token.outputs.token || github.token }}
+        run: |
+          if [ -n "${{ vars.RELEASE_APP_SLUG }}" ] && [ -n "${{ steps.app-token.outputs.token }}" ]; then
+            BOT_LOGIN="${{ vars.RELEASE_APP_SLUG }}[bot]"
+          else
+            BOT_LOGIN="github-actions[bot]"
+          fi
+          BOT_INFO=$(gh api "/users/$BOT_LOGIN" --jq '{id: .id, login: .login}')
+          BOT_NAME=$(echo "$BOT_INFO" | jq -r '.login')
+          BOT_ID=$(echo "$BOT_INFO" | jq -r '.id')
+          echo "bot_name=$BOT_NAME" >> $GITHUB_OUTPUT
+          echo "bot_email=${BOT_ID}+${BOT_NAME}@users.noreply.github.com" >> $GITHUB_OUTPUT
+
+      - name: Create or update common sync PR
+        id: sync-pr
+        if: needs.derive-state.outputs.common_cache_status == 'stale' && steps.sync-common.outputs.synced == 'true'
+        continue-on-error: true
+        env:
+          GH_TOKEN: ${{ steps.app-token.outputs.token || github.token }}
+          COMMONALITIES_RELEASE: ${{ needs.derive-state.outputs.commonalities_release }}
+        run: |
+          cd _api_repo
+
+          SYNC_BRANCH="sync-common/${COMMONALITIES_RELEASE}"
+
+          # Close stale sync PRs (different commonalities release)
+          STALE_PRS=$(gh pr list --state open --json number,headRefName \
+            --jq '.[] | select(.headRefName | startswith("sync-common/")) | select(.headRefName != "'"$SYNC_BRANCH"'") | .number' 2>/dev/null || true)
+          for pr_num in $STALE_PRS; do
+            echo "Closing stale sync PR #$pr_num"
+            gh pr close "$pr_num" --comment "Superseded by sync to Commonalities ${COMMONALITIES_RELEASE}" || true
+          done
+
+          git config user.name "${{ steps.sync-bot-identity.outputs.bot_name }}"
+          git config user.email "${{ steps.sync-bot-identity.outputs.bot_email }}"
+
+          # Check if branch already exists on remote
+          if git ls-remote --heads origin "$SYNC_BRANCH" | grep -q .; then
+            # Existing branch — update it
+            git fetch origin "$SYNC_BRANCH"
+            git checkout "$SYNC_BRANCH"
+            git add code/common/
+            if git diff --cached --quiet; then
+              echo "No changes to commit on existing sync branch"
+              EXISTING_PR=$(gh pr list --head "$SYNC_BRANCH" --state open --json url --jq '.[0].url' 2>/dev/null || true)
+              echo "sync_pr_url=$EXISTING_PR" >> $GITHUB_OUTPUT
+              echo "sync_pr_number=" >> $GITHUB_OUTPUT
+              echo "sync_status=up_to_date" >> $GITHUB_OUTPUT
+              exit 0
+            fi
+            git commit -m "chore: update common file cache for Commonalities ${COMMONALITIES_RELEASE}"
+            git push origin "$SYNC_BRANCH"
+            EXISTING_PR=$(gh pr list --head "$SYNC_BRANCH" --state open --json url --jq '.[0].url' 2>/dev/null || true)
+            echo "sync_pr_url=$EXISTING_PR" >> $GITHUB_OUTPUT
+            echo "sync_status=updated" >> $GITHUB_OUTPUT
+          else
+            # New branch
+            git checkout -b "$SYNC_BRANCH"
+            git add code/common/
+            if git diff --cached --quiet; then
+              echo "No changes to commit"
+              echo "sync_status=no_changes" >> $GITHUB_OUTPUT
+              exit 0
+            fi
+            git commit -m "chore: sync common files from Commonalities ${COMMONALITIES_RELEASE}"
+            git push origin "$SYNC_BRANCH"
+
+            # Create PR
+            PR_URL=$(gh pr create \
+              --title "Sync common files from Commonalities ${COMMONALITIES_RELEASE}" \
+              --body "Automated sync of \`code/common/\` files from \`camaraproject/Commonalities\` at release tag \`${COMMONALITIES_RELEASE}\`.
+
+          This PR was created by release automation because the cached Commonalities files were out of sync with the declared dependency in \`release-plan.yaml\`.
+
+          **What changed:**
+          - Updated files in \`code/common/\` to match Commonalities ${COMMONALITIES_RELEASE}
+          - Added/updated \`.sync-manifest.yaml\` with file integrity hashes
+
+          **Action required:** Review and merge this PR. The \`/create-snapshot\` command will be blocked until common files are in sync." \
+              --head "$SYNC_BRANCH" \
+              --base main) || {
+              echo "::error::Failed to create sync PR"
+              echo "sync_status=failed" >> $GITHUB_OUTPUT
+              exit 0
+            }
+
+            PR_NUMBER=$(echo "$PR_URL" | grep -oE '[0-9]+$')
+            gh pr edit "$PR_NUMBER" --add-label "common-sync" --add-label "automated" 2>/dev/null || true
+
+            echo "sync_pr_url=$PR_URL" >> $GITHUB_OUTPUT
+            echo "sync_pr_number=$PR_NUMBER" >> $GITHUB_OUTPUT
+            echo "sync_status=created" >> $GITHUB_OUTPUT
+            echo "Sync PR created: $PR_URL"
+          fi
+
+      # ── End common file cache sync ────────────────────────────────────────
 
       - name: Resolve Sync Context
         id: sync-context
@@ -2018,6 +2353,9 @@ jobs:
           force_update: ${{ steps.sync-context.outputs.force_update }}
           trigger_type: ${{ needs.check-trigger.outputs.trigger_type }}
           github_token: ${{ steps.app-token.outputs.token || github.token }}
+          common_cache_status: ${{ needs.derive-state.outputs.common_cache_status }}
+          common_cache_details: ${{ needs.derive-state.outputs.common_cache_details }}
+          common_sync_pr_url: ${{ steps.sync-pr.outputs.sync_pr_url || needs.derive-state.outputs.common_sync_pr_url }}
 
       - name: Log Result
         run: |
@@ -2089,6 +2427,27 @@ jobs:
               "release_plan_url": "${{ github.server_url }}/${{ github.repository }}/blob/main/release-plan.yaml"
             }
 
+      - name: Post Common Cache Stale Warning
+        if: |
+          needs.derive-state.outputs.common_cache_status == 'stale' &&
+          needs.derive-state.outputs.release_issue_number != '' &&
+          (needs.derive-state.outputs.state == 'snapshot-active' ||
+           needs.derive-state.outputs.state == 'draft-ready')
+        uses: ./_tooling/shared-actions/post-bot-comment
+        with:
+          issue_number: ${{ needs.derive-state.outputs.release_issue_number }}
+          release_tag: ${{ needs.derive-state.outputs.release_tag }}
+          run_id: ${{ github.run_id }}
+          template: common_cache_stale_warning
+          base_context: ${{ needs.assemble-context.outputs.base_context }}
+          github_token: ${{ steps.app-token.outputs.token || github.token }}
+          context: |
+            {
+              "common_cache_status": "${{ needs.derive-state.outputs.common_cache_status }}",
+              "common_cache_details": "${{ needs.derive-state.outputs.common_cache_details }}",
+              "common_sync_pr_url": "${{ steps.sync-pr.outputs.sync_pr_url || needs.derive-state.outputs.common_sync_pr_url }}"
+            }
+
   # ─────────────────────────────────────────────────────────────────────────────
   # Phase 7: Post final result comment
   # ─────────────────────────────────────────────────────────────────────────────
diff --git a/release_automation/scripts/bot_context.py b/release_automation/scripts/bot_context.py
index e081b6b1..849afe13 100644
--- a/release_automation/scripts/bot_context.py
+++ b/release_automation/scripts/bot_context.py
@@ -96,6 +96,15 @@ class BotContext:
     has_sync_pr: bool = False
     has_publish_warnings: bool = False
 
+    # Common file cache sync fields
+    common_cache_status: str = ""      # "stale", "in_sync", or "" (unchecked)
+    common_cache_details: str = ""     # Human-readable staleness description
+    common_sync_pr_url: str = ""       # URL of open sync-common/* PR
+
+    # Derived cache sync flags (set by derive_flags())
+    common_cache_stale: bool = False
+    has_common_sync_pr: bool = False
+
     def derive_flags(self) -> None:
         """Compute boolean flags and derived fields from string fields."""
         self.is_missing_file = self.error_type == "missing_file"
@@ -111,6 +120,8 @@ def derive_flags(self) -> None:
         self.has_reason = bool(self.reason)
         self.has_sync_pr = bool(self.sync_pr_url)
         self.has_publish_warnings = bool(self.publish_warnings)
+        self.common_cache_stale = self.common_cache_status == "stale"
+        self.has_common_sync_pr = bool(self.common_sync_pr_url)
         if not self.short_type:
             self.short_type = config.SHORT_TYPE_MAP.get(
                 self.release_type, self.release_type
@@ -189,4 +200,10 @@ def to_dict(self) -> Dict[str, Any]:
             "publish_warnings": self.publish_warnings,
             "has_sync_pr": self.has_sync_pr,
             "has_publish_warnings": self.has_publish_warnings,
+            # Common file cache sync fields
+            "common_cache_status": self.common_cache_status,
+            "common_cache_details": self.common_cache_details,
+            "common_sync_pr_url": self.common_sync_pr_url,
+            "common_cache_stale": self.common_cache_stale,
+            "has_common_sync_pr": self.has_common_sync_pr,
         }
diff --git a/release_automation/scripts/issue_manager.py b/release_automation/scripts/issue_manager.py
index 57ed5751..ee4e9c76 100644
--- a/release_automation/scripts/issue_manager.py
+++ b/release_automation/scripts/issue_manager.py
@@ -229,7 +229,10 @@ def generate_config_section(
         release_plan: Dict[str, Any],
         api_versions: Dict[str, str],
         commonalities_release: str = "",
-        icm_release: str = ""
+        icm_release: str = "",
+        common_cache_status: str = "",
+        common_cache_details: str = "",
+        common_sync_pr_url: str = "",
     ) -> str:
         """
         Generate content for the CONFIG section.
@@ -237,12 +240,16 @@ def generate_config_section(
         Displays release configuration including:
         - APIs table with target and calculated versions
         - Dependencies (Commonalities, ICM)
+        - Common file cache staleness warning if applicable
 
         Args:
             release_plan: Parsed release-plan.yaml content
             api_versions: Dict mapping API name to calculated version
             commonalities_release: Required Commonalities version
             icm_release: Required ICM version
+            common_cache_status: "stale", "in_sync", or "" (unchecked)
+            common_cache_details: Human-readable staleness description
+            common_sync_pr_url: URL of open sync-common PR if any
 
         Returns:
             Formatted config section content
@@ -284,6 +291,21 @@ def generate_config_section(
         elif not apis:
             lines.append("_No APIs or dependencies configured_")
 
+        # Add common file cache staleness warning
+        if common_cache_status == "stale":
+            lines.append("")
+            detail = f" \u2014 {common_cache_details}" if common_cache_details else ""
+            if common_sync_pr_url:
+                lines.append(
+                    f"\u26a0\ufe0f **Common file cache stale**{detail}. "
+                    f"[Sync PR]({common_sync_pr_url}) pending."
+                )
+            else:
+                lines.append(
+                    f"\u26a0\ufe0f **Common file cache stale**{detail}. "
+                    f"Run `workflow_dispatch` to trigger sync."
+                )
+
         return "\n".join(lines)
 
     def generate_issue_body_template(
diff --git a/release_automation/scripts/issue_sync.py b/release_automation/scripts/issue_sync.py
index 114224a6..f42686aa 100644
--- a/release_automation/scripts/issue_sync.py
+++ b/release_automation/scripts/issue_sync.py
@@ -117,6 +117,9 @@ def sync_release_issue(
         release_pr_number_override: Optional[str] = None,
         draft_release_url_override: Optional[str] = None,
         force_update: bool = False,
+        common_cache_status: str = "",
+        common_cache_details: str = "",
+        common_sync_pr_url: str = "",
     ) -> SyncResult:
         """
         Ensure Release Issue exists and reflects current state.
@@ -189,6 +192,9 @@ def _post_create():
                         snapshot_branch_override=snapshot_branch_override,
                         release_pr_number_override=release_pr_number_override,
                         draft_release_url_override=draft_release_url_override,
+                        common_cache_status=common_cache_status,
+                        common_cache_details=common_cache_details,
+                        common_sync_pr_url=common_sync_pr_url,
                     )
                     return self.gh.get_issue(new_issue["number"])
                 updated_issue = self.gh.retry_on_not_found(_post_create)
@@ -205,6 +211,9 @@ def _post_create():
                 snapshot_branch_override=snapshot_branch_override,
                 release_pr_number_override=release_pr_number_override,
                 draft_release_url_override=draft_release_url_override,
+                common_cache_status=common_cache_status,
+                common_cache_details=common_cache_details,
+                common_sync_pr_url=common_sync_pr_url,
             )
             # Refetch issue after update
             updated_issue = self.gh.get_issue(issue["number"])
@@ -356,6 +365,9 @@ def _update_release_issue(
         snapshot_branch_override: Optional[str] = None,
         release_pr_number_override: Optional[str] = None,
         draft_release_url_override: Optional[str] = None,
+        common_cache_status: str = "",
+        common_cache_details: str = "",
+        common_sync_pr_url: str = "",
     ) -> None:
         """
         Update an existing Release Issue to match current state.
@@ -467,7 +479,10 @@ def _update_release_issue(
             release_plan=release_plan,
             api_versions=api_versions,
             commonalities_release=commonalities_release,
-            icm_release=icm_release
+            icm_release=icm_release,
+            common_cache_status=common_cache_status,
+            common_cache_details=common_cache_details,
+            common_sync_pr_url=common_sync_pr_url,
         )
         updated_body = self.issue_manager.update_section(
             updated_body, "CONFIG", new_config_content
diff --git a/release_automation/scripts/workflow_context.py b/release_automation/scripts/workflow_context.py
index 53f05d9a..3366b916 100644
--- a/release_automation/scripts/workflow_context.py
+++ b/release_automation/scripts/workflow_context.py
@@ -69,6 +69,10 @@ def parse_json_list(json_str):
         "commonalities_release": os.environ.get("CTX_COMMONALITIES_RELEASE", ""),
         "identity_consent_management_release": os.environ.get("CTX_IDENTITY_CONSENT_MANAGEMENT_RELEASE", ""),
 
+        # Common file cache sync
+        "common_cache_status": os.environ.get("CTX_COMMON_CACHE_STATUS", ""),
+        "common_cache_details": os.environ.get("CTX_COMMON_CACHE_DETAILS", ""),
+
         # Lists
         "apis": parse_json_list(os.environ.get("CTX_APIS_JSON", "[]")),
         
diff --git a/release_automation/templates/bot_messages/common_cache_stale_warning.md b/release_automation/templates/bot_messages/common_cache_stale_warning.md
new file mode 100644
index 00000000..5ba0a3b6
--- /dev/null
+++ b/release_automation/templates/bot_messages/common_cache_stale_warning.md
@@ -0,0 +1,10 @@
+**⚠️ Common file cache stale — State: `{{state}}`**
+{{common_cache_details}}
+
+The cached Commonalities files in `code/common/` do not match `{{commonalities_release}}` declared in `release-plan.yaml`.
+
+{{#has_common_sync_pr}}[Sync PR]({{common_sync_pr_url}}) is pending — merge it when ready to update the common files on main.{{/has_common_sync_pr}}
+{{^has_common_sync_pr}}Run `workflow_dispatch` to trigger sync, or `/discard-snapshot` to return to planned state.{{/has_common_sync_pr}}
+
+{{#state_snapshot_active}}_The active snapshot uses the common files from when it was created. To pick up updated files, discard the snapshot and create a new one after merging the sync PR._{{/state_snapshot_active}}
+{{#state_draft_ready}}_The draft release uses the common files from when the snapshot was created. Consider whether the common files need updating before publishing._{{/state_draft_ready}}
diff --git a/release_automation/tests/test_bot_context.py b/release_automation/tests/test_bot_context.py
index f8446c8c..41c94fde 100644
--- a/release_automation/tests/test_bot_context.py
+++ b/release_automation/tests/test_bot_context.py
@@ -70,6 +70,39 @@ def test_default_values(self):
         assert ctx.has_sync_pr is False
         assert ctx.has_publish_warnings is False
 
+        # Cache sync fields
+        assert ctx.common_cache_status == ""
+        assert ctx.common_cache_details == ""
+        assert ctx.common_sync_pr_url == ""
+        assert ctx.common_cache_stale is False
+        assert ctx.has_common_sync_pr is False
+
+    def test_derive_flags_common_cache_stale(self):
+        """common_cache_status 'stale' sets common_cache_stale flag."""
+        ctx = BotContext(common_cache_status="stale")
+        ctx.derive_flags()
+
+        assert ctx.common_cache_stale is True
+        assert ctx.has_common_sync_pr is False
+
+    def test_derive_flags_common_cache_in_sync(self):
+        """common_cache_status 'in_sync' does not set stale flag."""
+        ctx = BotContext(common_cache_status="in_sync")
+        ctx.derive_flags()
+
+        assert ctx.common_cache_stale is False
+
+    def test_derive_flags_common_sync_pr(self):
+        """common_sync_pr_url sets has_common_sync_pr flag."""
+        ctx = BotContext(
+            common_cache_status="stale",
+            common_sync_pr_url="https://github.com/org/repo/pull/42",
+        )
+        ctx.derive_flags()
+
+        assert ctx.common_cache_stale is True
+        assert ctx.has_common_sync_pr is True
+
     def test_derive_flags_missing_file(self):
         """error_type 'missing_file' sets is_missing_file flag."""
         ctx = BotContext(error_type="missing_file")
@@ -178,6 +211,9 @@ def test_to_dict_returns_all_keys(self):
             "sync_pr_number", "sync_pr_url",
             "src_commit_sha_short", "confirm_tag",
             "publish_warnings", "has_sync_pr", "has_publish_warnings",
+            # Common cache sync fields
+            "common_cache_status", "common_cache_details", "common_sync_pr_url",
+            "common_cache_stale", "has_common_sync_pr",
         }
         assert set(d.keys()) == expected_keys
 
@@ -257,6 +293,9 @@ def test_returns_complete_dict(self):
             "sync_pr_number", "sync_pr_url",
             "src_commit_sha_short", "confirm_tag",
             "publish_warnings", "has_sync_pr", "has_publish_warnings",
+            # Common cache sync fields
+            "common_cache_status", "common_cache_details", "common_sync_pr_url",
+            "common_cache_stale", "has_common_sync_pr",
         }
         assert set(result.keys()) == expected_keys
 
diff --git a/release_automation/tests/test_issue_manager.py b/release_automation/tests/test_issue_manager.py
index f869b01f..5145b2a0 100644
--- a/release_automation/tests/test_issue_manager.py
+++ b/release_automation/tests/test_issue_manager.py
@@ -301,6 +301,83 @@ def test_generate_config_without_apis(self):
 
         assert "_No APIs or dependencies configured_" in content
 
+    def test_generate_config_with_stale_cache_and_sync_pr(self):
+        """Stale cache with sync PR shows warning with link."""
+        manager = IssueManager()
+
+        release_plan = {
+            "repository": {"target_release_type": "public-release"},
+            "apis": [{"api_name": "test-api", "target_api_version": "1.0.0", "target_api_status": "public"}],
+            "dependencies": {"commonalities_release": "r4.2"},
+        }
+
+        content = manager.generate_config_section(
+            release_plan, {"test-api": "1.0.0"},
+            commonalities_release="r4.2",
+            common_cache_status="stale",
+            common_cache_details="Commonalities: expected r4.2, manifest has r4.1",
+            common_sync_pr_url="https://github.com/org/repo/pull/42",
+        )
+
+        assert "\u26a0\ufe0f **Common file cache stale**" in content
+        assert "expected r4.2, manifest has r4.1" in content
+        assert "[Sync PR](https://github.com/org/repo/pull/42)" in content
+
+    def test_generate_config_with_stale_cache_no_pr(self):
+        """Stale cache without sync PR shows dispatch guidance."""
+        manager = IssueManager()
+
+        release_plan = {
+            "repository": {"target_release_type": "public-release"},
+            "apis": [{"api_name": "test-api", "target_api_version": "1.0.0"}],
+        }
+
+        content = manager.generate_config_section(
+            release_plan, {},
+            commonalities_release="r4.2",
+            common_cache_status="stale",
+            common_cache_details="CAMARA_common.yaml modified since last sync",
+        )
+
+        assert "\u26a0\ufe0f **Common file cache stale**" in content
+        assert "modified since last sync" in content
+        assert "workflow_dispatch" in content
+
+    def test_generate_config_in_sync_no_warning(self):
+        """In-sync cache shows no warning."""
+        manager = IssueManager()
+
+        release_plan = {
+            "repository": {"target_release_type": "public-release"},
+            "apis": [{"api_name": "test-api", "target_api_version": "1.0.0"}],
+        }
+
+        content = manager.generate_config_section(
+            release_plan, {},
+            commonalities_release="r4.2",
+            common_cache_status="in_sync",
+        )
+
+        assert "\u26a0\ufe0f" not in content
+        assert "stale" not in content
+
+    def test_generate_config_unchecked_cache_no_warning(self):
+        """Empty cache status (unchecked/legacy) shows no warning."""
+        manager = IssueManager()
+
+        release_plan = {
+            "repository": {"target_release_type": "public-release"},
+            "apis": [{"api_name": "test-api", "target_api_version": "1.0.0"}],
+        }
+
+        content = manager.generate_config_section(
+            release_plan, {},
+            commonalities_release="r3.4",
+            common_cache_status="",
+        )
+
+        assert "\u26a0\ufe0f" not in content
+
 
 class TestIssueManagerGenerateIssueBodyTemplate:
     """Tests for generate_issue_body_template method."""
diff --git a/release_automation/tests/test_template_context_contract.py b/release_automation/tests/test_template_context_contract.py
index 54865867..785b2009 100644
--- a/release_automation/tests/test_template_context_contract.py
+++ b/release_automation/tests/test_template_context_contract.py
@@ -15,6 +15,7 @@
 
 KNOWN_TEMPLATES = [
     "command_rejected",
+    "common_cache_stale_warning",
     "config_drift_warning",
     "config_error",
     "draft_created",
@@ -116,10 +117,10 @@ def test_all_known_templates_exist(self, responder):
             )
 
     def test_list_templates_returns_expected_count(self, responder):
-        """list_templates() returns at least 12 templates."""
+        """list_templates() returns at least 13 templates."""
         templates = responder.list_templates()
-        assert len(templates) >= 12, (
-            f"Expected at least 12 templates, got {len(templates)}: {templates}"
+        assert len(templates) >= 13, (
+            f"Expected at least 13 templates, got {len(templates)}: {templates}"
         )
 
     def test_build_context_no_none_values(self):
diff --git a/shared-actions/sync-release-issue/action.yml b/shared-actions/sync-release-issue/action.yml
index fbec2461..13f797f1 100644
--- a/shared-actions/sync-release-issue/action.yml
+++ b/shared-actions/sync-release-issue/action.yml
@@ -49,6 +49,18 @@ inputs:
   github_token:
     description: 'GitHub token with issue write permissions'
     required: true
+  common_cache_status:
+    description: 'Common cache sync status from derive-state ("stale", "in_sync", or "")'
+    required: false
+    default: ''
+  common_cache_details:
+    description: 'Human-readable cache staleness description'
+    required: false
+    default: ''
+  common_sync_pr_url:
+    description: 'URL of open sync-common PR if any'
+    required: false
+    default: ''
 
 outputs:
   issue_number:
@@ -88,6 +100,9 @@ runs:
         DRAFT_RELEASE_URL_OVERRIDE: ${{ inputs.draft_release_url_override }}
         FORCE_UPDATE: ${{ inputs.force_update }}
         TRIGGER_TYPE: ${{ inputs.trigger_type }}
+        COMMON_CACHE_STATUS: ${{ inputs.common_cache_status }}
+        COMMON_CACHE_DETAILS: ${{ inputs.common_cache_details }}
+        COMMON_SYNC_PR_URL: ${{ inputs.common_sync_pr_url }}
         REPO: ${{ github.repository }}
         SCRIPTS_PATH: ${{ github.action_path }}/../../release_automation/scripts
       run: |
@@ -117,6 +132,9 @@ runs:
         draft_release_url_override = os.environ.get('DRAFT_RELEASE_URL_OVERRIDE', '').strip()
         force_update = os.environ.get('FORCE_UPDATE', 'false').strip().lower() == 'true'
         trigger_type = os.environ.get('TRIGGER_TYPE', 'workflow_dispatch')
+        common_cache_status = os.environ.get('COMMON_CACHE_STATUS', '').strip()
+        common_cache_details = os.environ.get('COMMON_CACHE_DETAILS', '').strip()
+        common_sync_pr_url = os.environ.get('COMMON_SYNC_PR_URL', '').strip()
 
         output_file = os.environ['GITHUB_OUTPUT']
 
@@ -234,6 +252,9 @@ runs:
                 release_pr_number_override=release_pr_number_override or None,
                 draft_release_url_override=draft_release_url_override or None,
                 force_update=force_update,
+                common_cache_status=common_cache_status,
+                common_cache_details=common_cache_details,
+                common_sync_pr_url=common_sync_pr_url,
             )
 
             # Extract results

From dcfaed89f58ff25d638747da636f7743ce5aaccc Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 17 Apr 2026 11:11:16 +0200
Subject: [PATCH 092/157] refactor: use sparse checkout instead of gh api for
 Commonalities fetch

Replace per-file GitHub Contents API calls with a sparse checkout of
camaraproject/Commonalities at the declared tag. Simpler, no base64
decoding, no API rate limit concern, no file size limit.
---
 .../workflows/release-automation-reusable.yml | 78 +++++++------------
 1 file changed, 29 insertions(+), 49 deletions(-)

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index 1aa2d1e4..8dd285bd 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -2101,82 +2101,62 @@ jobs:
           fetch-depth: 1
           token: ${{ steps.app-token.outputs.token || github.token }}
 
-      - name: Fetch Commonalities files and write manifest
+      - name: Checkout Commonalities at declared tag (sparse)
+        if: needs.derive-state.outputs.common_cache_status == 'stale'
+        uses: actions/checkout@v6
+        with:
+          repository: camaraproject/Commonalities
+          ref: ${{ needs.derive-state.outputs.commonalities_release }}
+          path: _commonalities
+          sparse-checkout: artifacts/common
+          sparse-checkout-cone-mode: true
+
+      - name: Copy Commonalities files and write manifest
         id: sync-common
         if: needs.derive-state.outputs.common_cache_status == 'stale'
         continue-on-error: true
         env:
-          GH_TOKEN: ${{ steps.app-token.outputs.token || github.token }}
           COMMONALITIES_RELEASE: ${{ needs.derive-state.outputs.commonalities_release }}
           TOOLING_PATH: ${{ github.workspace }}/_tooling
         shell: python
         run: |
-          import base64, hashlib, json, os, subprocess, sys
+          import os, shutil, sys
           from pathlib import Path
 
           sys.path.insert(0, os.environ['TOOLING_PATH'])
 
           comm_release = os.environ['COMMONALITIES_RELEASE']
+          source_dir = Path('_commonalities/artifacts/common')
           api_repo = Path('_api_repo')
           common_dir = api_repo / 'code' / 'common'
           common_dir.mkdir(parents=True, exist_ok=True)
           output_file = os.environ['GITHUB_OUTPUT']
 
-          source_repo = 'camaraproject/Commonalities'
-          source_path = 'artifacts/common'
-
-          # List files in Commonalities artifacts/common at tag
-          try:
-              raw = subprocess.check_output([
-                  'gh', 'api',
-                  f'repos/{source_repo}/contents/{source_path}',
-                  '-q', '.[].name',
-                  '-H', 'Accept: application/vnd.github+json',
-                  '--paginate',
-              ], env={**os.environ, 'GH_TOKEN': os.environ['GH_TOKEN']},
-                  text=True, stderr=subprocess.PIPE)
-              files = [f.strip() for f in raw.strip().split('\n') if f.strip() and f.strip().endswith('.yaml')]
-          except subprocess.CalledProcessError as e:
-              print(f"::error::Could not list files from {source_repo}/{source_path} at {comm_release}: {e.stderr}")
+          if not source_dir.is_dir():
+              print(f"::error::Commonalities artifacts/common not found at tag {comm_release}")
               with open(output_file, 'a') as f:
                   f.write("synced=false\n")
               sys.exit(0)
 
-          if not files:
-              print(f"::error::No YAML files found in {source_repo}/{source_path} at {comm_release}")
-              with open(output_file, 'a') as f:
-                  f.write("synced=false\n")
-              sys.exit(0)
-
-          # Download each file and compute git blob SHA-1
+          # Copy YAML files from Commonalities checkout to code/common/
           from tooling_lib.cache_sync import git_blob_sha
 
-          manifest_files = {}
-          for filename in files:
-              try:
-                  content_b64 = subprocess.check_output([
-                      'gh', 'api',
-                      f'repos/{source_repo}/contents/{source_path}/{filename}?ref={comm_release}',
-                      '-q', '.content',
-                  ], env={**os.environ, 'GH_TOKEN': os.environ['GH_TOKEN']},
-                      text=True, stderr=subprocess.PIPE).strip()
-
-                  raw_bytes = base64.b64decode(content_b64)
-                  dest = common_dir / filename
-                  dest.write_bytes(raw_bytes)
-
-                  sha = git_blob_sha(raw_bytes)
-                  manifest_files[filename] = sha
-                  print(f"Synced: {filename} ({sha[:12]})")
-              except Exception as e:
-                  print(f"::warning::Failed to sync {filename}: {e}")
-
-          if not manifest_files:
-              print("::error::No files synced successfully")
+          source_files = sorted(p for p in source_dir.iterdir() if p.suffix == '.yaml')
+          if not source_files:
+              print(f"::error::No YAML files in Commonalities artifacts/common at {comm_release}")
               with open(output_file, 'a') as f:
                   f.write("synced=false\n")
               sys.exit(0)
 
+          manifest_files = {}
+          for src_path in source_files:
+              dest = common_dir / src_path.name
+              shutil.copy2(src_path, dest)
+              raw_bytes = dest.read_bytes()
+              sha = git_blob_sha(raw_bytes)
+              manifest_files[src_path.name] = sha
+              print(f"Synced: {src_path.name} ({sha[:12]})")
+
           # Write .sync-manifest.yaml per sync-manifest-schema.yaml
           import yaml
 
@@ -2192,7 +2172,7 @@ jobs:
               yaml.dump(manifest, default_flow_style=False, sort_keys=False),
               encoding='utf-8'
           )
-          print(f"Wrote manifest: {manifest_path}")
+          print(f"Wrote manifest: {manifest_path} ({len(manifest_files)} files)")
 
           with open(output_file, 'a') as f:
               f.write("synced=true\n")

From 0273428d1a3acf8832fe019d63ec5f62b1d11384 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 17 Apr 2026 12:19:55 +0200
Subject: [PATCH 093/157] fix: replace staleness warning with sync PR created
 notification

Replace the generic "cache stale" bot comment (posted on every run when
stale) with a "sync PR created" notification that fires only when the
sync steps actually create a new PR. The PR URL is always available at
this point (posted after the sync steps complete).

Also fix gh pr list --head prefix search (use --json + jq startswith
instead of exact match) and remove the redundant bot template.

The config section warning in the Release Issue body continues to show
staleness status on every sync-issue update.
---
 .../workflows/release-automation-reusable.yml  | 18 ++++++++----------
 .../bot_messages/common_cache_stale_warning.md | 10 ----------
 .../bot_messages/common_sync_pr_created.md     |  6 ++++++
 .../tests/test_template_context_contract.py    |  2 +-
 4 files changed, 15 insertions(+), 21 deletions(-)
 delete mode 100644 release_automation/templates/bot_messages/common_cache_stale_warning.md
 create mode 100644 release_automation/templates/bot_messages/common_sync_pr_created.md

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index 8dd285bd..b5e8fc44 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -492,7 +492,8 @@ jobs:
           GH_TOKEN: ${{ github.token }}
         shell: bash
         run: |
-          SYNC_PR_URL=$(gh pr list --head "sync-common/" --state open --json url --jq '.[0].url' 2>/dev/null || true)
+          SYNC_PR_URL=$(gh pr list --state open --json headRefName,url \
+            --jq '.[] | select(.headRefName | startswith("sync-common/")) | .url' 2>/dev/null | head -1 || true)
           echo "common_sync_pr_url=$SYNC_PR_URL" >> "$GITHUB_OUTPUT"
           if [ -n "$SYNC_PR_URL" ]; then
             echo "Found existing sync PR: $SYNC_PR_URL"
@@ -2407,25 +2408,22 @@ jobs:
               "release_plan_url": "${{ github.server_url }}/${{ github.repository }}/blob/main/release-plan.yaml"
             }
 
-      - name: Post Common Cache Stale Warning
+      - name: Post Common Sync PR Created
         if: |
-          needs.derive-state.outputs.common_cache_status == 'stale' &&
-          needs.derive-state.outputs.release_issue_number != '' &&
-          (needs.derive-state.outputs.state == 'snapshot-active' ||
-           needs.derive-state.outputs.state == 'draft-ready')
+          steps.sync-pr.outputs.sync_status == 'created' &&
+          (needs.derive-state.outputs.release_issue_number != '' || steps.sync.outputs.issue_number != '')
         uses: ./_tooling/shared-actions/post-bot-comment
         with:
-          issue_number: ${{ needs.derive-state.outputs.release_issue_number }}
+          issue_number: ${{ needs.derive-state.outputs.release_issue_number || steps.sync.outputs.issue_number }}
           release_tag: ${{ needs.derive-state.outputs.release_tag }}
           run_id: ${{ github.run_id }}
-          template: common_cache_stale_warning
+          template: common_sync_pr_created
           base_context: ${{ needs.assemble-context.outputs.base_context }}
           github_token: ${{ steps.app-token.outputs.token || github.token }}
           context: |
             {
-              "common_cache_status": "${{ needs.derive-state.outputs.common_cache_status }}",
               "common_cache_details": "${{ needs.derive-state.outputs.common_cache_details }}",
-              "common_sync_pr_url": "${{ steps.sync-pr.outputs.sync_pr_url || needs.derive-state.outputs.common_sync_pr_url }}"
+              "common_sync_pr_url": "${{ steps.sync-pr.outputs.sync_pr_url }}"
             }
 
   # ─────────────────────────────────────────────────────────────────────────────
diff --git a/release_automation/templates/bot_messages/common_cache_stale_warning.md b/release_automation/templates/bot_messages/common_cache_stale_warning.md
deleted file mode 100644
index 5ba0a3b6..00000000
--- a/release_automation/templates/bot_messages/common_cache_stale_warning.md
+++ /dev/null
@@ -1,10 +0,0 @@
-**⚠️ Common file cache stale — State: `{{state}}`**
-{{common_cache_details}}
-
-The cached Commonalities files in `code/common/` do not match `{{commonalities_release}}` declared in `release-plan.yaml`.
-
-{{#has_common_sync_pr}}[Sync PR]({{common_sync_pr_url}}) is pending — merge it when ready to update the common files on main.{{/has_common_sync_pr}}
-{{^has_common_sync_pr}}Run `workflow_dispatch` to trigger sync, or `/discard-snapshot` to return to planned state.{{/has_common_sync_pr}}
-
-{{#state_snapshot_active}}_The active snapshot uses the common files from when it was created. To pick up updated files, discard the snapshot and create a new one after merging the sync PR._{{/state_snapshot_active}}
-{{#state_draft_ready}}_The draft release uses the common files from when the snapshot was created. Consider whether the common files need updating before publishing._{{/state_draft_ready}}
diff --git a/release_automation/templates/bot_messages/common_sync_pr_created.md b/release_automation/templates/bot_messages/common_sync_pr_created.md
new file mode 100644
index 00000000..01538613
--- /dev/null
+++ b/release_automation/templates/bot_messages/common_sync_pr_created.md
@@ -0,0 +1,6 @@
+**Common file sync: [sync PR]({{common_sync_pr_url}}) created**
+{{common_cache_details}}
+
+The cached Commonalities files in `code/common/` are out of sync with `{{commonalities_release}}` declared in `release-plan.yaml`. A sync PR has been created to update them.
+
+**Action required:** Review and merge the sync PR. `/create-snapshot` is blocked until common files are in sync.
diff --git a/release_automation/tests/test_template_context_contract.py b/release_automation/tests/test_template_context_contract.py
index 785b2009..9a83d53f 100644
--- a/release_automation/tests/test_template_context_contract.py
+++ b/release_automation/tests/test_template_context_contract.py
@@ -15,7 +15,7 @@
 
 KNOWN_TEMPLATES = [
     "command_rejected",
-    "common_cache_stale_warning",
+    "common_sync_pr_created",
     "config_drift_warning",
     "config_error",
     "draft_created",

From 52e5bedb27535325b7a8c4d4011792016d5d9ebd Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 17 Apr 2026 12:43:42 +0200
Subject: [PATCH 094/157] feat: add code/common/** push trigger with correct
 classification

Add code/common/** to the caller push path filter so that merging a
sync PR triggers RA to re-derive state (clearing the staleness warning).

Classify push events by changed paths: release-plan.yaml changes get
trigger_type 'release_plan_change' (existing behavior), other pushes
(e.g. code/common/**) get trigger_type 'push'. This prevents the config
drift warning and issue_created template from falsely attributing a
common-file push to a release-plan.yaml change.
---
 .github/workflows/release-automation-reusable.yml        | 9 +++++++--
 .../workflows/release-automation-caller.yml              | 4 ++--
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index b5e8fc44..bbeb324a 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -199,10 +199,15 @@ jobs:
             }
 
             // ─────────────────────────────────────────────────────────────────
-            // Event: push (release-plan.yaml change on main)
+            // Event: push (release-plan.yaml or code/common changes on main)
             // ─────────────────────────────────────────────────────────────────
             else if (eventName === 'push') {
-              triggerType = 'release_plan_change';
+              // Classify by which paths changed
+              const changedFiles = (context.payload.commits || [])
+                .flatMap(c => [...(c.added || []), ...(c.modified || []), ...(c.removed || [])]);
+              const hasReleasePlanChange = changedFiles.includes('release-plan.yaml');
+
+              triggerType = hasReleasePlanChange ? 'release_plan_change' : 'push';
               command = 'sync-issue';
               user = context.actor;
               shouldContinue = 'true';
diff --git a/release_automation/workflows/release-automation-caller.yml b/release_automation/workflows/release-automation-caller.yml
index 88cf724f..b3d7fdd0 100644
--- a/release_automation/workflows/release-automation-caller.yml
+++ b/release_automation/workflows/release-automation-caller.yml
@@ -29,10 +29,10 @@ on:
     branches:
       - 'release-snapshot/**'
 
-  # Push to main with release-plan.yaml changes (auto sync-issue)
+  # Push to main with release-plan.yaml or common file changes (auto sync-issue)
   push:
     branches: [main]
-    paths: ['release-plan.yaml']
+    paths: ['release-plan.yaml', 'code/common/**']
 
   # Manual trigger for sync-issue only
   # Use this for: initial setup, recovery after manual repo changes, or forced sync

From 545d3d4404f8d358ab8d0d2e732c275e33898402 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 17 Apr 2026 13:17:43 +0200
Subject: [PATCH 095/157] fix: update Tier 3 fallback ref from
 validation-framework to v1-rc

Fork PRs cannot obtain OIDC tokens, so the resolve-ref step falls
through to the Tier 3 fallback which still pointed at the
validation-framework branch instead of v1-rc.
---
 .github/workflows/validation.yml | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 2121db5e..6959fbbc 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -117,14 +117,10 @@ jobs:
             }
 
             // Tier 3: Hardcoded fallback tag
-            // ┌──────────────────────────────────────────────────────────┐
-            // │ PRE-RC: branch ref — replace with v1-rc tag after tagging │
-            // │   repo: camaraproject/tooling  ref: validation-framework│
-            // └──────────────────────────────────────────────────────────┘
             core.setOutput('tooling_checkout_repo', 'camaraproject/tooling');
-            core.setOutput('tooling_checkout_ref', 'validation-framework');
+            core.setOutput('tooling_checkout_ref', 'v1-rc');
             core.setOutput('tooling_ref_source', 'fallback_tag');
-            core.info('Tooling ref: fallback camaraproject/tooling@validation-framework');
+            core.info('Tooling ref: fallback camaraproject/tooling@v1-rc');
 
       # ── Step 3: Checkout tooling (sparse) ──────────────────────────
       - name: Checkout tooling

From 5feb1507def3a05acc526a4203e5b7a24392b665 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 17 Apr 2026 17:15:16 +0200
Subject: [PATCH 096/157] =?UTF-8?q?fix:=20broken=20YAML=20output=20?=
 =?UTF-8?q?=E2=80=94=20comment=20merged=20with=20key=20on=20same=20line?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The comment and the common_cache_status output key were on the same
line, making the entire line a YAML comment. The output was never
exposed to downstream jobs, causing all sync steps to silently skip.
---
 .github/workflows/release-automation-reusable.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index bbeb324a..7bf9dd49 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -383,7 +383,8 @@ jobs:
       release_plan_url: ${{ steps.state.outputs.release_plan_url }}
       src_commit_sha_short: ${{ steps.state.outputs.src_commit_sha_short }}
       meta_release: ${{ steps.state.outputs.meta_release }}
-      # Common file cache sync outputs      common_cache_status: ${{ steps.cache-sync.outputs.common_cache_status }}
+      # Common file cache sync outputs
+      common_cache_status: ${{ steps.cache-sync.outputs.common_cache_status }}
       common_cache_details: ${{ steps.cache-sync.outputs.common_cache_details }}
       common_sync_pr_url: ${{ steps.find-sync-pr.outputs.common_sync_pr_url }}
     steps:

From e665fc6dc524a4f1871f53e1c8878478fb65665c Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 17 Apr 2026 17:51:12 +0200
Subject: [PATCH 097/157] fix: add continue-on-error to sync checkouts + post
 failure comment

Two fixes for the sync steps in update-issue:

1. Add continue-on-error to both checkout steps (API repo +
   Commonalities) so a checkout failure (e.g. tag doesn't exist yet)
   skips the sync gracefully without killing the Release Issue update.

2. Post a "common sync failed" bot comment when the Commonalities
   checkout fails, explaining that the tag may not exist yet and
   guiding the user to retry after the release is published.

Previously, a Commonalities checkout failure cascaded to skip all
remaining steps including Sync Release Issue, and produced no
user-visible error message.
---
 .../workflows/release-automation-reusable.yml | 21 +++++++++++++++++--
 .../bot_messages/common_sync_failed.md        |  6 ++++++
 .../tests/test_template_context_contract.py   |  5 +++--
 3 files changed, 28 insertions(+), 4 deletions(-)
 create mode 100644 release_automation/templates/bot_messages/common_sync_failed.md

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index 7bf9dd49..7bfaea96 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -2101,7 +2101,9 @@ jobs:
       # sync failures don't prevent the issue update.
 
       - name: Checkout API repo (for common sync)
+        id: sync-checkout-api
         if: needs.derive-state.outputs.common_cache_status == 'stale'
+        continue-on-error: true
         uses: actions/checkout@v6
         with:
           path: _api_repo
@@ -2109,7 +2111,9 @@ jobs:
           token: ${{ steps.app-token.outputs.token || github.token }}
 
       - name: Checkout Commonalities at declared tag (sparse)
-        if: needs.derive-state.outputs.common_cache_status == 'stale'
+        id: sync-checkout-commonalities
+        if: steps.sync-checkout-api.outcome == 'success'
+        continue-on-error: true
         uses: actions/checkout@v6
         with:
           repository: camaraproject/Commonalities
@@ -2120,7 +2124,7 @@ jobs:
 
       - name: Copy Commonalities files and write manifest
         id: sync-common
-        if: needs.derive-state.outputs.common_cache_status == 'stale'
+        if: steps.sync-checkout-commonalities.outcome == 'success'
         continue-on-error: true
         env:
           COMMONALITIES_RELEASE: ${{ needs.derive-state.outputs.commonalities_release }}
@@ -2432,6 +2436,19 @@ jobs:
               "common_sync_pr_url": "${{ steps.sync-pr.outputs.sync_pr_url }}"
             }
 
+      - name: Post Common Sync Failed
+        if: |
+          steps.sync-checkout-commonalities.outcome == 'failure' &&
+          (needs.derive-state.outputs.release_issue_number != '' || steps.sync.outputs.issue_number != '')
+        uses: ./_tooling/shared-actions/post-bot-comment
+        with:
+          issue_number: ${{ needs.derive-state.outputs.release_issue_number || steps.sync.outputs.issue_number }}
+          release_tag: ${{ needs.derive-state.outputs.release_tag }}
+          run_id: ${{ github.run_id }}
+          template: common_sync_failed
+          base_context: ${{ needs.assemble-context.outputs.base_context }}
+          github_token: ${{ steps.app-token.outputs.token || github.token }}
+
   # ─────────────────────────────────────────────────────────────────────────────
   # Phase 7: Post final result comment
   # ─────────────────────────────────────────────────────────────────────────────
diff --git a/release_automation/templates/bot_messages/common_sync_failed.md b/release_automation/templates/bot_messages/common_sync_failed.md
new file mode 100644
index 00000000..cb64e0ca
--- /dev/null
+++ b/release_automation/templates/bot_messages/common_sync_failed.md
@@ -0,0 +1,6 @@
+**⚠️ Common file sync failed**
+Could not sync `code/common/` files from Commonalities `{{commonalities_release}}`: the release tag may not exist yet.
+
+The `/create-snapshot` command remains blocked until common files are in sync. Once the Commonalities release is published, trigger sync via `workflow_dispatch` or push to `release-plan.yaml`.
+
+{{#workflow_run_url}}[View workflow logs]({{workflow_run_url}}){{/workflow_run_url}}
diff --git a/release_automation/tests/test_template_context_contract.py b/release_automation/tests/test_template_context_contract.py
index 9a83d53f..b2b36fa9 100644
--- a/release_automation/tests/test_template_context_contract.py
+++ b/release_automation/tests/test_template_context_contract.py
@@ -15,6 +15,7 @@
 
 KNOWN_TEMPLATES = [
     "command_rejected",
+    "common_sync_failed",
     "common_sync_pr_created",
     "config_drift_warning",
     "config_error",
@@ -117,10 +118,10 @@ def test_all_known_templates_exist(self, responder):
             )
 
     def test_list_templates_returns_expected_count(self, responder):
-        """list_templates() returns at least 13 templates."""
+        """list_templates() returns at least 14 templates."""
         templates = responder.list_templates()
         assert len(templates) >= 13, (
-            f"Expected at least 13 templates, got {len(templates)}: {templates}"
+            f"Expected at least 14 templates, got {len(templates)}: {templates}"
         )
 
     def test_build_context_no_none_values(self):

From e4945582ab276fa803ec072205cf164bedb839eb Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Fri, 17 Apr 2026 18:05:34 +0200
Subject: [PATCH 098/157] fix: pass empty JSON context to post-bot-comment for
 sync failed

The post-bot-comment action requires a valid JSON string for the context
input. When omitted, the env var is empty and json.loads('') fails with
"Invalid JSON context". Pass '{}' explicitly.
---
 .github/workflows/release-automation-reusable.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index 7bfaea96..5a9a85a7 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -2448,6 +2448,7 @@ jobs:
           template: common_sync_failed
           base_context: ${{ needs.assemble-context.outputs.base_context }}
           github_token: ${{ steps.app-token.outputs.token || github.token }}
+          context: '{}'
 
   # ─────────────────────────────────────────────────────────────────────────────
   # Phase 7: Post final result comment

From e1e5f7f1a412d51e543c93366ef6823be9814d81 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sat, 18 Apr 2026 19:47:17 +0200
Subject: [PATCH 099/157] Pin 11 rules via
 regression/r4.1-broken-spec-test-files

Adds P-001, P-002, P-004, P-005 and G-002, G-014, G-016, G-019, G-021,
G-024, G-025 to tested_rules (total_tested 35 -> 46). Branch 8 of the
broken-spec regression roadmap covers the Python and gherkin engines
via a synthetic 'Bad_Name_Api' release-plan entry, a server URL edit on
sample-service.yaml, and surgical gherkin defects in
sample-service-createResource.feature.

P-003 and P-007 were evaluated and excluded: both early-return on
feature branches by design, so regression (always feature-classified)
cannot trigger them. The roadmap note under the plan table is updated
to reflect this.
---
 validation/docs/regression-testing.md |  9 ++++++++-
 validation/rules/rule-inventory.yaml  | 13 ++++++++++++-
 2 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/validation/docs/regression-testing.md b/validation/docs/regression-testing.md
index 44a01bc2..4961faa1 100644
--- a/validation/docs/regression-testing.md
+++ b/validation/docs/regression-testing.md
@@ -327,7 +327,7 @@ theme becomes one `regression/r4.1-broken-spec-` branch.
 | 5 | `regression/r4.1-broken-spec-schema-constraints` | `sample-service.yaml` components (not common files — avoid baseline collision) | S-012, S-017, S-030, S-300, S-303, S-308, S-309, S-310, S-311, S-312 | MEDIUM |
 | 6 | `regression/r4.1-broken-spec-routing` | `sample-service.yaml` — paths, operationIds, HTTP methods, servers | S-002, S-003, S-007, S-008, S-010, S-204, S-214, S-217, S-218, S-220, S-222, S-224, S-225, S-226, S-227, S-301, S-306 | HIGH |
 | 7 | `regression/r4.1-broken-spec-subscriptions` | `sample-service-subscriptions.yaml` + `sample-implicit-events.yaml` — CloudEvent / Protocol / sink / notifications + Python subscription checks | S-032, S-033, S-034, S-035, P-014, P-015, P-016, P-020 | HIGH |
-| 8 (optional) | `regression/r4.1-broken-spec-test-files` | `code/Test_definitions/*.feature` — filename / version / gherkin defects | P-001, P-002, P-003, P-004, P-005, P-007, P-008, selected G-* | LOW |
+| 8 (optional) | `regression/r4.1-broken-spec-test-files` | `release-plan.yaml` synthetic API + `sample-service.yaml` server URL + `sample-service-createResource.feature` gherkin defects | P-001, P-002, P-004, P-005, G-002, G-014, G-016, G-019, G-021, G-024, G-025 | LOW |
 
 Rules **not** covered by any broken-spec branch:
 
@@ -336,6 +336,13 @@ Rules **not** covered by any broken-spec branch:
   pinning — they would double-count.
 - **Un-triggerable via spec edits**: P-009, P-010, P-011, P-012, P-013,
   P-019 (release-plan / PR-context / fixture-dependent).
+- **Branch-type dependent — silent on feature branches**: P-003
+  (`check-info-version-format`) and P-007 (`check-test-file-version`)
+  early-return unless `branch_type` is `main`, `release`, or
+  `maintenance`. Regression branches are feature-named and always
+  classify as `feature`, so these checks cannot be pinned via the
+  broken-spec branch model. Unit tests at the Python check level remain
+  authoritative for them.
 - **Deprecated, OAS-3.1-only, or low-signal**: S-001, S-004, S-015,
   S-205, S-206, S-208, S-209, S-228, S-302, S-304, S-305, S-315, S-317,
   S-319.
diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index 0b99c9d3..4635c120 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -18,7 +18,7 @@ summary:
   total_gap: 0
   total_manual: 25
   total_pending: 0
-  total_tested: 35
+  total_tested: 46
   by_engine:
     spectral: 84
     gherkin: 25
@@ -299,6 +299,17 @@ pending_rules:
 # Format: rule_id: [branch, ...]
 
 tested_rules:
+  G-002: [regression/r4.1-broken-spec-test-files]
+  G-014: [regression/r4.1-broken-spec-test-files]
+  G-016: [regression/r4.1-broken-spec-test-files]
+  G-019: [regression/r4.1-broken-spec-test-files]
+  G-021: [regression/r4.1-broken-spec-test-files]
+  G-024: [regression/r4.1-broken-spec-test-files]
+  G-025: [regression/r4.1-broken-spec-test-files]
+  P-001: [regression/r4.1-broken-spec-test-files]
+  P-002: [regression/r4.1-broken-spec-test-files]
+  P-004: [regression/r4.1-broken-spec-test-files]
+  P-005: [regression/r4.1-broken-spec-test-files]
   P-006: [regression/r4.1-main-baseline]
   S-005: [regression/r4.1-broken-spec-yaml-fundamentals]
   S-016: [regression/r4.1-broken-spec-yaml-fundamentals]

From 1d138cf863a05a570bbaa28a8a0a994cfcf8baf0 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sun, 19 Apr 2026 08:01:43 +0200
Subject: [PATCH 100/157] Pin 11 rules via
 regression/r4.1-broken-spec-descriptions

---
 validation/rules/rule-inventory.yaml | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index 4635c120..87bc80b5 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -18,7 +18,7 @@ summary:
   total_gap: 0
   total_manual: 25
   total_pending: 0
-  total_tested: 46
+  total_tested: 57
   by_engine:
     spectral: 84
     gherkin: 25
@@ -312,6 +312,11 @@ tested_rules:
   P-005: [regression/r4.1-broken-spec-test-files]
   P-006: [regression/r4.1-main-baseline]
   S-005: [regression/r4.1-broken-spec-yaml-fundamentals]
+  S-006: [regression/r4.1-broken-spec-descriptions]
+  S-009: [regression/r4.1-broken-spec-descriptions]
+  S-011: [regression/r4.1-broken-spec-descriptions]
+  S-013: [regression/r4.1-broken-spec-descriptions]
+  S-014: [regression/r4.1-broken-spec-descriptions]
   S-016: [regression/r4.1-broken-spec-yaml-fundamentals]
   S-018: [regression/r4.1-broken-spec-api-metadata]
   S-019: [regression/r4.1-broken-spec-api-metadata]
@@ -323,10 +328,16 @@ tested_rules:
   S-025: [regression/r4.1-broken-spec-error-handling]
   S-026: [regression/r4.1-broken-spec-error-handling]
   S-027: [regression/r4.1-broken-spec-error-handling]
+  S-028: [regression/r4.1-broken-spec-descriptions]
+  S-029: [regression/r4.1-broken-spec-descriptions]
+  S-031: [regression/r4.1-broken-spec-descriptions]
   S-201: [regression/r4.1-broken-spec-api-metadata]
   S-210: [regression/r4.1-broken-spec-api-metadata]
   S-211: [regression/r4.1-main-baseline]
+  S-215: [regression/r4.1-broken-spec-descriptions]
+  S-216: [regression/r4.1-broken-spec-descriptions]
   S-221: [regression/r4.1-broken-spec-error-handling]
+  S-223: [regression/r4.1-broken-spec-descriptions]
   S-307: [regression/r4.1-broken-spec-error-handling]
   S-313: [regression/r4.1-main-baseline]
   S-314: [regression/r4.1-main-baseline]

From 939c61f88202cde67d6dad37ec2d511346488435 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sun, 19 Apr 2026 09:20:25 +0200
Subject: [PATCH 101/157] Pin 8 rules via
 regression/r4.1-broken-spec-schema-constraints

---
 validation/rules/rule-inventory.yaml | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index 87bc80b5..0b4c577c 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -18,7 +18,7 @@ summary:
   total_gap: 0
   total_manual: 25
   total_pending: 0
-  total_tested: 57
+  total_tested: 65
   by_engine:
     spectral: 84
     gherkin: 25
@@ -330,6 +330,7 @@ tested_rules:
   S-027: [regression/r4.1-broken-spec-error-handling]
   S-028: [regression/r4.1-broken-spec-descriptions]
   S-029: [regression/r4.1-broken-spec-descriptions]
+  S-030: [regression/r4.1-broken-spec-schema-constraints]
   S-031: [regression/r4.1-broken-spec-descriptions]
   S-201: [regression/r4.1-broken-spec-api-metadata]
   S-210: [regression/r4.1-broken-spec-api-metadata]
@@ -338,7 +339,14 @@ tested_rules:
   S-216: [regression/r4.1-broken-spec-descriptions]
   S-221: [regression/r4.1-broken-spec-error-handling]
   S-223: [regression/r4.1-broken-spec-descriptions]
+  S-300: [regression/r4.1-broken-spec-schema-constraints]
+  S-303: [regression/r4.1-broken-spec-schema-constraints]
   S-307: [regression/r4.1-broken-spec-error-handling]
+  S-308: [regression/r4.1-broken-spec-schema-constraints]
+  S-309: [regression/r4.1-broken-spec-schema-constraints]
+  S-310: [regression/r4.1-broken-spec-schema-constraints]
+  S-311: [regression/r4.1-broken-spec-schema-constraints]
+  S-312: [regression/r4.1-broken-spec-schema-constraints]
   S-313: [regression/r4.1-main-baseline]
   S-314: [regression/r4.1-main-baseline]
   S-316: [regression/r4.1-main-baseline]

From 308bd32da3314169a296f0ae4626dc4e4ce459b2 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sun, 19 Apr 2026 11:16:59 +0200
Subject: [PATCH 102/157] Pin 11 rules via regression/r4.1-broken-spec-routing

---
 validation/rules/rule-inventory.yaml | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/validation/rules/rule-inventory.yaml b/validation/rules/rule-inventory.yaml
index 0b4c577c..0b031dc1 100644
--- a/validation/rules/rule-inventory.yaml
+++ b/validation/rules/rule-inventory.yaml
@@ -18,7 +18,7 @@ summary:
   total_gap: 0
   total_manual: 25
   total_pending: 0
-  total_tested: 65
+  total_tested: 76
   by_engine:
     spectral: 84
     gherkin: 25
@@ -311,9 +311,14 @@ tested_rules:
   P-004: [regression/r4.1-broken-spec-test-files]
   P-005: [regression/r4.1-broken-spec-test-files]
   P-006: [regression/r4.1-main-baseline]
+  S-002: [regression/r4.1-broken-spec-routing]
+  S-003: [regression/r4.1-broken-spec-routing]
   S-005: [regression/r4.1-broken-spec-yaml-fundamentals]
   S-006: [regression/r4.1-broken-spec-descriptions]
+  S-007: [regression/r4.1-broken-spec-routing]
+  S-008: [regression/r4.1-broken-spec-routing]
   S-009: [regression/r4.1-broken-spec-descriptions]
+  S-010: [regression/r4.1-broken-spec-routing]
   S-011: [regression/r4.1-broken-spec-descriptions]
   S-013: [regression/r4.1-broken-spec-descriptions]
   S-014: [regression/r4.1-broken-spec-descriptions]
@@ -337,10 +342,16 @@ tested_rules:
   S-211: [regression/r4.1-main-baseline]
   S-215: [regression/r4.1-broken-spec-descriptions]
   S-216: [regression/r4.1-broken-spec-descriptions]
+  S-217: [regression/r4.1-broken-spec-routing]
+  S-220: [regression/r4.1-broken-spec-routing]
   S-221: [regression/r4.1-broken-spec-error-handling]
+  S-222: [regression/r4.1-broken-spec-routing]
   S-223: [regression/r4.1-broken-spec-descriptions]
+  S-225: [regression/r4.1-broken-spec-routing]
+  S-227: [regression/r4.1-broken-spec-routing]
   S-300: [regression/r4.1-broken-spec-schema-constraints]
   S-303: [regression/r4.1-broken-spec-schema-constraints]
+  S-306: [regression/r4.1-broken-spec-routing]
   S-307: [regression/r4.1-broken-spec-error-handling]
   S-308: [regression/r4.1-broken-spec-schema-constraints]
   S-309: [regression/r4.1-broken-spec-schema-constraints]

From c7ce136c58c9197ae72132472ef510ce57c60c80 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sun, 19 Apr 2026 16:40:25 +0200
Subject: [PATCH 103/157] docs(release-automation): document common cache sync
 + refresh caller docs

- release_automation/docs/technical-architecture.md: add section 2.13
  Common Cache Sync describing the sync handler embedded in update-issue,
  manifest contract, state x trigger matrix, sync PR model, notifications,
  and the boundary with the validation framework P-021 check.
- release_automation/docs/repository-setup.md: refresh to reflect the
  current setup. Cover both caller workflows (release-automation and
  CAMARA Validation) side-by-side, describe the push-path filter
  (release-plan.yaml, code/common/**, caller self-trigger), and drop
  the pr_validation v0-era Recommended Enhancement and the outdated
  reference-lifecycle table (defer to branching-model.md).
- release_automation/workflows/release-automation-caller.yml: mention
  code/common/** in the header trigger comment and add the caller
  itself to push.paths so future caller updates pick up their own
  trigger on merge (first merge from pre-trigger state still needs
  one manual dispatch, as documented in the update PR body).
- .github/workflows/release-automation-reusable.yml: update the comment
  on the push-trigger classification block to include the caller-self
  path alongside release-plan.yaml and code/common/.
---
 .../workflows/release-automation-reusable.yml |  3 +-
 release_automation/docs/repository-setup.md   | 83 +++++++++----------
 .../docs/technical-architecture.md            | 48 +++++++++++
 .../workflows/release-automation-caller.yml   | 14 +++-
 4 files changed, 100 insertions(+), 48 deletions(-)

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index 5a9a85a7..d96a3ae9 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -199,7 +199,8 @@ jobs:
             }
 
             // ─────────────────────────────────────────────────────────────────
-            // Event: push (release-plan.yaml or code/common changes on main)
+            // Event: push (release-plan.yaml, code/common, or the caller
+            // workflow itself changed on main)
             // ─────────────────────────────────────────────────────────────────
             else if (eventName === 'push') {
               // Classify by which paths changed
diff --git a/release_automation/docs/repository-setup.md b/release_automation/docs/repository-setup.md
index 8b2191de..d05777e8 100644
--- a/release_automation/docs/repository-setup.md
+++ b/release_automation/docs/repository-setup.md
@@ -1,6 +1,6 @@
 # Repository Setup for Release Automation
 
-**Last Updated**: 2026-03-01
+**Last Updated**: 2026-04-19
 
 ## Overview
 
@@ -8,7 +8,7 @@ API repositories that adopt the CAMARA release automation need specific reposito
 
 This document defines the required configuration for each API repository. It serves as the **specification** that the automated onboarding tooling implements — repository administrators do not need to apply or verify this configuration manually.
 
-**Automated application**: The [release automation onboarding campaign](https://github.com/camaraproject/project-administration/pull/134) in `project-administration` applies the full configuration to API repositories. It includes a campaign workflow (caller workflow, CHANGELOG structure, CODEOWNERS adjustments) and an admin script (repository ruleset). Both support dry-run/plan modes and phased rollout — test repositories first, then volunteering repos, then all.
+**Automated application**: The `campaign-release-automation-onboarding` campaign in [`camaraproject/project-administration`](https://github.com/camaraproject/project-administration) applies the full configuration to API repositories. It installs both the release-automation caller workflow and the CAMARA Validation caller workflow side-by-side, sets up the CHANGELOG directory structure, and uses a stable reconciliation branch so repeated runs update the same PR rather than creating new ones. A separate admin script (`apply-release-rulesets.sh`) applies the repository rulesets. Both support dry-run / plan modes and phased rollout — test repositories first, then volunteering repos, then all.
 
 **New repositories**: After rollout, the configuration will also be applied to `Template_API_Repository` ([camaraproject/tooling#82](https://github.com/camaraproject/tooling/issues/82)), so that newly created API repositories inherit it automatically.
 
@@ -26,7 +26,8 @@ This document defines the required configuration for each API repository. It ser
 |------|---------|---------|
 | Repository ruleset | Branch protection for snapshot branches | [Ruleset](#repository-ruleset) |
 | CODEOWNERS file | Codeowner assignment for `/publish-release` authorization | [CODEOWNERS](#codeowners-requirements) |
-| Caller workflow file | Entry point that connects the repo to the automation | [Caller Workflow](#caller-workflow) |
+| Release-automation caller workflow | Entry point that connects the repo to the release automation | [Caller Workflows](#caller-workflows) |
+| CAMARA Validation caller workflow | Entry point that connects the repo to the validation framework | [Caller Workflows](#caller-workflows) |
 | `release-plan.yaml` | Release configuration (target tag, type, APIs) | [Required Files](#required-files) |
 | README delimiters | Release Information section markers | [Required Files](#required-files) |
 | CHANGELOG structure | Directory layout for per-cycle changelog files | [CHANGELOG Structure](#changelog-structure) |
@@ -297,49 +298,47 @@ The `/publish-release` command checks CODEOWNERS to authorize the publishing use
 
 ---
 
-## Caller Workflow
+## Caller Workflows
 
-The caller workflow is the entry point that connects an API repository to the release automation. It is a static YAML file installed at `.github/workflows/release-automation.yml`.
+API repositories carry **two** caller workflows installed side-by-side by the onboarding campaign:
 
-### Source template
+| Caller file | Connects to | Canonical template in `camaraproject/tooling` |
+|-------------|-------------|------------------------------------------------|
+| `.github/workflows/release-automation.yml` | Release automation | `release_automation/workflows/release-automation-caller.yml` |
+| `.github/workflows/camara-validation.yml` | Validation framework | `validation/workflows/validation-caller.yml` |
 
-The canonical caller workflow template is maintained in the tooling repository:
-
-```
-camaraproject/tooling (release-automation branch)
-  └── release_automation/workflows/release-automation-caller.yml
-```
-
-The onboarding campaign reads this file and copies it to each target repository. Do not maintain separate copies — the template in `tooling` is the single source of truth.
+Both are static files. The onboarding / reconciliation campaign reads them from the tooling repository and copies them into each target repo — do not maintain separate copies.
 
 ### Reference lifecycle
 
-The caller's `uses:` line references the reusable workflow in `camaraproject/tooling`. The reference changes as the automation progresses through rollout phases:
+The callers' `uses:` lines reference reusable workflows in `camaraproject/tooling` via a floating tag. The current RC period uses the unified `@v1-rc` tag for both callers; GA will switch both to `@v1`. See [branching-model.md](branching-model.md) for the full phase model, tag strategy, and how callers transition between refs.
 
-| Phase | `uses:` ref | Who uses it |
-|-------|-------------|-------------|
-| Alpha | `@release-automation` | Test repositories |
-| RC | `@ra-v1-rc` | Test + volunteering repos |
-| GA | `@v1` | All API repositories |
+Transitions between refs are applied by re-dispatching the reconciliation campaign with the new ref inputs — each repo gets a single update PR on the stable reconciliation branch.
 
-See [branching-model.md](branching-model.md) for the full lifecycle and tag strategy.
+### Release automation caller
 
-When transitioning between phases, a campaign updates the `uses:` line across all participating repositories.
-
-### Key configuration in the caller
+Installed at `.github/workflows/release-automation.yml`. Key configuration:
 
 | Aspect | Value | Purpose |
 |--------|-------|---------|
-| **Permissions** | `contents: write`, `issues: write`, `pull-requests: write`, `id-token: write` | Branch/release ops, issue management, PR creation, OIDC claim access for called-workflow repo/SHA resolution |
+| **Permissions** | `contents: write`, `issues: write`, `pull-requests: write`, `id-token: write` | Branch / release ops, issue management, PR creation, OIDC claim access for tooling checkout consistency |
 | **Concurrency** | `release-automation-${{ github.repository }}`, `cancel-in-progress: false` | Serialize runs, prevent race conditions |
-| **Triggers** | `issue_comment`, `issues`, `pull_request`, `push`, `workflow_dispatch` | Slash commands, lifecycle events, auto-sync, manual |
+| **Triggers** | `issue_comment`, `issues`, `pull_request` (on `release-snapshot/**`), `push` (on `main`), `workflow_dispatch` | Slash commands, lifecycle events, auto-sync, manual |
+
+**Push-path filter on main** (controls when the caller auto-fires):
+- `release-plan.yaml` — triggers sync-issue (release configuration changed)
+- `code/common/**` — triggers sync-issue + common-cache sync handler (cache updated for repos on `commonalities_release >= r4.2`)
+- `.github/workflows/release-automation.yml` — triggers sync-issue so a caller update is picked up immediately after merge
+
+### CAMARA Validation caller
+
+Installed at `.github/workflows/camara-validation.yml`. Runs validation on PRs and on `workflow_dispatch`. Controlled centrally by the stage setting in the validation framework's per-repo config file — repos at stage `disabled` have the caller installed but the reusable workflow exits immediately. See the validation framework documentation for stage semantics.
 
-For break-glass or testing, the caller can set `with.tooling_ref_override` in the workflow file.
-This requires committing a workflow file change in the target repository.
-The value must be a full 40-character SHA.
+### Reusable-workflow checkout consistency
 
-No caller-side repository override is needed for fork testing: the reusable workflow derives the
-tooling repository from OIDC claim `job_workflow_ref`.
+Both reusable workflows derive their tooling checkout (Python scripts, shared actions) from OIDC claims on the caller's `id-token: write` — guaranteeing that helper code ships from the same repository + commit as the workflow itself, even when callers reference floating tags such as `@v1-rc` or `@v1`.
+
+For break-glass or testing, the release-automation caller can set `with.tooling_ref_override` to a full 40-character SHA. No caller-side repository override is needed for fork testing — OIDC handles it.
 
 ---
 
@@ -452,16 +451,6 @@ A follow-up campaign moves the legacy content from root `CHANGELOG.md` into the
 
 ---
 
-## Recommended Enhancements
-
-### Configuration drift protection
-
-When a release snapshot is active, changes to `release-plan.yaml` on `main` can cause the snapshot to diverge from the current configuration. The release automation includes a post-merge warning (config drift warning posted to the Release Issue), but does not block the PR.
-
-For stronger protection, the `pr_validation` workflow can be extended to block PRs that modify `release-plan.yaml` when a `release-snapshot/*` branch exists. This is tracked as [camaraproject/tooling#63](https://github.com/camaraproject/tooling/issues/63) and can be implemented independently on the `main` branch (pr_validation v0).
-
----
-
 ## Verification Checklist
 
 Use this checklist to verify that a repository is correctly configured for release automation. This is the acceptance checklist for test repo setup.
@@ -489,12 +478,18 @@ Use this checklist to verify that a repository is correctly configured for relea
 - [ ] First `*` line lists at least one individual codeowner (`@username`)
 - [ ] `/CHANGELOG.md` and/or `/CHANGELOG.MD` lines present with `@camaraproject/release-management_reviewers`
 
-### Caller Workflow
+### Caller Workflows
 
+Release automation caller:
 - [ ] `.github/workflows/release-automation.yml` exists
-- [ ] `uses:` line references correct org/repo/ref for current phase
-- [ ] `permissions:` includes `contents: write`, `issues: write`, `pull-requests: write`
+- [ ] `uses:` line references correct org/repo/ref for current phase (see [branching-model.md](branching-model.md))
+- [ ] `permissions:` includes `contents: write`, `issues: write`, `pull-requests: write`, `id-token: write`
 - [ ] `concurrency:` group is `release-automation-${{ github.repository }}`
+- [ ] `push.paths` includes `release-plan.yaml`, `code/common/**`, and `.github/workflows/release-automation.yml`
+
+Validation caller:
+- [ ] `.github/workflows/camara-validation.yml` exists
+- [ ] `uses:` line references the validation reusable workflow at the correct ref
 
 ### Required Files
 
diff --git a/release_automation/docs/technical-architecture.md b/release_automation/docs/technical-architecture.md
index 1b9fe96f..b81c3f08 100644
--- a/release_automation/docs/technical-architecture.md
+++ b/release_automation/docs/technical-architecture.md
@@ -509,6 +509,54 @@ Creates a sync PR to main after release publication.
 
 ---
 
+### 2.13 Common Cache Sync
+
+Keeps `code/common/` in sync with the Commonalities release declared under `dependencies.commonalities_release` in `release-plan.yaml`. Only active for repositories declaring `commonalities_release >= r4.2` (earlier releases do not consume common files via `$ref`).
+
+**Handler location:** Embedded in the `update-issue` job of [release-automation-reusable.yml](../../.github/workflows/release-automation-reusable.yml) rather than a standalone job, so the cache converges on every RA invocation that reaches update-issue (push, `workflow_dispatch`, slash commands ending in a state update).
+
+**Signal model:** The `derive-state` job outputs `common_cache_status` with three values:
+
+| Value | Meaning |
+|-------|---------|
+| `""` | Unchecked — repo declares commonalities `< r4.2` or has no cache yet |
+| `in_sync` | Cache matches the declared tag |
+| `stale` | Drift detected (tag mismatch, missing files, or modified files); `common_cache_details` carries the reason |
+
+**Trigger x state matrix:** Sync-PR creation is state-dependent to avoid mixing concerns with an active release:
+
+| Release state | Push to `release-plan.yaml` or `code/common/**` | `/create-snapshot` | `/discard-snapshot` or `/delete-draft` | `workflow_dispatch` |
+|---------------|------------------------------------------------|--------------------|---------------------------------------|---------------------|
+| `NOT_PLANNED` / `PLANNED` / `PUBLISHED` | Create / update sync PR | Block (command rejected when stale) | n/a | Create / update sync PR |
+| `SNAPSHOT_ACTIVE` / `DRAFT_READY` | Drift warning in Release Issue only | n/a | Create / update sync PR | Create / update sync PR |
+
+**Sync PR model:**
+- Branch: `sync-common/{tag}` (e.g., `sync-common/r4.2`)
+- Author: `camara-release-automation` App bot
+- No force-push — codeowners may push fix commits to the PR before merge
+- Stale PRs (wrong tag after a dependency bump) are closed automatically when a new sync is needed
+- Standard CODEOWNERS review applies (one codeowner approval is sufficient; bot is author)
+
+**Data contracts:**
+- [`code/common/.sync-manifest.yaml`](../../validation/schemas/sync-manifest-schema.yaml) — records source repository (Commonalities), declared release tag, and per-file git blob SHA-1. Written by the RA sync handler; read by both RA (`derive-state` drift detection) and the validation framework (`P-021 check-common-cache-sync`).
+- [`tooling_lib/cache_sync.py`](../../tooling_lib/cache_sync.py) — shared drift-detection logic (`SyncStatus`, `SourceStatus`, `check_sync_status`, `git_blob_sha`). Imported by both RA and VF so they produce identical verdicts.
+
+**Notifications:**
+- `common_sync_pr_created.md` — bot comment on the Release Issue when a new sync PR is created (fires once per creation, not once per stale-detection)
+- `common_sync_failed.md` — bot comment when checkout from Commonalities fails (for example, the declared tag has not yet been published)
+- The Release Issue config section shows the `stale` warning on every sync-issue update until the cache converges
+
+**Boundary with validation framework:**
+- RA owns detection and repair (writes common files, creates/updates sync PR, writes manifest)
+- VF's `P-021 check-common-cache-sync` is a read-only safety net. It fires only when codeowners bypass the sync process (e.g., direct edits to `code/common/`) — the design expects it to be silent in normal operation
+- The `camara-release-automation` App has `contents: write`; the `camara-validation` App deliberately does not
+
+**References:**
+- [Commonalities-Consumption-and-Bundling-Design.md §4](https://github.com/camaraproject/ReleaseManagement/blob/main/documentation/SupportingDocuments/Commonalities-Consumption-and-Bundling-Design.md) — cache and sync model
+- Upstream tracking: [ReleaseManagement#489](https://github.com/camaraproject/ReleaseManagement/issues/489)
+
+---
+
 ## 3. Workflow Architecture
 
 ### 3.1 Event Flow
diff --git a/release_automation/workflows/release-automation-caller.yml b/release_automation/workflows/release-automation-caller.yml
index b3d7fdd0..ee4f6cf3 100644
--- a/release_automation/workflows/release-automation-caller.yml
+++ b/release_automation/workflows/release-automation-caller.yml
@@ -9,7 +9,10 @@
 # - Slash commands: /create-snapshot, /discard-snapshot, /delete-draft, /publish-release
 # - Issue events: close (with auto-reopen), reopen
 # - PR merge: on release-snapshot branches (creates draft release)
-# - Push to main: when release-plan.yaml changes (auto sync-issue)
+# - Push to main: when release-plan.yaml, code/common/**, or this caller
+#   workflow itself change. release-plan.yaml and the caller trigger sync-issue;
+#   code/common/** additionally triggers the common-cache sync handler for
+#   repos declaring commonalities_release >= r4.2.
 # - Manual: workflow_dispatch triggers sync-issue (reads from release-plan.yaml)
 
 name: CAMARA Release Automation
@@ -29,10 +32,15 @@ on:
     branches:
       - 'release-snapshot/**'
 
-  # Push to main with release-plan.yaml or common file changes (auto sync-issue)
+  # Push to main with release-plan.yaml, common file, or caller workflow changes.
+  # Listing the caller itself as a path ensures the workflow runs once right
+  # after it is merged (future caller updates pick up their own trigger).
   push:
     branches: [main]
-    paths: ['release-plan.yaml', 'code/common/**']
+    paths:
+      - 'release-plan.yaml'
+      - 'code/common/**'
+      - '.github/workflows/release-automation.yml'
 
   # Manual trigger for sync-issue only
   # Use this for: initial setup, recovery after manual repo changes, or forced sync

From 9a9d2229c4c699935094505e88a29feba0578516 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Sun, 19 Apr 2026 16:52:47 +0200
Subject: [PATCH 104/157] docs(release-automation): clarify common-cache-sync
 handler gating
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Refine the caller template header comment and technical-architecture
2.13 to reflect that the common-cache-sync handler is always wired
into update-issue and gates internally on commonalities_release. The
primary purpose of the code/common/** push path is to refresh the
Release Issue body (typically right after a sync PR merge) — the
r4.2+ gate decides whether the handler actually does anything, not
whether it runs.
---
 release_automation/docs/technical-architecture.md        | 2 +-
 .../workflows/release-automation-caller.yml              | 9 +++++----
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/release_automation/docs/technical-architecture.md b/release_automation/docs/technical-architecture.md
index b81c3f08..4a73ff9a 100644
--- a/release_automation/docs/technical-architecture.md
+++ b/release_automation/docs/technical-architecture.md
@@ -511,7 +511,7 @@ Creates a sync PR to main after release publication.
 
 ### 2.13 Common Cache Sync
 
-Keeps `code/common/` in sync with the Commonalities release declared under `dependencies.commonalities_release` in `release-plan.yaml`. Only active for repositories declaring `commonalities_release >= r4.2` (earlier releases do not consume common files via `$ref`).
+Keeps `code/common/` in sync with the Commonalities release declared under `dependencies.commonalities_release` in `release-plan.yaml`. The handler runs on every `update-issue` pass. For repositories declaring `commonalities_release >= r4.2` it checks the cache and acts on drift; for earlier releases (which don't consume common files via `$ref`) it reports `common_cache_status = ""` (unchecked) and exits without action.
 
 **Handler location:** Embedded in the `update-issue` job of [release-automation-reusable.yml](../../.github/workflows/release-automation-reusable.yml) rather than a standalone job, so the cache converges on every RA invocation that reaches update-issue (push, `workflow_dispatch`, slash commands ending in a state update).
 
diff --git a/release_automation/workflows/release-automation-caller.yml b/release_automation/workflows/release-automation-caller.yml
index ee4f6cf3..0fd60318 100644
--- a/release_automation/workflows/release-automation-caller.yml
+++ b/release_automation/workflows/release-automation-caller.yml
@@ -9,10 +9,11 @@
 # - Slash commands: /create-snapshot, /discard-snapshot, /delete-draft, /publish-release
 # - Issue events: close (with auto-reopen), reopen
 # - PR merge: on release-snapshot branches (creates draft release)
-# - Push to main: when release-plan.yaml, code/common/**, or this caller
-#   workflow itself change. release-plan.yaml and the caller trigger sync-issue;
-#   code/common/** additionally triggers the common-cache sync handler for
-#   repos declaring commonalities_release >= r4.2.
+# - Push to main: on release-plan.yaml, code/common/**, or this caller
+#   workflow itself. All three paths end up running sync-issue so the
+#   Release Issue body reflects the current repo state — most notably,
+#   pushing to code/common/** (typically after a sync PR merge) refreshes
+#   the common-cache status in the Release Issue and clears any stale warning.
 # - Manual: workflow_dispatch triggers sync-issue (reads from release-plan.yaml)
 
 name: CAMARA Release Automation

From 98c99a492f6c5955fdd11d1962bc07bce908fe46 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Mon, 20 Apr 2026 08:35:20 +0200
Subject: [PATCH 105/157] fix: validate-command crashes with SyntaxError on
 every slash command

---
 .github/workflows/release-automation-reusable.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index d96a3ae9..23af642b 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -738,7 +738,8 @@ jobs:
               return;
             }
 
-            // Block /create-snapshot when common file cache is stale            if (command === 'create-snapshot') {
+            // Block /create-snapshot when common file cache is stale
+            if (command === 'create-snapshot') {
               const cacheStatus = '${{ needs.derive-state.outputs.common_cache_status }}';
               const cacheDetails = '${{ needs.derive-state.outputs.common_cache_details }}';
               const syncPrUrl = '${{ needs.derive-state.outputs.common_sync_pr_url }}';

From 41401c4dd34b843ef7af4924cbccfeb430c4e38a Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Mon, 20 Apr 2026 09:50:13 +0200
Subject: [PATCH 106/157] fix: add tooling_lib to create-snapshot
 sparse-checkout

---
 .github/workflows/release-automation-reusable.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index 23af642b..3566c550 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -950,6 +950,7 @@ jobs:
             shared-actions/run-validation
             validation
             linting/config
+            tooling_lib
 
       # ── Pre-snapshot validation ────────────────────────────────────
       #

From c1b19b50ecd0e8457108a4c8d8de8ee4c2886545 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Mon, 20 Apr 2026 12:37:37 +0200
Subject: [PATCH 107/157] fix(validation): gate P-015 and P-020 to
 Commonalities >=r4.2

Both rules reference artifacts introduced in r4.2 (the implicit-events
template and CAMARA_event_common.yaml). Running them on r3.4 repositories
emitted findings whose remediation was not available in the Commonalities
version those repositories consume.

Fixes #200
---
 validation/rules/python-rules.yaml               | 15 +++++++++------
 validation/tests/test_rule_metadata_integrity.py |  6 ++----
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
index a7c53ea0..62a2dda4 100644
--- a/validation/rules/python-rules.yaml
+++ b/validation/rules/python-rules.yaml
@@ -146,19 +146,20 @@
 
 # P-015: check-event-type-format (DG-086)
 # Event types must follow org.camaraproject....
+# Gated to Commonalities >=r4.2, where the named ApiEventType schema
+# pattern (via $ref to CAMARA_event_common.yaml) is established and the
+# hint below is actionable.
 #
 # Level is conditional on api_pattern:
-#   - explicit-subscription: error (named EventType schema expected today)
-#   - implicit-subscription: warn (r4.1-era specs often inline the enum at
-#     CloudEvent.properties.type.enum, which the detector cannot see; the
-#     r4.2 migration path replaces inline CloudEvent with a $ref to
-#     CAMARA_event_common.yaml and a named ApiEventType schema, at which
-#     point this rule detects the event type correctly).
+#   - explicit-subscription: error
+#   - implicit-subscription: warn (conservative during migration to the
+#     named ApiEventType pattern)
 - id: P-015
   engine: python
   engine_rule: check-event-type-format
   applicability:
     api_pattern: [explicit-subscription, implicit-subscription]
+    commonalities_release: ">=r4.2"
   conditional_level:
     default: error
     overrides:
@@ -216,11 +217,13 @@
 # Detection: components.schemas.CloudEvent exists with top-level
 # `properties`. The $ref-only and `allOf: [{$ref: ...}]` forms have no
 # top-level `properties` and are not flagged.
+# Gated to Commonalities >=r4.2, where CAMARA_event_common.yaml exists.
 - id: P-020
   engine: python
   engine_rule: check-cloudevent-via-ref
   applicability:
     api_pattern: [explicit-subscription, implicit-subscription]
+    commonalities_release: ">=r4.2"
   conditional_level:
     default: warn
   hint: >-
diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py
index 323ec36f..69c9f1cd 100644
--- a/validation/tests/test_rule_metadata_integrity.py
+++ b/validation/tests/test_rule_metadata_integrity.py
@@ -318,10 +318,8 @@ def test_hints_are_exception_not_norm(self, all_rules):
     def test_p015_conditional_on_api_pattern(self, rule_index):
         """P-015 stays error on explicit-subscription, warn on implicit.
 
-        Implicit-subscription APIs using the r4.1-era inline CloudEvent
-        pattern (enum at CloudEvent.properties.type.enum) cannot be
-        detected by the check, so the rule downgrades to warn until the
-        r4.2 migration to $ref + named ApiEventType schema is complete.
+        Implicit-subscription APIs remain at warn as a conservative level
+        during migration to the named ApiEventType pattern.
         """
         rule = rule_index[("python", "check-event-type-format")]
         assert rule.id == "P-015"

From 5c495b3eee51bfec90ebc7fd37bca14b967c68b4 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Mon, 20 Apr 2026 13:01:23 +0200
Subject: [PATCH 108/157] fix(linting): wire camara-schema-casing-convention
 into r3.4 ruleset

The r3.4 ruleset still used the strict built-in casing:pascal function,
which rejects standard CloudEvents transport schemas (HTTPSettings,
MQTTSettings, etc.) whose uppercase acronyms are kept to stay aligned
with upstream CloudEvents naming. Switch it to the custom function
already used by the r4 ruleset, and extend the allowlist with the
MQTT / AMQP / NATS variants.

Fixes #201
---
 linting/config/.spectral-r3.4.yaml                 | 14 +++++++++-----
 .../camara-schema-casing-convention.js             |  9 +++++++++
 2 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/linting/config/.spectral-r3.4.yaml b/linting/config/.spectral-r3.4.yaml
index 9d42924c..ac3e68c3 100644
--- a/linting/config/.spectral-r3.4.yaml
+++ b/linting/config/.spectral-r3.4.yaml
@@ -15,6 +15,7 @@ functions:
   - camara-reserved-words
   - camara-language-avoid-telco
   - camara-security-no-secrets-in-path-or-query-parameters
+  - camara-schema-casing-convention
 functionsDir: "./lint_function"
 rules:
   #  Built-in OpenAPI Specification ruleset. Each rule then can be enabled individually.
@@ -250,14 +251,17 @@ rules:
     recommended: true  # Set to true/false to enable/disable this rule
 
   camara-schema-casing-convention:
-    description: This rule checks schema should follow a specific case convention pascal case.
-    message: "{{property}} should be pascal case (UppperCamelCase)"
+    description: >
+      Schema names must be PascalCase (UpperCamelCase). CloudEvents schema
+      names (HTTPSettings, MQTTSettings, AMQPSettings, NATSSettings and their
+      SubscriptionRequest/SubscriptionResponse counterparts, plus
+      PrivateKeyJWTCredential) are allowed as explicit exceptions to stay
+      aligned with upstream CloudEvents naming.
+    message: "{{error}}"
     severity: warn
     given: $.components.schemas[*]~
     then:
-      function: casing
-      functionOptions:
-        type: pascal
+      function: camara-schema-casing-convention
     recommended: true  # Set to true/false to enable/disable this rule
 
   camara-parameter-casing-convention:
diff --git a/linting/config/lint_function/camara-schema-casing-convention.js b/linting/config/lint_function/camara-schema-casing-convention.js
index a6f59667..bd1dd6de 100644
--- a/linting/config/lint_function/camara-schema-casing-convention.js
+++ b/linting/config/lint_function/camara-schema-casing-convention.js
@@ -7,6 +7,15 @@ const ALLOWED = new Set([
   "HTTPSettings",
   "HTTPSubscriptionRequest",
   "HTTPSubscriptionResponse",
+  "MQTTSettings",
+  "MQTTSubscriptionRequest",
+  "MQTTSubscriptionResponse",
+  "AMQPSettings",
+  "AMQPSubscriptionRequest",
+  "AMQPSubscriptionResponse",
+  "NATSSettings",
+  "NATSSubscriptionRequest",
+  "NATSSubscriptionResponse",
   "PrivateKeyJWTCredential",
 ]);
 

From 19e3e0c57d235792b52628dc7efd0c5790263fa7 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 20 Apr 2026 22:21:40 +0000
Subject: [PATCH 109/157] chore(deps): bump tj-actions/changed-files from
 47.0.5 to 47.0.6

Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 47.0.5 to 47.0.6.
- [Release notes](https://github.com/tj-actions/changed-files/releases)
- [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md)
- [Commits](https://github.com/tj-actions/changed-files/compare/22103cc46bda19c2b464ffe86db46df6922fd323...9426d40962ed5378910ee2e21d5f8c6fcbf2dd96)

---
updated-dependencies:
- dependency-name: tj-actions/changed-files
  dependency-version: 47.0.6
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] 
---
 .github/workflows/pr_validation.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/pr_validation.yml b/.github/workflows/pr_validation.yml
index 03d2f810..2e597afd 100644
--- a/.github/workflows/pr_validation.yml
+++ b/.github/workflows/pr_validation.yml
@@ -44,7 +44,7 @@ jobs:
 
       - name: Detect changed files
         id: changes
-        uses: tj-actions/changed-files@22103cc46bda19c2b464ffe86db46df6922fd323 # v47.0.5
+        uses: tj-actions/changed-files@9426d40962ed5378910ee2e21d5f8c6fcbf2dd96 # v47.0.6
         with:
           files_yaml: |
             release_plan:

From 29616fe76858e9011c0e62c45d41ef1b87eed8fa Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Tue, 21 Apr 2026 14:11:27 +0200
Subject: [PATCH 110/157] feat(release-automation): flat CHANGELOG.md fallback
 for maintenance releases

Maintenance releases into a cycle that has no per-cycle CHANGELOG file
now write to flat CHANGELOG.md at the repo root, preserving the
pre-automation history layout of repos that have not yet run
/migrate-changelog. Every other release type, and maintenance releases
into cycles that already have a per-cycle file, keep writing to
CHANGELOG/CHANGELOG-r{cycle}.md.

Same rule applies to the post-release syncer (probes per-cycle first,
falls back to flat) and to the CHANGELOG URL rendered into the README
release-info section (blob/main/CHANGELOG.md vs tree/main/CHANGELOG).
---
 release_automation/docs/repository-setup.md   |  35 ++--
 .../scripts/changelog_generator.py            |  53 ++++--
 .../scripts/post_release_syncer.py            |  22 ++-
 .../scripts/snapshot_creator.py               |  37 +++-
 .../release-info-prerelease-only.mustache     |   2 +-
 ...lease-info-public-with-prerelease.mustache |   2 +-
 .../readme/release-info-public.mustache       |   2 +-
 .../tests/test_changelog_generator.py         | 170 ++++++++++++++++++
 .../tests/test_post_release_syncer.py         |  64 +++++++
 .../tests/test_readme_updater.py              |  28 +++
 .../tests/test_snapshot_creator.py            |  56 ++++++
 11 files changed, 441 insertions(+), 30 deletions(-)

diff --git a/release_automation/docs/repository-setup.md b/release_automation/docs/repository-setup.md
index d05777e8..d45345bc 100644
--- a/release_automation/docs/repository-setup.md
+++ b/release_automation/docs/repository-setup.md
@@ -392,16 +392,31 @@ The delimiters are distributed by the `campaign-release-info` campaign in `proje
 
 ## CHANGELOG Structure
 
-The release automation uses a per-cycle directory structure for changelog files:
+The release automation supports two CHANGELOG layouts on the same discriminator:
 
-```
-CHANGELOG/
-  CHANGELOG-r1.md   # All releases in cycle 1 (r1.1, r1.2, ...)
-  CHANGELOG-r2.md   # All releases in cycle 2 (r2.1, r2.2, ...)
-  README.md          # Index pointing to available files and legacy CHANGELOG.md
-```
+- **Per-cycle** (default): `CHANGELOG/CHANGELOG-r{cycle}.md` — one file per release cycle, with newest entries prepended. Used for every release type except maintenance, and for maintenance releases once a per-cycle file for the target cycle already exists.
+
+  ```
+  CHANGELOG/
+    CHANGELOG-r1.md   # All releases in cycle 1 (r1.1, r1.2, ...)
+    CHANGELOG-r2.md   # All releases in cycle 2 (r2.1, r2.2, ...)
+    README.md         # Index pointing to available files and legacy CHANGELOG.md
+  ```
+
+- **Flat** (fallback for pre-automation maintenance releases): root `CHANGELOG.md` — new section prepended before the first `# r...` heading; manual TOC is replaced by an automation-managed TOC on first write.
+
+### Discriminator
+
+`write_changelog()` and `_sync_changelog()` apply the same rule:
+
+> If `release_type == "maintenance-release"` AND `CHANGELOG/CHANGELOG-r{cycle}.md` does **not** exist → write flat `CHANGELOG.md`. Otherwise → write `CHANGELOG/CHANGELOG-r{cycle}.md` (creating the `CHANGELOG/` directory if needed).
+
+Effect:
+
+- New release cycles on any repo land in `CHANGELOG/CHANGELOG-r{cycle}.md`.
+- Maintenance releases on repos that have not yet migrated their legacy history keep writing to the existing flat `CHANGELOG.md` until `/migrate-changelog` splits the history into per-cycle files. After migration, subsequent maintenance releases for those cycles prepend to the per-cycle file.
 
-Each `/create-snapshot` command generates a release section in the appropriate per-cycle file. Multiple releases within the same cycle (e.g., r4.1 alpha, r4.1 RC, r4.2) accumulate in the same file with newest entries at the top.
+The same rule drives the CHANGELOG URL rendered into the README release-info section: flat writes point to `blob/main/CHANGELOG.md`, per-cycle writes point to `tree/main/CHANGELOG`.
 
 ### Onboarding: CHANGELOG.md handling
 
@@ -498,8 +513,8 @@ Validation caller:
 
 ### CHANGELOG Structure
 
-- [ ] `CHANGELOG/README.md` exists as index file
-- [ ] Root `CHANGELOG.md` either: has forward-reference note (repos with history), or is deleted (repos with unchanged template placeholder)
+- [ ] The repo has a live changelog on `main` — either `CHANGELOG/README.md` plus per-cycle `CHANGELOG/CHANGELOG-r{cycle}.md` files (per-cycle mode, default), or a flat root `CHANGELOG.md` (flat-mode fallback, acceptable for maintenance releases on repos that have not yet run `/migrate-changelog`)
+- [ ] Root `CHANGELOG.md` either: has forward-reference note (onboarded repos with legacy history), is the active flat changelog (not yet migrated), or is deleted (repos that were onboarded from the unchanged template placeholder)
 
 ### Smoke Test
 
diff --git a/release_automation/scripts/changelog_generator.py b/release_automation/scripts/changelog_generator.py
index 3b212116..8a24be85 100644
--- a/release_automation/scripts/changelog_generator.py
+++ b/release_automation/scripts/changelog_generator.py
@@ -158,11 +158,23 @@ def generate_draft(
         return self.renderer.render(template_content, context)
 
     def write_changelog(
-        self, work_dir: str, content: str, release_tag: str, repo_name: str
+        self,
+        work_dir: str,
+        content: str,
+        release_tag: str,
+        repo_name: str,
+        release_type: str = "",
     ) -> str:
-        """Write CHANGELOG section to the appropriate per-cycle file.
+        """Write CHANGELOG section to the appropriate file.
 
-        File naming: r4.1 -> cycle 4 -> CHANGELOG/CHANGELOG-r4.md
+        Target selection (maintenance releases preserve legacy flat CHANGELOG.md
+        until the repo has been migrated to per-cycle via ``/migrate-changelog``):
+
+            if release_type == "maintenance-release" and
+               CHANGELOG/CHANGELOG-r{cycle}.md does NOT exist:
+                write to CHANGELOG.md at the repo root (flat mode)
+            else:
+                write to CHANGELOG/CHANGELOG-r{cycle}.md (per-cycle mode)
 
         Behavior:
             - If file exists: prepend new section after the header block
@@ -173,17 +185,17 @@ def write_changelog(
             content: Rendered release section content
             release_tag: Release tag for cycle extraction
             repo_name: Repository name for header generation
+            release_type: Release type from release-plan/metadata; used only
+                to enable flat-mode fallback for maintenance releases.
 
         Returns:
-            Relative path to the written file (e.g., "CHANGELOG/CHANGELOG-r4.md")
+            Relative path to the written file, e.g. ``CHANGELOG/CHANGELOG-r4.md``
+            (per-cycle) or ``CHANGELOG.md`` (flat).
         """
         cycle = self._get_cycle(release_tag)
-        changelog_dir = Path(work_dir) / "CHANGELOG"
-        changelog_dir.mkdir(exist_ok=True)
-
-        filename = f"CHANGELOG-r{cycle}.md"
-        filepath = changelog_dir / filename
-        relative_path = f"CHANGELOG/{filename}"
+        filepath, relative_path = self._resolve_changelog_path(
+            Path(work_dir), cycle, release_type
+        )
 
         if filepath.exists():
             existing = filepath.read_text()
@@ -201,6 +213,27 @@ def write_changelog(
 
         return relative_path
 
+    @staticmethod
+    def _resolve_changelog_path(
+        work_dir: Path, cycle: str, release_type: str
+    ) -> Tuple[Path, str]:
+        """Pick flat vs per-cycle target for write_changelog().
+
+        Flat mode (``CHANGELOG.md`` at repo root) activates only when the
+        release is a maintenance release AND no per-cycle file exists yet
+        for the target cycle. Every other case uses per-cycle, and the
+        ``CHANGELOG/`` directory is created if missing.
+
+        Returns:
+            Tuple of (absolute Path to target file, relative path string).
+        """
+        per_cycle_file = work_dir / "CHANGELOG" / f"CHANGELOG-r{cycle}.md"
+        if release_type == "maintenance-release" and not per_cycle_file.exists():
+            return work_dir / "CHANGELOG.md", "CHANGELOG.md"
+
+        per_cycle_file.parent.mkdir(exist_ok=True)
+        return per_cycle_file, f"CHANGELOG/CHANGELOG-r{cycle}.md"
+
     def _find_header_end(self, content: str) -> int:
         """Find the position where release sections start in an existing file.
 
diff --git a/release_automation/scripts/post_release_syncer.py b/release_automation/scripts/post_release_syncer.py
index b22df845..d381d953 100644
--- a/release_automation/scripts/post_release_syncer.py
+++ b/release_automation/scripts/post_release_syncer.py
@@ -150,10 +150,13 @@ def _sync_changelog(
         target_branch: str,
         release_tag: str
     ) -> bool:
-        """Copy release-specific CHANGELOG from snapshot branch to target branch.
+        """Copy release CHANGELOG from snapshot branch to target branch.
 
-        Copies CHANGELOG/CHANGELOG-rX.md where X is the release cycle number
-        extracted from the release tag (e.g., r4.1 → CHANGELOG/CHANGELOG-r4.md).
+        Prefers the per-cycle file ``CHANGELOG/CHANGELOG-rX.md`` (where X is
+        the release cycle number from the release tag). Falls back to flat
+        ``CHANGELOG.md`` if the per-cycle file does not exist on the
+        snapshot branch (maintenance releases on repos that have not yet
+        migrated to per-cycle structure).
 
         Args:
             snapshot_branch: Source branch with release CHANGELOG
@@ -171,12 +174,19 @@ def _sync_changelog(
             return False
 
         cycle = match.group(1)
-        changelog_path = f"CHANGELOG/CHANGELOG-r{cycle}.md"
+        per_cycle_path = f"CHANGELOG/CHANGELOG-r{cycle}.md"
+        flat_path = "CHANGELOG.md"
 
-        # Get CHANGELOG from snapshot branch
+        # Probe per-cycle first, fall back to flat CHANGELOG.md
+        changelog_path = per_cycle_path
         changelog_content = self.gh.get_file_content(changelog_path, ref=snapshot_branch)
         if not changelog_content:
-            logger.warning(f"No {changelog_path} found on {snapshot_branch}")
+            changelog_path = flat_path
+            changelog_content = self.gh.get_file_content(changelog_path, ref=snapshot_branch)
+        if not changelog_content:
+            logger.warning(
+                f"No {per_cycle_path} or {flat_path} found on {snapshot_branch}"
+            )
             return False
 
         # Write to target branch
diff --git a/release_automation/scripts/snapshot_creator.py b/release_automation/scripts/snapshot_creator.py
index cc813b77..2c8c662e 100644
--- a/release_automation/scripts/snapshot_creator.py
+++ b/release_automation/scripts/snapshot_creator.py
@@ -877,9 +877,15 @@ def _update_readme(
             for api_name, version in api_versions.items()
         ]
 
+        cycle_match = re.match(r"r(\d+)\.", config.release_tag)
+        cycle = cycle_match.group(1) if cycle_match else ""
+
         # Build data dict
         data = {
             "repo_name": repo_name,
+            "changelog_url": self._changelog_url(
+                temp_dir, org, repo_name, release_type, cycle
+            ),
         }
 
         if release_state in ("public_release", "public_with_prerelease"):
@@ -928,6 +934,29 @@ def _update_readme(
         updater = ReadmeUpdater()
         return updater.update_release_info(readme_path, release_state, data)
 
+    @staticmethod
+    def _changelog_url(
+        temp_dir: str,
+        org: str,
+        repo_name: str,
+        release_type: str,
+        cycle: str,
+    ) -> str:
+        """Build the CHANGELOG link used in README release-info.
+
+        Mirrors the generator's dual-mode rule: a maintenance release
+        into a cycle with no per-cycle file writes flat ``CHANGELOG.md``
+        and the link points there. Every other case uses the per-cycle
+        ``CHANGELOG/`` directory tree view.
+        """
+        base = f"https://github.com/{org}/{repo_name}"
+        per_cycle_file = os.path.join(
+            temp_dir, "CHANGELOG", f"CHANGELOG-r{cycle}.md"
+        )
+        if release_type == "maintenance-release" and not os.path.isfile(per_cycle_file):
+            return f"{base}/blob/main/CHANGELOG.md"
+        return f"{base}/tree/main/CHANGELOG"
+
     def _generate_changelog(
         self,
         temp_dir: str,
@@ -970,7 +999,13 @@ def _generate_changelog(
             repo_name=repo_name,
             candidate_changes=candidate_changes,
         )
-        return generator.write_changelog(temp_dir, content, config.release_tag, repo_name)
+        return generator.write_changelog(
+            temp_dir,
+            content,
+            config.release_tag,
+            repo_name,
+            release_type=release_type,
+        )
 
     def _cleanup_branches(
         self,
diff --git a/release_automation/templates/readme/release-info-prerelease-only.mustache b/release_automation/templates/readme/release-info-prerelease-only.mustache
index 5b19307c..321f0c11 100644
--- a/release_automation/templates/readme/release-info-prerelease-only.mustache
+++ b/release_automation/templates/readme/release-info-prerelease-only.mustache
@@ -7,6 +7,6 @@
 
 * The latest pre-release is [{{newest_prerelease}}]({{prerelease_github_url}}) ({{prerelease_type}}), with the following API versions:
 {{{formatted_prerelease_apis}}}
-* For changes see [CHANGELOG](https://github.com/camaraproject/{{repo_name}}/tree/main/CHANGELOG)
+* For changes see [CHANGELOG]({{changelog_url}})
 
 _The above section is automatically synchronized by CAMARA project-administration._
diff --git a/release_automation/templates/readme/release-info-public-with-prerelease.mustache b/release_automation/templates/readme/release-info-public-with-prerelease.mustache
index c856e955..b6227fad 100644
--- a/release_automation/templates/readme/release-info-public-with-prerelease.mustache
+++ b/release_automation/templates/readme/release-info-public-with-prerelease.mustache
@@ -9,7 +9,7 @@
 {{{formatted_apis}}}
 * The latest public release is always available here: https://github.com/camaraproject/{{repo_name}}/releases/latest
 * Other releases of this repository are available in https://github.com/camaraproject/{{repo_name}}/releases
-* For changes see [CHANGELOG](https://github.com/camaraproject/{{repo_name}}/tree/main/CHANGELOG)
+* For changes see [CHANGELOG]({{changelog_url}})
 
 ### Upcoming Release Preview
 
diff --git a/release_automation/templates/readme/release-info-public.mustache b/release_automation/templates/readme/release-info-public.mustache
index 3895e845..2de4f95f 100644
--- a/release_automation/templates/readme/release-info-public.mustache
+++ b/release_automation/templates/readme/release-info-public.mustache
@@ -9,6 +9,6 @@
 {{{formatted_apis}}}
 * The latest public release is always available here: https://github.com/camaraproject/{{repo_name}}/releases/latest
 * Other releases of this repository are available in https://github.com/camaraproject/{{repo_name}}/releases
-* For changes see [CHANGELOG](https://github.com/camaraproject/{{repo_name}}/tree/main/CHANGELOG)
+* For changes see [CHANGELOG]({{changelog_url}})
 
 _The above section is automatically synchronized by CAMARA project-administration._
diff --git a/release_automation/tests/test_changelog_generator.py b/release_automation/tests/test_changelog_generator.py
index 34ca7de8..2410532f 100644
--- a/release_automation/tests/test_changelog_generator.py
+++ b/release_automation/tests/test_changelog_generator.py
@@ -352,6 +352,176 @@ def test_header_generation_contains_repo_name(self, generator):
         assert "best results, use the latest published release" in header
 
 
+# --- File Writing: Dual-Mode (flat vs per-cycle) ---
+
+
+class TestFileWritingFlatMode:
+    """Flat-mode (CHANGELOG.md at repo root) activates only for maintenance
+    releases when no per-cycle file exists yet. Every other release type
+    stays on per-cycle even if a legacy flat CHANGELOG.md is present."""
+
+    def test_maintenance_release_with_no_cycle_file_writes_flat(self, generator, tmp_path):
+        content = "# r2.3\n\n## Release Notes\n\nMaintenance patch\n"
+        path = generator.write_changelog(
+            str(tmp_path),
+            content,
+            "r2.3",
+            "SimpleEdgeDiscovery",
+            release_type="maintenance-release",
+        )
+        assert path == "CHANGELOG.md"
+        flat = tmp_path / "CHANGELOG.md"
+        assert flat.exists()
+        assert "# r2.3" in flat.read_text()
+        # No per-cycle directory should be created when writing flat
+        assert not (tmp_path / "CHANGELOG" / "CHANGELOG-r2.md").exists()
+
+    def test_maintenance_release_with_existing_cycle_file_uses_per_cycle(
+        self, generator, tmp_path
+    ):
+        # Pre-seed the per-cycle file
+        (tmp_path / "CHANGELOG").mkdir()
+        per_cycle = tmp_path / "CHANGELOG" / "CHANGELOG-r2.md"
+        per_cycle.write_text(
+            "# Changelog SimpleEdgeDiscovery\n\n"
+            "Recording rules...\n\n"
+            "# r2.2\n\n## Release Notes\n\nPrior public\n"
+        )
+        flat_before = "legacy content\n"
+        (tmp_path / "CHANGELOG.md").write_text(flat_before)
+
+        new_section = "# r2.3\n\n## Release Notes\n\nMaintenance patch\n"
+        path = generator.write_changelog(
+            str(tmp_path),
+            new_section,
+            "r2.3",
+            "SimpleEdgeDiscovery",
+            release_type="maintenance-release",
+        )
+        assert path == "CHANGELOG/CHANGELOG-r2.md"
+        # Per-cycle file was updated (r2.3 prepended), flat file untouched
+        assert "# r2.3" in per_cycle.read_text()
+        assert (tmp_path / "CHANGELOG.md").read_text() == flat_before
+
+    def test_public_release_always_uses_per_cycle(self, generator, tmp_path):
+        # Flat CHANGELOG.md present, no CHANGELOG/ directory
+        (tmp_path / "CHANGELOG.md").write_text("legacy content\n")
+
+        content = "# r4.2\n\n## Release Notes\n\nPublic\n"
+        path = generator.write_changelog(
+            str(tmp_path),
+            content,
+            "r4.2",
+            "QualityOnDemand",
+            release_type="public-release",
+        )
+        assert path == "CHANGELOG/CHANGELOG-r4.md"
+        assert (tmp_path / "CHANGELOG" / "CHANGELOG-r4.md").exists()
+
+    def test_pre_release_rc_always_uses_per_cycle(self, generator, tmp_path):
+        (tmp_path / "CHANGELOG.md").write_text("legacy content\n")
+
+        content = "# r4.1\n\n## Release Notes\n\nRC\n"
+        path = generator.write_changelog(
+            str(tmp_path),
+            content,
+            "r4.1",
+            "QualityOnDemand",
+            release_type="pre-release-rc",
+        )
+        assert path == "CHANGELOG/CHANGELOG-r4.md"
+
+    def test_empty_release_type_defaults_to_per_cycle(self, generator, tmp_path):
+        (tmp_path / "CHANGELOG.md").write_text("legacy content\n")
+
+        content = "# r4.1\n\n## Release Notes\n\nNo type\n"
+        path = generator.write_changelog(
+            str(tmp_path), content, "r4.1", "QualityOnDemand"
+        )
+        assert path == "CHANGELOG/CHANGELOG-r4.md"
+
+    def test_flat_write_prepends_before_first_release_heading(self, generator, tmp_path):
+        # Shape mirrors SimpleEdgeDiscovery's CHANGELOG.md:
+        # title + manual TOC + NOTE + preamble + release sections
+        legacy = (
+            "# Changelog Simple Edge Discovery\n\n"
+            "NOTE: \n\n"
+            "## Table of contents\n\n"
+            "- [r2.2](#r22)\n"
+            "- [r2.1 - rc](#r21---rc)\n\n"
+            "**Please use the latest published release.**\n\n"
+            "# r2.2 - Fall25 public release\n\n"
+            "Prior public release content\n"
+        )
+        (tmp_path / "CHANGELOG.md").write_text(legacy)
+
+        new_section = "# r2.3\n\n## Release Notes\n\nMaintenance patch\n"
+        generator.write_changelog(
+            str(tmp_path),
+            new_section,
+            "r2.3",
+            "SimpleEdgeDiscovery",
+            release_type="maintenance-release",
+        )
+
+        result = (tmp_path / "CHANGELOG.md").read_text()
+        r23 = result.index("# r2.3")
+        r22 = result.index("# r2.2")
+        assert r23 < r22
+
+    def test_flat_write_injects_toc_markers_on_first_run(self, generator, tmp_path):
+        # Legacy CHANGELOG.md has no automation TOC markers
+        legacy = (
+            "# Changelog Simple Edge Discovery\n\n"
+            "## Table of contents\n\n"
+            "- [r2.2](#r22)\n\n"
+            "# r2.2\n\nPrior content\n"
+        )
+        (tmp_path / "CHANGELOG.md").write_text(legacy)
+
+        new_section = "# r2.3\n\nThis maintenance release contains something\n"
+        generator.write_changelog(
+            str(tmp_path),
+            new_section,
+            "r2.3",
+            "SimpleEdgeDiscovery",
+            release_type="maintenance-release",
+        )
+
+        result = (tmp_path / "CHANGELOG.md").read_text()
+        assert TOC_START_MARKER in result
+        assert TOC_END_MARKER in result
+
+    def test_flat_write_updates_toc_idempotently(self, generator, tmp_path):
+        # First maintenance write inserts markers
+        (tmp_path / "CHANGELOG.md").write_text(
+            "# Changelog Simple Edge Discovery\n\n"
+            "# r2.2\n\nPrior content\n"
+        )
+        generator.write_changelog(
+            str(tmp_path),
+            "# r2.3\n\nThis maintenance release contains A\n",
+            "r2.3",
+            "SimpleEdgeDiscovery",
+            release_type="maintenance-release",
+        )
+        # Second maintenance write (hypothetical r2.4 still before migration)
+        generator.write_changelog(
+            str(tmp_path),
+            "# r2.4\n\nThis maintenance release contains B\n",
+            "r2.4",
+            "SimpleEdgeDiscovery",
+            release_type="maintenance-release",
+        )
+
+        result = (tmp_path / "CHANGELOG.md").read_text()
+        assert result.count(TOC_START_MARKER) == 1
+        assert result.count(TOC_END_MARKER) == 1
+        # Both release entries present in TOC
+        assert "[r2.4](#r24)" in result
+        assert "[r2.3](#r23)" in result
+
+
 # --- Table of Contents ---
 
 
diff --git a/release_automation/tests/test_post_release_syncer.py b/release_automation/tests/test_post_release_syncer.py
index a9d54ec3..be09b836 100644
--- a/release_automation/tests/test_post_release_syncer.py
+++ b/release_automation/tests/test_post_release_syncer.py
@@ -164,6 +164,70 @@ def test_sync_changelog_invalid_release_tag(self, syncer, mock_github_client):
         assert result is False
         mock_github_client.get_file_content.assert_not_called()
 
+    def test_sync_changelog_prefers_per_cycle(self, syncer, mock_github_client):
+        """When the per-cycle file is present on the snapshot, sync it and
+        do not probe the flat fallback."""
+        mock_github_client.get_file_content.return_value = "# per-cycle content"
+
+        result = syncer._sync_changelog(
+            "release-snapshot/r4.1-abc123",
+            "pr-to-main/r4.1",
+            "r4.1",
+        )
+
+        assert result is True
+        # Only one probe — for the per-cycle path
+        mock_github_client.get_file_content.assert_called_once_with(
+            "CHANGELOG/CHANGELOG-r4.md", ref="release-snapshot/r4.1-abc123"
+        )
+        mock_github_client.update_file.assert_called_once()
+        assert (
+            mock_github_client.update_file.call_args.kwargs["path"]
+            == "CHANGELOG/CHANGELOG-r4.md"
+        )
+
+    def test_sync_changelog_falls_back_to_flat_when_per_cycle_missing(
+        self, syncer, mock_github_client
+    ):
+        """Maintenance releases on unmigrated repos write flat CHANGELOG.md;
+        the syncer must pick that up when the per-cycle probe returns None."""
+        mock_github_client.get_file_content.side_effect = [None, "# flat content"]
+
+        result = syncer._sync_changelog(
+            "release-snapshot/r2.3-abc123",
+            "pr-to-main/r2.3",
+            "r2.3",
+        )
+
+        assert result is True
+        assert mock_github_client.get_file_content.call_count == 2
+        mock_github_client.get_file_content.assert_any_call(
+            "CHANGELOG/CHANGELOG-r2.md", ref="release-snapshot/r2.3-abc123"
+        )
+        mock_github_client.get_file_content.assert_any_call(
+            "CHANGELOG.md", ref="release-snapshot/r2.3-abc123"
+        )
+        mock_github_client.update_file.assert_called_once()
+        assert (
+            mock_github_client.update_file.call_args.kwargs["path"] == "CHANGELOG.md"
+        )
+
+    def test_sync_changelog_neither_present_returns_false(
+        self, syncer, mock_github_client
+    ):
+        """Both probes return None → False and no update attempt."""
+        mock_github_client.get_file_content.side_effect = [None, None]
+
+        result = syncer._sync_changelog(
+            "release-snapshot/r2.3-abc123",
+            "pr-to-main/r2.3",
+            "r2.3",
+        )
+
+        assert result is False
+        assert mock_github_client.get_file_content.call_count == 2
+        mock_github_client.update_file.assert_not_called()
+
 
 class TestCreatePR:
     """Tests for _create_pr method."""
diff --git a/release_automation/tests/test_readme_updater.py b/release_automation/tests/test_readme_updater.py
index fdb80e58..3a845cec 100644
--- a/release_automation/tests/test_readme_updater.py
+++ b/release_automation/tests/test_readme_updater.py
@@ -59,6 +59,7 @@ def public_release_data():
         "latest_public_release": "r3.2",
         "github_url": "https://github.com/camaraproject/QualityOnDemand/releases/tag/r3.2",
         "meta_release": "Spring25",
+        "changelog_url": "https://github.com/camaraproject/QualityOnDemand/tree/main/CHANGELOG",
         "formatted_apis": (
             "  * **quality-on-demand v1.1.0**\n"
             "  [[YAML]](https://github.com/camaraproject/QualityOnDemand/blob/r3.2/"
@@ -81,6 +82,7 @@ def prerelease_data():
         "newest_prerelease": "r4.1-rc.1",
         "prerelease_github_url": "https://github.com/camaraproject/QualityOnDemand/releases/tag/r4.1-rc.1",
         "prerelease_type": "release candidate",
+        "changelog_url": "https://github.com/camaraproject/QualityOnDemand/tree/main/CHANGELOG",
         "formatted_prerelease_apis": (
             "  * **quality-on-demand v1.2.0-rc.1**\n"
             "  [[YAML]](https://github.com/camaraproject/QualityOnDemand/blob/r4.1-rc.1/"
@@ -169,6 +171,32 @@ def test_render_public_with_prerelease_template(self, updater, public_release_da
         assert "Upcoming Release Preview" in result
         assert "NOTE" in result
 
+    def test_changelog_url_renders_directory_link_in_all_release_templates(
+        self, updater, public_release_data, prerelease_data
+    ):
+        """changelog_url variable is honored by every release template."""
+        dir_url = "https://github.com/camaraproject/QualityOnDemand/tree/main/CHANGELOG"
+        public_release_data["changelog_url"] = dir_url
+        prerelease_data["changelog_url"] = dir_url
+
+        public_result = updater._render_template("public_release", public_release_data)
+        prerelease_result = updater._render_template("prerelease_only", prerelease_data)
+        combined_result = updater._render_template(
+            "public_with_prerelease", {**public_release_data, **prerelease_data}
+        )
+
+        for result in (public_result, prerelease_result, combined_result):
+            assert f"[CHANGELOG]({dir_url})" in result
+
+    def test_changelog_url_renders_flat_file_link(self, updater, public_release_data):
+        """Flat-mode URL (blob/main/CHANGELOG.md) also renders cleanly."""
+        flat_url = "https://github.com/camaraproject/SimpleEdgeDiscovery/blob/main/CHANGELOG.md"
+        public_release_data["changelog_url"] = flat_url
+        public_release_data["repo_name"] = "SimpleEdgeDiscovery"
+
+        result = updater._render_template("public_release", public_release_data)
+        assert f"[CHANGELOG]({flat_url})" in result
+
 
 # --- Content Replacement ---
 
diff --git a/release_automation/tests/test_snapshot_creator.py b/release_automation/tests/test_snapshot_creator.py
index f67cb7c6..d82340a1 100644
--- a/release_automation/tests/test_snapshot_creator.py
+++ b/release_automation/tests/test_snapshot_creator.py
@@ -264,8 +264,64 @@ def test_preserves_release_tag_format(self, snapshot_creator):
         ) == "v1.0.0-abc1234"
 
 
+# --- Tests for _changelog_url ---
+
+
+class TestChangelogUrl:
+    """Tests for the CHANGELOG link rendered into README release-info.
+
+    Mirrors the dual-mode rule used by the generator: maintenance releases
+    into a cycle without a per-cycle file link to the flat CHANGELOG.md;
+    every other case links to the per-cycle CHANGELOG/ directory view.
+    """
+
+    def test_maintenance_with_no_per_cycle_file_points_to_flat(self, tmp_path):
+        (tmp_path / "CHANGELOG.md").write_text("legacy\n")
+        url = SnapshotCreator._changelog_url(
+            str(tmp_path),
+            org="camaraproject",
+            repo_name="SimpleEdgeDiscovery",
+            release_type="maintenance-release",
+            cycle="2",
+        )
+        assert url == (
+            "https://github.com/camaraproject/SimpleEdgeDiscovery/blob/main/CHANGELOG.md"
+        )
+
+    def test_maintenance_with_existing_per_cycle_file_points_to_dir(self, tmp_path):
+        (tmp_path / "CHANGELOG").mkdir()
+        (tmp_path / "CHANGELOG" / "CHANGELOG-r2.md").write_text("per-cycle\n")
+        url = SnapshotCreator._changelog_url(
+            str(tmp_path),
+            org="camaraproject",
+            repo_name="SimpleEdgeDiscovery",
+            release_type="maintenance-release",
+            cycle="2",
+        )
+        assert url == (
+            "https://github.com/camaraproject/SimpleEdgeDiscovery/tree/main/CHANGELOG"
+        )
+
+    def test_non_maintenance_always_points_to_dir(self, tmp_path):
+        # Even a repo with a flat CHANGELOG.md and no CHANGELOG/ yet gets
+        # the directory link when the release is not a maintenance release,
+        # because the generator will create the per-cycle file on first run.
+        (tmp_path / "CHANGELOG.md").write_text("legacy\n")
+        url = SnapshotCreator._changelog_url(
+            str(tmp_path),
+            org="camaraproject",
+            repo_name="QualityOnDemand",
+            release_type="public-release",
+            cycle="4",
+        )
+        assert url == (
+            "https://github.com/camaraproject/QualityOnDemand/tree/main/CHANGELOG"
+        )
+
+
 # --- Tests for validate_preconditions ---
 
+
 class TestValidatePreconditions:
     """Tests for precondition validation."""
 

From 7ea80ec6a4849d108dda5fc751d55b3254a247c2 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Tue, 21 Apr 2026 15:11:14 +0200
Subject: [PATCH 111/157] feat(validation): add release-plan-focused rules and
 suppress misleading findings on Commonalities advance

Adds P-022 (release-plan exclusivity), P-023 (declared dependency tag
existence), and a workflow-layer release_plan_check_only signal that
the orchestrator uses to skip Spectral and gherkin engines when a
Commonalities advance would otherwise produce findings against stale
content. The post-filter keeps only Python rules gated on
applicability.release_plan_changed=true on such PRs. ICM-only advances
do not trigger engine suppression.

Fixes #206
---
 .github/workflows/validation.yml              | 138 +++++++++++
 shared-actions/run-validation/action.yml      |  48 ++++
 validation/context/context_builder.py         |  30 +++
 validation/engines/python_checks/__init__.py  |   9 +-
 .../python_checks/release_plan_checks.py      | 151 ++++++++++++
 validation/orchestrator.py                    | 103 ++++++--
 validation/postfilter/engine.py               |  15 ++
 validation/rules/python-rules.yaml            |  30 +++
 validation/tests/test_context_builder.py      |   5 +
 validation/tests/test_orchestrator.py         |  57 +++++
 validation/tests/test_postfilter_engine.py    |  73 +++++-
 .../tests/test_python_checks_release_plan.py  | 228 ++++++++++++++++++
 .../tests/test_rule_metadata_integrity.py     |   6 +-
 13 files changed, 874 insertions(+), 19 deletions(-)

diff --git a/.github/workflows/validation.yml b/.github/workflows/validation.yml
index 6959fbbc..490779db 100644
--- a/.github/workflows/validation.yml
+++ b/.github/workflows/validation.yml
@@ -152,12 +152,144 @@ jobs:
         id: detect-changes
         if: github.event_name == 'pull_request'
         run: |
+          # Emit the list of non-release-plan files touched in this PR so
+          # P-022 (exclusivity) can report them.  Empty JSON array when
+          # release-plan.yaml is the only change.
           if git diff --name-only "origin/${{ github.base_ref }}...HEAD" -- release-plan.yaml | grep -q .; then
             echo "release_plan_changed=true" >> "$GITHUB_OUTPUT"
           else
             echo "release_plan_changed=false" >> "$GITHUB_OUTPUT"
           fi
 
+          OTHER_FILES=$(
+            git diff --name-only "origin/${{ github.base_ref }}...HEAD" \
+              | grep -v '^release-plan\.yaml$' \
+              | python3 -c 'import json, sys; print(json.dumps([line for line in sys.stdin.read().splitlines() if line]))'
+          )
+          echo "non_release_plan_files_changed=${OTHER_FILES}" >> "$GITHUB_OUTPUT"
+
+      # ── Step 6b: Resolve dependency changes and declared-tag existence
+      #
+      # When release-plan.yaml changed, diff base vs head to detect which
+      # dependency declarations advanced (commonalities_release,
+      # identity_consent_management_release).  For each advanced tag,
+      # look it up in the source repository via the GitHub API.  Emit a
+      # tri-state existence flag: 'true' / 'false' / '' (empty = lookup
+      # failed or skipped).  release_plan_check_only is set only when
+      # commonalities_release advances — that's the dependency whose
+      # content (code/common/*) is stale under the new ruleset, which is
+      # why the orchestrator suppresses the Spectral + gherkin engines.
+      # ICM has no common files to sync, so an ICM advance does not
+      # trigger engine suppression.
+      - name: Resolve dependency changes and declared tags
+        id: detect-deps
+        if: >-
+          github.event_name == 'pull_request'
+          && steps.detect-changes.outputs.release_plan_changed == 'true'
+        uses: actions/github-script@v9
+        with:
+          script: |
+            const { execSync } = require('child_process');
+            const fs = require('fs');
+            const path = require('path');
+            // js-yaml is bundled with actions/github-script v9+ and its
+            // dependencies; require by name.
+            const yaml = require('js-yaml');
+
+            function readYamlAtRef(ref, filePath) {
+              // Returns the parsed YAML at origin/:, or
+              // {} when the file is absent at that ref.
+              try {
+                const raw = execSync(
+                  `git show origin/${ref}:${filePath}`,
+                  { stdio: ['ignore', 'pipe', 'pipe'] }
+                ).toString('utf8');
+                const parsed = yaml.load(raw);
+                return parsed && typeof parsed === 'object' ? parsed : {};
+              } catch (err) {
+                return {};
+              }
+            }
+
+            function readYamlAtPath(filePath) {
+              try {
+                const raw = fs.readFileSync(filePath, 'utf8');
+                const parsed = yaml.load(raw);
+                return parsed && typeof parsed === 'object' ? parsed : {};
+              } catch (err) {
+                return {};
+              }
+            }
+
+            function getDep(plan, field) {
+              const deps = (plan && plan.dependencies) || {};
+              return deps[field] || null;
+            }
+
+            const baseRef = context.payload.pull_request.base.ref;
+            const planPath = 'release-plan.yaml';
+            const basePlan = readYamlAtRef(baseRef, planPath);
+            const headPlan = readYamlAtPath(
+              path.join(process.env.GITHUB_WORKSPACE, planPath)
+            );
+
+            const DEPENDENCIES = [
+              {
+                field: 'commonalities_release',
+                sourceRepo: 'camaraproject/Commonalities',
+                changedOutput: 'commonalities_release_changed',
+                tagExistsOutput: 'commonalities_tag_exists',
+              },
+              {
+                field: 'identity_consent_management_release',
+                sourceRepo: 'camaraproject/IdentityAndConsentManagement',
+                changedOutput: 'icm_release_changed',
+                tagExistsOutput: 'icm_tag_exists',
+              },
+            ];
+
+            async function tagExists(owner, repo, tag) {
+              try {
+                await github.rest.git.getRef({
+                  owner, repo, ref: `tags/${tag}`,
+                });
+                return 'true';
+              } catch (err) {
+                if (err && err.status === 404) return 'false';
+                // 5xx, rate-limit, network, auth issues — let P-023
+                // surface a warn-level finding rather than blocking.
+                core.warning(
+                  `Tag lookup for ${owner}/${repo}@${tag} failed: ${err.message}`
+                );
+                return '';
+              }
+            }
+
+            let commonalitiesChanged = false;
+            for (const dep of DEPENDENCIES) {
+              const baseTag = getDep(basePlan, dep.field);
+              const headTag = getDep(headPlan, dep.field);
+              const changed = baseTag !== headTag;
+              core.setOutput(dep.changedOutput, changed ? 'true' : 'false');
+
+              if (changed && dep.field === 'commonalities_release') {
+                commonalitiesChanged = true;
+              }
+
+              if (changed && headTag) {
+                const [owner, repo] = dep.sourceRepo.split('/');
+                const exists = await tagExists(owner, repo, headTag);
+                core.setOutput(dep.tagExistsOutput, exists);
+              } else {
+                core.setOutput(dep.tagExistsOutput, '');
+              }
+            }
+
+            core.setOutput(
+              'release_plan_check_only',
+              commonalitiesChanged ? 'true' : 'false'
+            );
+
       # ── Step 7: Run validation (shared action) ─────────────────────
       #
       # The run-validation action installs dependencies, runs the Python
@@ -171,6 +303,12 @@ jobs:
           mode: ${{ inputs.mode }}
           profile: ${{ inputs.profile }}
           release_plan_changed: ${{ steps.detect-changes.outputs.release_plan_changed || 'false' }}
+          release_plan_check_only: ${{ steps.detect-deps.outputs.release_plan_check_only || 'false' }}
+          commonalities_release_changed: ${{ steps.detect-deps.outputs.commonalities_release_changed || 'false' }}
+          icm_release_changed: ${{ steps.detect-deps.outputs.icm_release_changed || 'false' }}
+          commonalities_tag_exists: ${{ steps.detect-deps.outputs.commonalities_tag_exists || '' }}
+          icm_tag_exists: ${{ steps.detect-deps.outputs.icm_tag_exists || '' }}
+          non_release_plan_files_changed: ${{ steps.detect-changes.outputs.non_release_plan_files_changed || '[]' }}
           tooling_ref: ${{ steps.resolve-ref.outputs.tooling_checkout_ref }}
 
       # ── Step 8: Mint validation app token (PR only) ──────────────
diff --git a/shared-actions/run-validation/action.yml b/shared-actions/run-validation/action.yml
index 46395396..70da4c3d 100644
--- a/shared-actions/run-validation/action.yml
+++ b/shared-actions/run-validation/action.yml
@@ -28,6 +28,48 @@ inputs:
     description: 'Whether release-plan.yaml changed in this PR (true/false)'
     required: false
     default: 'false'
+  release_plan_check_only:
+    description: >
+      When true, a Commonalities dependency declaration advanced in this
+      PR — orchestrator skips Spectral + gherkin engines and post-filter
+      keeps only release-plan-validation rules.  Set by the caller
+      workflow''s release-plan detection step.
+    required: false
+    default: 'false'
+  commonalities_release_changed:
+    description: >
+      Whether dependencies.commonalities_release differs between base and
+      head in this PR diff (true/false).  Used by P-023 to gate the
+      tag-existence check.
+    required: false
+    default: 'false'
+  icm_release_changed:
+    description: >
+      Whether dependencies.icm_release (or identity_consent_management_release)
+      differs between base and head in this PR diff (true/false).
+    required: false
+    default: 'false'
+  commonalities_tag_exists:
+    description: >
+      Tri-state result of the GitHub API lookup for the declared
+      commonalities_release tag.  ''true'' = confirmed present, ''false''
+      = confirmed 404, '''' (empty) = lookup skipped or failed.  Consumed
+      by P-023.
+    required: false
+    default: ''
+  icm_tag_exists:
+    description: >
+      Tri-state result for the declared icm_release tag.  Same semantics
+      as commonalities_tag_exists.
+    required: false
+    default: ''
+  non_release_plan_files_changed:
+    description: >
+      JSON array of file paths changed in the PR alongside
+      release-plan.yaml.  Empty array (''[]'') when release-plan.yaml is
+      the only changed file.  Consumed by P-022 (exclusivity check).
+    required: false
+    default: '[]'
   tooling_ref:
     description: 'Tooling ref used for this run (for diagnostics)'
     required: false
@@ -111,6 +153,12 @@ runs:
         VALIDATION_PROFILE: ${{ inputs.profile }}
         VALIDATION_PR_NUMBER: ${{ github.event.pull_request.number }}
         VALIDATION_RELEASE_PLAN_CHANGED: ${{ inputs.release_plan_changed }}
+        VALIDATION_RELEASE_PLAN_CHECK_ONLY: ${{ inputs.release_plan_check_only }}
+        VALIDATION_COMMONALITIES_RELEASE_CHANGED: ${{ inputs.commonalities_release_changed }}
+        VALIDATION_ICM_RELEASE_CHANGED: ${{ inputs.icm_release_changed }}
+        VALIDATION_COMMONALITIES_TAG_EXISTS: ${{ inputs.commonalities_tag_exists }}
+        VALIDATION_ICM_TAG_EXISTS: ${{ inputs.icm_tag_exists }}
+        VALIDATION_NON_RELEASE_PLAN_FILES_CHANGED: ${{ inputs.non_release_plan_files_changed }}
         VALIDATION_WORKFLOW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
         VALIDATION_TOOLING_REF: ${{ inputs.tooling_ref }}
         VALIDATION_COMMIT_SHA: ${{ github.event.pull_request.head.sha || github.sha }}
diff --git a/validation/context/context_builder.py b/validation/context/context_builder.py
index 07c489ba..b0cfacba 100644
--- a/validation/context/context_builder.py
+++ b/validation/context/context_builder.py
@@ -123,6 +123,24 @@ class ValidationContext:
     workflow_run_url: str
     tooling_ref: str
 
+    # Release-plan validation context (Step 6b outputs; defaults when absent)
+    # commonalities_release_changed / icm_release_changed: True when the
+    # respective dependency declaration differs between base and head.
+    # release_plan_check_only: True when a Commonalities advance is detected —
+    # orchestrator skips Spectral/gherkin engines and post-filter keeps only
+    # rules in the release-plan-validation group.  ICM advance does NOT set
+    # this flag (no common files to sync).
+    # *_tag_exists: tri-state — True (confirmed), False (confirmed missing),
+    # None (check did not run or API lookup failed).
+    # non_release_plan_files_changed: files co-changed alongside release-plan.yaml
+    # in the current PR diff (P-022 exclusivity input).
+    commonalities_release_changed: bool = False
+    icm_release_changed: bool = False
+    release_plan_check_only: bool = False
+    commonalities_tag_exists: Optional[bool] = None
+    icm_tag_exists: Optional[bool] = None
+    non_release_plan_files_changed: Tuple[str, ...] = ()
+
     def to_dict(self) -> dict:
         """Serialize to dict with all keys present.
 
@@ -264,6 +282,12 @@ def build_validation_context(
     workflow_run_url: str = "",
     tooling_ref: str = "",
     commonalities_version: Optional[str] = None,
+    release_plan_check_only: bool = False,
+    commonalities_release_changed: bool = False,
+    icm_release_changed: bool = False,
+    commonalities_tag_exists: Optional[bool] = None,
+    icm_tag_exists: Optional[bool] = None,
+    non_release_plan_files_changed: Tuple[str, ...] = (),
 ) -> ValidationContext:
     """Assemble the unified validation context.
 
@@ -348,4 +372,10 @@ def build_validation_context(
         apis=api_contexts,
         workflow_run_url=workflow_run_url,
         tooling_ref=tooling_ref,
+        commonalities_release_changed=commonalities_release_changed,
+        icm_release_changed=icm_release_changed,
+        release_plan_check_only=release_plan_check_only,
+        commonalities_tag_exists=commonalities_tag_exists,
+        icm_tag_exists=icm_tag_exists,
+        non_release_plan_files_changed=non_release_plan_files_changed,
     )
diff --git a/validation/engines/python_checks/__init__.py b/validation/engines/python_checks/__init__.py
index b4b5ad7f..01633e48 100644
--- a/validation/engines/python_checks/__init__.py
+++ b/validation/engines/python_checks/__init__.py
@@ -12,7 +12,12 @@
 from .metadata_checks import check_commonalities_version
 from .readme_checks import check_readme_placeholder_removal
 from .common_cache_checks import check_common_cache_sync
-from .release_plan_checks import check_orphan_api_definitions, check_release_plan_semantics
+from .release_plan_checks import (
+    check_declared_dependency_tags_exist,
+    check_orphan_api_definitions,
+    check_release_plan_exclusivity,
+    check_release_plan_semantics,
+)
 from .release_review_checks import check_release_review_file_restriction
 from .subscription_checks import (
     check_cloudevent_via_ref,
@@ -57,6 +62,8 @@
     CheckDescriptor("check-release-review-file-restriction", CheckScope.REPO, check_release_review_file_restriction),
     CheckDescriptor("check-orphan-api-definitions", CheckScope.REPO, check_orphan_api_definitions),
     CheckDescriptor("check-common-cache-sync", CheckScope.REPO, check_common_cache_sync),
+    CheckDescriptor("check-release-plan-exclusivity", CheckScope.REPO, check_release_plan_exclusivity),
+    CheckDescriptor("check-declared-dependency-tags-exist", CheckScope.REPO, check_declared_dependency_tags_exist),
 ]
 
 __all__ = ["CHECKS", "CheckDescriptor", "CheckScope"]
diff --git a/validation/engines/python_checks/release_plan_checks.py b/validation/engines/python_checks/release_plan_checks.py
index 30dd1d84..94b18dd8 100644
--- a/validation/engines/python_checks/release_plan_checks.py
+++ b/validation/engines/python_checks/release_plan_checks.py
@@ -329,3 +329,154 @@ def check_orphan_api_definitions(
         )
         for name in orphans
     ]
+
+
+# ---------------------------------------------------------------------------
+# P-022: check-release-plan-exclusivity
+# ---------------------------------------------------------------------------
+
+
+def check_release_plan_exclusivity(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Flag non-release-plan files co-changed with release-plan.yaml.
+
+    Repo-level check.  Reads ``context.non_release_plan_files_changed``
+    (populated by the workflow layer when release-plan.yaml is in the
+    diff).  Emits one error finding listing the co-changed files so the
+    codeowner can split the PR.
+    """
+    other_files = context.non_release_plan_files_changed
+    if not other_files:
+        return []
+
+    # Cap the listed files to keep the message readable; full list is
+    # still visible in the PR diff.
+    preview_limit = 10
+    file_list = list(other_files)
+    if len(file_list) > preview_limit:
+        preview = ", ".join(file_list[:preview_limit])
+        suffix = f", and {len(file_list) - preview_limit} more"
+    else:
+        preview = ", ".join(file_list)
+        suffix = ""
+
+    return [
+        make_finding(
+            engine_rule="check-release-plan-exclusivity",
+            level="error",
+            message=(
+                f"release-plan.yaml was changed alongside "
+                f"{len(file_list)} other file(s): {preview}{suffix}. "
+                f"release-plan.yaml changes should be submitted in a "
+                f"dedicated PR so that any new validation findings remain "
+                f"clearly attributable to the release-plan change."
+            ),
+            path=_RELEASE_PLAN_PATH,
+            line=1,
+        )
+    ]
+
+
+# ---------------------------------------------------------------------------
+# P-023: check-declared-dependency-tags-exist
+# ---------------------------------------------------------------------------
+
+# Dependency spec: (YAML field name, display name, source repo,
+# context-flag attribute, context-tag-exists attribute).  YAML field
+# names match release-plan-schema.yaml; display names mirror the short
+# form used in user-facing messages.
+_DEPENDENCY_SPEC = [
+    (
+        "commonalities_release",
+        "commonalities_release",
+        "camaraproject/Commonalities",
+        "commonalities_release_changed",
+        "commonalities_tag_exists",
+    ),
+    (
+        "identity_consent_management_release",
+        "icm_release",
+        "camaraproject/IdentityAndConsentManagement",
+        "icm_release_changed",
+        "icm_tag_exists",
+    ),
+]
+
+
+def check_declared_dependency_tags_exist(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Verify that declared dependency tags exist in their source repos.
+
+    Repo-level check.  For each dependency (``commonalities_release``,
+    ``identity_consent_management_release``):
+
+    - If the declaration did not change in this PR's diff, skip (the
+      existing state is not this PR's responsibility).
+    - If the declaration changed and the tag was confirmed absent by the
+      workflow layer (``_tag_exists == False``), emit an error.
+    - If the declaration changed and the workflow layer could not verify
+      the tag (``_tag_exists is None``), emit a warn finding so the
+      codeowner is aware the check was skipped.
+    """
+    plan_path = repo_path / _RELEASE_PLAN_PATH
+    release_plan = load_yaml_safe(plan_path)
+    if release_plan is None:
+        return []
+
+    dependencies = release_plan.get("dependencies") or {}
+
+    findings: List[dict] = []
+
+    for (
+        yaml_field,
+        display_name,
+        source_repo,
+        changed_attr,
+        exists_attr,
+    ) in _DEPENDENCY_SPEC:
+        if not getattr(context, changed_attr, False):
+            # Declaration unchanged in this PR — skip (fail open).
+            continue
+
+        declared_tag = dependencies.get(yaml_field)
+        if not declared_tag:
+            # Declaration advanced to null/removed — not P-023's concern
+            # (schema or P-009 semantics handle this).
+            continue
+
+        exists = getattr(context, exists_attr, None)
+
+        if exists is False:
+            findings.append(
+                make_finding(
+                    engine_rule="check-declared-dependency-tags-exist",
+                    level="error",
+                    message=(
+                        f"Declared {display_name} tag '{declared_tag}' "
+                        f"does not exist in {source_repo}. Verify the "
+                        f"tag name or publish it before advancing the "
+                        f"dependency."
+                    ),
+                    path=_RELEASE_PLAN_PATH,
+                    line=1,
+                )
+            )
+        elif exists is None:
+            findings.append(
+                make_finding(
+                    engine_rule="check-declared-dependency-tags-exist",
+                    level="warn",
+                    message=(
+                        f"Could not verify that {display_name} tag "
+                        f"'{declared_tag}' exists in {source_repo} "
+                        f"(GitHub API lookup unavailable). Re-run the "
+                        f"workflow to retry, or confirm the tag manually."
+                    ),
+                    path=_RELEASE_PLAN_PATH,
+                    line=1,
+                )
+            )
+
+    return findings
diff --git a/validation/orchestrator.py b/validation/orchestrator.py
index abc67d64..c81f9d5e 100644
--- a/validation/orchestrator.py
+++ b/validation/orchestrator.py
@@ -88,6 +88,14 @@ class OrchestratorArgs:
     commit_sha: str
     commonalities_version: Optional[str]
 
+    # Release-plan validation context (Step 6b outputs from validation.yml)
+    release_plan_check_only: bool
+    commonalities_release_changed: bool
+    icm_release_changed: bool
+    commonalities_tag_exists: Optional[bool]
+    icm_tag_exists: Optional[bool]
+    non_release_plan_files_changed: Tuple[str, ...]
+
 
 def _env(name: str, default: str = "") -> str:
     """Read a VALIDATION_* environment variable."""
@@ -115,6 +123,32 @@ def _env_optional_bool(name: str) -> Optional[bool]:
     return None
 
 
+def _env_bool(name: str, default: bool = False) -> bool:
+    """Read an env var as a bool with an explicit default."""
+    raw = _env(name).lower()
+    if raw in ("true", "1", "yes"):
+        return True
+    if raw in ("false", "0", "no"):
+        return False
+    return default
+
+
+def _env_json_list(name: str) -> Tuple[str, ...]:
+    """Read an env var as a JSON array of strings.  Returns empty tuple
+    on missing/invalid input.
+    """
+    raw = _env(name)
+    if not raw:
+        return ()
+    try:
+        value = json.loads(raw)
+    except json.JSONDecodeError:
+        return ()
+    if not isinstance(value, list):
+        return ()
+    return tuple(str(v) for v in value)
+
+
 def parse_args() -> OrchestratorArgs:
     """Parse all inputs from VALIDATION_* environment variables."""
     return OrchestratorArgs(
@@ -134,6 +168,18 @@ def parse_args() -> OrchestratorArgs:
         tooling_ref=_env("TOOLING_REF"),
         commit_sha=_env("COMMIT_SHA"),
         commonalities_version=_env("COMMONALITIES_VERSION") or None,
+        release_plan_check_only=_env_bool("RELEASE_PLAN_CHECK_ONLY"),
+        commonalities_release_changed=_env_bool(
+            "COMMONALITIES_RELEASE_CHANGED"
+        ),
+        icm_release_changed=_env_bool("ICM_RELEASE_CHANGED"),
+        commonalities_tag_exists=_env_optional_bool(
+            "COMMONALITIES_TAG_EXISTS"
+        ),
+        icm_tag_exists=_env_optional_bool("ICM_TAG_EXISTS"),
+        non_release_plan_files_changed=_env_json_list(
+            "NON_RELEASE_PLAN_FILES_CHANGED"
+        ),
     )
 
 
@@ -200,7 +246,19 @@ def run_engines(
     all_findings: List[dict] = []
     engine_statuses: Dict[str, str] = {}
 
+    # When release_plan_check_only is true, a Commonalities dependency
+    # declaration advanced in this PR.  The code/common/ cache and API
+    # spec content on disk are still tied to the previous tag — running
+    # Spectral with the new ruleset or gherkin-lint against those files
+    # produces misleading findings (DEC-029 exclusivity principle).
+    # Skip those engines entirely; Python engine still runs but its
+    # post-filter keeps only rules gated on release_plan_changed=true.
+    skip_context_dependent = bool(
+        getattr(context, "release_plan_check_only", False)
+    )
+
     # --- yamllint ---
+    # yamllint is structural (syntax/formatting) — always safe to run.
     try:
         yamllint_config = paths.linting_config_dir / ".yamllint.yaml"
         findings = run_yamllint_engine(
@@ -215,21 +273,29 @@ def run_engines(
         logger.error("yamllint failed: %s", exc)
 
     # --- Spectral ---
-    try:
-        commonalities_release = getattr(context, "commonalities_release", None)
-        findings = run_spectral_engine(
-            repo_path=repo_path,
-            config_dir=paths.linting_config_dir,
-            commonalities_release=commonalities_release,
-        )
-        all_findings.extend(findings)
-        engine_statuses["spectral"] = f"{len(findings)} finding(s)"
-        logger.info("Spectral: %d finding(s)", len(findings))
-    except Exception as exc:
-        engine_statuses["spectral"] = f"error: {exc}"
-        logger.error("Spectral failed: %s", exc)
+    if skip_context_dependent:
+        engine_statuses["spectral"] = "skipped (release-plan-check-only mode)"
+        logger.info("Spectral: skipped (release-plan-check-only mode)")
+    else:
+        try:
+            commonalities_release = getattr(
+                context, "commonalities_release", None
+            )
+            findings = run_spectral_engine(
+                repo_path=repo_path,
+                config_dir=paths.linting_config_dir,
+                commonalities_release=commonalities_release,
+            )
+            all_findings.extend(findings)
+            engine_statuses["spectral"] = f"{len(findings)} finding(s)"
+            logger.info("Spectral: %d finding(s)", len(findings))
+        except Exception as exc:
+            engine_statuses["spectral"] = f"error: {exc}"
+            logger.error("Spectral failed: %s", exc)
 
     # --- Python checks ---
+    # Python engine always runs; post-filter drops non-release-plan rules
+    # when release_plan_check_only is true.
     try:
         findings = run_python_engine(
             repo_path=repo_path,
@@ -243,7 +309,10 @@ def run_engines(
         logger.error("Python checks failed: %s", exc)
 
     # --- gherkin-lint ---
-    if not test_files:
+    if skip_context_dependent:
+        engine_statuses["gherkin"] = "skipped (release-plan-check-only mode)"
+        logger.info("gherkin-lint: skipped (release-plan-check-only mode)")
+    elif not test_files:
         engine_statuses["gherkin"] = "skipped (no test files)"
         logger.info("gherkin-lint: skipped (no test files)")
     else:
@@ -432,6 +501,12 @@ def main() -> int:
         workflow_run_url=args.workflow_run_url,
         tooling_ref=args.tooling_ref,
         commonalities_version=args.commonalities_version,
+        release_plan_check_only=args.release_plan_check_only,
+        commonalities_release_changed=args.commonalities_release_changed,
+        icm_release_changed=args.icm_release_changed,
+        commonalities_tag_exists=args.commonalities_tag_exists,
+        icm_tag_exists=args.icm_tag_exists,
+        non_release_plan_files_changed=args.non_release_plan_files_changed,
     )
     logger.info(
         "Context: branch=%s trigger=%s profile=%s release_review=%s apis=%d",
diff --git a/validation/postfilter/engine.py b/validation/postfilter/engine.py
index 6325b5c6..a1fe3b1e 100644
--- a/validation/postfilter/engine.py
+++ b/validation/postfilter/engine.py
@@ -263,6 +263,21 @@ def run_post_filter(
             if not is_applicable(rule.applicability, context, api_ctx):
                 continue
 
+            # Release-plan-check-only gate — when a Commonalities dependency
+            # declaration has advanced in this PR, the code/common/ cache
+            # and on-disk content are stale relative to the declared tag.
+            # Running version-context-dependent rules against that stale
+            # content produces misleading findings (DEC-029 exclusivity
+            # principle).  Only rules that explicitly gate on
+            # release_plan_changed: true survive — those are release-plan
+            # validation rules (P-009, P-022, P-023) which check the
+            # release-plan.yaml content itself, not the consumption side.
+            if (
+                context.release_plan_check_only
+                and rule.applicability.get("release_plan_changed") is not True
+            ):
+                continue
+
             # Conditional level resolution (skip for identity-only entries)
             if rule.conditional_level is not None:
                 resolved_level = resolve_level(rule, context, api_ctx)
diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
index 62a2dda4..1a184451 100644
--- a/validation/rules/python-rules.yaml
+++ b/validation/rules/python-rules.yaml
@@ -253,3 +253,33 @@
   hint: >-
     Merge the auto-created sync PR or trigger the release automation
     workflow manually (workflow_dispatch) to update code/common/ files.
+
+# P-022: check-release-plan-exclusivity
+# release-plan.yaml changes must be in their own PR — any co-changed
+# files are reported so that any new validation errors stay clearly
+# attributable to the release-plan change rather than masked by
+# unrelated code changes in the same PR.
+- id: P-022
+  engine: python
+  engine_rule: check-release-plan-exclusivity
+  applicability:
+    release_plan_changed: true
+  conditional_level:
+    default: error
+  hint: >-
+    Split this PR: keep release-plan.yaml changes in a dedicated PR,
+    and move the other listed files to a separate PR.
+
+# P-023: check-declared-dependency-tags-exist
+# When a dependency declaration (commonalities_release or icm_release)
+# advances in this PR, verify the declared tag exists in the source
+# repository.  Confirmed missing → error; API lookup failure → warn.
+# When the declaration is unchanged in the diff, the check is skipped
+# (pre-existing state is not this PR's responsibility).
+- id: P-023
+  engine: python
+  engine_rule: check-declared-dependency-tags-exist
+  applicability:
+    release_plan_changed: true
+  conditional_level:
+    default: error
diff --git a/validation/tests/test_context_builder.py b/validation/tests/test_context_builder.py
index 71cd5fe3..06eea8ff 100644
--- a/validation/tests/test_context_builder.py
+++ b/validation/tests/test_context_builder.py
@@ -239,6 +239,11 @@ def test_all_keys_present(self, sample_context):
             "commonalities_version", "icm_release",
             "base_ref", "is_release_review_pr", "release_plan_changed",
             "pr_number", "apis", "workflow_run_url", "tooling_ref",
+            # Release-plan validation context (Step 6b outputs)
+            "commonalities_release_changed", "icm_release_changed",
+            "release_plan_check_only",
+            "commonalities_tag_exists", "icm_tag_exists",
+            "non_release_plan_files_changed",
         }
         assert set(d.keys()) == expected_keys
 
diff --git a/validation/tests/test_orchestrator.py b/validation/tests/test_orchestrator.py
index 6851ad2a..b5157f59 100644
--- a/validation/tests/test_orchestrator.py
+++ b/validation/tests/test_orchestrator.py
@@ -101,6 +101,12 @@ def _make_context(**overrides):
         "apis": (),
         "workflow_run_url": "https://github.com/example/runs/1",
         "tooling_ref": "abc123",
+        "release_plan_check_only": False,
+        "commonalities_release_changed": False,
+        "icm_release_changed": False,
+        "commonalities_tag_exists": None,
+        "icm_tag_exists": None,
+        "non_release_plan_files_changed": (),
     }
     defaults.update(overrides)
     ctx = MagicMock()
@@ -338,6 +344,57 @@ def test_engine_exception_captured(
         assert "error:" in statuses["yamllint"]
         assert "finding(s)" in statuses["spectral"]
 
+    @patch("validation.orchestrator.run_gherkin_engine")
+    @patch("validation.orchestrator.run_python_engine")
+    @patch("validation.orchestrator.run_spectral_engine")
+    @patch("validation.orchestrator.run_yamllint_engine")
+    def test_release_plan_check_only_skips_spectral_and_gherkin(
+        self, mock_yamllint, mock_spectral, mock_python, mock_gherkin, paths
+    ):
+        """On a Commonalities advance (release_plan_check_only=True):
+        yamllint + Python still run; Spectral and gherkin are skipped
+        with explicit status messages.
+        """
+        mock_yamllint.return_value = []
+        mock_python.return_value = []
+        context = _make_context(release_plan_check_only=True)
+
+        findings, statuses = run_engines(
+            Path("/repo"), paths, context,
+            test_files=[Path("some.feature")],
+        )
+
+        assert mock_yamllint.called
+        assert mock_python.called
+        assert not mock_spectral.called
+        assert not mock_gherkin.called
+        assert "release-plan-check-only" in statuses["spectral"]
+        assert "release-plan-check-only" in statuses["gherkin"]
+
+    @patch("validation.orchestrator.run_gherkin_engine")
+    @patch("validation.orchestrator.run_python_engine")
+    @patch("validation.orchestrator.run_spectral_engine")
+    @patch("validation.orchestrator.run_yamllint_engine")
+    def test_release_plan_check_only_false_runs_all_engines(
+        self, mock_yamllint, mock_spectral, mock_python, mock_gherkin, paths
+    ):
+        """Default context (release_plan_check_only=False) runs all engines."""
+        mock_yamllint.return_value = []
+        mock_spectral.return_value = []
+        mock_python.return_value = []
+        mock_gherkin.return_value = []
+        context = _make_context(release_plan_check_only=False)
+
+        findings, statuses = run_engines(
+            Path("/repo"), paths, context,
+            test_files=[Path("some.feature")],
+        )
+
+        assert mock_yamllint.called
+        assert mock_spectral.called
+        assert mock_python.called
+        assert mock_gherkin.called
+
 
 # ---------------------------------------------------------------------------
 # TestWriteOutputs
diff --git a/validation/tests/test_postfilter_engine.py b/validation/tests/test_postfilter_engine.py
index 6045d751..9b01ff32 100644
--- a/validation/tests/test_postfilter_engine.py
+++ b/validation/tests/test_postfilter_engine.py
@@ -32,6 +32,8 @@ def _make_context(
     commonalities_release: str | None = "r4.1",
     is_release_review_pr: bool = False,
     apis: tuple[ApiContext, ...] = (),
+    release_plan_changed: bool | None = None,
+    release_plan_check_only: bool = False,
 ) -> ValidationContext:
     return ValidationContext(
         repository="TestRepo",
@@ -45,11 +47,12 @@ def _make_context(
         icm_release=None,
         base_ref=None,
         is_release_review_pr=is_release_review_pr,
-        release_plan_changed=None,
+        release_plan_changed=release_plan_changed,
         pr_number=None,
         apis=apis,
         workflow_run_url="",
         tooling_ref="",
+        release_plan_check_only=release_plan_check_only,
     )
 
 
@@ -282,6 +285,74 @@ def test_applicability_filters_finding(self, tmp_path: Path):
         assert result.findings == []
         assert result.result == "pass"
 
+    def test_release_plan_check_only_keeps_release_plan_gated_rules(
+        self, tmp_path: Path
+    ):
+        """On a Commonalities advance, rules gated on release_plan_changed=true pass through."""
+        _write_rules(tmp_path, [
+            _minimal_rule(
+                engine_rule="some-rule",
+                applicability={"release_plan_changed": True},
+            )
+        ])
+        ctx = _make_context(
+            release_plan_changed=True,
+            release_plan_check_only=True,
+        )
+        findings = [_make_finding()]
+        result = run_post_filter(findings, ctx, tmp_path)
+        assert len(result.findings) == 1
+
+    def test_release_plan_check_only_drops_rules_without_release_plan_gate(
+        self, tmp_path: Path
+    ):
+        """On a Commonalities advance, rules without release_plan_changed applicability are dropped."""
+        _write_rules(tmp_path, [
+            _minimal_rule(engine_rule="some-rule")  # no applicability
+        ])
+        ctx = _make_context(
+            release_plan_changed=True,
+            release_plan_check_only=True,
+        )
+        findings = [_make_finding()]
+        result = run_post_filter(findings, ctx, tmp_path)
+        assert result.findings == []
+
+    def test_release_plan_check_only_drops_false_gated_rules(
+        self, tmp_path: Path
+    ):
+        """Rules gated on release_plan_changed=false would already fail the
+        applicability check (release_plan_changed is true under
+        release_plan_check_only).  Verifies the gate doesn't accidentally
+        keep them.
+        """
+        _write_rules(tmp_path, [
+            _minimal_rule(
+                engine_rule="some-rule",
+                applicability={"release_plan_changed": False},
+            )
+        ])
+        ctx = _make_context(
+            release_plan_changed=True,
+            release_plan_check_only=True,
+        )
+        findings = [_make_finding()]
+        result = run_post_filter(findings, ctx, tmp_path)
+        assert result.findings == []
+
+    def test_release_plan_check_only_false_does_not_filter(
+        self, tmp_path: Path
+    ):
+        """When release_plan_check_only is false (normal PR), rules without
+        a release_plan_changed applicability should run normally."""
+        _write_rules(tmp_path, [
+            _minimal_rule(engine_rule="some-rule")
+        ])
+        ctx = _make_context(release_plan_check_only=False)
+        findings = [_make_finding()]
+        result = run_post_filter(findings, ctx, tmp_path)
+        assert len(result.findings) == 1
+
     def test_level_muted_removes_finding(self, tmp_path: Path):
         """Level resolved to 'muted' removes the finding."""
         _write_rules(tmp_path, [
diff --git a/validation/tests/test_python_checks_release_plan.py b/validation/tests/test_python_checks_release_plan.py
index 0342f0c7..d9a63093 100644
--- a/validation/tests/test_python_checks_release_plan.py
+++ b/validation/tests/test_python_checks_release_plan.py
@@ -13,7 +13,9 @@
     _check_file_existence,
     _check_release_type_consistency,
     _check_track_consistency,
+    check_declared_dependency_tags_exist,
     check_orphan_api_definitions,
+    check_release_plan_exclusivity,
     check_release_plan_semantics,
 )
 
@@ -312,3 +314,229 @@ def test_non_yaml_files_ignored(self, tmp_path: Path):
         (api_dir / "README.md").touch()
         findings = check_orphan_api_definitions(tmp_path, _make_context())
         assert findings == []
+
+
+# ---------------------------------------------------------------------------
+# TestCheckReleasePlanExclusivity (P-022)
+# ---------------------------------------------------------------------------
+
+
+def _context_with_other_files(*files: str) -> ValidationContext:
+    """Build a context with a populated non_release_plan_files_changed."""
+    base = _make_context()
+    return ValidationContext(
+        repository=base.repository,
+        branch_type=base.branch_type,
+        trigger_type=base.trigger_type,
+        profile=base.profile,
+        stage=base.stage,
+        target_release_type=base.target_release_type,
+        commonalities_release=base.commonalities_release,
+        commonalities_version=base.commonalities_version,
+        icm_release=base.icm_release,
+        base_ref=base.base_ref,
+        is_release_review_pr=base.is_release_review_pr,
+        release_plan_changed=True,
+        pr_number=base.pr_number,
+        apis=base.apis,
+        workflow_run_url=base.workflow_run_url,
+        tooling_ref=base.tooling_ref,
+        non_release_plan_files_changed=tuple(files),
+    )
+
+
+class TestCheckReleasePlanExclusivity:
+    def test_no_other_files(self, tmp_path: Path):
+        context = _context_with_other_files()
+        assert check_release_plan_exclusivity(tmp_path, context) == []
+
+    def test_single_other_file(self, tmp_path: Path):
+        context = _context_with_other_files("code/API_definitions/qod.yaml")
+        findings = check_release_plan_exclusivity(tmp_path, context)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert findings[0]["engine_rule"] == "check-release-plan-exclusivity"
+        assert findings[0]["path"] == "release-plan.yaml"
+        assert "code/API_definitions/qod.yaml" in findings[0]["message"]
+        assert "1 other file" in findings[0]["message"]
+
+    def test_multiple_other_files(self, tmp_path: Path):
+        files = [
+            "code/API_definitions/qod.yaml",
+            "code/Test_definitions/qod.feature",
+            "CHANGELOG.md",
+        ]
+        context = _context_with_other_files(*files)
+        findings = check_release_plan_exclusivity(tmp_path, context)
+        assert len(findings) == 1
+        assert "3 other file" in findings[0]["message"]
+        for f in files:
+            assert f in findings[0]["message"]
+
+    def test_preview_truncation_over_ten_files(self, tmp_path: Path):
+        files = [f"file-{i}.yaml" for i in range(15)]
+        context = _context_with_other_files(*files)
+        findings = check_release_plan_exclusivity(tmp_path, context)
+        assert len(findings) == 1
+        msg = findings[0]["message"]
+        # First 10 files listed, remaining count summarised
+        for f in files[:10]:
+            assert f in msg
+        assert "and 5 more" in msg
+
+    def test_default_context_has_no_other_files(self, tmp_path: Path):
+        # Ensures _make_context() default does not trigger the rule.
+        assert check_release_plan_exclusivity(tmp_path, _make_context()) == []
+
+
+# ---------------------------------------------------------------------------
+# TestCheckDeclaredDependencyTagsExist (P-023)
+# ---------------------------------------------------------------------------
+
+
+def _context_with_dependency_changes(
+    *,
+    commonalities_release_changed: bool = False,
+    icm_release_changed: bool = False,
+    commonalities_tag_exists: bool | None = None,
+    icm_tag_exists: bool | None = None,
+) -> ValidationContext:
+    base = _make_context()
+    return ValidationContext(
+        repository=base.repository,
+        branch_type=base.branch_type,
+        trigger_type=base.trigger_type,
+        profile=base.profile,
+        stage=base.stage,
+        target_release_type=base.target_release_type,
+        commonalities_release=base.commonalities_release,
+        commonalities_version=base.commonalities_version,
+        icm_release=base.icm_release,
+        base_ref=base.base_ref,
+        is_release_review_pr=base.is_release_review_pr,
+        release_plan_changed=True,
+        pr_number=base.pr_number,
+        apis=base.apis,
+        workflow_run_url=base.workflow_run_url,
+        tooling_ref=base.tooling_ref,
+        commonalities_release_changed=commonalities_release_changed,
+        icm_release_changed=icm_release_changed,
+        commonalities_tag_exists=commonalities_tag_exists,
+        icm_tag_exists=icm_tag_exists,
+    )
+
+
+def _write_release_plan_with_dependencies(
+    tmp_path: Path,
+    commonalities: str | None = "r4.2",
+    icm: str | None = "r2.3",
+) -> None:
+    plan = _make_plan()
+    deps: dict = {}
+    if commonalities is not None:
+        deps["commonalities_release"] = commonalities
+    if icm is not None:
+        # Schema field name (not the shorter context attribute 'icm_release').
+        deps["identity_consent_management_release"] = icm
+    plan["dependencies"] = deps
+    _write_release_plan(tmp_path, plan)
+
+
+class TestCheckDeclaredDependencyTagsExist:
+    def test_no_release_plan(self, tmp_path: Path):
+        context = _context_with_dependency_changes(
+            commonalities_release_changed=True,
+            commonalities_tag_exists=False,
+        )
+        assert check_declared_dependency_tags_exist(tmp_path, context) == []
+
+    def test_no_dependency_changed(self, tmp_path: Path):
+        _write_release_plan_with_dependencies(tmp_path)
+        # Default context: no *_release_changed flags set
+        assert check_declared_dependency_tags_exist(tmp_path, _make_context()) == []
+
+    def test_commonalities_changed_tag_exists(self, tmp_path: Path):
+        _write_release_plan_with_dependencies(tmp_path)
+        context = _context_with_dependency_changes(
+            commonalities_release_changed=True,
+            commonalities_tag_exists=True,
+        )
+        assert check_declared_dependency_tags_exist(tmp_path, context) == []
+
+    def test_commonalities_changed_tag_missing(self, tmp_path: Path):
+        _write_release_plan_with_dependencies(tmp_path, commonalities="r9.9")
+        context = _context_with_dependency_changes(
+            commonalities_release_changed=True,
+            commonalities_tag_exists=False,
+        )
+        findings = check_declared_dependency_tags_exist(tmp_path, context)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert findings[0]["engine_rule"] == "check-declared-dependency-tags-exist"
+        assert "r9.9" in findings[0]["message"]
+        assert "camaraproject/Commonalities" in findings[0]["message"]
+
+    def test_commonalities_changed_lookup_failed(self, tmp_path: Path):
+        _write_release_plan_with_dependencies(tmp_path, commonalities="r4.2")
+        context = _context_with_dependency_changes(
+            commonalities_release_changed=True,
+            commonalities_tag_exists=None,
+        )
+        findings = check_declared_dependency_tags_exist(tmp_path, context)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "warn"
+        assert "r4.2" in findings[0]["message"]
+        assert "Could not verify" in findings[0]["message"]
+
+    def test_commonalities_changed_declaration_removed(self, tmp_path: Path):
+        # Dependency declaration was advanced to null — not P-023's
+        # concern (P-009 / schema handles this).
+        _write_release_plan_with_dependencies(tmp_path, commonalities=None)
+        context = _context_with_dependency_changes(
+            commonalities_release_changed=True,
+            commonalities_tag_exists=None,
+        )
+        assert check_declared_dependency_tags_exist(tmp_path, context) == []
+
+    def test_icm_changed_tag_missing(self, tmp_path: Path):
+        _write_release_plan_with_dependencies(tmp_path, icm="r9.9")
+        context = _context_with_dependency_changes(
+            icm_release_changed=True,
+            icm_tag_exists=False,
+        )
+        findings = check_declared_dependency_tags_exist(tmp_path, context)
+        assert len(findings) == 1
+        assert findings[0]["level"] == "error"
+        assert "r9.9" in findings[0]["message"]
+        assert "camaraproject/IdentityAndConsentManagement" in findings[0]["message"]
+
+    def test_both_changed_both_missing(self, tmp_path: Path):
+        _write_release_plan_with_dependencies(
+            tmp_path, commonalities="r9.9", icm="r9.9"
+        )
+        context = _context_with_dependency_changes(
+            commonalities_release_changed=True,
+            commonalities_tag_exists=False,
+            icm_release_changed=True,
+            icm_tag_exists=False,
+        )
+        findings = check_declared_dependency_tags_exist(tmp_path, context)
+        assert len(findings) == 2
+        messages = "\n".join(f["message"] for f in findings)
+        assert "commonalities_release" in messages
+        assert "icm_release" in messages
+
+    def test_icm_changed_commonalities_unchanged(self, tmp_path: Path):
+        # ICM-only advance: commonalities_release unchanged, icm_release changed.
+        # Only ICM tag checked.
+        _write_release_plan_with_dependencies(
+            tmp_path, commonalities="r4.2", icm="r9.9"
+        )
+        context = _context_with_dependency_changes(
+            commonalities_release_changed=False,
+            icm_release_changed=True,
+            icm_tag_exists=False,
+        )
+        findings = check_declared_dependency_tags_exist(tmp_path, context)
+        assert len(findings) == 1
+        assert "icm_release" in findings[0]["message"]
diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py
index 69c9f1cd..f305ea6c 100644
--- a/validation/tests/test_rule_metadata_integrity.py
+++ b/validation/tests/test_rule_metadata_integrity.py
@@ -77,7 +77,7 @@ def test_expected_rule_counts(self, all_rules):
         counts = {}
         for r in all_rules:
             counts[r.engine] = counts.get(r.engine, 0) + 1
-        assert counts["python"] == 21
+        assert counts["python"] == 23
         assert counts["spectral"] == 84
         assert counts["gherkin"] == 25
         assert counts["yamllint"] == 13
@@ -306,8 +306,8 @@ def test_hints_are_exception_not_norm(self, all_rules):
         """
         with_hints = [r.id for r in all_rules if r.hint is not None]
         with_overrides = [r.id for r in all_rules if r.message_override is not None]
-        assert len(with_hints) == 14, (
-            f"Expected 14 explicit hints (update test if adding hints): "
+        assert len(with_hints) == 15, (
+            f"Expected 15 explicit hints (update test if adding hints): "
             f"{with_hints}"
         )
         assert len(with_overrides) == 0, (

From ac739248ea40f26bf8824acb864bc4890610f8fd Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Tue, 21 Apr 2026 22:18:17 +0200
Subject: [PATCH 112/157] fix(release-automation): regenerate TOC correctly for
 legacy suffixed headings

The CHANGELOG TOC regenerator missed release-tag headings carrying
trailing descriptor text (e.g. `# r2.2 - Fall25 public release`,
`# r0.9.3 - rc`) and emitted anchors built from only the captured
tag, which does not match GitHub's rendered anchor when the heading
has additional text.

Fix in `release_automation/scripts/changelog_generator.py`:

- `_extract_toc_entries` regex relaxed from `^# (r\d+\.\d+)\s*$` to
  `^#\s+r\d+\.\d+\b`, and the entry now carries the **full heading
  text** (everything after `# `) rather than just the captured tag.
- `_format_toc` uses that full heading text for both the TOC link
  label and anchor generation, so link labels mirror the underlying
  heading and anchors match what GitHub actually renders.

Effect: TOC for repos on the DEC-033 flat-mode fallback path (SED
r2.3 maintenance release scenario) contains accurate entries for
every legacy release section, including three-part pre-standardization
tags like `r0.9.3 - rc`. Verified end-to-end on the SED fork.

7 new tests cover: suffixed-heading extraction, mixed plain/suffixed
extraction, three-part legacy tag capture, link label using full
heading text, three-part tag preservation in rendered TOC, and an
end-to-end SED-style scenario. Full suite: 1451 / 1451 passing.
---
 .../scripts/changelog_generator.py            |  20 +++-
 .../tests/test_changelog_generator.py         | 108 ++++++++++++++++++
 2 files changed, 122 insertions(+), 6 deletions(-)

diff --git a/release_automation/scripts/changelog_generator.py b/release_automation/scripts/changelog_generator.py
index 8a24be85..e7743dd2 100644
--- a/release_automation/scripts/changelog_generator.py
+++ b/release_automation/scripts/changelog_generator.py
@@ -345,9 +345,15 @@ def _heading_to_anchor(heading_text: str) -> str:
     def _extract_toc_entries(content: str) -> List[Dict[str, Any]]:
         """Extract TOC entries from file content by scanning level-1 headings.
 
-        Finds all ``# rX.Y`` headings and determines if the release is a
-        public release (bold in TOC) by checking the "This {type} contains"
-        line in the few lines following the heading.
+        Finds all ``# rX.Y`` release-tag headings (optionally followed by
+        trailing descriptor text, e.g. ``# r2.2 - Fall25 public release``)
+        and determines if the release is a public release (bold in TOC)
+        by checking the "This {type} contains" line in the few lines
+        following the heading.
+
+        ``heading`` is the full heading text after ``# `` — used both as
+        the TOC link label and as the anchor source so link text and
+        anchor track what the underlying heading actually says.
 
         Returns:
             List of dicts ordered by appearance (newest first):
@@ -356,10 +362,9 @@ def _extract_toc_entries(content: str) -> List[Dict[str, Any]]:
         lines = content.split("\n")
         entries: List[Dict[str, Any]] = []
         for i, line in enumerate(lines):
-            match = re.match(r"^# (r\d+\.\d+)\s*$", line)
-            if not match:
+            if not re.match(r"^#\s+r\d+\.\d+\b", line):
                 continue
-            heading = match.group(1)
+            heading = line.lstrip("#").strip()
             is_public = False
             for j in range(i + 1, min(i + 6, len(lines))):
                 if re.search(
@@ -376,6 +381,9 @@ def _format_toc(entries: List[Dict[str, Any]]) -> str:
         """Format TOC entries into markdown.
 
         Public/maintenance releases are bold, pre-releases are plain.
+        Link text and anchor both derive from the full heading text so
+        the TOC faithfully mirrors each heading and the anchor matches
+        GitHub's rendered anchor for that heading.
 
         Returns:
             TOC section string including ``## Table of Contents`` heading,
diff --git a/release_automation/tests/test_changelog_generator.py b/release_automation/tests/test_changelog_generator.py
index 2410532f..d7664929 100644
--- a/release_automation/tests/test_changelog_generator.py
+++ b/release_automation/tests/test_changelog_generator.py
@@ -521,6 +521,51 @@ def test_flat_write_updates_toc_idempotently(self, generator, tmp_path):
         assert "[r2.4](#r24)" in result
         assert "[r2.3](#r23)" in result
 
+    def test_flat_write_end_to_end_sed_style_headings(self, generator, tmp_path):
+        """End-to-end: a repo with SED-style suffixed headings
+        (``# r2.2 - Fall25 public release``) gets a new maintenance
+        section prepended. The regenerated TOC lists every legacy
+        heading with the anchor GitHub actually renders for the full
+        heading text, not the short-tag anchor."""
+        # Simulate SimpleEdgeDiscovery's legacy CHANGELOG.md: mixed heading
+        # styles, one with a trailing descriptor, one without.
+        legacy = (
+            "# Changelog Simple Edge Discovery\n\n"
+            "# r2.2 - Fall25 public release\n\n"
+            "This public release contains the definition and documentation of\n"
+            "* simple-edge-discovery v2.0.0\n\n"
+            "# r2.1 - rc\n\n"
+            "This pre-release contains the definition\n\n"
+            "# r1.3\n\n"
+            "This public release contains the definition\n"
+        )
+        (tmp_path / "CHANGELOG.md").write_text(legacy)
+
+        new_section = (
+            "# r2.3\n\n## Release Notes\n\n"
+            "This maintenance release contains patches\n"
+        )
+        generator.write_changelog(
+            str(tmp_path),
+            new_section,
+            "r2.3",
+            "SimpleEdgeDiscovery",
+            release_type="maintenance-release",
+        )
+
+        result = (tmp_path / "CHANGELOG.md").read_text()
+
+        # New section lands before the first legacy section
+        assert result.index("# r2.3") < result.index("# r2.2 - Fall25")
+
+        # TOC contains an entry for every release-tag heading, with
+        # link text and anchor both derived from the full heading text
+        # so anchors match GitHub's rendering and link labels match the
+        # underlying headings verbatim.
+        assert "[r2.3](#r23)" in result
+        assert "[r2.2 - Fall25 public release](#r22---fall25-public-release)" in result
+        assert "[r2.1 - rc](#r21---rc)" in result
+        assert "[r1.3](#r13)" in result
 
 # --- Table of Contents ---
 
@@ -577,6 +622,49 @@ def test_extract_entries_pre_release_not_public(self):
         entries = ChangelogGenerator._extract_toc_entries(content)
         assert entries[0]["is_public"] is False
 
+    def test_extract_entries_matches_heading_with_suffix(self):
+        """Legacy headings may carry trailing descriptor text such as
+        ``# r2.2 - Fall25 public release``. The full heading text is
+        captured so both link label and anchor track what the heading
+        actually says."""
+        content = (
+            "# r2.2 - Fall25 public release\n\n"
+            "## Release Notes\n\n"
+            "This public release contains the definition\n"
+        )
+        entries = ChangelogGenerator._extract_toc_entries(content)
+        assert len(entries) == 1
+        assert entries[0]["heading"] == "r2.2 - Fall25 public release"
+        assert entries[0]["is_public"] is True
+
+    def test_extract_entries_mixed_plain_and_suffixed(self):
+        """Mixed heading styles each get their full heading text in
+        the entry's ``heading`` field."""
+        content = (
+            "# r2.2 - Fall25 public release\n\n"
+            "This public release contains the definition\n\n"
+            "# r2.1 - rc\n\n"
+            "This pre-release contains the definition\n\n"
+            "# r1.3\n\n"
+            "This public release contains the definition\n"
+        )
+        entries = ChangelogGenerator._extract_toc_entries(content)
+        assert [e["heading"] for e in entries] == [
+            "r2.2 - Fall25 public release",
+            "r2.1 - rc",
+            "r1.3",
+        ]
+
+    def test_extract_entries_matches_three_part_legacy_tag(self):
+        """Pre-standardization repos have three-part tags like
+        ``# r0.9.3 - rc``. The full heading text is captured verbatim."""
+        content = (
+            "# r0.9.3 - rc\n\n"
+            "This pre-release contains the definition\n"
+        )
+        entries = ChangelogGenerator._extract_toc_entries(content)
+        assert entries[0]["heading"] == "r0.9.3 - rc"
+
     # --- TOC formatting ---
 
     def test_format_toc_empty_entries(self):
@@ -598,6 +686,26 @@ def test_format_toc_mixed_entries_order(self):
         assert "- [r4.1](#r41)" in result
         assert result.index("r4.2") < result.index("r4.1")
 
+    def test_format_toc_suffixed_heading_uses_full_text(self):
+        """Legacy suffixed headings render with both link text and anchor
+        derived from the full heading text — matching GitHub's rendered
+        anchor for that heading."""
+        entries = [
+            {"heading": "r2.2 - Fall25 public release", "is_public": True}
+        ]
+        result = ChangelogGenerator._format_toc(entries)
+        assert (
+            "- **[r2.2 - Fall25 public release](#r22---fall25-public-release)**"
+            in result
+        )
+
+    def test_format_toc_three_part_tag_preserved(self):
+        """Three-part legacy tags keep the full tag in both link text and
+        anchor, so SED-style ``r0.9.3 - rc`` renders faithfully."""
+        entries = [{"heading": "r0.9.3 - rc", "is_public": False}]
+        result = ChangelogGenerator._format_toc(entries)
+        assert "- [r0.9.3 - rc](#r093---rc)" in result
+
     # --- File integration ---
 
     def test_new_file_contains_toc(self, generator, tmp_path):

From b7caef2459d92c230dac3211a6146af9b2e81008 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 22 Apr 2026 10:02:12 +0200
Subject: [PATCH 113/157] fix(validation): gate P-020 off Release Review PRs

P-020 (check-cloudevent-via-ref) fires on bundled API definitions in
Release Review PRs, where inline CloudEvent is the expected output of
bundling rather than drift. Observed on ReleaseTest #88.

Add is_release_review_pr: false to P-020 applicability so the post-filter
skips the rule on Release Review PRs. Source-file drift detection is
unaffected.

Fixes #199
---
 validation/rules/python-rules.yaml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
index 1a184451..60a81e25 100644
--- a/validation/rules/python-rules.yaml
+++ b/validation/rules/python-rules.yaml
@@ -218,12 +218,15 @@
 # `properties`. The $ref-only and `allOf: [{$ref: ...}]` forms have no
 # top-level `properties` and are not flagged.
 # Gated to Commonalities >=r4.2, where CAMARA_event_common.yaml exists.
+# Skipped on Release Review PRs, where inline CloudEvent is the expected
+# output of bundling rather than drift.
 - id: P-020
   engine: python
   engine_rule: check-cloudevent-via-ref
   applicability:
     api_pattern: [explicit-subscription, implicit-subscription]
     commonalities_release: ">=r4.2"
+    is_release_review_pr: false
   conditional_level:
     default: warn
   hint: >-

From 93d38bc80932e66312647418794aae7122552c9c Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 22 Apr 2026 16:38:33 +0200
Subject: [PATCH 114/157] fix(workflows): clean PR body markdown and tighten
 slash-command matching

- Sync PR bodies (post-release and Commonalities common-sync) are
  now assembled via heredoc into a PR_BODY variable, so YAML
  run-block indentation no longer leaks into rendered markdown.
  Previously the 10-space common indent surfaced as 4+ leading
  spaces on continuation lines, which GitHub rendered as code
  blocks instead of paragraphs, bullets, and headings.

- Slash-command detection now uses anchored word-boundary regex
  (/^/cmd(?:\s|$)/) instead of body.startsWith(). Stops
  /create-snapshotextra and similar from matching /create-snapshot.
---
 .../workflows/release-automation-reusable.yml | 55 ++++++++++++-------
 1 file changed, 34 insertions(+), 21 deletions(-)

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index 3566c550..c9bef433 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -106,31 +106,33 @@ jobs:
                 return;
               }
 
-              // Parse slash commands
-              if (body.startsWith('/create-snapshot')) {
+              // Parse slash commands. Each pattern anchors at start and
+              // requires either whitespace or end-of-string after the
+              // command so /create-snapshotextra does not match /create-snapshot.
+              if (/^\/create-snapshot(?:\s|$)/.test(body)) {
                 triggerType = 'slash_command';
                 command = 'create-snapshot';
-                commandArgs = body.replace('/create-snapshot', '').trim();
+                commandArgs = body.replace(/^\/create-snapshot/, '').trim();
                 shouldContinue = 'true';
-              } else if (body.startsWith('/discard-snapshot')) {
+              } else if (/^\/discard-snapshot(?:\s|$)/.test(body)) {
                 triggerType = 'slash_command';
                 command = 'discard-snapshot';
-                commandArgs = body.replace('/discard-snapshot', '').trim();
+                commandArgs = body.replace(/^\/discard-snapshot/, '').trim();
                 shouldContinue = 'true';
-              } else if (body.startsWith('/delete-draft')) {
+              } else if (/^\/delete-draft(?:\s|$)/.test(body)) {
                 triggerType = 'slash_command';
                 command = 'delete-draft';
-                commandArgs = body.replace('/delete-draft', '').trim();
+                commandArgs = body.replace(/^\/delete-draft/, '').trim();
                 shouldContinue = 'true';
-              } else if (body.startsWith('/sync-issue')) {
+              } else if (/^\/sync-issue(?:\s|$)/.test(body)) {
                 triggerType = 'slash_command';
                 command = 'sync-issue';
-                commandArgs = body.replace('/sync-issue', '').trim();
+                commandArgs = body.replace(/^\/sync-issue/, '').trim();
                 shouldContinue = 'true';
-              } else if (body.startsWith('/publish-release')) {
+              } else if (/^\/publish-release(?:\s|$)/.test(body)) {
                 triggerType = 'slash_command';
                 command = 'publish-release';
-                commandArgs = body.replace('/publish-release', '').trim();
+                commandArgs = body.replace(/^\/publish-release/, '').trim();
                 // Parse --confirm  argument
                 const confirmMatch = commandArgs.match(/--confirm\s+(\S+)/);
                 confirmTag = confirmMatch ? confirmMatch[1] : '';
@@ -1686,16 +1688,22 @@ jobs:
           git commit -m "chore: post-release sync for ${RELEASE_TAG}"
           git push origin "$SYNC_BRANCH"
 
-          # Create PR — capture failure as sync_status=failed
-          if PR_URL=$(gh pr create \
-            --title "Release Automation: Post-release sync (${RELEASE_TAG})" \
-            --body "Automated post-release sync PR for ${RELEASE_TAG}.
+          # Create PR — capture failure as sync_status=failed.
+          # Body assembled via heredoc so the markdown has no
+          # YAML-run-block indent leaking into the rendered PR description.
+          PR_BODY=$(cat <
Date: Wed, 22 Apr 2026 17:11:18 +0200
Subject: [PATCH 115/157] fix(rules): accept bare 'wip' in feature files;
 always-error on un-transformable form (fixes #211)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

T1b pattern relaxed from 'vwip' to 'v?wip' so both 'wip' and 'vwip'
are normalized to 'v{api_version}' at snapshot time. Bare 'wip' is a
style variation parallel to '0.1.0' vs 'v0.1.0' in info.version.

P-007 parser (_FEATURE_VERSION_RE) is aligned with T1b's matching
surface: leading separator is now any whitespace so both the
comma-and-space form ('Feature: X, vwip') and the space-only form
('Feature: X vwip') are recognized. This incidentally closes a
latent gap where the comma-only parser under-detected space-only
feature lines.

The 'no version token at all' case (Feature line has no wip, vwip,
or v{version}) is now emitted under a distinct engine_rule
'check-test-file-feature-line-untransformable' mapped to P-024 with
default: error. This prevents the severity from being masked by
P-007's conditional_level (hint/warn) — the failure mode that
masked a Simple Edge Discovery bug at pre-snapshot validation.

P-007 and P-024 are mutually exclusive per the check's code path
(continue after None branch), so no finding cascades.
---
 .../config/transformations.yaml               | 15 +++---
 .../tests/test_mechanical_transformer.py      | 37 +++++++++++--
 .../engines/python_checks/test_checks.py      | 36 ++++++++++---
 validation/rules/python-rules.yaml            | 15 ++++++
 validation/tests/test_python_checks_test.py   | 54 ++++++++++++++++++-
 .../tests/test_rule_metadata_integrity.py     |  2 +-
 6 files changed, 139 insertions(+), 20 deletions(-)

diff --git a/release_automation/config/transformations.yaml b/release_automation/config/transformations.yaml
index 89823737..9ec453c7 100644
--- a/release_automation/config/transformations.yaml
+++ b/release_automation/config/transformations.yaml
@@ -49,15 +49,18 @@ transformations:
     replacement: "/{url_version}"
 
   # T1b: Test definition API version in Feature line
-  # Replaces "vwip" with "v{api_version}" (e.g., "v1.1.0") in Feature declarations.
-  # Matches both the comma-separated form ("Feature: , vwip - …") and the
-  # space-only form ("Feature:  vwip - …"); the space before "vwip" is the
-  # single common anchor across CAMARA test-file conventions.
+  # Replaces "wip" or "vwip" with "v{api_version}" (e.g., "v1.1.0") in
+  # Feature declarations. Matches both the comma-separated form
+  # ("Feature: , vwip - …") and the space-only form
+  # ("Feature:  vwip - …"); the space before the version token
+  # is the single common anchor across CAMARA test-file conventions.
+  # The optional `v?` accepts bare `wip` as a style variation (parallel
+  # to `0.1.0` vs `v0.1.0` in info.version).
   - name: test_def_api_version
-    description: Replace vwip in test definition Feature line (comma- or space-separated)
+    description: Replace wip/vwip in test definition Feature line (comma- or space-separated)
     type: regex
     file_pattern: "code/Test_definitions/*.feature"
-    pattern: "(Feature: .*?) vwip\\b"
+    pattern: "(Feature: .*?) v?wip\\b"
     replacement: "\\g<1> v{api_version}"
 
   # T3: Commonalities reference in x-camara-commonalities
diff --git a/release_automation/tests/test_mechanical_transformer.py b/release_automation/tests/test_mechanical_transformer.py
index 37c907fb..5da09871 100644
--- a/release_automation/tests/test_mechanical_transformer.py
+++ b/release_automation/tests/test_mechanical_transformer.py
@@ -370,10 +370,10 @@ class TestFeatureLineReplacement:
 
     T1B_RULE = TransformationRule(
         name="test_def_api_version",
-        description="Replace vwip in test definition Feature line",
+        description="Replace wip/vwip in test definition Feature line",
         type=TransformationType.REGEX,
         file_pattern="code/Test_definitions/*.feature",
-        pattern=r"(Feature: .*?) vwip\b",
+        pattern=r"(Feature: .*?) v?wip\b",
         replacement=r"\g<1> v{api_version}",
     )
 
@@ -429,13 +429,44 @@ def test_already_versioned_unchanged(self, transformer, context):
         assert content == original
 
     def test_no_vwip_unchanged(self, transformer, context):
-        """A Feature line with no `vwip` anywhere is a no-op."""
+        """A Feature line with no `wip` or `vwip` anywhere is a no-op."""
         original = "Feature: CAMARA Quality On Demand - Operation createSession\n"
         result, content, feature_path = self._run(transformer, context, original)
         assert result.success
         assert feature_path not in result.files_modified
         assert content == original
 
+    def test_bare_wip_comma_form(self, transformer, context):
+        """Bare `wip` (no leading v) on a comma-separated Feature line.
+
+        Observed on Simple Edge Discovery — treated as a style variation
+        parallel to `0.1.0` vs `v0.1.0` in info.version.
+        """
+        result, content, feature_path = self._run(
+            transformer,
+            context,
+            "Feature: CAMARA Simple Edge Discovery, wip - Operation readClosestEdgeCloudZone\n",
+        )
+        assert result.success
+        assert feature_path in result.files_modified
+        assert content == (
+            "Feature: CAMARA Simple Edge Discovery, v3.2.0-rc.2 - "
+            "Operation readClosestEdgeCloudZone\n"
+        )
+
+    def test_bare_wip_space_form(self, transformer, context):
+        """Bare `wip` on a space-only Feature line."""
+        result, content, feature_path = self._run(
+            transformer,
+            context,
+            "Feature: CAMARA Quality On Demand wip - Operation createSession\n",
+        )
+        assert result.success
+        assert feature_path in result.files_modified
+        assert content == (
+            "Feature: CAMARA Quality On Demand v3.2.0-rc.2 - Operation createSession\n"
+        )
+
 
 class TestYamlPathTransformation:
     """Tests for YAML path transformations."""
diff --git a/validation/engines/python_checks/test_checks.py b/validation/engines/python_checks/test_checks.py
index ab790c64..f17f4ccc 100644
--- a/validation/engines/python_checks/test_checks.py
+++ b/validation/engines/python_checks/test_checks.py
@@ -106,14 +106,20 @@ def check_test_files_exist(
     ]
 
 
-# Regex to extract version from CAMARA Feature line.
-# Matches ", v{segment}" where segment runs until " - " or end of line.
-# On main: "vwip".  On release: "v2.2.0-alpha.5" (full semver with v).
+# Regex to extract version from CAMARA Feature line. Aligned with the T1b
+# transformation pattern in release_automation/config/transformations.yaml
+# so that any line T1b can transform is also recognized here. The leading
+# ``\s`` separator accepts both the comma-and-space form ("Feature: X, vwip")
+# and the space-only form ("Feature: X vwip"). The captured token is
+# ``wip`` / ``vwip`` (style variation on main/maintenance) or ``v{semver}``
+# (release branches).
 # Examples:
 #   "Feature: CAMARA QoD API, vwip - Operation deleteSession"       → "vwip"
+#   "Feature: CAMARA QoD API, wip - Operation deleteSession"        → "wip"
+#   "Feature: CAMARA QoD API vwip - Operation deleteSession"        → "vwip"
 #   "Feature: CAMARA QoD API, v2.2.0-alpha.5 - Operation create"    → "v2.2.0-alpha.5"
 #   "Feature: CAMARA QoD API, v1.0.0"                               → "v1.0.0"
-_FEATURE_VERSION_RE = re.compile(r",\s*(v\S+?)(?:\s+-\s| *$)")
+_FEATURE_VERSION_RE = re.compile(r"\s(v?wip|v\S+?)(?:\s+-\s|\s*$)")
 
 
 def _extract_feature_version(file_path: Path) -> Optional[str]:
@@ -181,18 +187,31 @@ def check_test_file_version(
         # No test files found — check_test_files_exist reports this.
         return []
 
+    # Compare with leading-v stripped so bare "wip" and "vwip" are treated
+    # as equivalent on main/maintenance (a style variation, parallel to
+    # "0.1.0" vs "v0.1.0" in info.version). Release branches always carry
+    # T1b's "v{api_version}" output, so the normalized comparison still
+    # enforces an exact match there.
+    expected_token = expected_segment.lower().removeprefix("v")
+
     findings: List[dict] = []
     for test_file in matching:
         actual_version = _extract_feature_version(test_file)
 
         if actual_version is None:
+            # No wip/vwip/v* token on the Feature line — T1b has nothing to
+            # replace and a release cut would carry the literal text into
+            # the snapshot. Emitted under a distinct rule ID (P-024) so its
+            # severity cannot be masked by P-007's conditional_level.
             findings.append(
                 make_finding(
-                    engine_rule="check-test-file-version",
+                    engine_rule="check-test-file-feature-line-untransformable",
                     level="error",
                     message=(
-                        f"Test file '{test_file.name}' has no version in its "
-                        f"Feature line (expected '{expected_segment}')"
+                        f"Test file '{test_file.name}' Feature line has no "
+                        f"'wip', 'vwip', or 'v{{version}}' token — nothing "
+                        f"for snapshot transformation to replace "
+                        f"(expected '{expected_segment}')"
                     ),
                     path=f"{_TEST_DIR}/{test_file.name}",
                     line=1,
@@ -201,7 +220,8 @@ def check_test_file_version(
             )
             continue
 
-        if actual_version.lower() != expected_segment.lower():
+        actual_token = actual_version.lower().removeprefix("v")
+        if actual_token != expected_token:
             findings.append(
                 make_finding(
                     engine_rule="check-test-file-version",
diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
index 60a81e25..aa6a3417 100644
--- a/validation/rules/python-rules.yaml
+++ b/validation/rules/python-rules.yaml
@@ -286,3 +286,18 @@
     release_plan_changed: true
   conditional_level:
     default: error
+
+# P-024: check-test-file-feature-line-untransformable
+# Fires when the Feature line of a test file has no `wip`, `vwip`, or
+# `v{version}` token — nothing for snapshot transformation T1b to
+# replace, so a release cut would carry the literal text into the
+# snapshot. Always error: distinct from P-007 (which covers the
+# "present but mismatched" case under conditional_level hint/warn).
+# P-007 and P-024 are mutually exclusive per the check's code path
+# (`continue` after the None branch prevents both from firing on the
+# same file).
+- id: P-024
+  engine: python
+  engine_rule: check-test-file-feature-line-untransformable
+  conditional_level:
+    default: error
diff --git a/validation/tests/test_python_checks_test.py b/validation/tests/test_python_checks_test.py
index 57458b3e..23bc21ad 100644
--- a/validation/tests/test_python_checks_test.py
+++ b/validation/tests/test_python_checks_test.py
@@ -153,6 +153,26 @@ def test_main_vwip_passes(self, tmp_path: Path):
         ctx = _make_context("qod", branch_type="main")
         assert check_test_file_version(tmp_path, ctx) == []
 
+    def test_main_bare_wip_passes(self, tmp_path: Path):
+        """Bare 'wip' (no leading v) is a style variation — accepted on main."""
+        test_dir = _make_test_dir(tmp_path)
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: CAMARA QoD API, wip - Operation createSession",
+        )
+        ctx = _make_context("qod", branch_type="main")
+        assert check_test_file_version(tmp_path, ctx) == []
+
+    def test_main_space_only_vwip_passes(self, tmp_path: Path):
+        """Space-only Feature line (no comma) is a legacy style — accepted."""
+        test_dir = _make_test_dir(tmp_path)
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: CAMARA QoD API vwip - Operation createSession",
+        )
+        ctx = _make_context("qod", branch_type="main")
+        assert check_test_file_version(tmp_path, ctx) == []
+
     def test_main_real_version_fails(self, tmp_path: Path):
         """On main, v1 is wrong even when target_api_version is 1.0.0."""
         test_dir = _make_test_dir(tmp_path)
@@ -163,6 +183,7 @@ def test_main_real_version_fails(self, tmp_path: Path):
         ctx = _make_context("qod", version="1.0.0", branch_type="main")
         findings = check_test_file_version(tmp_path, ctx)
         assert len(findings) == 1
+        assert findings[0]["engine_rule"] == "check-test-file-version"
         assert "v1" in findings[0]["message"]
         assert "vwip" in findings[0]["message"]
 
@@ -177,6 +198,15 @@ def test_maintenance_vwip_passes(self, tmp_path: Path):
         ctx = _make_context("qod", branch_type="maintenance")
         assert check_test_file_version(tmp_path, ctx) == []
 
+    def test_maintenance_bare_wip_passes(self, tmp_path: Path):
+        test_dir = _make_test_dir(tmp_path)
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: CAMARA QoD API, wip - Operation createSession",
+        )
+        ctx = _make_context("qod", branch_type="maintenance")
+        assert check_test_file_version(tmp_path, ctx) == []
+
     # --- release branch: must match v{api_version} from T1b transformer ---
 
     def test_release_matching_version(self, tmp_path: Path):
@@ -244,6 +274,7 @@ def test_feature_branch_skipped(self, tmp_path: Path):
     # --- common edge cases ---
 
     def test_no_version_in_feature_line(self, tmp_path: Path):
+        """Feature line with no wip/vwip/v* token → untransformable (P-024)."""
         test_dir = _make_test_dir(tmp_path)
         self._write_feature(
             test_dir / "qod.feature",
@@ -252,7 +283,24 @@ def test_no_version_in_feature_line(self, tmp_path: Path):
         ctx = _make_context("qod", branch_type="main")
         findings = check_test_file_version(tmp_path, ctx)
         assert len(findings) == 1
-        assert "no version" in findings[0]["message"]
+        assert findings[0]["engine_rule"] == (
+            "check-test-file-feature-line-untransformable"
+        )
+        assert findings[0]["level"] == "error"
+
+    def test_garbage_token_is_untransformable(self, tmp_path: Path):
+        """A non-version token like 'xyz' is untransformable, not a mismatch."""
+        test_dir = _make_test_dir(tmp_path)
+        self._write_feature(
+            test_dir / "qod.feature",
+            "Feature: CAMARA QoD API, xyz - Operation createSession",
+        )
+        ctx = _make_context("qod", branch_type="main")
+        findings = check_test_file_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["engine_rule"] == (
+            "check-test-file-feature-line-untransformable"
+        )
 
     def test_empty_file(self, tmp_path: Path):
         test_dir = _make_test_dir(tmp_path)
@@ -260,7 +308,9 @@ def test_empty_file(self, tmp_path: Path):
         ctx = _make_context("qod", branch_type="main")
         findings = check_test_file_version(tmp_path, ctx)
         assert len(findings) == 1
-        assert "no version" in findings[0]["message"]
+        assert findings[0]["engine_rule"] == (
+            "check-test-file-feature-line-untransformable"
+        )
 
     def test_no_test_dir(self, tmp_path: Path):
         ctx = _make_context("qod")
diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py
index f305ea6c..b67e3812 100644
--- a/validation/tests/test_rule_metadata_integrity.py
+++ b/validation/tests/test_rule_metadata_integrity.py
@@ -77,7 +77,7 @@ def test_expected_rule_counts(self, all_rules):
         counts = {}
         for r in all_rules:
             counts[r.engine] = counts.get(r.engine, 0) + 1
-        assert counts["python"] == 23
+        assert counts["python"] == 24
         assert counts["spectral"] == 84
         assert counts["gherkin"] == 25
         assert counts["yamllint"] == 13

From df5512c68d4d24ffad25e6ba34ae0a1ca41988d0 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 22 Apr 2026 18:10:42 +0200
Subject: [PATCH 116/157] feat(rules): P-025 validate server-URL version in
 feature file steps (fixes #212)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

P-004 (check-server-url-version) covers servers[].url in the API YAML.
P-025 extends the same version-segment discipline to URL paths inside
code/Test_definitions/*.feature scenario steps.

Snapshot transformation T2b rewrites /vwip to /{url_version} but only
matches the strict /vwip form. A bare /wip (or a wrong /v{version})
segment in a scenario step survives into the snapshot. Unlike the
Feature-line wip vs vwip case in P-007 (a style variation), URL-path
versions have no style-variation excuse — the leading v is required.

API-scoped: reads info.version from the API spec, derives the expected
segment via build_version_segment(), and scans every line of the
matching .feature file for CAMARA-shaped URL paths. No branch guard —
main (info.version=wip -> expected vwip), release, and maintenance all
flow through the same comparison.
---
 validation/engines/python_checks/__init__.py  |   2 +
 .../engines/python_checks/version_checks.py   | 100 +++++++++++
 validation/rules/python-rules.yaml            |  13 ++
 .../tests/test_python_checks_version.py       | 166 ++++++++++++++++++
 .../tests/test_rule_metadata_integrity.py     |   2 +-
 5 files changed, 282 insertions(+), 1 deletion(-)

diff --git a/validation/engines/python_checks/__init__.py b/validation/engines/python_checks/__init__.py
index 01633e48..7dd970cc 100644
--- a/validation/engines/python_checks/__init__.py
+++ b/validation/engines/python_checks/__init__.py
@@ -31,6 +31,7 @@
     check_test_files_exist,
 )
 from .version_checks import (
+    check_feature_file_url_version,
     check_info_version_format,
     check_server_url_api_name,
     check_server_url_version,
@@ -47,6 +48,7 @@
     CheckDescriptor("check-server-url-api-name", CheckScope.API, check_server_url_api_name),
     CheckDescriptor("check-test-files-exist", CheckScope.API, check_test_files_exist),
     CheckDescriptor("check-test-file-version", CheckScope.API, check_test_file_version),
+    CheckDescriptor("check-feature-file-url-version", CheckScope.API, check_feature_file_url_version),
     CheckDescriptor("check-commonalities-version", CheckScope.API, check_commonalities_version),
     CheckDescriptor("check-subscription-filename", CheckScope.API, check_subscription_filename),
     CheckDescriptor("check-event-type-format", CheckScope.API, check_event_type_format),
diff --git a/validation/engines/python_checks/version_checks.py b/validation/engines/python_checks/version_checks.py
index 1a5f7374..1044f47d 100644
--- a/validation/engines/python_checks/version_checks.py
+++ b/validation/engines/python_checks/version_checks.py
@@ -13,6 +13,9 @@
 from validation.context import ValidationContext
 
 from ._types import load_yaml_safe, make_finding
+from .test_checks import _stem_matches_api
+
+_TEST_DIR = "code/Test_definitions"
 
 # Matches a semantic version (optionally with pre-release label).
 # Examples: "1.0.0", "0.2.0-alpha.2", "1.0.0-rc.1"
@@ -31,6 +34,19 @@
 # e.g. "{apiRoot}/quality-on-demand/v1" -> "quality-on-demand"
 _URL_API_NAME_RE = re.compile(r"/(?P[^/]+)/v[a-z0-9.]+/?$", re.IGNORECASE)
 
+# Matches a CAMARA-shaped version segment inside a scenario-step URL path:
+# an api-name segment (lowercase letters/digits/hyphens) followed by
+# either ``v{segment}`` or bare ``wip`` as the next segment. Used by P-025
+# to locate the version-bearing portion of a URL anywhere in a feature-
+# file line (not just at the end of the URL).
+# e.g. ``/quality-on-demand/vwip/sessions`` -> captured segment ``vwip``
+#      ``/qod/v1/sessions``                 -> captured ``v1``
+#      ``/device-status/wip/status``        -> captured ``wip``
+_STEP_URL_VERSION_RE = re.compile(
+    r"/[a-z0-9\-]+/(v[a-z0-9.]+|wip)(?=/|\s|$)",
+    re.IGNORECASE,
+)
+
 
 # ---------------------------------------------------------------------------
 # Version segment builder
@@ -230,6 +246,90 @@ def check_server_url_version(
     return findings
 
 
+def check_feature_file_url_version(
+    repo_path: Path, context: ValidationContext
+) -> List[dict]:
+    """Validate server URL version segments in feature-file scenario steps.
+
+    P-004 (``check_server_url_version``) covers ``servers[].url`` in the API
+    YAML.  This check extends the same version-segment discipline to URL
+    paths inside ``code/Test_definitions/*.feature`` scenario steps.
+
+    Rationale: snapshot transformation T2b rewrites ``/vwip`` to the
+    target version path (``/{url_version}``), but the rewrite only matches
+    the strict ``/vwip`` form.  A bare ``/wip`` (or a wrong ``/v{...}``
+    segment) in a scenario step survives into the snapshot.  Unlike the
+    Feature-line ``wip`` vs ``vwip`` case in P-007 (which is a style
+    variation), a URL-path version segment has no style-variation escape
+    hatch — the leading ``v`` is required.
+
+    Always error.  The expected segment is derived from the API spec's
+    ``info.version`` via :func:`build_version_segment`, so the check
+    naturally handles main (``info.version == 'wip'`` -> ``vwip``),
+    release, and maintenance branches without branch-type code.
+    """
+    api = context.apis[0]
+    spec_path = repo_path / api.spec_file
+
+    spec = load_yaml_safe(spec_path)
+    if spec is None:
+        return []
+
+    info_version = str(spec.get("info", {}).get("version", "")).strip()
+    if not info_version:
+        return []  # Caught by check_info_version_format.
+
+    expected_segment = build_version_segment(info_version)
+    if expected_segment is None:
+        return []  # Caught by check_info_version_format.
+
+    test_dir = repo_path / _TEST_DIR
+    if not test_dir.is_dir():
+        return []
+
+    matching = [
+        f for f in test_dir.iterdir()
+        if f.is_file()
+        and f.suffix == ".feature"
+        and _stem_matches_api(f.stem, api.api_name)
+    ]
+    if not matching:
+        return []
+
+    findings: List[dict] = []
+    expected_lower = expected_segment.lower()
+
+    for feature_file in matching:
+        try:
+            with open(feature_file, encoding="utf-8") as fh:
+                lines = fh.readlines()
+        except (OSError, UnicodeDecodeError):
+            continue
+
+        for line_number, line in enumerate(lines, start=1):
+            for match in _STEP_URL_VERSION_RE.finditer(line):
+                actual_segment = match.group(1)
+                if actual_segment.lower() == expected_lower:
+                    continue
+                findings.append(
+                    make_finding(
+                        engine_rule="check-feature-file-url-version",
+                        level="error",
+                        message=(
+                            f"Scenario-step URL version segment "
+                            f"'/{actual_segment}' does not match expected "
+                            f"'/{expected_segment}' (derived from "
+                            f"info.version '{info_version}')"
+                        ),
+                        path=f"{_TEST_DIR}/{feature_file.name}",
+                        line=line_number,
+                        api_name=api.api_name,
+                    )
+                )
+
+    return findings
+
+
 def check_server_url_api_name(
     repo_path: Path, context: ValidationContext
 ) -> List[dict]:
diff --git a/validation/rules/python-rules.yaml b/validation/rules/python-rules.yaml
index aa6a3417..0c64a596 100644
--- a/validation/rules/python-rules.yaml
+++ b/validation/rules/python-rules.yaml
@@ -301,3 +301,16 @@
   engine_rule: check-test-file-feature-line-untransformable
   conditional_level:
     default: error
+
+# P-025: check-feature-file-url-version
+# Validates that URL paths inside .feature scenario steps carry the
+# expected version segment derived from info.version. Sibling to P-004
+# (which covers servers[].url in API YAMLs). Unlike Feature-line `wip`
+# (covered by P-007), a URL path version has no style-variation escape
+# hatch: bare `/wip` is never valid because snapshot transformation
+# T2b rewrites only the strict `/vwip` form. Always error.
+- id: P-025
+  engine: python
+  engine_rule: check-feature-file-url-version
+  conditional_level:
+    default: error
diff --git a/validation/tests/test_python_checks_version.py b/validation/tests/test_python_checks_version.py
index 0b5eaabc..f4661b95 100644
--- a/validation/tests/test_python_checks_version.py
+++ b/validation/tests/test_python_checks_version.py
@@ -10,6 +10,7 @@
 from validation.context import ApiContext, ValidationContext
 from validation.engines.python_checks.version_checks import (
     build_version_segment,
+    check_feature_file_url_version,
     check_info_version_format,
     check_server_url_api_name,
     check_server_url_version,
@@ -346,3 +347,168 @@ def test_multiple_servers_one_mismatch(self, tmp_path: Path):
         findings = check_server_url_api_name(tmp_path, ctx)
         assert len(findings) == 1
         assert "wrong-name" in findings[0]["message"]
+
+
+# ---------------------------------------------------------------------------
+# TestCheckFeatureFileUrlVersion  (P-025)
+# ---------------------------------------------------------------------------
+
+
+def _write_feature(tmp_path: Path, name: str, body: str) -> Path:
+    test_dir = tmp_path / "code" / "Test_definitions"
+    test_dir.mkdir(parents=True, exist_ok=True)
+    feature_path = test_dir / name
+    feature_path.write_text(body)
+    return feature_path
+
+
+class TestCheckFeatureFileUrlVersion:
+    """Tests for check_feature_file_url_version — P-025.
+
+    On main, ``info.version == 'wip'`` -> expected ``vwip``.
+    On release, ``info.version`` is a semver -> expected derived via
+    :func:`build_version_segment` (e.g. ``1.0.0`` -> ``v1``).
+    """
+
+    def test_main_vwip_scenarios_pass(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "wip")
+        _write_feature(
+            tmp_path, "qod.feature",
+            "Feature: QoD, vwip\n"
+            "  Scenario: Create session\n"
+            "    When I send a POST to /quality-on-demand/vwip/sessions\n"
+            "    Then the status code is 201\n",
+        )
+        ctx = _make_context("qod", branch_type="main", version="wip")
+        assert check_feature_file_url_version(tmp_path, ctx) == []
+
+    def test_main_bare_wip_in_url_is_error(self, tmp_path: Path):
+        """Bare /wip in a scenario step has no style-variation excuse."""
+        _write_spec(tmp_path, "qod", "wip")
+        _write_feature(
+            tmp_path, "qod.feature",
+            "Feature: QoD, vwip\n"
+            "  Scenario: Create session\n"
+            "    When I send a POST to /quality-on-demand/wip/sessions\n",
+        )
+        ctx = _make_context("qod", branch_type="main", version="wip")
+        findings = check_feature_file_url_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert findings[0]["engine_rule"] == "check-feature-file-url-version"
+        assert findings[0]["level"] == "error"
+        assert "/wip" in findings[0]["message"]
+        assert "/vwip" in findings[0]["message"]
+        assert findings[0]["line"] == 3
+
+    def test_main_wrong_version_segment_is_error(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "wip")
+        _write_feature(
+            tmp_path, "qod.feature",
+            "Feature: QoD, vwip\n"
+            "  When I send a POST to /quality-on-demand/v1/sessions\n",
+        )
+        ctx = _make_context("qod", branch_type="main", version="wip")
+        findings = check_feature_file_url_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "/v1" in findings[0]["message"]
+        assert "/vwip" in findings[0]["message"]
+
+    def test_release_matching_version_passes(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "1.0.0")
+        _write_feature(
+            tmp_path, "qod.feature",
+            "Feature: QoD, v1.0.0\n"
+            "  When I send a POST to /quality-on-demand/v1/sessions\n",
+        )
+        ctx = _make_context("qod", branch_type="release", version="1.0.0")
+        assert check_feature_file_url_version(tmp_path, ctx) == []
+
+    def test_release_wrong_version_is_error(self, tmp_path: Path):
+        """/vwip survives into a release snapshot only if T2b failed —
+        treat it as error relative to the release target version."""
+        _write_spec(tmp_path, "qod", "1.0.0")
+        _write_feature(
+            tmp_path, "qod.feature",
+            "Feature: QoD, v1.0.0\n"
+            "  When I send a POST to /quality-on-demand/vwip/sessions\n",
+        )
+        ctx = _make_context("qod", branch_type="release", version="1.0.0")
+        findings = check_feature_file_url_version(tmp_path, ctx)
+        assert len(findings) == 1
+        assert "/vwip" in findings[0]["message"]
+        assert "/v1" in findings[0]["message"]
+
+    def test_initial_version_minor_segment(self, tmp_path: Path):
+        """info.version 0.3.0 -> v0.3 (initial-maturity mapping)."""
+        _write_spec(tmp_path, "qod", "0.3.0")
+        _write_feature(
+            tmp_path, "qod.feature",
+            "Feature: QoD, v0.3.0\n"
+            "  When I send a POST to /quality-on-demand/v0.3/sessions\n",
+        )
+        ctx = _make_context("qod", branch_type="release", version="0.3.0")
+        assert check_feature_file_url_version(tmp_path, ctx) == []
+
+    def test_feature_without_url_steps(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "wip")
+        _write_feature(
+            tmp_path, "qod.feature",
+            "Feature: QoD, vwip\n"
+            "  Scenario: plain prose only\n"
+            "    Given an authenticated user\n"
+            "    Then the service responds successfully\n",
+        )
+        ctx = _make_context("qod", branch_type="main", version="wip")
+        assert check_feature_file_url_version(tmp_path, ctx) == []
+
+    def test_url_without_version_segment_skipped(self, tmp_path: Path):
+        """A URL with no version/wip segment is silently ignored — scope
+        is URL-version validation, not presence checking."""
+        _write_spec(tmp_path, "qod", "wip")
+        _write_feature(
+            tmp_path, "qod.feature",
+            "Feature: QoD, vwip\n"
+            "  When I send a GET to /static/index.html\n",
+        )
+        ctx = _make_context("qod", branch_type="main", version="wip")
+        assert check_feature_file_url_version(tmp_path, ctx) == []
+
+    def test_multiple_lines_collect_all_findings(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "wip")
+        _write_feature(
+            tmp_path, "qod.feature",
+            "Feature: QoD, vwip\n"
+            "  When I send a POST to /quality-on-demand/v1/sessions\n"
+            "  And I send a GET to /quality-on-demand/wip/sessions/{id}\n",
+        )
+        ctx = _make_context("qod", branch_type="main", version="wip")
+        findings = check_feature_file_url_version(tmp_path, ctx)
+        assert len(findings) == 2
+        lines = {f["line"] for f in findings}
+        assert lines == {2, 3}
+
+    def test_spec_missing_returns_no_findings(self, tmp_path: Path):
+        """No spec file => silent skip (filename/presence checks report)."""
+        _write_feature(
+            tmp_path, "qod.feature",
+            "Feature: QoD, vwip\n"
+            "  When I send a POST to /quality-on-demand/wip/sessions\n",
+        )
+        ctx = _make_context("qod", branch_type="main", version="wip")
+        assert check_feature_file_url_version(tmp_path, ctx) == []
+
+    def test_test_dir_missing_returns_no_findings(self, tmp_path: Path):
+        _write_spec(tmp_path, "qod", "wip")
+        ctx = _make_context("qod", branch_type="main", version="wip")
+        assert check_feature_file_url_version(tmp_path, ctx) == []
+
+    def test_no_matching_feature_file(self, tmp_path: Path):
+        """Feature file for a different API => skip (stem doesn't match)."""
+        _write_spec(tmp_path, "qod", "wip")
+        _write_feature(
+            tmp_path, "other-api.feature",
+            "Feature: Other, vwip\n"
+            "  When I send a POST to /other-api/wip/resource\n",
+        )
+        ctx = _make_context("qod", branch_type="main", version="wip")
+        assert check_feature_file_url_version(tmp_path, ctx) == []
diff --git a/validation/tests/test_rule_metadata_integrity.py b/validation/tests/test_rule_metadata_integrity.py
index b67e3812..34df53a8 100644
--- a/validation/tests/test_rule_metadata_integrity.py
+++ b/validation/tests/test_rule_metadata_integrity.py
@@ -77,7 +77,7 @@ def test_expected_rule_counts(self, all_rules):
         counts = {}
         for r in all_rules:
             counts[r.engine] = counts.get(r.engine, 0) + 1
-        assert counts["python"] == 24
+        assert counts["python"] == 25
         assert counts["spectral"] == 84
         assert counts["gherkin"] == 25
         assert counts["yamllint"] == 13

From 61a2d30637114b710f8c1e8e21f51a27b276d8bd Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 22 Apr 2026 22:12:10 +0200
Subject: [PATCH 117/157] rename: regression-runner.yml ->
 validation-regression.yml
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Prep for sibling Release Automation Regression canary in the next
commit — the parallel naming groups both regression workflows together
in the Actions UI and distinguishes their surfaces (validation rules vs
release automation runtime). Script name (validation/scripts/regression_runner.py)
unchanged; it describes script behavior, not workflow identity.
---
 ...n-runner.yml => validation-regression.yml} | 20 +++++++++----------
 validation/docs/regression-testing.md         |  4 ++--
 2 files changed, 12 insertions(+), 12 deletions(-)
 rename .github/workflows/{regression-runner.yml => validation-regression.yml} (80%)

diff --git a/.github/workflows/regression-runner.yml b/.github/workflows/validation-regression.yml
similarity index 80%
rename from .github/workflows/regression-runner.yml
rename to .github/workflows/validation-regression.yml
index a89e0e78..917a13c7 100644
--- a/.github/workflows/regression-runner.yml
+++ b/.github/workflows/validation-regression.yml
@@ -1,4 +1,4 @@
-# CAMARA Validation Framework — Regression Runner (canary)
+# CAMARA Validation Framework — Validation Regression (canary)
 #
 # Auto-dispatches validation/scripts/regression_runner.py against
 # camaraproject/ReleaseTest on every push to validation-framework, so
@@ -16,7 +16,7 @@
 #
 # See validation/docs/regression-testing.md for the full picture.
 
-name: Regression Runner
+name: Validation Regression
 
 on:
   push:
@@ -26,11 +26,11 @@ on:
       - 'shared-actions/**'
       - 'tooling_lib/**'
       - '.github/workflows/validation.yml'
-      - '.github/workflows/regression-runner.yml'
+      - '.github/workflows/validation-regression.yml'
   workflow_dispatch:
 
 concurrency:
-  group: regression-runner-${{ github.ref }}
+  group: validation-regression-${{ github.ref }}
   cancel-in-progress: true
 
 permissions:
@@ -72,22 +72,22 @@ jobs:
           python3 validation/scripts/regression_runner.py \
             --repo camaraproject/ReleaseTest \
             --branch-filter 'regression/*' \
-            --summary-file regression-summary.md
+            --summary-file validation-regression-summary.md
 
       - name: Publish summary
         if: always()
         run: |
-          if [ -f regression-summary.md ]; then
-            cat regression-summary.md >> "$GITHUB_STEP_SUMMARY"
+          if [ -f validation-regression-summary.md ]; then
+            cat validation-regression-summary.md >> "$GITHUB_STEP_SUMMARY"
           else
-            echo "::warning::regression-summary.md not produced (runner likely failed before writing)"
+            echo "::warning::validation-regression-summary.md not produced (runner likely failed before writing)"
           fi
 
       - name: Upload summary artifact
         if: always()
         uses: actions/upload-artifact@v7
         with:
-          name: regression-runner-summary
-          path: regression-summary.md
+          name: validation-regression-summary
+          path: validation-regression-summary.md
           if-no-files-found: warn
           retention-days: 30
diff --git a/validation/docs/regression-testing.md b/validation/docs/regression-testing.md
index 4961faa1..d42f06a2 100644
--- a/validation/docs/regression-testing.md
+++ b/validation/docs/regression-testing.md
@@ -189,7 +189,7 @@ points at.
 The regression runner fires automatically on every push to
 `validation-framework` that touches `validation/**`, `shared-actions/**`,
 or the workflow itself. The workflow lives at
-[.github/workflows/regression-runner.yml](../../.github/workflows/regression-runner.yml)
+[.github/workflows/validation-regression.yml](../../.github/workflows/validation-regression.yml)
 on this same branch (so it only exists where it matters and does not
 run on `main`). Manual dispatch is available via the Actions UI for
 fix-then-verify cycles.
@@ -201,7 +201,7 @@ persisted PAT.
 
 Results surface in three places: (1) the workflow's own pass/fail
 status in the Actions tab on `camaraproject/tooling`, (2) the markdown
-summary on the run's summary page, and (3) the `regression-runner-summary`
+summary on the run's summary page, and (3) the `validation-regression-summary`
 artifact attached to each run (30-day retention).
 
 ### Verify all canary branches

From 9e5002f2c0408afb026ee38718ff5c8ec7393d17 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 22 Apr 2026 22:12:29 +0200
Subject: [PATCH 118/157] feat(ci): add Release Automation Regression canary
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

New workflow fires a /create-snapshot + /discard-snapshot round-trip on
camaraproject/ReleaseTest whenever pushes to validation-framework touch
RA-surface paths (release-automation-reusable.yml, release_automation/**,
shared-actions/**, tooling_lib/**). Catches runtime bugs that escape the
Validation Regression canary — classes demonstrated by tooling#197
(validate-command SyntaxError) and tooling#198 (tooling_lib missing
from sparse-checkout), both of which sat dormant for 3 days before a
manual test surfaced them.

Round-trip only; merging the Release Review PR and /publish-release
remain out of scope — full manual E2E still gates @v1-rc moves for
publish-side changes. Uses the existing camara-validation App token
(actions:write + issues:write + pull_requests:write), same pattern as
validation-regression.yml.
---
 .../release-automation-regression.yml         |  94 ++
 .../scripts/regression_runner.py              | 813 ++++++++++++++++++
 .../tests/test_regression_runner.py           | 583 +++++++++++++
 3 files changed, 1490 insertions(+)
 create mode 100644 .github/workflows/release-automation-regression.yml
 create mode 100644 release_automation/scripts/regression_runner.py
 create mode 100644 release_automation/tests/test_regression_runner.py

diff --git a/.github/workflows/release-automation-regression.yml b/.github/workflows/release-automation-regression.yml
new file mode 100644
index 00000000..4de04a5a
--- /dev/null
+++ b/.github/workflows/release-automation-regression.yml
@@ -0,0 +1,94 @@
+# CAMARA Release Automation — Regression (canary)
+#
+# Sibling to validation-regression.yml. Exercises the release-automation-reusable.yml
+# workflow on camaraproject/ReleaseTest via a /create-snapshot + /discard-snapshot
+# round-trip, catching runtime bugs in the RA workflow and shared actions
+# that the Validation Regression canary cannot reach.
+#
+# Round-trip only — never merges the Release Review PR, never creates a
+# draft build, never calls /publish-release. No real tags or releases
+# are produced. Full manual E2E remains the gate for publish-side changes
+# before @v1-rc moves.
+#
+# Cross-repo access is provided by a short-lived camara-validation
+# GitHub App installation token scoped to camaraproject/ReleaseTest.
+# Permissions used: issues:write (post slash-command comment),
+# actions:write (poll caller runs), pull_requests:write (verify Release
+# Review PR presence). The App has no contents:write — and none is needed.
+# The default GITHUB_TOKEN is only used for the initial checkout.
+
+name: Release Automation Regression
+
+on:
+  push:
+    branches: [validation-framework]
+    paths:
+      - '.github/workflows/release-automation-reusable.yml'
+      - '.github/workflows/release-automation-regression.yml'
+      - 'release_automation/**'
+      - 'shared-actions/**'
+      - 'tooling_lib/**'
+  workflow_dispatch:
+
+concurrency:
+  group: release-automation-regression-${{ github.ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  regression:
+    name: RA regression canary
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout tooling
+        uses: actions/checkout@v6
+        with:
+          persist-credentials: false
+
+      - name: Setup Python
+        uses: actions/setup-python@v6
+        with:
+          python-version: "3.11"
+
+      - name: Install runner dependencies
+        run: |
+          pip install --upgrade pip
+          pip install pyyaml
+
+      - name: Mint validation app token
+        id: app-token
+        uses: actions/create-github-app-token@v3
+        with:
+          client-id: ${{ vars.VALIDATION_APP_CLIENT_ID }}
+          private-key: ${{ secrets.VALIDATION_APP_PRIVATE_KEY }}
+          owner: camaraproject
+          repositories: ReleaseTest
+
+      - name: Run RA regression runner
+        env:
+          GH_TOKEN: ${{ steps.app-token.outputs.token }}
+        run: |
+          python3 release_automation/scripts/regression_runner.py \
+            --repo camaraproject/ReleaseTest \
+            --release-issue 90 \
+            --summary-file release-automation-regression-summary.md
+
+      - name: Publish summary
+        if: always()
+        run: |
+          if [ -f release-automation-regression-summary.md ]; then
+            cat release-automation-regression-summary.md >> "$GITHUB_STEP_SUMMARY"
+          else
+            echo "::warning::release-automation-regression-summary.md not produced (runner likely failed before writing)"
+          fi
+
+      - name: Upload summary artifact
+        if: always()
+        uses: actions/upload-artifact@v7
+        with:
+          name: release-automation-regression-summary
+          path: release-automation-regression-summary.md
+          if-no-files-found: warn
+          retention-days: 30
diff --git a/release_automation/scripts/regression_runner.py b/release_automation/scripts/regression_runner.py
new file mode 100644
index 00000000..64d380b1
--- /dev/null
+++ b/release_automation/scripts/regression_runner.py
@@ -0,0 +1,813 @@
+#!/usr/bin/env python3
+"""
+CAMARA Release Automation — Regression Runner
+
+Exercises the release-automation-reusable.yml workflow on a persistent
+test repository (default: camaraproject/ReleaseTest) via a round-trip
+/create-snapshot + /discard-snapshot pair. Catches the bug class that
+the validation regression canary cannot reach — runtime bugs in the RA
+workflow and its shared actions. See the sibling validation regression
+runner at validation/scripts/regression_runner.py.
+
+Usage:
+    python3 regression_runner.py --repo camaraproject/ReleaseTest \\
+        --release-issue 90 \\
+        [--summary-file release-automation-regression-summary.md]
+
+    # Dry-run (no comments posted, no runs polled):
+    python3 regression_runner.py --repo ... --release-issue 90 --dry-run
+
+Exit codes:
+    0  all phases PASS
+    1  verification failure (run concluded non-success or post-state mismatch)
+    2  infrastructure failure (gh error, timeout, state unsafe to proceed)
+
+No real releases, tags, or draft builds are produced. The round-trip only
+covers commands that are reversible — merging the Release Review PR or
+invoking /publish-release is explicitly out of scope.
+"""
+
+from __future__ import annotations
+
+import argparse
+import json
+import logging
+import subprocess
+import sys
+import time
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any
+
+# Package-relative imports so unit tests can patch at the right boundary.
+# The release_automation/ package is already on sys.path when invoked via
+# python3 release_automation/scripts/regression_runner.py (pytest uses
+# repo root and the package's __init__.py).
+_HERE = Path(__file__).resolve().parent
+if str(_HERE) not in sys.path:
+    sys.path.insert(0, str(_HERE))
+
+from config import (  # noqa: E402
+    LABEL_RELEASE_MGMT_BOT,
+    RELEASE_REVIEW_BRANCH_PREFIX,
+    SNAPSHOT_BRANCH_PREFIX,
+    STATE_PLANNED,
+    STATE_SNAPSHOT_ACTIVE,
+)
+
+
+logger = logging.getLogger("ra_regression_runner")
+
+
+# Label prefix used by release-automation-reusable.yml for issue state.
+# Deliberately hardcoded here rather than added to config.py — the value
+# is defined by the reusable workflow's state-update step, not by the
+# Python modules. If the workflow changes the prefix, update this line.
+# Source of truth: .github/workflows/release-automation-reusable.yml
+_STATE_LABEL_PREFIX = "release-state:"
+
+# Caller workflow filename on the target test repo. Each release-plan
+# repo copies this caller from the shared template.
+_RA_CALLER_WORKFLOW = "release-automation.yml"
+
+
+# ---------------------------------------------------------------------------
+# Errors + phase report
+# ---------------------------------------------------------------------------
+
+
+class InfrastructureError(RuntimeError):
+    """Raised for gh errors, missing preconditions, or timeouts.
+
+    Distinct from a verification failure (which is a regression in RA, not
+    infrastructure). Infrastructure errors map to exit code 2; verification
+    failures map to exit 1.
+    """
+
+
+@dataclass
+class PhaseReport:
+    """Per-phase PASS/FAIL record with human-readable detail."""
+
+    name: str
+    passed: bool = False
+    detail: str = ""
+    run_url: str | None = None
+    run_conclusion: str | None = None
+    extras: list[str] = field(default_factory=list)
+
+
+# ---------------------------------------------------------------------------
+# GitHub I/O helpers (duplicated from validation/scripts/regression_runner.py)
+# ---------------------------------------------------------------------------
+
+
+def gh(args: list[str], *, parse_json: bool = False) -> Any:
+    """Run `gh ` and return stdout (optionally JSON-parsed).
+
+    Raises InfrastructureError on non-zero exit. Stderr is captured and
+    included in the exception message for diagnosis.
+    """
+    cmd = ["gh", *args]
+    logger.debug("gh call: %s", " ".join(cmd))
+    try:
+        result = subprocess.run(
+            cmd, capture_output=True, text=True, check=True
+        )
+    except FileNotFoundError as exc:
+        raise InfrastructureError(
+            "gh CLI not found — install https://cli.github.com and run `gh auth login`"
+        ) from exc
+    except subprocess.CalledProcessError as exc:
+        raise InfrastructureError(
+            f"gh {' '.join(args)}: exit {exc.returncode}\n"
+            f"stderr: {exc.stderr.strip()}"
+        ) from exc
+    if parse_json:
+        try:
+            return json.loads(result.stdout)
+        except json.JSONDecodeError as exc:
+            raise InfrastructureError(
+                f"gh {' '.join(args)}: could not parse stdout as JSON: {exc}"
+            ) from exc
+    return result.stdout
+
+
+def _iso_to_dt(stamp: str) -> datetime:
+    """Parse a GitHub ISO-8601 timestamp (UTC) to a timezone-aware datetime."""
+    return datetime.strptime(stamp, "%Y-%m-%dT%H:%M:%SZ").replace(
+        tzinfo=timezone.utc
+    )
+
+
+def poll_run(
+    repo: str,
+    run_id: str,
+    *,
+    interval: int,
+    timeout: int,
+) -> str:
+    """Wait until *run_id* completes; return its conclusion string.
+
+    Raises InfrastructureError on timeout. Conclusion may be "success",
+    "failure", "cancelled", "neutral", "skipped", etc. — caller decides
+    what to treat as verification failure vs infrastructure failure.
+    """
+    deadline = time.monotonic() + timeout
+    while True:
+        data = gh(
+            [
+                "run", "view", run_id,
+                "--repo", repo,
+                "--json", "status,conclusion",
+            ],
+            parse_json=True,
+        )
+        status = data.get("status")
+        conclusion = data.get("conclusion") or ""
+        logger.debug("run %s status=%s conclusion=%s", run_id, status, conclusion)
+        if status == "completed":
+            return conclusion
+        if time.monotonic() >= deadline:
+            raise InfrastructureError(
+                f"run {run_id} did not complete within {timeout}s "
+                f"(last status={status})"
+            )
+        time.sleep(interval)
+
+
+# ---------------------------------------------------------------------------
+# Issue / branch / PR readers
+# ---------------------------------------------------------------------------
+
+
+def read_state_label(labels: list[dict[str, Any]]) -> str | None:
+    """Extract the release-state:* value from a list of label objects.
+
+    Returns the state string (e.g. "planned", "snapshot-active") or None
+    if no release-state:* label is present. Raises InfrastructureError if
+    more than one release-state:* label is found — that indicates workflow
+    corruption and must not be silently collapsed.
+    """
+    found: list[str] = []
+    for label in labels:
+        name = label.get("name") if isinstance(label, dict) else None
+        if isinstance(name, str) and name.startswith(_STATE_LABEL_PREFIX):
+            found.append(name[len(_STATE_LABEL_PREFIX):])
+    if len(found) > 1:
+        raise InfrastructureError(
+            f"Release Issue carries multiple {_STATE_LABEL_PREFIX}* labels: "
+            f"{sorted(found)}"
+        )
+    return found[0] if found else None
+
+
+def get_release_issue_state(repo: str, issue_number: int) -> str | None:
+    """Read the release-state:* label value on the given issue."""
+    data = gh(
+        [
+            "api", f"repos/{repo}/issues/{issue_number}",
+            "--jq", "{labels: .labels, state: .state}",
+        ],
+        parse_json=True,
+    )
+    if data.get("state") != "open":
+        raise InfrastructureError(
+            f"{repo}#{issue_number}: issue is not open (state={data.get('state')!r})"
+        )
+    return read_state_label(data.get("labels", []))
+
+
+def snapshot_id_from_branch(branch_name: str) -> str:
+    """Extract snapshot id from a release-snapshot/ or release-review/ branch name.
+
+    Example:
+        'release-snapshot/r1.2-abc1234' -> 'r1.2-abc1234'
+        'release-review/r1.2-abc1234' -> 'r1.2-abc1234'
+        'release-review/r1.2-abc1234-preserved' -> 'r1.2-abc1234'
+    """
+    for prefix in (SNAPSHOT_BRANCH_PREFIX, RELEASE_REVIEW_BRANCH_PREFIX):
+        if branch_name.startswith(prefix):
+            tail = branch_name[len(prefix):]
+            if tail.endswith("-preserved"):
+                tail = tail[: -len("-preserved")]
+            return tail
+    raise InfrastructureError(
+        f"not a snapshot/review branch name: {branch_name!r}"
+    )
+
+
+def find_snapshot_branch(repo: str) -> str | None:
+    """Return the name of the single active release-snapshot/* branch, or None.
+
+    Raises InfrastructureError if more than one snapshot branch exists —
+    that indicates prior-run corruption and must not be silently collapsed.
+    """
+    data = gh(
+        [
+            "api", f"repos/{repo}/branches",
+            "--paginate",
+            "--jq", ".[].name",
+        ]
+    )
+    names = [
+        line.strip()
+        for line in data.splitlines()
+        if line.strip().startswith(SNAPSHOT_BRANCH_PREFIX)
+    ]
+    if len(names) > 1:
+        raise InfrastructureError(
+            f"{repo}: multiple active snapshot branches: {names}"
+        )
+    return names[0] if names else None
+
+
+def branch_exists(repo: str, branch_name: str) -> bool:
+    """Return True iff *branch_name* currently exists on *repo*."""
+    try:
+        gh(
+            [
+                "api", f"repos/{repo}/branches/{branch_name}",
+                "--jq", ".name",
+            ]
+        )
+        return True
+    except InfrastructureError as exc:
+        # `gh api` returns non-zero with "HTTP 404" in stderr for missing
+        # branches. Any other error re-raises.
+        if "HTTP 404" in str(exc) or "Not Found" in str(exc):
+            return False
+        raise
+
+
+def release_review_pr_for_issue(repo: str, issue_number: int) -> int | None:
+    """Return the PR number of the Release Review PR referencing *issue_number*, or None.
+
+    The Release Review PR is the one whose head branch matches
+    release-review/* AND whose body references #. Since the
+    head-branch invariant is sufficient on a round-trip-tested repo, we
+    search on head branch only.
+    """
+    data = gh(
+        [
+            "pr", "list",
+            "--repo", repo,
+            "--state", "open",
+            "--json", "number,headRefName,title",
+            "--limit", "20",
+        ],
+        parse_json=True,
+    )
+    for pr in data:
+        head = pr.get("headRefName", "")
+        if head.startswith(RELEASE_REVIEW_BRANCH_PREFIX) and not head.endswith(
+            "-preserved"
+        ):
+            return pr.get("number")
+    return None
+
+
+# ---------------------------------------------------------------------------
+# Fire + run discovery
+# ---------------------------------------------------------------------------
+
+
+def post_issue_comment(repo: str, issue_number: int, body: str) -> None:
+    """Post *body* as a new comment on *issue_number* in *repo*."""
+    gh(
+        [
+            "issue", "comment", str(issue_number),
+            "--repo", repo,
+            "--body", body,
+        ]
+    )
+
+
+def find_recent_caller_run(
+    repo: str,
+    *,
+    workflow_file: str,
+    since: datetime,
+    attempts: int = 15,
+    interval: float = 2.0,
+) -> dict[str, Any]:
+    """Poll `gh run list` for an issue_comment-triggered run newer than *since*.
+
+    Returns the run dict (with databaseId, createdAt, url, status, conclusion)
+    once one is observed. Raises InfrastructureError on timeout.
+    """
+    for _ in range(attempts):
+        time.sleep(interval)
+        runs = gh(
+            [
+                "run", "list",
+                "--repo", repo,
+                "--workflow", workflow_file,
+                "--event", "issue_comment",
+                "--json", "databaseId,createdAt,status,conclusion,url",
+                "--limit", "10",
+            ],
+            parse_json=True,
+        )
+        candidates: list[dict[str, Any]] = []
+        for run in runs:
+            try:
+                created = _iso_to_dt(run["createdAt"])
+            except (KeyError, ValueError):
+                continue
+            if created >= since:
+                candidates.append(run)
+        if candidates:
+            # Newest first — take the one with the latest createdAt.
+            candidates.sort(key=lambda r: r["createdAt"], reverse=True)
+            run = candidates[0]
+            logger.info(
+                "found caller run id=%s (%s)",
+                run.get("databaseId"), run.get("url"),
+            )
+            return run
+    raise InfrastructureError(
+        f"{repo}: no {workflow_file} issue_comment run appeared within "
+        f"{attempts * interval:.0f}s since {since.isoformat()}"
+    )
+
+
+# ---------------------------------------------------------------------------
+# Phases
+# ---------------------------------------------------------------------------
+
+
+def phase_pre_check(repo: str, issue_number: int) -> PhaseReport:
+    """Phase 1 — confirm the target issue is in release-state:planned.
+
+    Fail-loudly if it is not. Any other state risks stomping on a real
+    release cycle (or a prior smoke run that left state dirty); recovery
+    is a manual /discard-snapshot by an operator.
+    """
+    report = PhaseReport(name="pre-check")
+    try:
+        state = get_release_issue_state(repo, issue_number)
+    except InfrastructureError as exc:
+        report.detail = f"could not read state: {exc}"
+        return report
+
+    if state is None:
+        report.detail = (
+            f"{repo}#{issue_number} has no {_STATE_LABEL_PREFIX}* label — "
+            f"unsafe to proceed"
+        )
+        return report
+
+    if state != STATE_PLANNED:
+        hint = ""
+        if state == STATE_SNAPSHOT_ACTIVE:
+            hint = (
+                " — a prior run may have left state dirty; manual "
+                "`/discard-snapshot` on the Release Issue is required to "
+                "recover before the next run"
+            )
+        report.detail = (
+            f"state is {state!r}, expected {STATE_PLANNED!r}{hint}"
+        )
+        return report
+
+    report.passed = True
+    report.detail = f"state={state!r} on {repo}#{issue_number}"
+    return report
+
+
+def phase_fire_create_snapshot(
+    repo: str,
+    issue_number: int,
+    *,
+    poll_timeout: int,
+    dry_run: bool,
+) -> tuple[PhaseReport, str | None]:
+    """Phase 2 — post /create-snapshot, discover the caller run, poll it.
+
+    Returns (report, run_id). run_id is None on dry-run or failure.
+    """
+    report = PhaseReport(name="fire /create-snapshot")
+    if dry_run:
+        report.passed = True
+        report.detail = f"DRY-RUN: would post /create-snapshot on {repo}#{issue_number}"
+        return report, None
+
+    marker = datetime.now(timezone.utc).replace(microsecond=0)
+    try:
+        post_issue_comment(repo, issue_number, "/create-snapshot")
+        logger.info("posted /create-snapshot on %s#%s; polling for run", repo, issue_number)
+        run = find_recent_caller_run(
+            repo,
+            workflow_file=_RA_CALLER_WORKFLOW,
+            since=marker,
+        )
+    except InfrastructureError as exc:
+        report.detail = f"could not fire /create-snapshot: {exc}"
+        return report, None
+
+    run_id = str(run["databaseId"])
+    report.run_url = run.get("url")
+
+    try:
+        conclusion = poll_run(
+            repo, run_id, interval=15, timeout=poll_timeout
+        )
+    except InfrastructureError as exc:
+        report.detail = f"run {run_id} polling error: {exc}"
+        return report, run_id
+
+    report.run_conclusion = conclusion
+    if conclusion == "success":
+        report.passed = True
+        report.detail = f"run completed with conclusion={conclusion!r}"
+    else:
+        report.detail = (
+            f"run concluded {conclusion!r} (expected 'success') — "
+            f"see run for details"
+        )
+    return report, run_id
+
+
+def phase_verify_post_create(repo: str, issue_number: int) -> tuple[PhaseReport, str | None]:
+    """Phase 3 — confirm the world looks like a successful /create-snapshot.
+
+    Requirements:
+    - issue state label == snapshot-active
+    - exactly one release-snapshot/* branch exists
+    - a Release Review PR exists (head branch starts with release-review/)
+
+    Returns (report, snapshot_id) — snapshot_id is needed by verify-post-discard
+    to confirm the rename to -preserved.
+    """
+    report = PhaseReport(name="verify post-create")
+    try:
+        state = get_release_issue_state(repo, issue_number)
+    except InfrastructureError as exc:
+        report.detail = f"could not read state: {exc}"
+        return report, None
+
+    checks: list[tuple[bool, str]] = []
+
+    state_ok = state == STATE_SNAPSHOT_ACTIVE
+    checks.append(
+        (state_ok, f"state={state!r} {'==' if state_ok else '!='} snapshot-active")
+    )
+
+    try:
+        snapshot_branch = find_snapshot_branch(repo)
+    except InfrastructureError as exc:
+        report.detail = f"snapshot branch lookup failed: {exc}"
+        return report, None
+    checks.append(
+        (snapshot_branch is not None, f"snapshot branch={snapshot_branch!r}")
+    )
+
+    try:
+        pr_number = release_review_pr_for_issue(repo, issue_number)
+    except InfrastructureError as exc:
+        report.detail = f"PR lookup failed: {exc}"
+        return report, None
+    checks.append(
+        (pr_number is not None, f"release review PR=#{pr_number}" if pr_number else "no release review PR")
+    )
+
+    report.extras = [msg for _, msg in checks]
+    report.passed = all(ok for ok, _ in checks)
+    report.detail = "; ".join(msg for _, msg in checks)
+
+    snapshot_id: str | None = None
+    if snapshot_branch:
+        try:
+            snapshot_id = snapshot_id_from_branch(snapshot_branch)
+        except InfrastructureError:
+            snapshot_id = None
+    return report, snapshot_id
+
+
+def phase_fire_discard_snapshot(
+    repo: str,
+    issue_number: int,
+    *,
+    poll_timeout: int,
+    dry_run: bool,
+) -> tuple[PhaseReport, str | None]:
+    """Phase 4 — post /discard-snapshot, discover the caller run, poll it."""
+    report = PhaseReport(name="fire /discard-snapshot")
+    if dry_run:
+        report.passed = True
+        report.detail = f"DRY-RUN: would post /discard-snapshot on {repo}#{issue_number}"
+        return report, None
+
+    marker = datetime.now(timezone.utc).replace(microsecond=0)
+    try:
+        post_issue_comment(repo, issue_number, "/discard-snapshot")
+        logger.info("posted /discard-snapshot on %s#%s; polling for run", repo, issue_number)
+        run = find_recent_caller_run(
+            repo,
+            workflow_file=_RA_CALLER_WORKFLOW,
+            since=marker,
+        )
+    except InfrastructureError as exc:
+        report.detail = f"could not fire /discard-snapshot: {exc}"
+        return report, None
+
+    run_id = str(run["databaseId"])
+    report.run_url = run.get("url")
+
+    try:
+        conclusion = poll_run(
+            repo, run_id, interval=15, timeout=poll_timeout
+        )
+    except InfrastructureError as exc:
+        report.detail = f"run {run_id} polling error: {exc}"
+        return report, run_id
+
+    report.run_conclusion = conclusion
+    if conclusion == "success":
+        report.passed = True
+        report.detail = f"run completed with conclusion={conclusion!r}"
+    else:
+        report.detail = (
+            f"run concluded {conclusion!r} (expected 'success') — "
+            f"see run for details"
+        )
+    return report, run_id
+
+
+def phase_verify_post_discard(
+    repo: str, issue_number: int, snapshot_id: str | None
+) -> PhaseReport:
+    """Phase 5 — confirm the world looks like a successful /discard-snapshot.
+
+    Requirements:
+    - issue state label == planned
+    - the prior release-snapshot/ branch is gone
+    - a release-review/-preserved branch exists
+    """
+    report = PhaseReport(name="verify post-discard")
+    try:
+        state = get_release_issue_state(repo, issue_number)
+    except InfrastructureError as exc:
+        report.detail = f"could not read state: {exc}"
+        return report
+
+    checks: list[tuple[bool, str]] = []
+
+    state_ok = state == STATE_PLANNED
+    checks.append(
+        (state_ok, f"state={state!r} {'==' if state_ok else '!='} planned")
+    )
+
+    if snapshot_id:
+        snap_branch = f"{SNAPSHOT_BRANCH_PREFIX}{snapshot_id}"
+        preserved_branch = f"{RELEASE_REVIEW_BRANCH_PREFIX}{snapshot_id}-preserved"
+        try:
+            snap_still = branch_exists(repo, snap_branch)
+            preserved = branch_exists(repo, preserved_branch)
+        except InfrastructureError as exc:
+            report.detail = f"branch existence check failed: {exc}"
+            return report
+        checks.append(
+            (not snap_still, f"snapshot branch {snap_branch!r} {'still exists' if snap_still else 'gone'}")
+        )
+        checks.append(
+            (preserved, f"preserved branch {preserved_branch!r} {'present' if preserved else 'missing'}")
+        )
+    else:
+        checks.append(
+            (False, "snapshot_id not captured in phase 3 — skipping branch checks")
+        )
+
+    report.extras = [msg for _, msg in checks]
+    report.passed = all(ok for ok, _ in checks)
+    report.detail = "; ".join(msg for _, msg in checks)
+    return report
+
+
+# ---------------------------------------------------------------------------
+# Reporting
+# ---------------------------------------------------------------------------
+
+
+def render_markdown(reports: list[PhaseReport], repo: str, issue_number: int) -> str:
+    """Render a phase-by-phase PASS/FAIL summary as markdown."""
+    passed = sum(1 for r in reports if r.passed)
+    total = len(reports)
+    lines: list[str] = []
+    lines.append(
+        f"## Release Automation Regression — {passed}/{total} phases PASS"
+    )
+    lines.append("")
+    lines.append(f"- target repo: `{repo}`")
+    lines.append(f"- release issue: #{issue_number}")
+    lines.append("")
+    lines.append("| Phase | Result | Detail |")
+    lines.append("|---|---|---|")
+    for report in reports:
+        status = "PASS" if report.passed else "FAIL"
+        detail = report.detail.replace("|", "\\|") if report.detail else "-"
+        lines.append(f"| {report.name} | {status} | {detail} |")
+
+    # Per-phase detail (run URLs, extras)
+    for report in reports:
+        if report.run_url is None and not report.extras:
+            continue
+        lines.append("")
+        lines.append(f"### {report.name}")
+        if report.run_url:
+            lines.append(f"- run: {report.run_url}")
+        if report.run_conclusion:
+            lines.append(f"- conclusion: `{report.run_conclusion}`")
+        for extra in report.extras:
+            lines.append(f"- {extra}")
+
+    return "\n".join(lines) + "\n"
+
+
+# ---------------------------------------------------------------------------
+# CLI
+# ---------------------------------------------------------------------------
+
+
+def _build_argparser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(
+        prog="regression_runner.py",
+        description=__doc__,
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+    )
+    parser.add_argument(
+        "--repo",
+        required=True,
+        help="owner/repo of the test repository (e.g. camaraproject/ReleaseTest)",
+    )
+    parser.add_argument(
+        "--release-issue",
+        type=int,
+        required=True,
+        help="issue number of the persistent Release Issue on --repo",
+    )
+    parser.add_argument(
+        "--summary-file",
+        type=Path,
+        help="write a markdown summary report to this path",
+    )
+    parser.add_argument(
+        "--poll-timeout",
+        type=int,
+        default=600,
+        help="max seconds to wait for each caller run to complete (default: %(default)s)",
+    )
+    parser.add_argument(
+        "--dry-run",
+        action="store_true",
+        help="do not post comments or poll runs; only exercise the pre-check",
+    )
+    parser.add_argument(
+        "-v", "--verbose",
+        action="store_true",
+        help="verbose logging",
+    )
+    return parser
+
+
+def _setup_logging(verbose: bool) -> None:
+    logging.basicConfig(
+        level=logging.DEBUG if verbose else logging.INFO,
+        format="%(asctime)s %(levelname)-5s %(message)s",
+        datefmt="%H:%M:%S",
+    )
+
+
+def run_phases(
+    repo: str,
+    issue_number: int,
+    *,
+    poll_timeout: int,
+    dry_run: bool,
+) -> list[PhaseReport]:
+    """Orchestrate all phases. Stop at the first fatal-to-continue failure."""
+    reports: list[PhaseReport] = []
+
+    pre = phase_pre_check(repo, issue_number)
+    reports.append(pre)
+    if not pre.passed:
+        return reports
+
+    create_report, _create_run_id = phase_fire_create_snapshot(
+        repo, issue_number,
+        poll_timeout=poll_timeout, dry_run=dry_run,
+    )
+    reports.append(create_report)
+    if dry_run:
+        logger.info("dry-run: skipping post-create verify, discard, post-discard verify")
+        return reports
+    if not create_report.passed:
+        # A failed /create-snapshot should not be followed by /discard-snapshot:
+        # if /create-snapshot failed before changing state, discard is invalid;
+        # if /create-snapshot failed after changing state, an operator needs
+        # to investigate before automation mutates further.
+        return reports
+
+    verify_create, snapshot_id = phase_verify_post_create(repo, issue_number)
+    reports.append(verify_create)
+
+    discard_report, _discard_run_id = phase_fire_discard_snapshot(
+        repo, issue_number,
+        poll_timeout=poll_timeout, dry_run=dry_run,
+    )
+    reports.append(discard_report)
+    if not discard_report.passed:
+        return reports
+
+    verify_discard = phase_verify_post_discard(repo, issue_number, snapshot_id)
+    reports.append(verify_discard)
+    return reports
+
+
+def main(argv: list[str] | None = None) -> int:
+    args = _build_argparser().parse_args(argv)
+    _setup_logging(args.verbose)
+
+    try:
+        reports = run_phases(
+            args.repo, args.release_issue,
+            poll_timeout=args.poll_timeout,
+            dry_run=args.dry_run,
+        )
+    except InfrastructureError as exc:
+        print(f"INFRA: {exc}", file=sys.stderr)
+        return 2
+
+    markdown = render_markdown(reports, args.repo, args.release_issue)
+    print(markdown)
+    if args.summary_file:
+        args.summary_file.write_text(markdown, encoding="utf-8")
+
+    all_passed = all(r.passed for r in reports)
+    passed = sum(1 for r in reports if r.passed)
+    total = len(reports)
+    print(
+        f"{'PASS' if all_passed else 'FAIL'}: {passed}/{total} phases",
+        file=sys.stderr,
+    )
+
+    # Distinguish infrastructure exit (2) from verification exit (1).
+    # Infrastructure exit is surfaced earlier via the InfrastructureError
+    # path above. If we got here but not all phases passed, it's either
+    # a verification failure (conclusion != success or state mismatch)
+    # OR an infrastructure problem captured inside a phase's detail (the
+    # phase put a "could not ..." message into report.detail). We treat
+    # the former as exit 1 and the latter as exit 2 by looking at the
+    # first failing phase's detail for the sentinel prefix "could not".
+    if all_passed:
+        return 0
+    for report in reports:
+        if not report.passed and report.detail.startswith("could not "):
+            return 2
+    return 1
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/release_automation/tests/test_regression_runner.py b/release_automation/tests/test_regression_runner.py
new file mode 100644
index 00000000..23c5d8b3
--- /dev/null
+++ b/release_automation/tests/test_regression_runner.py
@@ -0,0 +1,583 @@
+"""
+Unit tests for the Release Automation regression runner.
+
+Pure-logic coverage only — network and subprocess calls are mocked at the
+`gh()` boundary. These tests verify state-label parsing, run-discovery
+filtering, markdown rendering, phase decision matrix, and branch-name
+parsing. Integration behaviour is covered by CI staging (see
+private-dev-docs/validation-framework/prompts/prompt-project-session.md).
+"""
+
+from __future__ import annotations
+
+from datetime import datetime, timezone
+from unittest.mock import patch
+
+import pytest
+
+from release_automation.scripts.regression_runner import (
+    InfrastructureError,
+    PhaseReport,
+    _iso_to_dt,
+    find_recent_caller_run,
+    get_release_issue_state,
+    phase_pre_check,
+    phase_verify_post_create,
+    phase_verify_post_discard,
+    read_state_label,
+    render_markdown,
+    snapshot_id_from_branch,
+)
+
+
+# ---------------------------------------------------------------------------
+# read_state_label
+# ---------------------------------------------------------------------------
+
+
+class TestReadStateLabel:
+    def test_returns_planned(self):
+        labels = [
+            {"name": "release-issue"},
+            {"name": "release-state:planned"},
+        ]
+        assert read_state_label(labels) == "planned"
+
+    def test_returns_snapshot_active(self):
+        labels = [{"name": "release-state:snapshot-active"}]
+        assert read_state_label(labels) == "snapshot-active"
+
+    def test_returns_none_when_no_state_label(self):
+        labels = [{"name": "release-issue"}, {"name": "bug"}]
+        assert read_state_label(labels) is None
+
+    def test_returns_none_for_empty_list(self):
+        assert read_state_label([]) is None
+
+    def test_raises_on_multiple_state_labels(self):
+        labels = [
+            {"name": "release-state:planned"},
+            {"name": "release-state:snapshot-active"},
+        ]
+        with pytest.raises(InfrastructureError, match="multiple"):
+            read_state_label(labels)
+
+    def test_ignores_non_dict_entries(self):
+        labels = [None, "weird", {"name": "release-state:planned"}]
+        assert read_state_label(labels) == "planned"
+
+    def test_ignores_non_string_name(self):
+        labels = [{"name": 42}, {"name": "release-state:planned"}]
+        assert read_state_label(labels) == "planned"
+
+    def test_ignores_labels_without_name_key(self):
+        labels = [{"color": "red"}, {"name": "release-state:planned"}]
+        assert read_state_label(labels) == "planned"
+
+
+# ---------------------------------------------------------------------------
+# snapshot_id_from_branch
+# ---------------------------------------------------------------------------
+
+
+class TestSnapshotIdFromBranch:
+    def test_snapshot_branch(self):
+        assert snapshot_id_from_branch("release-snapshot/r1.2-abc1234") == "r1.2-abc1234"
+
+    def test_release_review_branch(self):
+        assert snapshot_id_from_branch("release-review/r1.2-abc1234") == "r1.2-abc1234"
+
+    def test_preserved_branch(self):
+        assert (
+            snapshot_id_from_branch("release-review/r1.2-abc1234-preserved")
+            == "r1.2-abc1234"
+        )
+
+    def test_rejects_unrelated_branch(self):
+        with pytest.raises(InfrastructureError, match="not a snapshot/review branch"):
+            snapshot_id_from_branch("main")
+
+    def test_rejects_feature_branch(self):
+        with pytest.raises(InfrastructureError):
+            snapshot_id_from_branch("feat/something")
+
+
+# ---------------------------------------------------------------------------
+# _iso_to_dt
+# ---------------------------------------------------------------------------
+
+
+class TestIsoToDt:
+    def test_parses_utc_timestamp(self):
+        result = _iso_to_dt("2026-04-22T15:30:00Z")
+        assert result == datetime(2026, 4, 22, 15, 30, 0, tzinfo=timezone.utc)
+        assert result.tzinfo is timezone.utc
+
+
+# ---------------------------------------------------------------------------
+# phase_pre_check — decision matrix
+# ---------------------------------------------------------------------------
+
+
+def _mock_issue_labels(labels: list[dict[str, str]], state: str = "open"):
+    """Helper that returns a dict mimicking `gh api issues/` output."""
+    return {"labels": labels, "state": state}
+
+
+class TestPhasePreCheck:
+    def test_pass_when_state_planned(self):
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=_mock_issue_labels([{"name": "release-state:planned"}]),
+        ):
+            report = phase_pre_check("camaraproject/ReleaseTest", 90)
+        assert report.passed is True
+        assert "planned" in report.detail
+
+    def test_fail_when_snapshot_active(self):
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=_mock_issue_labels(
+                [{"name": "release-state:snapshot-active"}]
+            ),
+        ):
+            report = phase_pre_check("camaraproject/ReleaseTest", 90)
+        assert report.passed is False
+        assert "snapshot-active" in report.detail
+        # The snapshot-active branch emits the manual-recovery hint.
+        assert "/discard-snapshot" in report.detail
+
+    def test_fail_when_draft_ready(self):
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=_mock_issue_labels(
+                [{"name": "release-state:draft-ready"}]
+            ),
+        ):
+            report = phase_pre_check("camaraproject/ReleaseTest", 90)
+        assert report.passed is False
+        assert "draft-ready" in report.detail
+        # Non-snapshot-active states do NOT get the discard hint (it would be wrong).
+        assert "/discard-snapshot" not in report.detail
+
+    def test_fail_when_published(self):
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=_mock_issue_labels(
+                [{"name": "release-state:published"}]
+            ),
+        ):
+            report = phase_pre_check("camaraproject/ReleaseTest", 90)
+        assert report.passed is False
+
+    def test_fail_when_no_state_label(self):
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=_mock_issue_labels([{"name": "other"}]),
+        ):
+            report = phase_pre_check("camaraproject/ReleaseTest", 90)
+        assert report.passed is False
+        assert "no release-state:" in report.detail
+
+    def test_fail_when_issue_closed(self):
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=_mock_issue_labels(
+                [{"name": "release-state:planned"}], state="closed",
+            ),
+        ):
+            report = phase_pre_check("camaraproject/ReleaseTest", 90)
+        assert report.passed is False
+        assert "could not read state" in report.detail
+        # Infrastructure-style message — has the "could not" sentinel that
+        # drives exit-code 2 classification in main().
+        assert report.detail.startswith("could not ")
+
+
+# ---------------------------------------------------------------------------
+# get_release_issue_state (integration through gh mock)
+# ---------------------------------------------------------------------------
+
+
+class TestGetReleaseIssueState:
+    def test_extracts_state(self):
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=_mock_issue_labels([{"name": "release-state:planned"}]),
+        ):
+            assert get_release_issue_state("o/r", 1) == "planned"
+
+    def test_returns_none_when_absent(self):
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=_mock_issue_labels([]),
+        ):
+            assert get_release_issue_state("o/r", 1) is None
+
+    def test_raises_when_not_open(self):
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=_mock_issue_labels([], state="closed"),
+        ):
+            with pytest.raises(InfrastructureError, match="not open"):
+                get_release_issue_state("o/r", 1)
+
+
+# ---------------------------------------------------------------------------
+# find_recent_caller_run — event filter + newest-after-marker selection
+# ---------------------------------------------------------------------------
+
+
+class TestFindRecentCallerRun:
+    def test_picks_newest_after_marker(self):
+        marker = datetime(2026, 4, 22, 10, 0, 0, tzinfo=timezone.utc)
+        runs = [
+            {
+                "databaseId": 1,
+                "createdAt": "2026-04-22T09:59:00Z",
+                "status": "completed",
+                "conclusion": "success",
+                "url": "https://x/1",
+            },
+            {
+                "databaseId": 2,
+                "createdAt": "2026-04-22T10:01:00Z",
+                "status": "in_progress",
+                "conclusion": None,
+                "url": "https://x/2",
+            },
+            {
+                "databaseId": 3,
+                "createdAt": "2026-04-22T10:02:00Z",
+                "status": "queued",
+                "conclusion": None,
+                "url": "https://x/3",
+            },
+        ]
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=runs,
+        ):
+            run = find_recent_caller_run(
+                "o/r",
+                workflow_file="release-automation.yml",
+                since=marker,
+                attempts=1,
+                interval=0.0,
+            )
+        # Newest (after marker) is run 3.
+        assert run["databaseId"] == 3
+
+    def test_ignores_runs_before_marker(self):
+        marker = datetime(2026, 4, 22, 10, 0, 0, tzinfo=timezone.utc)
+        runs = [
+            {
+                "databaseId": 1,
+                "createdAt": "2026-04-22T09:00:00Z",
+                "status": "completed",
+                "conclusion": "success",
+                "url": "https://x/1",
+            },
+        ]
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=runs,
+        ):
+            with pytest.raises(InfrastructureError, match="no .* run appeared"):
+                find_recent_caller_run(
+                    "o/r",
+                    workflow_file="release-automation.yml",
+                    since=marker,
+                    attempts=1,
+                    interval=0.0,
+                )
+
+    def test_ignores_malformed_timestamps(self):
+        marker = datetime(2026, 4, 22, 10, 0, 0, tzinfo=timezone.utc)
+        runs = [
+            {
+                "databaseId": 1,
+                "createdAt": "not-a-timestamp",
+                "status": "queued",
+                "conclusion": None,
+                "url": "https://x/1",
+            },
+            {
+                "databaseId": 2,
+                "createdAt": "2026-04-22T10:05:00Z",
+                "status": "queued",
+                "conclusion": None,
+                "url": "https://x/2",
+            },
+        ]
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=runs,
+        ):
+            run = find_recent_caller_run(
+                "o/r",
+                workflow_file="release-automation.yml",
+                since=marker,
+                attempts=1,
+                interval=0.0,
+            )
+        assert run["databaseId"] == 2
+
+
+# ---------------------------------------------------------------------------
+# phase_verify_post_create
+# ---------------------------------------------------------------------------
+
+
+def _make_gh_router(responses):
+    """Dispatch gh(args) calls to successive responses keyed by first arg pattern.
+
+    Each response is (match_prefix, return_value). The router is called
+    with the full args list and returns the first matching response.
+    """
+    def router(args, parse_json=False):  # noqa: ARG001
+        for pattern, value in responses:
+            if pattern in " ".join(args):
+                return value
+        raise AssertionError(f"no mock configured for: {args}")
+    return router
+
+
+class TestPhaseVerifyPostCreate:
+    def test_pass_when_all_three_checks_ok(self):
+        issue_response = {
+            "labels": [{"name": "release-state:snapshot-active"}],
+            "state": "open",
+        }
+        branches_response = (
+            "main\nrelease-snapshot/r1.2-abc1234\nrelease-review/r1.2-abc1234\n"
+        )
+        pr_list_response = [
+            {
+                "number": 101,
+                "headRefName": "release-review/r1.2-abc1234",
+                "title": "Release Review: ...",
+            }
+        ]
+        responses = [
+            ("issues/90", issue_response),
+            ("/branches --paginate", branches_response),
+            ("pr list", pr_list_response),
+        ]
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            side_effect=_make_gh_router(responses),
+        ):
+            report, snapshot_id = phase_verify_post_create(
+                "camaraproject/ReleaseTest", 90
+            )
+        assert report.passed is True
+        assert snapshot_id == "r1.2-abc1234"
+        assert report.extras  # should carry the per-check messages
+
+    def test_fail_when_state_still_planned(self):
+        issue_response = {
+            "labels": [{"name": "release-state:planned"}],
+            "state": "open",
+        }
+        branches_response = "main\n"
+        pr_list_response: list[dict] = []
+        responses = [
+            ("issues/90", issue_response),
+            ("/branches --paginate", branches_response),
+            ("pr list", pr_list_response),
+        ]
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            side_effect=_make_gh_router(responses),
+        ):
+            report, snapshot_id = phase_verify_post_create(
+                "camaraproject/ReleaseTest", 90
+            )
+        assert report.passed is False
+        assert snapshot_id is None
+
+    def test_fail_when_multiple_snapshot_branches(self):
+        issue_response = {
+            "labels": [{"name": "release-state:snapshot-active"}],
+            "state": "open",
+        }
+        branches_response = (
+            "release-snapshot/r1.2-abc1234\nrelease-snapshot/r1.2-def5678\n"
+        )
+        responses = [
+            ("issues/90", issue_response),
+            ("/branches --paginate", branches_response),
+        ]
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            side_effect=_make_gh_router(responses),
+        ):
+            report, snapshot_id = phase_verify_post_create(
+                "camaraproject/ReleaseTest", 90
+            )
+        # Should surface an infra-style failure (caught by find_snapshot_branch).
+        assert report.passed is False
+        assert snapshot_id is None
+        assert report.detail.startswith("could not ") or "multiple" in report.detail
+
+
+# ---------------------------------------------------------------------------
+# phase_verify_post_discard
+# ---------------------------------------------------------------------------
+
+
+class TestPhaseVerifyPostDiscard:
+    def test_pass_when_state_planned_and_preserved_exists(self):
+        issue_response = {
+            "labels": [{"name": "release-state:planned"}],
+            "state": "open",
+        }
+        # branch_exists calls: first the snapshot (should 404), then the preserved (should exist)
+        def gh_mock(args, parse_json=False):  # noqa: ARG001
+            joined = " ".join(args)
+            if "issues/90" in joined:
+                return issue_response
+            if "branches/release-snapshot/r1.2-abc1234" in joined:
+                raise InfrastructureError("HTTP 404: Not Found")
+            if "branches/release-review/r1.2-abc1234-preserved" in joined:
+                return ".name is release-review/r1.2-abc1234-preserved"
+            raise AssertionError(f"unmocked: {args}")
+
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            side_effect=gh_mock,
+        ):
+            report = phase_verify_post_discard(
+                "camaraproject/ReleaseTest", 90, snapshot_id="r1.2-abc1234",
+            )
+        assert report.passed is True
+
+    def test_fail_when_snapshot_branch_still_exists(self):
+        issue_response = {
+            "labels": [{"name": "release-state:planned"}],
+            "state": "open",
+        }
+
+        def gh_mock(args, parse_json=False):  # noqa: ARG001
+            joined = " ".join(args)
+            if "issues/90" in joined:
+                return issue_response
+            if "branches/release-snapshot/r1.2-abc1234" in joined:
+                return "release-snapshot/r1.2-abc1234"  # still present
+            if "branches/release-review/r1.2-abc1234-preserved" in joined:
+                return "release-review/r1.2-abc1234-preserved"
+            raise AssertionError(f"unmocked: {args}")
+
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            side_effect=gh_mock,
+        ):
+            report = phase_verify_post_discard(
+                "camaraproject/ReleaseTest", 90, snapshot_id="r1.2-abc1234",
+            )
+        assert report.passed is False
+        assert "still exists" in report.detail
+
+    def test_fail_when_preserved_branch_missing(self):
+        issue_response = {
+            "labels": [{"name": "release-state:planned"}],
+            "state": "open",
+        }
+
+        def gh_mock(args, parse_json=False):  # noqa: ARG001
+            joined = " ".join(args)
+            if "issues/90" in joined:
+                return issue_response
+            if "branches/release-snapshot/r1.2-abc1234" in joined:
+                raise InfrastructureError("HTTP 404: Not Found")
+            if "branches/release-review/r1.2-abc1234-preserved" in joined:
+                raise InfrastructureError("HTTP 404: Not Found")
+            raise AssertionError(f"unmocked: {args}")
+
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            side_effect=gh_mock,
+        ):
+            report = phase_verify_post_discard(
+                "camaraproject/ReleaseTest", 90, snapshot_id="r1.2-abc1234",
+            )
+        assert report.passed is False
+        assert "missing" in report.detail
+
+    def test_skips_branch_checks_when_snapshot_id_missing(self):
+        issue_response = {
+            "labels": [{"name": "release-state:planned"}],
+            "state": "open",
+        }
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=issue_response,
+        ):
+            report = phase_verify_post_discard(
+                "camaraproject/ReleaseTest", 90, snapshot_id=None,
+            )
+        assert report.passed is False
+        assert "not captured" in report.detail
+
+
+# ---------------------------------------------------------------------------
+# render_markdown
+# ---------------------------------------------------------------------------
+
+
+class TestRenderMarkdown:
+    def test_all_pass_header(self):
+        reports = [
+            PhaseReport(name="pre-check", passed=True, detail="ok"),
+            PhaseReport(
+                name="fire /create-snapshot",
+                passed=True,
+                detail="run completed",
+                run_url="https://x/1",
+                run_conclusion="success",
+            ),
+        ]
+        out = render_markdown(reports, "o/r", 90)
+        assert "2/2 phases PASS" in out
+        assert "`o/r`" in out
+        assert "#90" in out
+        assert "PASS" in out
+        assert "https://x/1" in out
+        # Successful phases with a run_url still emit a detail section.
+        assert "conclusion: `success`" in out
+
+    def test_fail_shows_per_phase(self):
+        reports = [
+            PhaseReport(name="pre-check", passed=False, detail="state=planned"),
+        ]
+        out = render_markdown(reports, "o/r", 90)
+        assert "0/1 phases PASS" in out
+        assert "FAIL" in out
+        assert "state=planned" in out
+
+    def test_pipe_escape_in_detail(self):
+        reports = [
+            PhaseReport(name="x", passed=True, detail="a | b | c"),
+        ]
+        out = render_markdown(reports, "o/r", 90)
+        # Pipes in detail are escaped so the markdown table doesn't split them.
+        assert r"a \| b \| c" in out
+
+    def test_empty_detail_renders_dash(self):
+        reports = [PhaseReport(name="x", passed=True, detail="")]
+        out = render_markdown(reports, "o/r", 90)
+        assert "| x | PASS | - |" in out
+
+    def test_extras_render_as_bullets(self):
+        reports = [
+            PhaseReport(
+                name="verify post-create",
+                passed=True,
+                detail="all ok",
+                extras=["state=snapshot-active", "pr=#101"],
+            ),
+        ]
+        out = render_markdown(reports, "o/r", 90)
+        assert "- state=snapshot-active" in out
+        assert "- pr=#101" in out

From 513c7dc0c9deeeca5cbe5e9db729f92c05e0aadf Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 22 Apr 2026 23:45:45 +0200
Subject: [PATCH 119/157] fix(ra): allowlist trusted CI bots in
 validate-command permission check
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

App bots don't register as collaborators, so getCollaboratorPermissionLevel
returns 'none' for them — blocking slash commands from CI identities that
have App-installation write permissions but no user role. Allowlist
camara-validation[bot] (used by the Release Automation Regression canary
in tooling) so its /create-snapshot + /discard-snapshot round-trip on
ReleaseTest reaches the RA workflow. Defense in depth unchanged:
/publish-release still requires CODEOWNERS membership (which bots lack),
and the caller's if: filter still blocks camara-release-automation[bot]
from self-triggering.
---
 .../workflows/release-automation-reusable.yml | 47 ++++++++++++-------
 1 file changed, 30 insertions(+), 17 deletions(-)

diff --git a/.github/workflows/release-automation-reusable.yml b/.github/workflows/release-automation-reusable.yml
index c9bef433..854a70b2 100644
--- a/.github/workflows/release-automation-reusable.yml
+++ b/.github/workflows/release-automation-reusable.yml
@@ -760,28 +760,41 @@ jobs:
             console.log('Checking user permission...');
             let userPermission;
 
-            try {
-              const { data: permission } = await github.rest.repos.getCollaboratorPermissionLevel({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                username: user
-              });
-
-              userPermission = permission.permission;
-              console.log(`User permission: ${userPermission}`);
-
-              const hasPermission = ['admin', 'maintain', 'write'].includes(userPermission);
+            // Trusted CI bot identities. App bots don't register as collaborators,
+            // so getCollaboratorPermissionLevel returns 'none' for them; the write
+            // intent is established by the App installation on the target repo.
+            // Currently used by the Release Automation Regression canary in
+            // camaraproject/tooling, which posts slash commands as
+            // camara-validation[bot].
+            const TRUSTED_BOT_USERS = new Set(['camara-validation[bot]']);
+
+            if (TRUSTED_BOT_USERS.has(user)) {
+              userPermission = 'write';
+              console.log(`User ${user} is a trusted CI bot; treating as write`);
+            } else {
+              try {
+                const { data: permission } = await github.rest.repos.getCollaboratorPermissionLevel({
+                  owner: context.repo.owner,
+                  repo: context.repo.repo,
+                  username: user
+                });
 
-              if (!hasPermission) {
+                userPermission = permission.permission;
+                console.log(`User permission: ${userPermission}`);
+              } catch (error) {
+                console.log(`Permission check failed: ${error.message}`);
+                // If we can't check permission, deny by default
                 core.setOutput('allowed', 'false');
-                core.setOutput('error_message', `You must have write access or higher to run release commands. Your current permission: ${userPermission}`);
+                core.setOutput('error_message', `Failed to verify permissions: ${error.message}`);
                 return;
               }
-            } catch (error) {
-              console.log(`Permission check failed: ${error.message}`);
-              // If we can't check permission, deny by default
+            }
+
+            const hasPermission = ['admin', 'maintain', 'write'].includes(userPermission);
+
+            if (!hasPermission) {
               core.setOutput('allowed', 'false');
-              core.setOutput('error_message', `Failed to verify permissions: ${error.message}`);
+              core.setOutput('error_message', `You must have write access or higher to run release commands. Your current permission: ${userPermission}`);
               return;
             }
 

From 9057898dae09fee41321f8be7c31fd279c59c61f Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 22 Apr 2026 23:47:57 +0200
Subject: [PATCH 120/157] =?UTF-8?q?fix(canary):=20disambiguate=20summary?=
 =?UTF-8?q?=20output=20=E2=80=94=20markdown=20header=20+=20stderr?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The markdown header '3/5 phases PASS' and the stderr 'FAIL: 3/5 phases'
both read ambiguously (could be 3 passed or 3 failed) and the markdown
form didn't even surface the overall verdict. Change both to unambiguous
': N of M phases passed' (markdown) and 'FAIL: N of M phases
passed' (stderr). Surfaced on run tooling#24801994238 where 3 phases
passed and 2 failed — the header said '3/5 phases PASS' without any FAIL
indication.
---
 release_automation/scripts/regression_runner.py  |  5 +++--
 .../tests/test_regression_runner.py              | 16 ++++++++++++----
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/release_automation/scripts/regression_runner.py b/release_automation/scripts/regression_runner.py
index 64d380b1..a8d3d414 100644
--- a/release_automation/scripts/regression_runner.py
+++ b/release_automation/scripts/regression_runner.py
@@ -635,9 +635,10 @@ def render_markdown(reports: list[PhaseReport], repo: str, issue_number: int) ->
     """Render a phase-by-phase PASS/FAIL summary as markdown."""
     passed = sum(1 for r in reports if r.passed)
     total = len(reports)
+    verdict = "PASS" if passed == total and total > 0 else "FAIL"
     lines: list[str] = []
     lines.append(
-        f"## Release Automation Regression — {passed}/{total} phases PASS"
+        f"## Release Automation Regression — {verdict}: {passed} of {total} phases passed"
     )
     lines.append("")
     lines.append(f"- target repo: `{repo}`")
@@ -789,7 +790,7 @@ def main(argv: list[str] | None = None) -> int:
     passed = sum(1 for r in reports if r.passed)
     total = len(reports)
     print(
-        f"{'PASS' if all_passed else 'FAIL'}: {passed}/{total} phases",
+        f"{'PASS' if all_passed else 'FAIL'}: {passed} of {total} phases passed",
         file=sys.stderr,
     )
 
diff --git a/release_automation/tests/test_regression_runner.py b/release_automation/tests/test_regression_runner.py
index 23c5d8b3..0e498277 100644
--- a/release_automation/tests/test_regression_runner.py
+++ b/release_automation/tests/test_regression_runner.py
@@ -539,10 +539,9 @@ def test_all_pass_header(self):
             ),
         ]
         out = render_markdown(reports, "o/r", 90)
-        assert "2/2 phases PASS" in out
+        assert "PASS: 2 of 2 phases passed" in out
         assert "`o/r`" in out
         assert "#90" in out
-        assert "PASS" in out
         assert "https://x/1" in out
         # Successful phases with a run_url still emit a detail section.
         assert "conclusion: `success`" in out
@@ -552,10 +551,19 @@ def test_fail_shows_per_phase(self):
             PhaseReport(name="pre-check", passed=False, detail="state=planned"),
         ]
         out = render_markdown(reports, "o/r", 90)
-        assert "0/1 phases PASS" in out
-        assert "FAIL" in out
+        assert "FAIL: 0 of 1 phases passed" in out
         assert "state=planned" in out
 
+    def test_partial_pass_header_says_fail(self):
+        reports = [
+            PhaseReport(name="pre-check", passed=True, detail="ok"),
+            PhaseReport(name="verify", passed=False, detail="state unchanged"),
+        ]
+        out = render_markdown(reports, "o/r", 90)
+        # 1/2 is not overall PASS — header must say FAIL, not leave it ambiguous.
+        assert "FAIL: 1 of 2 phases passed" in out
+        assert "PASS: " not in out.split("\n")[0]
+
     def test_pipe_escape_in_detail(self):
         reports = [
             PhaseReport(name="x", passed=True, detail="a | b | c"),

From e92f65eb2687b8fb8625f83c9bb9f1063d713f26 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Wed, 22 Apr 2026 23:57:12 +0200
Subject: [PATCH 121/157] fix(ci): block RA bot by login, not Bot class

Mirrors ReleaseTest#92 into the caller template. The previous
'comment.user.type != Bot' filter also blocked non-RA bots that
legitimately fire slash commands from CI (e.g. camara-validation[bot]
posting from the RA regression canary). Narrow the filter to the RA
bot identity so its own replies still can't self-trigger, while other
bot identities pass through to the slash-command gate. Propagates to
API repos via the reconciliation campaign once v1-rc advances.
---
 release_automation/workflows/release-automation-caller.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/release_automation/workflows/release-automation-caller.yml b/release_automation/workflows/release-automation-caller.yml
index 0fd60318..e273b848 100644
--- a/release_automation/workflows/release-automation-caller.yml
+++ b/release_automation/workflows/release-automation-caller.yml
@@ -68,7 +68,7 @@ permissions:
 jobs:
   release-automation:
     # Skip if:
-    # - issue_comment from a Bot (release automation bot comments, not human commands)
+    # - issue_comment from the RA bot itself (its own replies, to prevent self-triggering)
     # - issue_comment but not a release command or not on a release issue
     # - issues event but not a release issue
     # - pull_request but not merged or not to a snapshot branch
@@ -76,7 +76,7 @@ jobs:
       (github.event_name == 'push') ||
       github.event_name == 'workflow_dispatch' ||
       (github.event_name == 'issue_comment' &&
-       github.event.comment.user.type != 'Bot' &&
+       github.event.comment.user.login != 'camara-release-automation[bot]' &&
        contains(github.event.issue.labels.*.name, 'release-issue') &&
        (startsWith(github.event.comment.body, '/create-snapshot') ||
         startsWith(github.event.comment.body, '/discard-snapshot') ||

From cc704367fc1eb7089f41183c39976d47ac1b70fd Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Thu, 23 Apr 2026 06:32:08 +0200
Subject: [PATCH 122/157] fix(canary): auto-discover the Release Issue on the
 target repo
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Release Issues cycle per release — each publish closes the current one
and opens a fresh issue for the next cycle, so a hardcoded number would
break after the first cycle (tooling#214 hardcoded #90; ReleaseTest#90
closed mid-session, #93 took its place, canary pre-check failed on the
next organic run).

Make --release-issue optional; when omitted, discover the single open
issue carrying both the 'release-issue' label and the workflow-owned
body marker. Zero or multiple matches raise InfrastructureError (same
fail-loudly philosophy as pre-check). Drop --release-issue 90 from
.github/workflows/release-automation-regression.yml. Keep the CLI flag
for local override / split-state recovery.
---
 .../release-automation-regression.yml         |  1 -
 .../scripts/regression_runner.py              | 79 ++++++++++++++--
 .../tests/test_regression_runner.py           | 91 +++++++++++++++++++
 3 files changed, 164 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/release-automation-regression.yml b/.github/workflows/release-automation-regression.yml
index 4de04a5a..fb7f50d1 100644
--- a/.github/workflows/release-automation-regression.yml
+++ b/.github/workflows/release-automation-regression.yml
@@ -72,7 +72,6 @@ jobs:
         run: |
           python3 release_automation/scripts/regression_runner.py \
             --repo camaraproject/ReleaseTest \
-            --release-issue 90 \
             --summary-file release-automation-regression-summary.md
 
       - name: Publish summary
diff --git a/release_automation/scripts/regression_runner.py b/release_automation/scripts/regression_runner.py
index a8d3d414..087557b0 100644
--- a/release_automation/scripts/regression_runner.py
+++ b/release_automation/scripts/regression_runner.py
@@ -11,11 +11,19 @@
 
 Usage:
     python3 regression_runner.py --repo camaraproject/ReleaseTest \\
-        --release-issue 90 \\
         [--summary-file release-automation-regression-summary.md]
 
+    # Override discovery (useful when iterating locally):
+    python3 regression_runner.py --repo camaraproject/ReleaseTest --release-issue 93
+
     # Dry-run (no comments posted, no runs polled):
-    python3 regression_runner.py --repo ... --release-issue 90 --dry-run
+    python3 regression_runner.py --repo camaraproject/ReleaseTest --dry-run
+
+The Release Issue is auto-discovered on --repo via the 'release-issue' label
+and the workflow-owned body marker. Release Issues cycle per release — each
+publish closes the current one and opens a fresh issue for the next cycle —
+so a hardcoded number would break after the first cycle. Pass --release-issue
+only to override discovery (local testing, or recovering from a split state).
 
 Exit codes:
     0  all phases PASS
@@ -67,6 +75,13 @@
 # Source of truth: .github/workflows/release-automation-reusable.yml
 _STATE_LABEL_PREFIX = "release-state:"
 
+# Release Issue discovery markers — matched by find_release_issue().
+# Both the label and the body marker must be present; the combination
+# distinguishes the workflow-owned Release Issue from any other issue
+# a maintainer might tag as 'release-issue'.
+_RELEASE_ISSUE_LABEL = "release-issue"
+_RELEASE_ISSUE_BODY_MARKER = ""
+
 # Caller workflow filename on the target test repo. Each release-plan
 # repo copies this caller from the shared template.
 _RA_CALLER_WORKFLOW = "release-automation.yml"
@@ -182,6 +197,50 @@ def poll_run(
 # ---------------------------------------------------------------------------
 
 
+def find_release_issue(repo: str) -> int:
+    """Discover the single workflow-owned Release Issue on *repo*.
+
+    Release Issues cycle per release (closed on publish, a fresh one opened
+    for the next cycle), so the canary must discover the current one rather
+    than relying on a static number. A match requires both:
+
+    - the `release-issue` label (server-side filter), and
+    - the workflow-owned body marker (client-side check).
+
+    Returns the issue number. Raises InfrastructureError if zero matches
+    (no active release cycle on the target repo) or more than one match
+    (corrupted state that automation must not silently collapse).
+    """
+    data = gh(
+        [
+            "api", f"repos/{repo}/issues",
+            "-X", "GET",
+            "-f", "state=open",
+            "-f", f"labels={_RELEASE_ISSUE_LABEL}",
+            "--paginate",
+            "--jq", "[.[] | select(.pull_request == null) | {number, body}]",
+        ],
+        parse_json=True,
+    )
+    matching = [
+        item["number"]
+        for item in data
+        if _RELEASE_ISSUE_BODY_MARKER in (item.get("body") or "")
+    ]
+    if len(matching) == 0:
+        raise InfrastructureError(
+            f"{repo}: no open Release Issue found "
+            f"(need label '{_RELEASE_ISSUE_LABEL}' AND body marker "
+            f"'{_RELEASE_ISSUE_BODY_MARKER}')"
+        )
+    if len(matching) > 1:
+        nums = ", ".join(f"#{n}" for n in sorted(matching))
+        raise InfrastructureError(
+            f"{repo}: multiple open Release Issues found: {nums}"
+        )
+    return matching[0]
+
+
 def read_state_label(labels: list[dict[str, Any]]) -> str | None:
     """Extract the release-state:* value from a list of label objects.
 
@@ -686,8 +745,9 @@ def _build_argparser() -> argparse.ArgumentParser:
     parser.add_argument(
         "--release-issue",
         type=int,
-        required=True,
-        help="issue number of the persistent Release Issue on --repo",
+        help="issue number of the current Release Issue on --repo; "
+             "if omitted, auto-discovered via label + body marker "
+             "(the normal path — Release Issues cycle per release)",
     )
     parser.add_argument(
         "--summary-file",
@@ -772,8 +832,15 @@ def main(argv: list[str] | None = None) -> int:
     _setup_logging(args.verbose)
 
     try:
+        issue_number = args.release_issue
+        if issue_number is None:
+            issue_number = find_release_issue(args.repo)
+            logger.info(
+                "discovered Release Issue: #%d on %s", issue_number, args.repo
+            )
+
         reports = run_phases(
-            args.repo, args.release_issue,
+            args.repo, issue_number,
             poll_timeout=args.poll_timeout,
             dry_run=args.dry_run,
         )
@@ -781,7 +848,7 @@ def main(argv: list[str] | None = None) -> int:
         print(f"INFRA: {exc}", file=sys.stderr)
         return 2
 
-    markdown = render_markdown(reports, args.repo, args.release_issue)
+    markdown = render_markdown(reports, args.repo, issue_number)
     print(markdown)
     if args.summary_file:
         args.summary_file.write_text(markdown, encoding="utf-8")
diff --git a/release_automation/tests/test_regression_runner.py b/release_automation/tests/test_regression_runner.py
index 0e498277..15f13ff2 100644
--- a/release_automation/tests/test_regression_runner.py
+++ b/release_automation/tests/test_regression_runner.py
@@ -20,6 +20,7 @@
     PhaseReport,
     _iso_to_dt,
     find_recent_caller_run,
+    find_release_issue,
     get_release_issue_state,
     phase_pre_check,
     phase_verify_post_create,
@@ -589,3 +590,93 @@ def test_extras_render_as_bullets(self):
         out = render_markdown(reports, "o/r", 90)
         assert "- state=snapshot-active" in out
         assert "- pr=#101" in out
+
+
+# ---------------------------------------------------------------------------
+# find_release_issue
+# ---------------------------------------------------------------------------
+
+
+MARKER = ""
+
+
+class TestFindReleaseIssue:
+    def test_single_match(self):
+        issues = [
+            {"number": 93, "body": f"{MARKER}\n\nsome body text"},
+        ]
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=issues,
+        ):
+            assert find_release_issue("o/r") == 93
+
+    def test_zero_matches_raises(self):
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=[],
+        ):
+            with pytest.raises(InfrastructureError, match="no open Release Issue"):
+                find_release_issue("o/r")
+
+    def test_multiple_matches_raises(self):
+        issues = [
+            {"number": 90, "body": f"{MARKER}\n\nold cycle"},
+            {"number": 93, "body": f"{MARKER}\n\nnew cycle"},
+        ]
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=issues,
+        ):
+            with pytest.raises(InfrastructureError, match="multiple open Release Issues.*#90.*#93"):
+                find_release_issue("o/r")
+
+    def test_labeled_but_no_marker_is_excluded(self):
+        # gh API filters by label server-side, but a maintainer could
+        # hand-label an issue without the workflow-owned body marker.
+        # That's not a workflow-owned issue; it must not match.
+        issues = [
+            {"number": 42, "body": "This is a hand-labeled release issue, not the workflow's."},
+        ]
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=issues,
+        ):
+            with pytest.raises(InfrastructureError, match="no open Release Issue"):
+                find_release_issue("o/r")
+
+    def test_marker_case_sensitivity(self):
+        # The marker must match exactly — substring check on the body.
+        issues = [
+            {"number": 42, "body": ""},  # wrong case
+        ]
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=issues,
+        ):
+            with pytest.raises(InfrastructureError, match="no open Release Issue"):
+                find_release_issue("o/r")
+
+    def test_null_body_is_tolerated(self):
+        # GitHub occasionally returns null for empty bodies; don't crash.
+        issues = [
+            {"number": 42, "body": None},
+        ]
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=issues,
+        ):
+            with pytest.raises(InfrastructureError, match="no open Release Issue"):
+                find_release_issue("o/r")
+
+    def test_marker_anywhere_in_body_matches(self):
+        # Marker may appear after other content (future-proofing if the
+        # template ever reorders).
+        issues = [
+            {"number": 93, "body": f"First line\n\nSecond line\n\n{MARKER}\n"},
+        ]
+        with patch(
+            "release_automation.scripts.regression_runner.gh",
+            return_value=issues,
+        ):
+            assert find_release_issue("o/r") == 93

From 971c96ee71fd84092881c397138bde75f3fd7935 Mon Sep 17 00:00:00 2001
From: Herbert Damker <52109189+hdamker@users.noreply.github.com>
Date: Thu, 23 Apr 2026 08:22:06 +0200
Subject: [PATCH 123/157] fix(canary): check last bot reply + attribute the
 slash-command comment
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Two polish items surfaced by the first green canary run (tooling#214):

1. Fire-phase criterion was too loose. The caller run concludes 'success'
   even when validate-command rejects the command and skips downstream
   jobs (observed on run 24801994238: 3 phases PASSed despite rejection).
   After poll_run, read the RA bot's final reply on the Release Issue
   since the fire marker and compare its first line against an expected
   title prefix per command. Expected prefixes live in a tiny YAML at
   release_automation/regression/expected-comments.yaml and are edited
   by hand when the RA bot templates change — test-assertion discipline,
   not a captured artifact. Rejection (command_rejected template),
   internal error (snapshot_failed, internal_error), and warning paths
   (common_sync_failed) all produce different titles, so the one check
   catches them all. Caller non-success short-circuits before the
   comment check.

2. Slash-command comments on ReleaseTest were bare. Operators scanning
   the Release Issue had no way to tell the canary's /create-snapshot
   and /discard-snapshot posts from manual operator commands. Both
   comments now carry canary attribution and the triggering workflow
   run URL, composed from the Actions-provided GITHUB_SERVER_URL /
   GITHUB_REPOSITORY / GITHUB_RUN_ID env vars (URL clause omitted
   gracefully outside CI). The first line is still the bare slash
   command so both the caller's if: filter and the reusable workflow's
   word-boundary parser accept the body.

23 new tests (69/69 pass total). pyflakes clean.
---
 .../regression/expected-comments.yaml         |  18 +
 .../scripts/regression_runner.py              | 297 ++++++++++---
 .../tests/test_regression_runner.py           | 401 ++++++++++++++++++
 3 files changed, 662 insertions(+), 54 deletions(-)
 create mode 100644 release_automation/regression/expected-comments.yaml

diff --git a/release_automation/regression/expected-comments.yaml b/release_automation/regression/expected-comments.yaml
new file mode 100644
index 00000000..959708bf
--- /dev/null
+++ b/release_automation/regression/expected-comments.yaml
@@ -0,0 +1,18 @@
+# Expected leading title of the final bot comment on the Release Issue
+# when each slash command completes on the green path.
+#
+# This is a test assertion, not a captured artifact. When the RA workflow
+# changes the template used for a given command, edit this file alongside
+# the workflow change.
+#
+# Titles are taken from the first line of
+#   release_automation/templates/bot_messages/