feat(test): add downstream acceptance and fault harness artifacts

This commit packages the standalone task-14 acceptance and task-15 fault-suite execution toolchain for downstream validation.

It includes all runnable harness scripts, helper utilities, and generated evidence captures so downstream behavior can be reproduced and reviewed independently from docs and core implementation.

Bundling these assets separately allows QA/automation workflows to validate runtime changes without dragging operational notes or release-gate documentation into the same review unit.

Ultraworked with [Sisyphus](https://github.com/code-yeongyu/oh-my-opencode)

Co-authored-by: Sisyphus <clio-agent@sisyphuslabs.ai>
This commit is contained in:
2026-03-05 20:32:12 +08:00
parent 56e874ab6d
commit 991f7ded34
6 changed files with 1894 additions and 0 deletions
+384
View File
@@ -0,0 +1,384 @@
#!/usr/bin/env bash
# Task-14 acceptance matrix runner: drives the sim/streamer/tester binaries
# across a 5-row protocol/codec matrix and records per-row evidence plus a
# JSON summary under .sisyphus/evidence.
set -u -o pipefail
# Path anchors derived from this script's location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
STREAMER_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
WORKTREE_ROOT="$(cd "${STREAMER_ROOT}/../.." && pwd)"
BUILD_DIR="${STREAMER_ROOT}/build"
EVIDENCE_ROOT="${WORKTREE_ROOT}/.sisyphus/evidence"
TASK_EVIDENCE_DIR="${EVIDENCE_ROOT}/task-14-acceptance"
SUMMARY_HELPER="${SCRIPT_DIR}/acceptance_summary_helper.py"
# RUN_ID/RUN_DIR and the paths derived from them are assigned for real by
# allocate_run_dir; keep them as empty placeholders so `set -u` is satisfied
# without suggesting that "/rows.tsv" was ever a usable path.
RUN_ID=""
RUN_DIR=""
MANIFEST_TSV=""
SUMMARY_JSON=""
LATEST_SUMMARY_JSON="${EVIDENCE_ROOT}/task-14-acceptance-summary.json"
EVIDENCE_TEXT="${EVIDENCE_ROOT}/task-14-acceptance.txt"
STARTED_AT_UTC="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
mkdir -p "${TASK_EVIDENCE_DIR}"
# Atomically claim a unique run directory under TASK_EVIDENCE_DIR and publish
# RUN_ID / RUN_DIR / MANIFEST_TSV / SUMMARY_JSON.  A bare mkdir (no -p) is the
# atomic uniqueness check; on collision a fresh id is generated and retried.
allocate_run_dir() {
  local try
  for (( try = 0; try < 50; try++ )); do
    local id dir
    id="$(date +"%Y%m%dT%H%M%S")-$(date +"%N")-p$$-$RANDOM"
    dir="${TASK_EVIDENCE_DIR}/${id}"
    if mkdir "${dir}" 2>/dev/null; then
      RUN_ID="${id}"
      RUN_DIR="${dir}"
      MANIFEST_TSV="${RUN_DIR}/rows.tsv"
      SUMMARY_JSON="${RUN_DIR}/summary.json"
      return 0
    fi
    sleep 0.01
  done
  echo "failed to allocate unique acceptance run directory" >&2
  return 1
}
allocate_run_dir || exit 1
# TSV header for the per-row manifest; printf instead of `echo -e` avoids
# echo's non-portable escape handling.
printf 'order\trow_id\tname\tprotocol\tcodec\trtmp_mode\tstatus\treason\tduration_ms\tsim_rc\tstreamer_rc\ttester_rc\tsim_log\tstreamer_log\ttester_log\tsdp_path\n' > "${MANIFEST_TSV}"
# PIDs of background helper processes reaped by the EXIT trap.
cleanup_pids=()
# Send TERM to any background helper that is still running; registered on
# EXIT so tester/sim processes never outlive the script.
cleanup_all() {
  local p
  for p in "${cleanup_pids[@]:-}"; do
    [[ -n "${p}" ]] || continue
    if kill -0 "${p}" 2>/dev/null; then
      kill "${p}" 2>/dev/null || true
    fi
  done
}
trap cleanup_all EXIT
# Succeed iff the given path is an executable file.
binary_exists() {
  [[ -x "$1" ]]
}
# Poll a PID once per second until it exits or timeout_s elapses.
# Returns the process's exit status, or 124 after force-terminating on timeout.
wait_pid() {
  local target="$1" limit_s="$2" waited=0
  until ! kill -0 "${target}" 2>/dev/null; do
    if (( waited >= limit_s )); then
      kill "${target}" 2>/dev/null || true
      wait "${target}" 2>/dev/null || true
      return 124
    fi
    sleep 1
    waited=$((waited + 1))
  done
  wait "${target}" 2>/dev/null
}
# Append one result row to the TSV manifest.
# Args (16, in manifest column order): order row_id name protocol codec
#   rtmp_mode status reason duration_ms sim_rc streamer_rc tester_rc
#   sim_log streamer_log tester_log sdp_path
# printf (not `echo -e`) is used so backslash sequences inside field values
# are written verbatim instead of being re-interpreted.
append_manifest_row() {
  # "$*" joins all positional args with the first char of IFS (a tab here).
  local IFS=$'\t'
  printf '%s\n' "$*" >> "${MANIFEST_TSV}"
}
# Execute one acceptance-matrix row end to end.
# Args: $1 order (1-based), $2 row_id, $3 display name,
#       $4 protocol (rtp|rtmp), $5 codec (h264|h265),
#       $6 rtmp_mode (enhanced|domestic, "" for rtp rows).
# Starts the protocol tester first, then the frame simulator, then runs the
# streamer in the foreground; the row is PASS only if all three exit 0.
run_matrix_row() {
  local order="$1"
  local row_id="$2"
  local name="$3"
  local protocol="$4"
  local codec="$5"
  local rtmp_mode="$6"
  local row_dir="${RUN_DIR}/${order}-${row_id}"
  mkdir -p "${row_dir}"
  local sim_log="${row_dir}/sim.log"
  local streamer_log="${row_dir}/streamer.log"
  local tester_log="${row_dir}/tester.log"
  local sdp_path=""
  # Per-row shm/ipc names embed RUN_ID so concurrent runs cannot collide.
  local shm_name="cvmmap_accept_${row_id}_${RUN_ID}"
  local zmq_endpoint="ipc:///tmp/cvmmap_accept_${row_id}_${RUN_ID}.ipc"
  local sim_label="acc_${order}_${protocol}_${codec}"
  local sim_cmd=(
    "${BUILD_DIR}/cvmmap_sim"
    --shm-name "${shm_name}"
    --zmq-endpoint "${zmq_endpoint}"
    --label "${sim_label}"
    --frames 320
    --fps 200
    --width 640
    --height 360
  )
  local streamer_cmd=(
    "${BUILD_DIR}/cvmmap_streamer"
    --run-mode pipeline
    --codec "${codec}"
    --shm-name "${shm_name}"
    --zmq-endpoint "${zmq_endpoint}"
    --queue-size 1
    --gop 30
    --b-frames 0
    --ingest-max-frames 120
    --ingest-idle-timeout-ms 6000
  )
  local tester_cmd=()
  if [[ "${protocol}" == "rtp" ]]; then
    # Distinct port/payload type per codec so the two RTP rows cannot clash.
    local rtp_port
    local payload_type
    if [[ "${codec}" == "h264" ]]; then
      rtp_port=51040
      payload_type=96
    else
      rtp_port=51042
      payload_type=98
    fi
    sdp_path="${row_dir}/stream.sdp"
    streamer_cmd+=(
      --rtp
      --rtp-endpoint "127.0.0.1:${rtp_port}"
      --rtp-payload-type "${payload_type}"
      --rtp-sdp "${sdp_path}"
    )
    tester_cmd=(
      "${BUILD_DIR}/rtp_receiver_tester"
      --port "${rtp_port}"
      --expect-pt "${payload_type}"
      --packet-threshold 1
      --timeout-ms 10000
    )
  else
    # RTMP rows: fixed listen port and stub-tester mode chosen per row id.
    local rtmp_port
    local tester_mode
    case "${row_id}" in
      rtmp_h264)
        rtmp_port=19360
        tester_mode="h264"
        ;;
      rtmp_h265_enhanced)
        rtmp_port=19362
        tester_mode="h265-enhanced"
        ;;
      rtmp_h265_domestic)
        rtmp_port=19364
        tester_mode="h265-domestic"
        ;;
      *)
        rtmp_port=19366
        tester_mode="h264"
        ;;
    esac
    streamer_cmd+=(
      --rtmp
      --rtmp-url "rtmp://127.0.0.1:${rtmp_port}/live/${row_id}"
      --rtmp-mode "${rtmp_mode}"
    )
    tester_cmd=(
      "${BUILD_DIR}/rtmp_stub_tester"
      --mode "${tester_mode}"
      --listen-host 127.0.0.1
      --listen-port "${rtmp_port}"
      --video-threshold 1
      --timeout-ms 10000
    )
  fi
  local row_start_ms row_end_ms duration_ms
  # GNU date: %3N yields milliseconds (not portable to BSD/macOS date).
  row_start_ms="$(date +%s%3N)"
  # Receiver first so no packets are lost, then the frame source, with a
  # 1-second settle before each dependent process starts.
  "${tester_cmd[@]}" > "${tester_log}" 2>&1 &
  local tester_pid=$!
  cleanup_pids+=("${tester_pid}")
  sleep 1
  "${sim_cmd[@]}" > "${sim_log}" 2>&1 &
  local sim_pid=$!
  cleanup_pids+=("${sim_pid}")
  sleep 1
  # Streamer runs in the foreground; it stops via --ingest-max-frames or the
  # idle timeout.
  "${streamer_cmd[@]}" > "${streamer_log}" 2>&1
  local streamer_rc=$?
  wait_pid "${tester_pid}" 15
  local tester_rc=$?
  wait_pid "${sim_pid}" 15
  local sim_rc=$?
  row_end_ms="$(date +%s%3N)"
  duration_ms=$((row_end_ms - row_start_ms))
  local status="PASS"
  local reason="all-processes-ok"
  if (( sim_rc != 0 || streamer_rc != 0 || tester_rc != 0 )); then
    status="FAIL"
    reason="sim_rc=${sim_rc},streamer_rc=${streamer_rc},tester_rc=${tester_rc}"
  fi
  append_manifest_row \
    "${order}" \
    "${row_id}" \
    "${name}" \
    "${protocol}" \
    "${codec}" \
    "${rtmp_mode}" \
    "${status}" \
    "${reason}" \
    "${duration_ms}" \
    "${sim_rc}" \
    "${streamer_rc}" \
    "${tester_rc}" \
    "${sim_log}" \
    "${streamer_log}" \
    "${tester_log}" \
    "${sdp_path}"
  printf "[%s] %s => %s (%s)\n" "${row_id}" "${name}" "${status}" "${reason}"
}
# Orchestrate the acceptance run: preflight the binaries, run the matrix,
# build the JSON summary, write the evidence text, and gate the exit code.
main() {
  # Preflight: all four harness binaries must exist; otherwise write five
  # SKIP rows so the manifest still documents the attempted matrix.
  local required=(
    "${BUILD_DIR}/cvmmap_sim"
    "${BUILD_DIR}/cvmmap_streamer"
    "${BUILD_DIR}/rtp_receiver_tester"
    "${BUILD_DIR}/rtmp_stub_tester"
  )
  local missing=()
  local bin
  for bin in "${required[@]}"; do
    if ! binary_exists "${bin}"; then
      missing+=("${bin}")
    fi
  done
  if (( ${#missing[@]} > 0 )); then
    local idx
    for idx in 1 2 3 4 5; do
      append_manifest_row \
        "${idx}" \
        "preflight_${idx}" \
        "preflight missing binary" \
        "preflight" \
        "n/a" \
        "" \
        "SKIP" \
        "missing binaries: ${missing[*]}" \
        "0" \
        "-1" \
        "-1" \
        "-1" \
        "" \
        "" \
        "" \
        ""
    done
  else
    # The 5-row matrix: RTP x {h264,h265}; RTMP x {h264, h265-enhanced,
    # h265-domestic}.
    run_matrix_row 1 "rtp_h264" "RTP + H.264" "rtp" "h264" ""
    run_matrix_row 2 "rtp_h265" "RTP + H.265" "rtp" "h265" ""
    run_matrix_row 3 "rtmp_h264" "RTMP + H.264" "rtmp" "h264" "enhanced"
    run_matrix_row 4 "rtmp_h265_enhanced" "RTMP + H.265 enhanced" "rtmp" "h265" "enhanced"
    run_matrix_row 5 "rtmp_h265_domestic" "RTMP + H.265 domestic" "rtmp" "h265" "domestic"
  fi
  local finished_at_utc
  finished_at_utc="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
  python3 "${SUMMARY_HELPER}" \
    --manifest "${MANIFEST_TSV}" \
    --output "${SUMMARY_JSON}" \
    --run-id "${RUN_ID}" \
    --run-dir "${RUN_DIR}" \
    --started-at "${STARTED_AT_UTC}" \
    --finished-at "${finished_at_utc}"
  local summary_rc=$?
  # Refresh the "latest" summary pointer; best effort.
  cp -f "${SUMMARY_JSON}" "${LATEST_SUMMARY_JSON}" 2>/dev/null || true
  {
    echo "task=14"
    echo "run_id=${RUN_ID}"
    echo "run_dir=${RUN_DIR}"
    echo "manifest=${MANIFEST_TSV}"
    echo "summary_json=${SUMMARY_JSON}"
    echo "latest_summary_json=${LATEST_SUMMARY_JSON}"
    echo "started_at=${STARTED_AT_UTC}"
    echo "finished_at=${finished_at_utc}"
  } > "${EVIDENCE_TEXT}"
  if (( summary_rc != 0 )); then
    echo "summary helper failed with rc=${summary_rc}" >&2
    return 1
  fi
  # Pull all four counters with a single python3 invocation (previously four
  # separate interpreter spawns re-reading the same JSON file).
  local pass_count fail_count skip_count total_count
  read -r total_count pass_count fail_count skip_count < <(python3 - "${SUMMARY_JSON}" <<'PY'
import json
import sys
with open(sys.argv[1], "r", encoding="utf-8") as handle:
    counts = json.load(handle).get("counts", {})
print(counts.get("total", 0), counts.get("pass", 0), counts.get("fail", 0), counts.get("skip", 0))
PY
)
  echo "summary: total=${total_count} pass=${pass_count} fail=${fail_count} skip=${skip_count}"
  echo "json: ${SUMMARY_JSON}"
  # Acceptance gate: exactly 5 rows and all of them PASS.
  if [[ "${total_count}" == "5" && "${pass_count}" == "5" && "${fail_count}" == "0" && "${skip_count}" == "0" ]]; then
    return 0
  fi
  return 1
}
main "$@"
+334
View File
@@ -0,0 +1,334 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import csv
import json
import re
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import cast
# Scalar metric value extracted from tester/streamer logs or the SDP file.
MetricValue = int | float | str | bool
# Matches key=value tokens on metric lines (e.g. "frames=120 p50_us=833").
KV_PATTERN = re.compile(r"([a-zA-Z_]+)=([^\s]+)")
@dataclass(frozen=True)
class CliArgs:
    """Parsed command-line arguments for the acceptance summary builder."""

    manifest: str  # TSV manifest produced by the acceptance runner
    output: str  # destination path for the JSON summary
    run_id: str  # unique id of this acceptance run
    run_dir: str  # directory holding per-row evidence
    started_at: str  # run start timestamp (UTC, ISO-8601)
    finished_at: str  # run finish timestamp (UTC, ISO-8601)
def parse_args() -> CliArgs:
    """Parse the CLI into an immutable CliArgs record; all options required."""
    parser = argparse.ArgumentParser(
        description="Build JSON summary for standalone acceptance matrix"
    )
    _ = parser.add_argument(
        "--manifest", required=True, help="TSV manifest produced by acceptance runner"
    )
    _ = parser.add_argument("--output", required=True, help="Output JSON summary path")
    _ = parser.add_argument("--run-id", required=True)
    _ = parser.add_argument("--run-dir", required=True)
    _ = parser.add_argument("--started-at", required=True)
    _ = parser.add_argument("--finished-at", required=True)
    ns = parser.parse_args(sys.argv[1:])
    return CliArgs(
        manifest=cast(str, ns.manifest),
        output=cast(str, ns.output),
        run_id=cast(str, ns.run_id),
        run_dir=cast(str, ns.run_dir),
        started_at=cast(str, ns.started_at),
        finished_at=cast(str, ns.finished_at),
    )
def read_text(path: str) -> str:
    """Return file contents as UTF-8 (bad bytes replaced); '' if missing/unreadable."""
    target = Path(path)
    if not target.exists():
        return ""
    try:
        return target.read_text(encoding="utf-8", errors="replace")
    except OSError:
        return ""
def to_number(value: str) -> MetricValue:
    """Coerce a metric token to int or float when it looks numeric; else keep it."""
    for pattern, caster in ((r"-?\d+", int), (r"-?\d+\.\d+", float)):
        if re.fullmatch(pattern, value):
            try:
                return caster(value)
            except ValueError:
                return value
    return value
def parse_key_value_metrics(line: str) -> dict[str, MetricValue]:
    """Extract every key=value token on the line into a typed metric dict."""
    return {m.group(1): to_number(m.group(2)) for m in KV_PATTERN.finditer(line)}
def extract_last_matching_line(text: str, token: str) -> str:
    """Return the last line of ``text`` containing ``token``, or '' if none.

    Scans from the end and returns on the first hit so large logs are not
    walked in full when the wanted line is near the bottom (the common case).
    """
    for line in reversed(text.splitlines()):
        if token in line:
            return line
    return ""
def parse_rtp_tester_metrics(text: str) -> dict[str, MetricValue]:
    """Pull the RTP receiver tester's summary counters out of its log text."""
    spec = (
        ("packets_received", r"Packets received:\s*(\d+)"),
        ("sequence_gaps", r"Sequence gaps:\s*(\d+)"),
        ("invalid_packets", r"Invalid packets:\s*(\d+)"),
        ("detected_payload_type", r"Detected payload type:\s*(\d+)"),
    )
    found: dict[str, MetricValue] = {}
    for name, pattern in spec:
        hit = re.search(pattern, text)
        if hit is not None:
            found[name] = int(hit.group(1))
    return found
def parse_rtmp_tester_metrics(text: str) -> dict[str, MetricValue]:
    """Pull the RTMP stub tester's message/signaling counters from its log."""
    found: dict[str, MetricValue] = {}

    def absorb(pattern: str, names: tuple[str, ...]) -> None:
        # Map regex groups 1..n onto the given metric names when matched.
        hit = re.search(pattern, text)
        if hit is not None:
            for idx, name in enumerate(names, start=1):
                found[name] = int(hit.group(idx))

    absorb(
        r"Messages:\s*total=(\d+),\s*audio=(\d+),\s*video=(\d+),\s*data=(\d+),\s*chunk-size-updates=(\d+)",
        (
            "messages_total",
            "messages_audio",
            "messages_video",
            "messages_data",
            "chunk_size_updates",
        ),
    )
    absorb(
        r"Video signaling counts:\s*h264=(\d+),\s*h265-enhanced=(\d+),\s*h265-domestic=(\d+),\s*unknown=(\d+)",
        (
            "h264_video_messages",
            "h265_enhanced_video_messages",
            "h265_domestic_video_messages",
            "unknown_video_messages",
        ),
    )
    absorb(
        r"Matching count for expected mode:\s*(\d+)\s*\(threshold=(\d+)\)",
        ("matching_count", "matching_threshold"),
    )
    return found
def parse_streamer_metrics(text: str) -> dict[str, dict[str, MetricValue]]:
    """Collect the final PIPELINE/RTP/RTMP metric lines from the streamer log."""
    out: dict[str, dict[str, MetricValue]] = {}
    token_to_key = {
        "PIPELINE_METRICS": "pipeline",
        "RTP_METRICS": "rtp",
        "RTMP_METRICS": "rtmp",
    }
    for token, key in token_to_key.items():
        line = extract_last_matching_line(text, token)
        if line:
            out[key] = parse_key_value_metrics(line)
    return out
def parse_sdp_metrics(path: str) -> dict[str, MetricValue]:
    """Describe the generated SDP file: existence, size, codec lines, payload type."""
    if not path:
        return {}
    sdp_file = Path(path)
    if not sdp_file.exists():
        return {"exists": False}
    body = read_text(path)
    info: dict[str, MetricValue] = {
        "exists": True,
        "bytes": sdp_file.stat().st_size,
        "has_h264": "H264/90000" in body,
        "has_h265": ("H265/90000" in body) or ("HEVC/90000" in body),
    }
    payload = re.search(r"m=video\s+\d+\s+RTP/AVP\s+(\d+)", body)
    if payload is not None:
        info["payload_type"] = int(payload.group(1))
    return info
def parse_exit_code(value: str) -> int:
    """Best-effort int conversion for rc fields; -1 when unparseable."""
    try:
        parsed = int(value)
    except (TypeError, ValueError):
        parsed = -1
    return parsed
def parse_duration_ms(value: str) -> int:
    """Best-effort int conversion for duration fields; 0 when unparseable."""
    try:
        parsed = int(value)
    except (TypeError, ValueError):
        parsed = 0
    return parsed
# Column order of the TSV manifest; matches the header row written by the
# acceptance runner's append_manifest_row helper.
MANIFEST_FIELDS = (
    "order",
    "row_id",
    "name",
    "protocol",
    "codec",
    "rtmp_mode",
    "status",
    "reason",
    "duration_ms",
    "sim_rc",
    "streamer_rc",
    "tester_rc",
    "sim_log",
    "streamer_log",
    "tester_log",
    "sdp_path",
)
def parse_manifest(path: str) -> list[dict[str, str]]:
    """Load the TSV manifest into string-valued row dicts.

    Missing columns become '' so downstream code can index unconditionally.
    """
    with open(path, "r", encoding="utf-8", newline="") as handle:
        reader = csv.DictReader(handle, delimiter="\t")
        return [
            {
                field: "" if raw_row.get(field, "") is None else str(raw_row.get(field, ""))
                for field in MANIFEST_FIELDS
            }
            for raw_row in reader
        ]
def build_summary(args: CliArgs) -> dict[str, object]:
    """Assemble the acceptance-run summary document from the manifest.

    For each manifest row (sorted by its 'order' column) the tester and
    streamer logs are parsed into metric dicts; RTP rows additionally get
    SDP-file metrics.  The overall gate (`all_pass`) requires exactly 5 rows,
    all with status PASS.
    """
    manifest_rows = parse_manifest(args.manifest)
    rows: list[dict[str, object]] = []
    for row in sorted(manifest_rows, key=lambda item: int(item["order"])):
        streamer_log = row["streamer_log"]
        tester_log = row["tester_log"]
        sim_log = row["sim_log"]
        sdp_path = row.get("sdp_path", "")
        streamer_text = read_text(streamer_log)
        tester_text = read_text(tester_log)
        # Tester log format depends on the row's protocol.
        tester_metrics: dict[str, MetricValue]
        if row["protocol"] == "rtp":
            tester_metrics = parse_rtp_tester_metrics(tester_text)
        else:
            tester_metrics = parse_rtmp_tester_metrics(tester_text)
        metrics: dict[str, object] = {
            "tester": tester_metrics,
            "streamer": parse_streamer_metrics(streamer_text),
        }
        if row["protocol"] == "rtp":
            metrics["sdp"] = parse_sdp_metrics(sdp_path)
        rows.append(
            {
                "order": int(row["order"]),
                "id": row["row_id"],
                "name": row["name"],
                "protocol": row["protocol"],
                "codec": row["codec"],
                # Empty rtmp_mode (RTP rows) is normalized to JSON null.
                "rtmp_mode": row["rtmp_mode"] if row["rtmp_mode"] else None,
                "status": row["status"],
                "reason": row["reason"],
                "duration_ms": parse_duration_ms(row["duration_ms"]),
                "exit_codes": {
                    "sim": parse_exit_code(row["sim_rc"]),
                    "streamer": parse_exit_code(row["streamer_rc"]),
                    "tester": parse_exit_code(row["tester_rc"]),
                },
                "metrics": metrics,
                "evidence": {
                    "sim_log": sim_log,
                    "streamer_log": streamer_log,
                    "tester_log": tester_log,
                    "sdp": sdp_path if sdp_path else None,
                },
            }
        )
    pass_count = sum(1 for row in rows if row["status"] == "PASS")
    fail_count = sum(1 for row in rows if row["status"] == "FAIL")
    skip_count = sum(1 for row in rows if row["status"] == "SKIP")
    # Strict gate: exactly the 5 matrix rows, all PASS.
    all_pass = (
        len(rows) == 5 and pass_count == 5 and fail_count == 0 and skip_count == 0
    )
    return {
        "run_id": args.run_id,
        "run_dir": args.run_dir,
        "started_at": args.started_at,
        "finished_at": args.finished_at,
        "counts": {
            "total": len(rows),
            "pass": pass_count,
            "fail": fail_count,
            "skip": skip_count,
        },
        "all_pass": all_pass,
        "recommended_exit_code": 0 if all_pass else 1,
        "rows": rows,
    }
def main() -> int:
    """Build the summary JSON and write it (with trailing newline) to --output."""
    cli = parse_args()
    destination = Path(cli.output)
    destination.parent.mkdir(parents=True, exist_ok=True)
    payload = json.dumps(build_summary(cli), indent=2, sort_keys=False)
    _ = destination.write_text(payload + "\n", encoding="utf-8")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
+435
View File
@@ -0,0 +1,435 @@
#!/usr/bin/env bash
# Task-15 fault-suite runner: executes the torn-read / sink-stall /
# reset-storm scenarios against the streamer pipeline and records evidence
# plus a JSON summary under .sisyphus/evidence.
set -u -o pipefail
# Path anchors derived from this script's location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
STREAMER_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
WORKTREE_ROOT="$(cd "${STREAMER_ROOT}/../.." && pwd)"
BUILD_DIR="${STREAMER_ROOT}/build"
EVIDENCE_ROOT="${WORKTREE_ROOT}/.sisyphus/evidence"
TASK_EVIDENCE_DIR="${EVIDENCE_ROOT}/task-15-fault-suite"
SUMMARY_HELPER="${SCRIPT_DIR}/fault_summary_helper.py"
# Parse CLI options.  Accepts "--mode <baseline|degraded>" or the "--degraded"
# shorthand.  Unlike the previous single-shot parser, this loop consumes every
# argument, so trailing junk after a valid option is rejected instead of being
# silently ignored.
MODE="baseline"
while [[ $# -gt 0 ]]; do
  case "$1" in
    --mode)
      if [[ $# -lt 2 ]]; then
        echo "missing value for --mode" >&2
        exit 2
      fi
      MODE="$2"
      shift 2
      ;;
    --degraded)
      MODE="degraded"
      shift
      ;;
    *)
      echo "unknown argument: $1" >&2
      exit 2
      ;;
  esac
done
if [[ "${MODE}" != "baseline" && "${MODE}" != "degraded" ]]; then
  echo "invalid --mode '${MODE}' (expected: baseline|degraded)" >&2
  exit 2
fi
# RUN_ID/RUN_DIR and the manifest/summary paths are populated by
# allocate_run_dir below; empty placeholders keep `set -u` satisfied without
# implying that "/rows.tsv" was ever a usable path.
RUN_ID=""
RUN_DIR=""
MANIFEST_TSV=""
SUMMARY_JSON=""
# Baseline and degraded runs publish to distinct "latest" evidence files so a
# degraded run never clobbers the baseline evidence.
if [[ "${MODE}" == "baseline" ]]; then
  LATEST_SUMMARY_JSON="${EVIDENCE_ROOT}/task-15-fault-suite-summary.json"
  EVIDENCE_TEXT="${EVIDENCE_ROOT}/task-15-fault-suite.txt"
else
  LATEST_SUMMARY_JSON="${EVIDENCE_ROOT}/task-15-fault-suite-error-summary.json"
  EVIDENCE_TEXT="${EVIDENCE_ROOT}/task-15-fault-suite-error.txt"
fi
STARTED_AT_UTC="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
mkdir -p "${TASK_EVIDENCE_DIR}"
# Atomically claim a unique, mode-suffixed run directory under
# TASK_EVIDENCE_DIR and publish RUN_ID/RUN_DIR/MANIFEST_TSV/SUMMARY_JSON.
# A bare mkdir is the atomic uniqueness check; retry with a fresh stamp.
allocate_run_dir() {
  local try
  for (( try = 0; try < 50; try++ )); do
    local stamp dir
    stamp="$(date +"%Y%m%dT%H%M%S")-$(date +"%N")-p$$-$RANDOM"
    dir="${TASK_EVIDENCE_DIR}/${stamp}-${MODE}"
    if mkdir "${dir}" 2>/dev/null; then
      RUN_ID="${stamp}"
      RUN_DIR="${dir}"
      MANIFEST_TSV="${RUN_DIR}/rows.tsv"
      SUMMARY_JSON="${RUN_DIR}/summary.json"
      return 0
    fi
    sleep 0.01
  done
  echo "failed to allocate unique fault-suite run directory" >&2
  return 1
}
allocate_run_dir || exit 1
# TSV header for the scenario manifest; printf instead of `echo -e` avoids
# echo's non-portable escape handling.
printf 'order\tscenario_id\tname\tstatus\treason\tduration_ms\tsim_rc\tstreamer_rc\ttester_rc\tsim_log\tstreamer_log\ttester_log\tsdp_path\n' > "${MANIFEST_TSV}"
# PIDs of background helper processes reaped by the EXIT trap.
cleanup_pids=()
# Best-effort TERM for every background helper still alive; wired to the
# EXIT trap so tester/sim processes never outlive the script.
cleanup_all() {
  local victim
  for victim in "${cleanup_pids[@]:-}"; do
    [[ -n "${victim}" ]] && kill -0 "${victim}" 2>/dev/null && kill "${victim}" 2>/dev/null || true
  done
}
trap cleanup_all EXIT
# Succeed iff $1 names an executable file.
binary_exists() {
  local candidate="$1"
  [[ -x "${candidate}" ]]
}
# Wait up to $2 seconds (1-second polling) for PID $1 to exit.
# Returns its exit status, or 124 after sending TERM on timeout.
wait_pid() {
  local pid="$1"
  local deadline="$2"
  local spent=0
  while kill -0 "${pid}" 2>/dev/null; do
    if (( spent >= deadline )); then
      kill "${pid}" 2>/dev/null || true
      wait "${pid}" 2>/dev/null || true
      return 124
    fi
    sleep 1
    spent=$((spent + 1))
  done
  wait "${pid}" 2>/dev/null
}
# Append one scenario row (13 tab-separated fields) to the manifest.
# Args (in column order): order scenario_id name status reason duration_ms
#   sim_rc streamer_rc tester_rc sim_log streamer_log tester_log sdp_path
# printf (not `echo -e`) is used so backslash sequences inside field values
# are written verbatim instead of being re-interpreted.
append_manifest_row() {
  # "$*" joins all positional args with the first char of IFS (a tab here).
  local IFS=$'\t'
  printf '%s\n' "$*" >> "${MANIFEST_TSV}"
}
# Map a 1-based scenario order to an even RTP port; baseline and degraded
# runs use disjoint ranges (52040+ vs 52140+) so they cannot collide.
scenario_port() {
  local idx="$1"
  local base=52140
  [[ "${MODE}" == "baseline" ]] && base=52040
  echo $(( base + (idx - 1) * 2 ))
}
# Execute one fault-injection scenario end to end.
# Args: $1 order (1-based), $2 scenario_id (torn_read|sink_stall|reset_storm),
#       $3 display name.
# Scenario knobs (fps, stall duration, reset cadence, frame budget) depend on
# MODE: baseline uses gentle values, degraded uses aggressive ones.
run_fault_scenario() {
  local order="$1"
  local scenario_id="$2"
  local name="$3"
  local row_dir="${RUN_DIR}/${order}-${scenario_id}"
  mkdir -p "${row_dir}"
  local sim_log="${row_dir}/sim.log"
  local streamer_log="${row_dir}/streamer.log"
  local tester_log="${row_dir}/tester.log"
  local sdp_path="${row_dir}/stream.sdp"
  # Unique shm/ipc names per mode+scenario+run so runs cannot collide.
  local shm_name="fault_${MODE}_${scenario_id}_${RUN_ID}"
  local zmq_endpoint="ipc:///tmp/fault_${MODE}_${scenario_id}_${RUN_ID}.ipc"
  local sim_label="f${order}_${MODE:0:3}_${scenario_id:0:3}"
  local sim_frames=360
  local sim_fps=200
  local reset_every=""
  local snapshot_delay_us=0
  local emit_stall_ms=0
  local ingest_max_frames=180
  case "${scenario_id}" in
    torn_read)
      # Slow snapshot copies plus a faster producer to provoke torn reads.
      if [[ "${MODE}" == "baseline" ]]; then
        snapshot_delay_us=2500
        sim_fps=240
      else
        snapshot_delay_us=25000
        sim_fps=320
      fi
      ;;
    sink_stall)
      # Artificial emit stalls to back-pressure the pipeline.
      if [[ "${MODE}" == "baseline" ]]; then
        emit_stall_ms=3
        ingest_max_frames=140
      else
        emit_stall_ms=60
        ingest_max_frames=160
      fi
      ;;
    reset_storm)
      # Periodic producer resets; degraded mode resets every 3 frames.
      if [[ "${MODE}" == "baseline" ]]; then
        reset_every=20
        ingest_max_frames=120
      else
        reset_every=3
        ingest_max_frames=180
      fi
      ;;
    *)
      echo "unknown scenario_id=${scenario_id}" >&2
      return 1
      ;;
  esac
  local rtp_port
  rtp_port="$(scenario_port "${order}")"
  local sim_cmd=(
    "${BUILD_DIR}/cvmmap_sim"
    --shm-name "${shm_name}"
    --zmq-endpoint "${zmq_endpoint}"
    --label "${sim_label}"
    --frames "${sim_frames}"
    --fps "${sim_fps}"
    --width 640
    --height 360
  )
  if [[ -n "${reset_every}" ]]; then
    sim_cmd+=(--emit-reset-every "${reset_every}")
  fi
  local streamer_cmd=(
    "${BUILD_DIR}/cvmmap_streamer"
    --run-mode pipeline
    --codec h264
    --shm-name "${shm_name}"
    --zmq-endpoint "${zmq_endpoint}"
    --queue-size 1
    --gop 30
    --b-frames 0
    --ingest-max-frames "${ingest_max_frames}"
    --ingest-idle-timeout-ms 8000
    --snapshot-copy-delay-us "${snapshot_delay_us}"
    --emit-stall-ms "${emit_stall_ms}"
    --rtp
    --rtp-endpoint "127.0.0.1:${rtp_port}"
    --rtp-payload-type 96
    --rtp-sdp "${sdp_path}"
  )
  local tester_cmd=(
    "${BUILD_DIR}/rtp_receiver_tester"
    --port "${rtp_port}"
    --expect-pt 96
    --packet-threshold 1
    --timeout-ms 15000
  )
  local row_start_ms row_end_ms duration_ms
  # GNU date: %3N yields milliseconds (not portable to BSD/macOS date).
  row_start_ms="$(date +%s%3N)"
  # Receiver first, then the frame source, then the streamer in the
  # foreground, with a 1-second settle before each dependent process.
  "${tester_cmd[@]}" > "${tester_log}" 2>&1 &
  local tester_pid=$!
  cleanup_pids+=("${tester_pid}")
  sleep 1
  "${sim_cmd[@]}" > "${sim_log}" 2>&1 &
  local sim_pid=$!
  cleanup_pids+=("${sim_pid}")
  sleep 1
  "${streamer_cmd[@]}" > "${streamer_log}" 2>&1
  local streamer_rc=$?
  wait_pid "${tester_pid}" 25
  local tester_rc=$?
  wait_pid "${sim_pid}" 25
  local sim_rc=$?
  row_end_ms="$(date +%s%3N)"
  duration_ms=$((row_end_ms - row_start_ms))
  # Process-level PASS/FAIL only; threshold checks happen later in the
  # summary helper.
  local status="PASS"
  local reason="all-processes-ok"
  if (( sim_rc != 0 || streamer_rc != 0 || tester_rc != 0 )); then
    status="FAIL"
    reason="sim_rc=${sim_rc},streamer_rc=${streamer_rc},tester_rc=${tester_rc}"
  fi
  append_manifest_row \
    "${order}" \
    "${scenario_id}" \
    "${name}" \
    "${status}" \
    "${reason}" \
    "${duration_ms}" \
    "${sim_rc}" \
    "${streamer_rc}" \
    "${tester_rc}" \
    "${sim_log}" \
    "${streamer_log}" \
    "${tester_log}" \
    "${sdp_path}"
  printf "[%s] %s => %s (%s)\n" "${scenario_id}" "${name}" "${status}" "${reason}"
}
# Orchestrate the fault suite: preflight the binaries, run the three
# scenarios, build the JSON summary, write the evidence text file, and map
# the results to an exit code (mode-dependent; see bottom of function).
main() {
  local required=(
    "${BUILD_DIR}/cvmmap_sim"
    "${BUILD_DIR}/cvmmap_streamer"
    "${BUILD_DIR}/rtp_receiver_tester"
  )
  local missing=()
  for bin in "${required[@]}"; do
    if ! binary_exists "${bin}"; then
      missing+=("${bin}")
    fi
  done
  if (( ${#missing[@]} > 0 )); then
    # Still leave an evidence file behind describing why nothing ran.
    {
      echo "task=15"
      echo "mode=${MODE}"
      echo "run_id=${RUN_ID}"
      echo "run_dir=${RUN_DIR}"
      echo "manifest=${MANIFEST_TSV}"
      echo "missing_binaries=${missing[*]}"
    } > "${EVIDENCE_TEXT}"
    echo "missing binaries: ${missing[*]}" >&2
    return 1
  fi
  run_fault_scenario 1 "torn_read" "fault:torn-read"
  run_fault_scenario 2 "sink_stall" "fault:sink-stall"
  run_fault_scenario 3 "reset_storm" "fault:reset-storm"
  local finished_at_utc
  finished_at_utc="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
  # The helper applies the per-mode thresholds and computes pass/fail counts.
  python3 "${SUMMARY_HELPER}" \
    --manifest "${MANIFEST_TSV}" \
    --output "${SUMMARY_JSON}" \
    --run-id "${RUN_ID}" \
    --run-dir "${RUN_DIR}" \
    --started-at "${STARTED_AT_UTC}" \
    --finished-at "${finished_at_utc}" \
    --mode "${MODE}"
  local summary_rc=$?
  # Refresh the mode-specific "latest" pointer; best effort.
  cp -f "${SUMMARY_JSON}" "${LATEST_SUMMARY_JSON}" 2>/dev/null || true
  # Extract headline numbers from the summary JSON for the evidence text.
  local total_count pass_count fail_count all_pass
  total_count="$(python3 - <<'PY' "${SUMMARY_JSON}"
import json
import sys
data = json.load(open(sys.argv[1], "r", encoding="utf-8"))
counts = data.get("counts", {})
print(counts.get("total", 0))
PY
)"
  pass_count="$(python3 - <<'PY' "${SUMMARY_JSON}"
import json
import sys
data = json.load(open(sys.argv[1], "r", encoding="utf-8"))
counts = data.get("counts", {})
print(counts.get("pass", 0))
PY
)"
  fail_count="$(python3 - <<'PY' "${SUMMARY_JSON}"
import json
import sys
data = json.load(open(sys.argv[1], "r", encoding="utf-8"))
counts = data.get("counts", {})
print(counts.get("fail", 0))
PY
)"
  all_pass="$(python3 - <<'PY' "${SUMMARY_JSON}"
import json
import sys
data = json.load(open(sys.argv[1], "r", encoding="utf-8"))
print("true" if data.get("all_pass", False) else "false")
PY
)"
  local violation_lines
  # One "scenario:violation" line per violated threshold, for the text log.
  violation_lines="$(python3 - <<'PY' "${SUMMARY_JSON}"
import json
import sys
data = json.load(open(sys.argv[1], "r", encoding="utf-8"))
for scenario in data.get("scenarios", []):
    sid = scenario.get("id", "unknown")
    for violation in scenario.get("violations", []):
        print(f"{sid}:{violation}")
PY
)"
  {
    echo "task=15"
    echo "mode=${MODE}"
    echo "run_id=${RUN_ID}"
    echo "run_dir=${RUN_DIR}"
    echo "manifest=${MANIFEST_TSV}"
    echo "summary_json=${SUMMARY_JSON}"
    echo "latest_summary_json=${LATEST_SUMMARY_JSON}"
    echo "started_at=${STARTED_AT_UTC}"
    echo "finished_at=${finished_at_utc}"
    echo "scenario_total=${total_count}"
    echo "scenario_pass=${pass_count}"
    echo "scenario_fail=${fail_count}"
    echo "all_pass=${all_pass}"
    echo "summary_helper_rc=${summary_rc}"
    echo "violated_thresholds_begin"
    if [[ -n "${violation_lines}" ]]; then
      echo "${violation_lines}"
    fi
    echo "violated_thresholds_end"
  } > "${EVIDENCE_TEXT}"
  if (( summary_rc != 0 )); then
    echo "summary helper failed with rc=${summary_rc}" >&2
    return 1
  fi
  echo "fault-suite mode=${MODE} total=${total_count} pass=${pass_count} fail=${fail_count}"
  echo "summary: ${SUMMARY_JSON}"
  # Exit-code contract:
  #   baseline: 0 only when all 3 scenarios pass, else 1.
  #   degraded: 1 when any scenario failed its thresholds, otherwise 2 (with a
  #   note) — never 0; presumably callers treat nonzero specially in degraded
  #   mode (verify against the invoking workflow).
  if [[ "${MODE}" == "baseline" ]]; then
    if [[ "${total_count}" == "3" && "${pass_count}" == "3" && "${fail_count}" == "0" ]]; then
      return 0
    fi
    return 1
  fi
  if [[ "${fail_count}" != "0" ]]; then
    return 1
  fi
  echo "degraded mode did not violate thresholds" >&2
  return 2
}
main "$@"
+406
View File
@@ -0,0 +1,406 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import csv
import json
import re
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import cast
# Matches key=value tokens on metric lines; keys may include digits here
# (e.g. "p50_us=833"), unlike the acceptance helper's letter-only pattern.
KV_PATTERN = re.compile(r"([a-zA-Z0-9_]+)=([^\s]+)")
@dataclass(frozen=True)
class CliArgs:
    """Parsed command-line arguments for the fault-suite summary builder."""

    manifest: str  # TSV manifest produced by the fault-suite runner
    output: str  # destination path for the JSON summary
    run_id: str  # unique id of this fault-suite run
    run_dir: str  # directory holding per-scenario evidence
    started_at: str  # run start timestamp (UTC, ISO-8601)
    finished_at: str  # run finish timestamp (UTC, ISO-8601)
    mode: str  # "baseline" or "degraded"; selects the threshold table
def parse_args() -> CliArgs:
    """Parse the CLI into an immutable CliArgs record; all options required."""
    parser = argparse.ArgumentParser(
        description="Build fault suite summary with threshold checks"
    )
    for flag in (
        "--manifest",
        "--output",
        "--run-id",
        "--run-dir",
        "--started-at",
        "--finished-at",
    ):
        _ = parser.add_argument(flag, required=True)
    _ = parser.add_argument("--mode", required=True, choices=("baseline", "degraded"))
    ns = parser.parse_args(sys.argv[1:])
    return CliArgs(
        manifest=cast(str, ns.manifest),
        output=cast(str, ns.output),
        run_id=cast(str, ns.run_id),
        run_dir=cast(str, ns.run_dir),
        started_at=cast(str, ns.started_at),
        finished_at=cast(str, ns.finished_at),
        mode=cast(str, ns.mode),
    )
def read_text(path: str) -> str:
    """Return file contents as UTF-8 (bad bytes replaced); '' if missing/unreadable."""
    target = Path(path)
    if not target.exists():
        return ""
    try:
        body = target.read_text(encoding="utf-8", errors="replace")
    except OSError:
        body = ""
    return body
def to_number(value: str) -> int | float | str:
    """Convert a token to int/float when it matches a plain numeric form."""
    integral = re.fullmatch(r"-?\d+", value) is not None
    fractional = re.fullmatch(r"-?\d+\.\d+", value) is not None
    if not (integral or fractional):
        return value
    try:
        return int(value) if integral else float(value)
    except ValueError:
        return value
def parse_key_values(line: str) -> dict[str, int | float | str]:
    """Turn every key=value token on the line into a typed mapping."""
    return {hit.group(1): to_number(hit.group(2)) for hit in KV_PATTERN.finditer(line)}
def last_line_with_token(text: str, token: str) -> str:
    """Return the last line of ``text`` containing ``token``, or '' if none.

    Iterates from the end and returns on the first hit instead of scanning the
    whole log (the wanted metric lines are emitted near the end of the log).
    """
    for candidate in reversed(text.splitlines()):
        if token in candidate:
            return candidate
    return ""
def parse_exit(value: str) -> int:
    """int(value) with a -1 fallback for malformed or absent rc fields."""
    try:
        result = int(value)
    except (TypeError, ValueError):
        result = -1
    return result
def parse_duration_ms(value: str) -> int:
    """int(value) with a 0 fallback for malformed duration fields."""
    try:
        result = int(value)
    except (TypeError, ValueError):
        result = 0
    return result
def parse_manifest(path: str) -> list[dict[str, str]]:
    """Load the fault-suite TSV manifest into string-valued row dicts.

    Missing columns become '' so downstream code can index unconditionally.
    """
    columns = (
        "order",
        "scenario_id",
        "name",
        "status",
        "reason",
        "duration_ms",
        "sim_rc",
        "streamer_rc",
        "tester_rc",
        "sim_log",
        "streamer_log",
        "tester_log",
        "sdp_path",
    )
    with open(path, "r", encoding="utf-8", newline="") as handle:
        return [
            {
                col: "" if raw.get(col, "") is None else str(raw.get(col, ""))
                for col in columns
            }
            for raw in csv.DictReader(handle, delimiter="\t")
        ]
# JSON-serializable record describing one threshold check.
Check = dict[str, object]


def make_check_min(metric: str, actual: int, minimum: int) -> Check:
    """Build a lower-bound check record; violation text is set only on failure."""
    ok = actual >= minimum
    record: Check = {
        "metric": metric,
        "type": "min",
        "actual": actual,
        "expected": minimum,
        "passed": ok,
        "violation": "",
    }
    if not ok:
        record["violation"] = f"{metric}={actual} < {minimum}"
    return record
def make_check_max(metric: str, actual: int, maximum: int) -> Check:
    """Build an upper-bound check record; violation text is set only on failure."""
    ok = actual <= maximum
    record: Check = {
        "metric": metric,
        "type": "max",
        "actual": actual,
        "expected": maximum,
        "passed": ok,
        "violation": "",
    }
    if not ok:
        record["violation"] = f"{metric}={actual} > {maximum}"
    return record
def get_thresholds(mode: str) -> dict[str, dict[str, int]]:
    """Per-scenario threshold tables keyed by run mode.

    "baseline" carries generous bounds; any other mode gets the deliberately
    strict "degraded" bounds that a degraded run is expected to violate.
    """
    tables: dict[str, dict[str, dict[str, int]]] = {
        "baseline": {
            "torn_read": {
                "torn_read_events_min": 1,
                "p50_us_max": 150_000,
                "p99_us_max": 250_000,
                "drop_ratio_ppm_max": 980_000,
                "samples_min": 10,
            },
            "sink_stall": {
                "sink_stall_events_min": 1,
                "p50_us_max": 350_000,
                "p95_us_max": 600_000,
                "drop_ratio_ppm_max": 980_000,
                "samples_min": 10,
            },
            "reset_storm": {
                "reset_events_min": 4,
                "p50_us_max": 800_000,
                "p99_us_max": 1_000_000,
                "drop_ratio_ppm_max": 1_000_000,
                "samples_min": 1,
            },
        },
        "degraded": {
            "torn_read": {
                "torn_read_events_min": 200,
                "p50_us_max": 1_000,
                "p99_us_max": 2_000,
                "drop_ratio_ppm_max": 20_000,
                "samples_min": 100,
            },
            "sink_stall": {
                "sink_stall_events_min": 200,
                "p50_us_max": 1_000,
                "p95_us_max": 2_000,
                "drop_ratio_ppm_max": 20_000,
                "samples_min": 100,
            },
            "reset_storm": {
                "reset_events_min": 20,
                "p50_us_max": 1_000,
                "p99_us_max": 2_000,
                "drop_ratio_ppm_max": 20_000,
                "samples_min": 100,
            },
        },
    }
    return tables["baseline" if mode == "baseline" else "degraded"]
def scenario_checks(
    scenario_id: str,
    fault: dict[str, int | float | str],
    latency: dict[str, int | float | str],
    thresholds: dict[str, dict[str, int]],
) -> list[Check]:
    """Evaluate the threshold checks for one scenario.

    Always checks sample count, p50 latency, and drop ratio; then appends the
    scenario-specific fault-counter and tail-latency checks.  Check order is
    preserved because it is reflected in the emitted summary JSON.
    """
    limits = thresholds.get(scenario_id, {})

    def metric(source: dict[str, int | float | str], key: str) -> int:
        # Metric lines may be absent entirely; treat missing values as 0.
        return int(source.get(key, 0))

    checks: list[Check] = [
        make_check_min(
            "ingest_to_emit_samples",
            metric(latency, "ingest_to_emit_samples"),
            int(limits.get("samples_min", 1)),
        ),
        make_check_max(
            "p50_us",
            metric(latency, "p50_us"),
            int(limits.get("p50_us_max", 500_000)),
        ),
        make_check_max(
            "drop_ratio_ppm",
            metric(latency, "drop_ratio_ppm"),
            int(limits.get("drop_ratio_ppm_max", 1_000_000)),
        ),
    ]
    if scenario_id == "torn_read":
        checks.append(
            make_check_min(
                "torn_read_events",
                metric(fault, "torn_read_events"),
                int(limits.get("torn_read_events_min", 1)),
            )
        )
        checks.append(
            make_check_max(
                "p99_us",
                metric(latency, "p99_us"),
                int(limits.get("p99_us_max", 500_000)),
            )
        )
    elif scenario_id == "sink_stall":
        checks.append(
            make_check_min(
                "sink_stall_events",
                metric(fault, "sink_stall_events"),
                int(limits.get("sink_stall_events_min", 1)),
            )
        )
        checks.append(
            make_check_max(
                "p95_us",
                metric(latency, "p95_us"),
                int(limits.get("p95_us_max", 500_000)),
            )
        )
    elif scenario_id == "reset_storm":
        checks.append(
            make_check_min(
                "reset_events",
                metric(fault, "reset_events"),
                int(limits.get("reset_events_min", 1)),
            )
        )
        checks.append(
            make_check_max(
                "p99_us",
                metric(latency, "p99_us"),
                int(limits.get("p99_us_max", 500_000)),
            )
        )
    return checks
def build_summary(args: CliArgs) -> dict[str, object]:
    """Assemble the fault-suite summary dict from the run manifest.

    For each manifest row: read the streamer log, take the final metrics
    snapshot lines, evaluate threshold checks, and collect a per-scenario
    record. Returns a JSON-ready dict with per-scenario results, pass/fail
    counts, and a recommended process exit code.
    """
    thresholds = get_thresholds(args.mode)
    rows = parse_manifest(args.manifest)
    scenarios: list[dict[str, object]] = []
    # Iterate in manifest order so the summary output is deterministic.
    for row in sorted(rows, key=lambda item: int(item["order"])):
        streamer_text = read_text(row["streamer_log"])
        # Metrics lines repeat during the run; only the last snapshot counts.
        pipeline_line = last_line_with_token(streamer_text, "PIPELINE_METRICS")
        latency_line = last_line_with_token(streamer_text, "LATENCY_METRICS")
        fault_line = last_line_with_token(streamer_text, "FAULT_COUNTERS")
        rtp_line = last_line_with_token(streamer_text, "RTP_METRICS")
        # Missing snapshot lines degrade to empty metric dicts rather than erroring.
        pipeline = parse_key_values(pipeline_line) if pipeline_line else {}
        latency = parse_key_values(latency_line) if latency_line else {}
        fault = parse_key_values(fault_line) if fault_line else {}
        rtp = parse_key_values(rtp_line) if rtp_line else {}
        sim_rc = parse_exit(row["sim_rc"])
        streamer_rc = parse_exit(row["streamer_rc"])
        tester_rc = parse_exit(row["tester_rc"])
        # All three processes must exit cleanly for the scenario to be eligible.
        process_ok = sim_rc == 0 and streamer_rc == 0 and tester_rc == 0
        checks = scenario_checks(row["scenario_id"], fault, latency, thresholds)
        violated_checks = [
            cast(str, check["violation"])
            for check in checks
            if not cast(bool, check["passed"])
        ]
        scenario_pass = process_ok and len(violated_checks) == 0
        scenario_status = "PASS" if scenario_pass else "FAIL"
        # Failure reason prefers process exit codes over threshold violations.
        reason = (
            "all checks passed"
            if scenario_pass
            else (
                f"process_rc(sim={sim_rc},streamer={streamer_rc},tester={tester_rc})"
                if not process_ok
                else "; ".join(violated_checks)
            )
        )
        scenarios.append(
            {
                "order": int(row["order"]),
                "id": row["scenario_id"],
                "name": row["name"],
                "status": scenario_status,
                "reason": reason,
                "duration_ms": parse_duration_ms(row["duration_ms"]),
                "process_exit": {
                    "sim": sim_rc,
                    "streamer": streamer_rc,
                    "tester": tester_rc,
                },
                "metrics": {
                    "pipeline": pipeline,
                    "latency": latency,
                    "fault": fault,
                    "rtp": rtp,
                },
                "checks": checks,
                "violations": violated_checks,
                # Log/SDP paths so a reviewer can trace every verdict back to raw evidence.
                "evidence": {
                    "sim_log": row["sim_log"],
                    "streamer_log": row["streamer_log"],
                    "tester_log": row["tester_log"],
                    "sdp": row["sdp_path"],
                },
            }
        )
    pass_count = sum(1 for item in scenarios if item["status"] == "PASS")
    fail_count = sum(1 for item in scenarios if item["status"] == "FAIL")
    # The suite mandates exactly three scenarios (torn_read, sink_stall,
    # reset_storm); any other count is treated as a failed run.
    all_pass = len(scenarios) == 3 and pass_count == 3 and fail_count == 0
    return {
        "task": 15,
        "mode": args.mode,
        "run_id": args.run_id,
        "run_dir": args.run_dir,
        "started_at": args.started_at,
        "finished_at": args.finished_at,
        "thresholds": thresholds,
        "counts": {
            "total": len(scenarios),
            "pass": pass_count,
            "fail": fail_count,
        },
        "all_pass": all_pass,
        # Callers (e.g. the fault-suite shell harness) use this as the process rc.
        "recommended_exit_code": 0 if all_pass else 1,
        "scenarios": scenarios,
    }
def main() -> int:
    """Entry point: parse CLI args, build the summary, write it as JSON.

    Always returns 0 when the summary file is written; the summary's own
    "recommended_exit_code" field carries the pass/fail verdict.
    """
    cli_args = parse_args()
    summary = build_summary(cli_args)
    destination = Path(cli_args.output)
    destination.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(summary, indent=2, sort_keys=False) + "\n"
    destination.write_text(serialized, encoding="utf-8")
    return 0
# Allow import without side effects; only run the CLI when executed directly.
if __name__ == "__main__":
    raise SystemExit(main())
+253
View File
@@ -0,0 +1,253 @@
#!/usr/bin/env bash
# Final release gate: build, mandatory acceptance/fault suites, required
# evidence presence, and scope-constraint checks (see print_usage below).
#
# NOTE: deliberately not `set -e` — individual gate commands may fail and
# their return codes are captured and reported instead of aborting the gate.
set -u -o pipefail

# Resolve repository-relative roots from this script's own location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
STREAMER_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
WORKTREE_ROOT="$(cd "${STREAMER_ROOT}/../.." && pwd)"
BUILD_DIR="${STREAMER_ROOT}/build"

# Evidence layout: a per-run directory plus stable top-level PASS/FAIL files.
EVIDENCE_ROOT="${WORKTREE_ROOT}/.sisyphus/evidence"
TASK_EVIDENCE_DIR="${EVIDENCE_ROOT}/task-17-release-gate"
PASS_EVIDENCE="${EVIDENCE_ROOT}/task-17-release-gate.txt"
FAIL_EVIDENCE="${EVIDENCE_ROOT}/task-17-release-gate-error.txt"

# NOTE(review): RUN_ID has seconds resolution — two runs in the same second
# share RUN_DIR. Confirm acceptable, or adopt the retrying allocate_run_dir
# pattern used by the acceptance harness.
RUN_ID="$(date +"%Y%m%dT%H%M%S")"
RUN_DIR="${TASK_EVIDENCE_DIR}/${RUN_ID}"
STARTED_AT_UTC="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"

# Optional deterministic failure injection ("" = none; "evidence" supported).
INJECT_FAILURE=""
# Print CLI usage to stdout. The heredoc delimiter is single-quoted so the
# text is emitted literally, with no variable or command expansion.
print_usage() {
  cat <<'EOF'
usage: ./scripts/release_gate.sh [--inject-failure GATE]
Runs final release gate:
1) downstream build
2) mandatory standalone acceptance suite
3) mandatory fault suite baseline
4) required evidence presence checks
5) scope-constraint checks (no direct RTSP/WebRTC publisher, no audio)
Optional deterministic failure injection (for QA):
--inject-failure evidence Force evidence gate failure without modifying repository state
EOF
}
# Parse CLI flags; any malformed invocation exits 2 with a diagnostic.
while (( $# > 0 )); do
  case "$1" in
    --inject-failure)
      if (( $# < 2 )); then
        echo "missing value for --inject-failure" >&2
        exit 2
      fi
      INJECT_FAILURE="$2"
      shift 2
      ;;
    -h|--help)
      print_usage
      exit 0
      ;;
    *)
      echo "unknown argument: $1" >&2
      exit 2
      ;;
  esac
done

# Only the "evidence" injection mode is recognized.
if [[ -n "${INJECT_FAILURE}" && "${INJECT_FAILURE}" != "evidence" ]]; then
  echo "unsupported --inject-failure '${INJECT_FAILURE}' (supported: evidence)" >&2
  exit 2
fi

mkdir -p "${RUN_DIR}"

# Accumulators filled by record_gate: report rows and names of failed gates.
GATE_LINES=()
FAILED_GATES=()
# record_gate NAME STATUS DETAIL
# Append a "name|status|detail" row to the report; any status other than
# PASS (including SKIP) marks the gate as failed.
record_gate() {
  local gate_name="$1" gate_status="$2" gate_detail="$3"
  GATE_LINES+=("${gate_name}|${gate_status}|${gate_detail}")
  case "${gate_status}" in
    PASS) ;;
    *) FAILED_GATES+=("${gate_name}") ;;
  esac
}
# run_command_gate NAME LOGFILE CMD [ARGS...]
# Run CMD with stdout+stderr captured to LOGFILE, record PASS/FAIL with the
# exit code and log path, and propagate the command's exit status.
run_command_gate() {
  local gate_name="$1"
  local capture_log="$2"
  shift 2

  local status=0
  "$@" >"${capture_log}" 2>&1 || status=$?

  if (( status == 0 )); then
    record_gate "${gate_name}" "PASS" "rc=0 log=${capture_log}"
  else
    record_gate "${gate_name}" "FAIL" "rc=${status} log=${capture_log}"
  fi
  return "${status}"
}
# Per-gate log destinations inside the run directory.
build_log="${RUN_DIR}/build.log"
acceptance_log="${RUN_DIR}/acceptance.log"
fault_log="${RUN_DIR}/fault-suite.log"
scope_log="${RUN_DIR}/scope.log"

# Gate 1: downstream build (configure + compile).
build_rc=0
run_command_gate "build" "${build_log}" \
  bash -lc "cmake -B \"${BUILD_DIR}\" -S \"${STREAMER_ROOT}\" && cmake --build \"${BUILD_DIR}\"" || build_rc=$?

# Gate 2: mandatory standalone acceptance suite (skipped if the build failed).
acceptance_rc=0
if (( build_rc != 0 )); then
  record_gate "acceptance_standalone" "SKIP" "blocked_by=build"
  acceptance_rc=1
else
  run_command_gate "acceptance_standalone" "${acceptance_log}" "${SCRIPT_DIR}/acceptance_standalone.sh" || acceptance_rc=$?
fi

# Gate 3: mandatory fault suite baseline (skipped if the build failed).
fault_rc=0
if (( build_rc != 0 )); then
  record_gate "fault_suite_baseline" "SKIP" "blocked_by=build"
  fault_rc=1
else
  run_command_gate "fault_suite_baseline" "${fault_log}" "${SCRIPT_DIR}/fault_suite.sh" || fault_rc=$?
fi
# Evidence artifacts that MUST exist for the gate to pass.
required_evidence=(
  "${EVIDENCE_ROOT}/task-14-acceptance.txt"
  "${EVIDENCE_ROOT}/task-14-acceptance-summary.json"
  "${EVIDENCE_ROOT}/task-15-fault-suite.txt"
  "${EVIDENCE_ROOT}/task-15-fault-suite-summary.json"
  "${EVIDENCE_ROOT}/task-15-fault-suite-error.txt"
  "${EVIDENCE_ROOT}/task-15-fault-suite-error-summary.json"
  "${EVIDENCE_ROOT}/task-16-docs.txt"
  "${EVIDENCE_ROOT}/task-16-docs-error.txt"
)

# QA hook: force this gate to fail by requiring a file that never exists.
if [[ "${INJECT_FAILURE}" == "evidence" ]]; then
  required_evidence+=("${EVIDENCE_ROOT}/__forced_missing_for_task17.txt")
fi

missing_evidence=()
for evidence_path in "${required_evidence[@]}"; do
  [[ -f "${evidence_path}" ]] || missing_evidence+=("${evidence_path}")
done

if (( ${#missing_evidence[@]} > 0 )); then
  record_gate "required_evidence" "FAIL" "missing=${missing_evidence[*]}"
else
  record_gate "required_evidence" "PASS" "all_required_files_present"
fi
# Scope-constraint gate: the streamer must remain video-only with no direct
# RTSP/WebRTC publishing. Verified via docs declarations, the built CLI
# surface, and a static token scan of production sources.
scope_failures=()

# Docs must explicitly declare the video-only / no-audio scope.
if ! grep -Eiq '(video[- ]only|no[[:space:]]+audio[[:space:]]+support)' "${STREAMER_ROOT}/README.md" "${STREAMER_ROOT}/docs/caveats.md"; then
  scope_failures+=("missing explicit video-only/no-audio scope declaration in README/docs")
fi
if ! grep -q "Optional Checks (Non-Blocking)" "${STREAMER_ROOT}/docs/compat_matrix.md"; then
  scope_failures+=("docs/compat_matrix.md missing optional/non-blocking separation")
fi
if ! grep -q "No Direct RTSP/WebRTC Publishing" "${STREAMER_ROOT}/docs/caveats.md"; then
  scope_failures+=("docs/caveats.md missing 'No Direct RTSP/WebRTC Publishing'")
fi
if ! grep -q "No Audio Support" "${STREAMER_ROOT}/docs/caveats.md"; then
  scope_failures+=("docs/caveats.md missing 'No Audio Support'")
fi

# The built binary must not advertise forbidden publisher/audio flags.
if [[ -x "${BUILD_DIR}/cvmmap_streamer" ]]; then
  # Quote the binary path so a BUILD_DIR containing spaces cannot word-split
  # into a different command line (previously unquoted — SC2086-class bug).
  streamer_help="$("${BUILD_DIR}/cvmmap_streamer" --help 2>&1 || true)"
  for forbidden_flag in --rtsp --webrtc --audio; do
    if [[ "${streamer_help}" == *"${forbidden_flag}"* ]]; then
      scope_failures+=("cvmmap_streamer CLI exposes forbidden ${forbidden_flag} flag")
    fi
  done
else
  scope_failures+=("missing ${BUILD_DIR}/cvmmap_streamer for CLI scope validation")
fi

# Production source trees scanned for forbidden tokens (tests/docs excluded
# by virtue of the path list).
scope_scan_paths=(
  "${STREAMER_ROOT}/src/config"
  "${STREAMER_ROOT}/src/core"
  "${STREAMER_ROOT}/src/ipc"
  "${STREAMER_ROOT}/src/pipeline"
  "${STREAMER_ROOT}/src/protocol"
  "${STREAMER_ROOT}/include/cvmmap_streamer"
)
# NOTE(review): grep exits 2 on scan errors (e.g. a missing path), which this
# `if` treats the same as "no match" — the scan fails open in that case.
# Confirm whether rc==2 should instead be recorded as a gate failure.
if grep -RInE --include='*.cpp' --include='*.hpp' --include='*.h' '(rtsp|webrtc)' "${scope_scan_paths[@]}" >"${scope_log}" 2>&1; then
  scope_failures+=("production code references forbidden direct publisher tokens (rtsp|webrtc); see ${scope_log}")
fi
if grep -RInE --include='*.cpp' --include='*.hpp' --include='*.h' '(audio|aac|opus|vorbis)' "${scope_scan_paths[@]}" >>"${scope_log}" 2>&1; then
  scope_failures+=("production code references forbidden audio tokens; see ${scope_log}")
fi

if (( ${#scope_failures[@]} == 0 )); then
  record_gate "scope_constraints" "PASS" "no_direct_rtsp_webrtc_and_no_audio_confirmed"
else
  record_gate "scope_constraints" "FAIL" "${scope_failures[*]}"
fi
FINISHED_AT_UTC="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"

# Overall verdict: PASS only when no gate was recorded as failed.
if (( ${#FAILED_GATES[@]} == 0 )); then
  overall_status="PASS"
else
  overall_status="FAIL"
fi
# write_common_header FILE
# Overwrite FILE with the shared evidence header: run metadata, every gate
# row between gate_summary markers, and the required-evidence paths between
# evidence_bundle markers.
write_common_header() {
  local target_file="$1"
  local row evidence_entry
  {
    printf 'task=17\n'
    printf 'run_id=%s\n' "${RUN_ID}"
    printf 'run_dir=%s\n' "${RUN_DIR}"
    printf 'started_at=%s\n' "${STARTED_AT_UTC}"
    printf 'finished_at=%s\n' "${FINISHED_AT_UTC}"
    printf 'inject_failure=%s\n' "${INJECT_FAILURE:-none}"
    printf 'status=%s\n' "${overall_status}"
    printf 'gate_summary_begin\n'
    for row in "${GATE_LINES[@]}"; do
      printf '%s\n' "${row}"
    done
    printf 'gate_summary_end\n'
    printf 'evidence_bundle_begin\n'
    for evidence_entry in "${required_evidence[@]}"; do
      printf '%s\n' "${evidence_entry}"
    done
    printf 'evidence_bundle_end\n'
  } >"${target_file}"
}
# Success path: write the PASS evidence file (shared header plus a fixed
# residual-risk register), announce on stdout, and exit 0.
if [[ "${overall_status}" == "PASS" ]]; then
  write_common_header "${PASS_EVIDENCE}"
  {
    echo "residual_risks_begin"
    echo "[LOW] NVENC availability is host-dependent; software fallback can increase CPU usage under sustained load."
    echo "[LOW] Optional SRS/ZLMediaKit smoke checks remain non-blocking and are intentionally excluded from mandatory release gating."
    echo "[MEDIUM] Scope validation uses static token/pattern checks and documentation assertions; architectural regressions still require code review discipline."
    echo "residual_risks_end"
  } >>"${PASS_EVIDENCE}"
  echo "release gate PASS"
  echo "evidence: ${PASS_EVIDENCE}"
  exit 0
fi

# Failure path: write the FAIL evidence file (shared header plus the failing
# gate list), report on stderr, and exit 1.
write_common_header "${FAIL_EVIDENCE}"
{
  echo "failing_gates=${FAILED_GATES[*]}"
  echo "failure_reason=one_or_more_gates_failed"
} >>"${FAIL_EVIDENCE}"
echo "release gate FAIL: ${FAILED_GATES[*]}" >&2
echo "evidence: ${FAIL_EVIDENCE}" >&2
exit 1
+82
View File
@@ -0,0 +1,82 @@
#!/usr/bin/env python3
"""Simple RTP test sender for validating rtp_receiver_tester."""
import socket
import struct
import sys
import time
import argparse
def create_rtp_packet(sequence: int, timestamp: int, payload_type: int, payload: bytes,
                      marker: int = 0, ssrc: int = 0x12345678) -> bytes:
    """Create an RTP packet (RFC 3550, fixed 12-byte header, no CSRC/extension).

    Args:
        sequence: 16-bit sequence number (caller is expected to wrap at 65536).
        timestamp: 32-bit media timestamp.
        payload_type: 7-bit payload type placed in header byte 1.
        payload: raw payload bytes appended after the header.
        marker: RTP marker bit; defaults to 0, the previously hard-coded value.
        ssrc: 32-bit synchronization source id; defaults to the previously
            hard-coded 0x12345678. Both new parameters are backward-compatible.

    Returns:
        Header bytes followed by the payload.
    """
    # RTP header layout:
    #   Byte 0:    V(2) P(1) X(1) CC(4)
    #   Byte 1:    M(1) PT(7)
    #   Bytes 2-3: sequence; Bytes 4-7: timestamp; Bytes 8-11: SSRC
    version = 2
    padding = 0
    extension = 0
    csrc_count = 0
    byte0 = (version << 6) | (padding << 5) | (extension << 4) | csrc_count
    # Mask the marker to a single bit so out-of-range values cannot corrupt PT.
    byte1 = ((marker & 0x1) << 7) | payload_type
    header = struct.pack('!BBHII', byte0, byte1, sequence, timestamp, ssrc)
    return header + payload
def send_rtp_packets(target_host: str, target_port: int, payload_type: int,
                     packet_count: int, interval_ms: float):
    """Send a burst of dummy-payload RTP packets over UDP.

    Timestamps advance 3000 ticks per packet (90 kHz clock at ~30 fps) and
    sequence numbers wrap at 65536. Sleeps interval_ms between sends when
    positive, then prints a one-line summary.
    """
    # Fixed dummy payload (Annex-B style start code plus two filler bytes).
    dummy_payload = bytes([0x00, 0x00, 0x00, 0x01, 0x09, 0x10])
    pause_seconds = interval_ms / 1000.0

    # Context manager closes the socket on every exit path, matching the
    # original try/finally behavior.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        for index in range(packet_count):
            datagram = create_rtp_packet(
                sequence=index % 65536,
                timestamp=index * 3000,
                payload_type=payload_type,
                payload=dummy_payload,
            )
            sock.sendto(datagram, (target_host, target_port))
            if interval_ms > 0:
                time.sleep(pause_seconds)
        print(f"Sent {packet_count} RTP packets with PT={payload_type}")
def main():
    """CLI entry point: parse options and fire the requested packet burst."""
    parser = argparse.ArgumentParser(description='Send test RTP packets')
    parser.add_argument('--host', default='127.0.0.1', help='Target host')
    parser.add_argument('--port', type=int, default=5004, help='Target port')
    parser.add_argument('--pt', type=int, default=96, help='Payload type')
    parser.add_argument('--count', type=int, default=20, help='Number of packets')
    parser.add_argument('--interval-ms', type=float, default=50, help='Interval between packets')
    options = parser.parse_args()

    send_rtp_packets(options.host, options.port, options.pt,
                     options.count, options.interval_ms)
# Script entry point; exit code is implicitly 0 since main() returns None.
if __name__ == '__main__':
    main()