chore: sync local latest state and repository cleanup

This commit is contained in:
Your Name
2026-03-23 13:02:36 +08:00
parent f1ff3d629f
commit 2ef0f17961
493 changed files with 46912 additions and 7977 deletions

View File

@@ -0,0 +1,94 @@
activity.approval.approve.ALL
activity.approval.submit.ALL
activity.config.edit.ALL
activity.index.clone.ALL
activity.index.create.ALL
activity.index.delete.ALL
activity.index.end.ALL
activity.index.export.ALL
activity.index.pause.ALL
activity.index.publish.ALL
activity.index.resume.ALL
activity.index.update.ALL
activity.index.view.ALL
activity.stats.view.ALL
activity.template.view.ALL
approval.execute.approve.ALL
approval.execute.reject.ALL
approval.execute.transfer.ALL
approval.flow.manage.ALL
approval.index.batch.ALL
approval.index.batch.handle.ALL
approval.index.batch.transfer.ALL
approval.index.cancel.ALL
approval.index.delegate.ALL
approval.index.handle.ALL
approval.index.submit.ALL
approval.index.view.ALL
approval.record.view.ALL
audit.index.export.ALL
audit.index.view.ALL
audit.report.view.ALL
dashboard.chart.history.ALL
dashboard.chart.realtime.ALL
dashboard.index.export.ALL
dashboard.index.view.ALL
dashboard.kpi.config.ALL
dashboard.monitor.view.ALL
department.index.manage.ALL
department.index.view.ALL
notification.index.manage.ALL
notification.index.view.ALL
permission.data.config.ALL
permission.index.manage.ALL
permission.index.view.ALL
permission.user.assign.ALL
permission.user.revoke.ALL
reward.index.apply.ALL
reward.index.approve.ALL
reward.index.batch.ALL
reward.index.cancel.ALL
reward.index.export.ALL
reward.index.grant.ALL
reward.index.reconcile.ALL
reward.index.reject.ALL
reward.index.view.ALL
risk.blacklist.manage.ALL
risk.block.execute.ALL
risk.block.release.ALL
risk.index.audit.ALL
risk.index.export.ALL
risk.index.view.ALL
risk.rule.create.ALL
risk.rule.delete.ALL
risk.rule.edit.ALL
risk.rule.enable.ALL
risk.rule.manage.ALL
role.index.manage.ALL
role.index.view.ALL
system.api-key.create.ALL
system.api-key.delete.ALL
system.api-key.disable.ALL
system.api-key.enable.ALL
system.api-key.manage.ALL
system.api-key.reset.ALL
system.api-key.view.ALL
system.cache.manage.ALL
system.config.manage.ALL
system.index.view.ALL
system.sensitive.access.ALL
user.index.certify.ALL
user.index.create.ALL
user.index.delete.ALL
user.index.export.ALL
user.index.freeze.ALL
user.index.unfreeze.ALL
user.index.update.ALL
user.index.view.ALL
user.points.adjust.ALL
user.points.view.ALL
user.role.view.ALL
user.tag.add.ALL
user.tag.view.ALL
user.whitelist.add.ALL
user.whitelist.remove.ALL

View File

@@ -0,0 +1,90 @@
activity.approval.approve.ALL
activity.approval.submit.ALL
activity.config.edit.ALL
activity.index.clone.ALL
activity.index.create.ALL
activity.index.delete.ALL
activity.index.end.ALL
activity.index.export.ALL
activity.index.pause.ALL
activity.index.publish.ALL
activity.index.resume.ALL
activity.index.update.ALL
activity.index.view.ALL
activity.stats.view.ALL
activity.template.view.ALL
approval.execute.approve.ALL
approval.execute.reject.ALL
approval.execute.transfer.ALL
approval.flow.manage.ALL
approval.index.batch.ALL
approval.index.batch.handle.ALL
approval.index.batch.transfer.ALL
approval.index.cancel.ALL
approval.index.delegate.ALL
approval.index.handle.ALL
approval.index.submit.ALL
approval.index.view.ALL
approval.record.view.ALL
audit.index.export.ALL
audit.index.view.ALL
audit.report.view.ALL
dashboard.chart.history.ALL
dashboard.chart.realtime.ALL
dashboard.index.export.ALL
dashboard.index.view.ALL
dashboard.kpi.config.ALL
dashboard.monitor.view.ALL
department.index.manage.ALL
department.index.view.ALL
notification.index.manage.ALL
notification.index.view.ALL
permission.data.config.ALL
permission.index.manage.ALL
permission.index.view.ALL
permission.user.assign.ALL
permission.user.revoke.ALL
reward.index.apply.ALL
reward.index.approve.ALL
reward.index.batch.ALL
reward.index.cancel.ALL
reward.index.export.ALL
reward.index.grant.ALL
reward.index.reconcile.ALL
reward.index.reject.ALL
reward.index.view.ALL
risk.blacklist.manage.ALL
risk.block.execute.ALL
risk.block.release.ALL
risk.index.audit.ALL
risk.index.export.ALL
risk.index.view.ALL
risk.rule.create.ALL
risk.rule.delete.ALL
risk.rule.edit.ALL
risk.rule.enable.ALL
risk.rule.manage.ALL
role.index.manage.ALL
role.index.view.ALL
system.api-key.create.ALL
system.api-key.delete.ALL
system.api-key.disable.ALL
system.api-key.enable.ALL
system.api-key.manage.ALL
system.api-key.reset.ALL
system.api-key.view.ALL
system.cache.manage.ALL
system.config.manage.ALL
system.index.view.ALL
system.sensitive.access.ALL
user.index.certify.ALL
user.index.create.ALL
user.index.delete.ALL
user.index.export.ALL
user.index.freeze.ALL
user.index.unfreeze.ALL
user.index.update.ALL
user.index.view.ALL
user.role.view.ALL
user.tag.add.ALL
user.tag.view.ALL

View File

@@ -0,0 +1,90 @@
activity.approval.approve.ALL
activity.approval.submit.ALL
activity.config.edit.ALL
activity.index.clone.ALL
activity.index.create.ALL
activity.index.delete.ALL
activity.index.end.ALL
activity.index.export.ALL
activity.index.pause.ALL
activity.index.publish.ALL
activity.index.resume.ALL
activity.index.update.ALL
activity.index.view.ALL
activity.stats.view.ALL
activity.template.view.ALL
approval.execute.approve.ALL
approval.execute.reject.ALL
approval.execute.transfer.ALL
approval.flow.manage.ALL
approval.index.batch.ALL
approval.index.batch.handle.ALL
approval.index.batch.transfer.ALL
approval.index.cancel.ALL
approval.index.delegate.ALL
approval.index.handle.ALL
approval.index.submit.ALL
approval.index.view.ALL
approval.record.view.ALL
audit.index.export.ALL
audit.index.view.ALL
audit.report.view.ALL
dashboard.chart.history.ALL
dashboard.chart.realtime.ALL
dashboard.index.export.ALL
dashboard.index.view.ALL
dashboard.kpi.config.ALL
dashboard.monitor.view.ALL
department.index.manage.ALL
department.index.view.ALL
notification.index.manage.ALL
notification.index.view.ALL
permission.data.config.ALL
permission.index.manage.ALL
permission.index.view.ALL
permission.user.assign.ALL
permission.user.revoke.ALL
reward.index.apply.ALL
reward.index.approve.ALL
reward.index.batch.ALL
reward.index.cancel.ALL
reward.index.export.ALL
reward.index.grant.ALL
reward.index.reconcile.ALL
reward.index.reject.ALL
reward.index.view.ALL
risk.blacklist.manage.ALL
risk.block.execute.ALL
risk.block.release.ALL
risk.index.audit.ALL
risk.index.export.ALL
risk.index.view.ALL
risk.rule.create.ALL
risk.rule.delete.ALL
risk.rule.edit.ALL
risk.rule.enable.ALL
risk.rule.manage.ALL
role.index.manage.ALL
role.index.view.ALL
system.api-key.create.ALL
system.api-key.delete.ALL
system.api-key.disable.ALL
system.api-key.enable.ALL
system.api-key.manage.ALL
system.api-key.reset.ALL
system.api-key.view.ALL
system.cache.manage.ALL
system.config.manage.ALL
system.index.view.ALL
system.sensitive.access.ALL
user.index.certify.ALL
user.index.create.ALL
user.index.delete.ALL
user.index.export.ALL
user.index.freeze.ALL
user.index.unfreeze.ALL
user.index.update.ALL
user.index.view.ALL
user.role.view.ALL
user.tag.add.ALL
user.tag.view.ALL

View File

@@ -0,0 +1,94 @@
activity.approval.approve.ALL
activity.approval.submit.ALL
activity.config.edit.ALL
activity.index.clone.ALL
activity.index.create.ALL
activity.index.delete.ALL
activity.index.end.ALL
activity.index.export.ALL
activity.index.pause.ALL
activity.index.publish.ALL
activity.index.resume.ALL
activity.index.update.ALL
activity.index.view.ALL
activity.stats.view.ALL
activity.template.view.ALL
approval.execute.approve.ALL
approval.execute.reject.ALL
approval.execute.transfer.ALL
approval.flow.manage.ALL
approval.index.batch.ALL
approval.index.batch.handle.ALL
approval.index.batch.transfer.ALL
approval.index.cancel.ALL
approval.index.delegate.ALL
approval.index.handle.ALL
approval.index.submit.ALL
approval.index.view.ALL
approval.record.view.ALL
audit.index.export.ALL
audit.index.view.ALL
audit.report.view.ALL
dashboard.chart.history.ALL
dashboard.chart.realtime.ALL
dashboard.index.export.ALL
dashboard.index.view.ALL
dashboard.kpi.config.ALL
dashboard.monitor.view.ALL
department.index.manage.ALL
department.index.view.ALL
notification.index.manage.ALL
notification.index.view.ALL
permission.data.config.ALL
permission.index.manage.ALL
permission.index.view.ALL
permission.user.assign.ALL
permission.user.revoke.ALL
reward.index.apply.ALL
reward.index.approve.ALL
reward.index.batch.ALL
reward.index.cancel.ALL
reward.index.export.ALL
reward.index.grant.ALL
reward.index.reconcile.ALL
reward.index.reject.ALL
reward.index.view.ALL
risk.blacklist.manage.ALL
risk.block.execute.ALL
risk.block.release.ALL
risk.index.audit.ALL
risk.index.export.ALL
risk.index.view.ALL
risk.rule.create.ALL
risk.rule.delete.ALL
risk.rule.edit.ALL
risk.rule.enable.ALL
risk.rule.manage.ALL
role.index.manage.ALL
role.index.view.ALL
system.api-key.create.ALL
system.api-key.delete.ALL
system.api-key.disable.ALL
system.api-key.enable.ALL
system.api-key.manage.ALL
system.api-key.reset.ALL
system.api-key.view.ALL
system.cache.manage.ALL
system.config.manage.ALL
system.index.view.ALL
system.sensitive.access.ALL
user.index.certify.ALL
user.index.create.ALL
user.index.delete.ALL
user.index.export.ALL
user.index.freeze.ALL
user.index.unfreeze.ALL
user.index.update.ALL
user.index.view.ALL
user.points.adjust.ALL
user.points.view.ALL
user.role.view.ALL
user.tag.add.ALL
user.tag.view.ALL
user.whitelist.add.ALL
user.whitelist.remove.ALL

View File

@@ -0,0 +1,89 @@
#!/usr/bin/env bash
# E2E consistency gate.
# Cross-checks the newest E2E report (report_*.md) against the newest runner
# log (run_*.log) in $STATE_DIR and writes a markdown verdict to $OUT_FILE.
# Exit codes: 0 = consistent (PASS), 2 = inconsistent (FAIL).
set -euo pipefail
PROJECT_DIR="/home/long/project/蚊子"
STATE_DIR="$PROJECT_DIR/logs/e2e-automation"
# Optional first argument overrides the output file location.
OUT_FILE="${1:-$STATE_DIR/consistency_latest.md}"
# Newest report/log by mtime. `find -printf` + sort is used instead of parsing
# `ls -1t` output, which breaks on filenames containing whitespace.
latest_report="$(find "$STATE_DIR" -maxdepth 1 -name 'report_*.md' -printf '%T@ %p\n' 2>/dev/null | sort -nr | head -n1 | cut -d' ' -f2- || true)"
latest_run="$(find "$STATE_DIR" -maxdepth 1 -name 'run_*.log' -printf '%T@ %p\n' 2>/dev/null | sort -nr | head -n1 | cut -d' ' -f2- || true)"
status="PASS"
reason=()
# Both artifacts must exist and be non-empty.
if [ -z "$latest_report" ] || [ ! -s "$latest_report" ]; then
  status="FAIL"
  reason+=("报告缺失或为空")
fi
if [ -z "$latest_run" ] || [ ! -s "$latest_run" ]; then
  status="FAIL"
  reason+=("runner日志缺失或为空")
fi
# Did the report itself claim "all passed"? [[:space:]] replaces the
# non-portable GNU-only \s escape in POSIX ERE.
report_pass="UNKNOWN"
if [ -n "$latest_report" ] && [ -s "$latest_report" ]; then
  if grep -Eq '全部通过[: ]*是|是否“全部通过”[: ]*是|全部通过[[:space:]]*\(是\)' "$latest_report"; then
    report_pass="YES"
  elif grep -Eq '全部通过[: ]*否|是否“全部通过”[: ]*否|全部通过[[:space:]]*\(否\)' "$latest_report"; then
    report_pass="NO"
  fi
fi
# Scan the runner log for failure signals and data-contract evidence.
runner_error="UNKNOWN"
data_contract_ok="UNKNOWN"
if [ -n "$latest_run" ] && [ -s "$latest_run" ]; then
  if grep -Eqi 'run finished but not fully passed|error:|runner appears stuck|\[watchdog\].*stuck|\bException\b|\bTraceback\b|\[DATA-CONTRACT\] FAIL' "$latest_run"; then
    runner_error="YES"
  else
    runner_error="NO"
  fi
  if grep -Eq '\[DATA-CONTRACT\] PASS' "$latest_run"; then
    data_contract_ok="YES"
  else
    data_contract_ok="NO"
  fi
fi
# Consistency rules: a "green" report contradicted by the runner log, an
# inconclusive report, or missing data-contract evidence all fail the gate.
if [ "$report_pass" = "YES" ] && [ "$runner_error" = "YES" ]; then
  status="FAIL"
  reason+=("报告声明通过但runner日志包含失败/异常信号")
fi
if [ "$report_pass" = "UNKNOWN" ]; then
  status="FAIL"
  reason+=("报告未给出明确通过结论(是/否)")
fi
if [ "$data_contract_ok" != "YES" ]; then
  status="FAIL"
  reason+=("缺少DATA-CONTRACT通过证据结果可能为假绿")
fi
mkdir -p "$(dirname "$OUT_FILE")"
# Emit the markdown verdict.
{
  echo "# E2E Consistency Check"
  echo
  echo "- Status: $status"
  echo "- Report: ${latest_report:-N/A}"
  echo "- Runner Log: ${latest_run:-N/A}"
  echo "- Report Pass Flag: $report_pass"
  echo "- Runner Error Signal: $runner_error"
  echo "- Data Contract Signal: $data_contract_ok"
  echo
  echo "## Reasons"
  if [ ${#reason[@]} -eq 0 ]; then
    echo "- 一致性检查通过"
  else
    for r in "${reason[@]}"; do
      echo "- $r"
    done
  fi
} > "$OUT_FILE"
if [ "$status" = "PASS" ]; then
  exit 0
else
  exit 2
fi

View File

@@ -0,0 +1,203 @@
#!/bin/bash
# Permission-code consistency check.
# Verifies that permission codes agree across four dimensions: backend
# (@RequirePermission annotations), database (Flyway seed SQL), frontend
# (roles.ts) and the canonical baseline file.
# Supports multi-segment codes (e.g. approval.index.batch.transfer.ALL) and
# hyphenated segments (e.g. system.api-key.create.ALL).
set -e
echo "=== 权限码一致性校验 ==="
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
# Intermediate extraction files: one permission code per line, sorted.
BACKEND_PERMS="$SCRIPT_DIR/.permission_check_backend.txt"
FRONTEND_PERMS="$SCRIPT_DIR/.permission_check_frontend.txt"
DB_PERMS="$SCRIPT_DIR/.permission_check_db.txt"
CANONICAL_PERMS="$SCRIPT_DIR/.permission_check_canonical.txt"
DIFF_REPORT="$SCRIPT_DIR/permission_diff_report.md"
# Remove artifacts from previous runs.
rm -f "$BACKEND_PERMS" "$FRONTEND_PERMS" "$DB_PERMS" "$CANONICAL_PERMS" "$DIFF_REPORT"
echo "0. 加载Canonical 90基线..."
CANONICAL_FILE="$PROJECT_DIR/src/test/resources/permission/canonical-permissions-90.txt"
if [ -f "$CANONICAL_FILE" ]; then
  # Strip comment and blank lines; sort -u because `comm` below needs sorted input.
  grep -v '^#' "$CANONICAL_FILE" | grep -v '^$' | sort -u > "$CANONICAL_PERMS" || true
fi
# NOTE(review): if $CANONICAL_FILE is missing, $CANONICAL_PERMS is never
# created and the redirection below aborts under `set -e` with an unhelpful
# error — confirm whether an explicit error message is wanted here.
CANONICAL_COUNT=$(wc -l < "$CANONICAL_PERMS")
echo " Canonical基线权限码数量: $CANONICAL_COUNT"
echo "1. 提取前端权限码..."
# Extract frontend permission codes from roles.ts.
# Supported format: module.resource[.subresource].operation[.suboperation].dataScope
# e.g. approval.index.batch.transfer.ALL (5 segments), dashboard.chart.realtime.ALL (4 segments)
grep -oE "'[a-z]+(-[a-z]+)*\.[a-z]+(-[a-z]+)*(\.[a-z]+(-[a-z]+)*)*\.[A-Z]+'" "$PROJECT_DIR/frontend/admin/src/auth/roles.ts" 2>/dev/null | \
sed "s/'//g" | sort -u > "$FRONTEND_PERMS" || true
# Double-quoted string literals are supported as well.
grep -oE '"[a-z]+(-[a-z]+)*\.[a-z]+(-[a-z]+)*(\.[a-z]+(-[a-z]+)*)*\.[A-Z]+"' "$PROJECT_DIR/frontend/admin/src/auth/roles.ts" 2>/dev/null | \
sed 's/"//g' >> "$FRONTEND_PERMS" || true
sort -u "$FRONTEND_PERMS" -o "$FRONTEND_PERMS"
FRONTEND_COUNT=$(wc -l < "$FRONTEND_PERMS")
echo " 前端权限码数量: $FRONTEND_COUNT"
echo "2. 提取数据库权限码..."
# Extract database permission codes from the SQL migration scripts.
# Supported format: module.resource[.subresource].operation[.suboperation].dataScope
grep -oE "'[a-z]+(-[a-z]+)*\.[a-z]+(-[a-z]+)*(\.[a-z]+(-[a-z]+)*)*\.[A-Z]+'" "$PROJECT_DIR/src/main/resources/db/migration/V26__Seed_roles_permissions.sql" 2>/dev/null | \
sed "s/'//g" > "$DB_PERMS" || true
# Collect additional codes from every other migration script.
for sql in "$PROJECT_DIR/src/main/resources/db/migration"/V*.sql; do
  grep -oE "'[a-z]+(-[a-z]+)*\.[a-z]+(-[a-z]+)*(\.[a-z]+(-[a-z]+)*)*\.[A-Z]+'" "$sql" 2>/dev/null | sed "s/'//g" >> "$DB_PERMS" || true
done
sort -u "$DB_PERMS" -o "$DB_PERMS"
DB_COUNT=$(wc -l < "$DB_PERMS")
echo " 数据库权限码总数: $DB_COUNT"
echo "3. 提取后端权限码..."
# Extract backend permission codes from @RequirePermission annotations.
# Direct string form: @RequirePermission("approval.index.view.ALL")
# Supported format: module.resource[.subresource].operation[.suboperation].dataScope
grep -r "RequirePermission" "$PROJECT_DIR/src/main/java" --include="*.java" 2>/dev/null | \
grep -oE '"[a-z]+(-[a-z]+)*\.[a-z]+(-[a-z]+)*(\.[a-z]+(-[a-z]+)*)*\.[A-Z]+"' | sed 's/"//g' >> "$BACKEND_PERMS" || true
# Constant-reference form: @RequirePermission(PERM_XXX) — resolve the constant
# definitions themselves.
grep -r "private static final String PERM_" "$PROJECT_DIR/src/main/java" --include="*.java" 2>/dev/null | \
grep -oE 'PERM_[A-Z_]+ = "[a-z]+(-[a-z]+)*\.[a-z]+(-[a-z]+)*(\.[a-z]+(-[a-z]+)*)*\.[A-Z]+"' | \
sed 's/.*= *"/"/g' | sed 's/"//g' >> "$BACKEND_PERMS" || true
sort -u "$BACKEND_PERMS" -o "$BACKEND_PERMS"
BACKEND_COUNT=$(wc -l < "$BACKEND_PERMS")
echo " 后端权限码数量: $BACKEND_COUNT"
echo ""
echo "4. 生成差异报告..."
# Set differences via comm (inputs are all sorted -u above):
# comm -23 A B  =>  lines only in A.
comm -23 "$FRONTEND_PERMS" "$CANONICAL_PERMS" > "$SCRIPT_DIR/.frontend_only.txt" || true
comm -23 "$DB_PERMS" "$CANONICAL_PERMS" > "$SCRIPT_DIR/.db_only.txt" || true
comm -23 "$BACKEND_PERMS" "$CANONICAL_PERMS" > "$SCRIPT_DIR/.backend_only.txt" || true
comm -23 "$CANONICAL_PERMS" "$FRONTEND_PERMS" > "$SCRIPT_DIR/.canonical_not_in_frontend.txt" || true
comm -23 "$CANONICAL_PERMS" "$DB_PERMS" > "$SCRIPT_DIR/.canonical_not_in_db.txt" || true
comm -23 "$CANONICAL_PERMS" "$BACKEND_PERMS" > "$SCRIPT_DIR/.canonical_not_in_backend.txt" || true
FRONTEND_ONLY=$(wc -l < "$SCRIPT_DIR/.frontend_only.txt")
DB_ONLY=$(wc -l < "$SCRIPT_DIR/.db_only.txt")
BACKEND_ONLY=$(wc -l < "$SCRIPT_DIR/.backend_only.txt")
CANONICAL_NOT_IN_FRONTEND=$(wc -l < "$SCRIPT_DIR/.canonical_not_in_frontend.txt")
CANONICAL_NOT_IN_DB=$(wc -l < "$SCRIPT_DIR/.canonical_not_in_db.txt")
CANONICAL_NOT_IN_BACKEND=$(wc -l < "$SCRIPT_DIR/.canonical_not_in_backend.txt")
# Write the markdown diff report (heredoc content is emitted verbatim).
cat > "$DIFF_REPORT" << EOF
# 权限码一致性校验报告
生成时间: $(date '+%Y-%m-%d %H:%M:%S')
## 四维统计
| 来源 | 权限码数量 |
|------|------------|
| Canonical基线 | $CANONICAL_COUNT |
| 前端 | $FRONTEND_COUNT |
| 数据库 | $DB_COUNT |
| 后端 | $BACKEND_COUNT |
## Canonical基线覆盖率
| 维度 | 缺失数量 | 说明 |
|------|----------|------|
| 前端缺失 | $CANONICAL_NOT_IN_FRONTEND | Canonical基线在前端未定义 |
| 数据库缺失 | $CANONICAL_NOT_IN_DB | Canonical基线在数据库未导入 |
| 后端缺失 | $CANONICAL_NOT_IN_BACKEND | Canonical基线在后端未使用 |
## 额外权限码分析不在Canonical基线中
### 前端独有权限码 (不在Canonical基线中): $FRONTEND_ONLY
EOF
if [ $FRONTEND_ONLY -gt 0 ]; then
  echo "" >> "$DIFF_REPORT"
  cat "$SCRIPT_DIR/.frontend_only.txt" >> "$DIFF_REPORT"
  echo "" >> "$DIFF_REPORT"
fi
cat >> "$DIFF_REPORT" << EOF
### 数据库独有权限码 (不在Canonical基线中): $DB_ONLY
EOF
if [ $DB_ONLY -gt 0 ]; then
  echo "" >> "$DIFF_REPORT"
  cat "$SCRIPT_DIR/.db_only.txt" >> "$DIFF_REPORT"
  echo "" >> "$DIFF_REPORT"
fi
cat >> "$DIFF_REPORT" << EOF
### 后端独有权限码 (不在Canonical基线中): $BACKEND_ONLY
EOF
if [ $BACKEND_ONLY -gt 0 ]; then
  echo "" >> "$DIFF_REPORT"
  cat "$SCRIPT_DIR/.backend_only.txt" >> "$DIFF_REPORT"
  echo "" >> "$DIFF_REPORT"
fi
cat >> "$DIFF_REPORT" << EOF
## Canonical基线缺失项
### 前端未覆盖Canonical基线 ($CANONICAL_NOT_IN_FRONTEND)
EOF
if [ $CANONICAL_NOT_IN_FRONTEND -gt 0 ]; then
  echo "" >> "$DIFF_REPORT"
  cat "$SCRIPT_DIR/.canonical_not_in_frontend.txt" >> "$DIFF_REPORT"
  echo "" >> "$DIFF_REPORT"
fi
cat >> "$DIFF_REPORT" << EOF
### 数据库未导入Canonical基线 ($CANONICAL_NOT_IN_DB)
EOF
if [ $CANONICAL_NOT_IN_DB -gt 0 ]; then
  echo "" >> "$DIFF_REPORT"
  cat "$SCRIPT_DIR/.canonical_not_in_db.txt" >> "$DIFF_REPORT"
  echo "" >> "$DIFF_REPORT"
fi
cat >> "$DIFF_REPORT" << EOF
### 后端未实现Canonical基线 ($CANONICAL_NOT_IN_BACKEND)
EOF
if [ $CANONICAL_NOT_IN_BACKEND -gt 0 ]; then
  echo "" >> "$DIFF_REPORT"
  cat "$SCRIPT_DIR/.canonical_not_in_backend.txt" >> "$DIFF_REPORT"
  echo "" >> "$DIFF_REPORT"
fi
# Pass/fail verdict — the canonical baseline is authoritative: every baseline
# code must appear in frontend, database and backend. Extra codes outside the
# baseline are reported but do not fail the check.
if [ $CANONICAL_NOT_IN_FRONTEND -eq 0 ] && [ $CANONICAL_NOT_IN_DB -eq 0 ] && [ $CANONICAL_NOT_IN_BACKEND -eq 0 ]; then
  echo ""
  echo "✅ 权限码一致性校验通过Canonical基线已完整覆盖。"
  RESULT=0
else
  echo ""
  echo "❌ 权限码一致性校验未通过,请查看差异报告: $DIFF_REPORT"
  echo " Canonical基线缺失 - 前端: $CANONICAL_NOT_IN_FRONTEND"
  echo " Canonical基线缺失 - 数据库: $CANONICAL_NOT_IN_DB"
  echo " Canonical基线缺失 - 后端: $CANONICAL_NOT_IN_BACKEND"
  RESULT=1
fi
# Remove temporary diff files (extraction files are kept for inspection).
rm -f "$SCRIPT_DIR/.frontend_only.txt" "$SCRIPT_DIR/.db_only.txt" "$SCRIPT_DIR/.backend_only.txt"
rm -f "$SCRIPT_DIR/.canonical_not_in_frontend.txt" "$SCRIPT_DIR/.canonical_not_in_db.txt" "$SCRIPT_DIR/.canonical_not_in_backend.txt"
exit $RESULT

113
scripts/ci/archive-logs.sh Executable file
View File

@@ -0,0 +1,113 @@
#!/usr/bin/env bash
# Archive historical E2E / PRD-review logs into logs/archive/<TAG>/ while
# preserving their repository-relative paths.
# Dry-run by default; pass --apply to actually move files.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
APPLY="false"                                  # false => dry-run (print only)
OLDER_THAN_DAYS="1"                            # archive files older than N days
ARCHIVE_TAG_DEFAULT="$(date +%Y%m%d_%H%M%S)"
ARCHIVE_TAG="${ARCHIVE_TAG_DEFAULT}"           # subdir name under logs/archive/
# Globs (relative to ROOT_DIR) eligible for archiving.
# Keep in sync with scripts/ci/logs-health-check.sh.
PATTERN_PATHS=(
  "logs/e2e-automation/run_*.log"
  "logs/e2e-automation/report_*.md"
  "logs/prd-review/review_*.md"
  "logs/prd-review/claude_apply_*.md"
  "logs/prd-review/execution_report_*.md"
  "logs/prd-review/optimization_report_*.md"
)
# Print CLI help.
usage() {
cat <<'EOF'
Usage:
./scripts/ci/archive-logs.sh [--apply] [--older-than-days N] [--archive-tag TAG]
Options:
--apply Execute archive move. Without this flag, script runs in dry-run mode.
--older-than-days N Archive files older than N days. Default: 1
--archive-tag TAG Archive subdir tag under logs/archive/. Default: timestamp
-h, --help Show help
Examples:
./scripts/ci/archive-logs.sh
./scripts/ci/archive-logs.sh --apply
./scripts/ci/archive-logs.sh --apply --older-than-days 2 --archive-tag weekly_20260323
EOF
}
# Uniform log prefix for all diagnostics.
log() { echo "[archive-logs] $*"; }
# Execute the given command in --apply mode; otherwise just print it.
run_cmd() {
  if [[ "${APPLY}" == "true" ]]; then
    "$@"
  else
    log "DRY-RUN: $*"
  fi
}
# ---- argument parsing ----
while [[ $# -gt 0 ]]; do
  case "$1" in
    --apply)
      APPLY="true"
      shift
      ;;
    --older-than-days)
      OLDER_THAN_DAYS="${2:-}"
      shift 2
      ;;
    --archive-tag)
      ARCHIVE_TAG="${2:-}"
      shift 2
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "Unknown option: $1" >&2
      usage
      exit 1
      ;;
  esac
done
# --older-than-days must be a non-negative integer (it is fed to `date -d`).
if ! [[ "${OLDER_THAN_DAYS}" =~ ^[0-9]+$ ]]; then
  echo "Invalid --older-than-days: ${OLDER_THAN_DAYS}" >&2
  exit 1
fi
ARCHIVE_DIR="${ROOT_DIR}/logs/archive/${ARCHIVE_TAG}"
# Files last modified before this epoch are archived.
# NOTE(review): `date -d` and `stat -c` below are GNU coreutils — this script
# assumes a GNU userland (not BSD/macOS).
CUTOFF_EPOCH="$(date -d "${OLDER_THAN_DAYS} days ago" +%s)"
log "root=${ROOT_DIR}"
log "apply=${APPLY} older_than_days=${OLDER_THAN_DAYS} archive_dir=${ARCHIVE_DIR}"
log "cutoff=$(date -d "@${CUTOFF_EPOCH}" '+%Y-%m-%d %H:%M:%S')"
if [[ "${APPLY}" == "true" ]]; then
  mkdir -p "${ARCHIVE_DIR}"
fi
# nullglob: unmatched patterns expand to nothing instead of the literal glob.
shopt -s nullglob
moved_count=0
for pattern in "${PATTERN_PATHS[@]}"; do
  # ${pattern} is intentionally unquoted so the shell expands the glob.
  for abs in "${ROOT_DIR}"/${pattern}; do
    [[ -f "${abs}" ]] || continue
    mtime_epoch="$(stat -c %Y "${abs}")"
    # Keep files newer than the cutoff.
    if [[ "${mtime_epoch}" -ge "${CUTOFF_EPOCH}" ]]; then
      continue
    fi
    # Recreate the repo-relative path underneath the archive dir.
    rel="${abs#${ROOT_DIR}/}"
    dest="${ARCHIVE_DIR}/${rel}"
    run_cmd mkdir -p "$(dirname "${dest}")"
    run_cmd mv "${abs}" "${dest}"
    log "ARCHIVE ${rel} -> ${dest}"
    moved_count=$((moved_count + 1))
  done
done
shopt -u nullglob
log "done: archived=${moved_count}"
if [[ "${APPLY}" != "true" ]]; then
  log "dry-run completed. Use --apply to execute."
fi

View File

@@ -0,0 +1,95 @@
#!/usr/bin/env bash
# Asserts that critical migration/audit tests were actually executed (not
# skipped) in the most recent Maven run. In CI strict mode the Skipped count
# for each class below must be 0, otherwise the quality gate fails.
# Exit codes: 0 = all critical tests executed, 1 = gate failure.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
REPORT_DIR="${ROOT_DIR}/target/surefire-reports"
# Critical test classes that must not be skipped in strict mode.
CRITICAL_TESTS=(
  "com.mosquito.project.permission.PermissionCanonicalMigrationTest"
  "com.mosquito.project.integration.AuditLogImmutabilityIntegrationTest"
  "com.mosquito.project.RolePermissionMigrationTest"
  "com.mosquito.project.FlywayMigrationSmokeTest"
)
echo "===== 关键测试跳过断言 ====="
# The surefire report directory must exist, otherwise the tests never ran.
if [[ ! -d "${REPORT_DIR}" ]]; then
  echo "ERROR: 测试报告目录不存在: ${REPORT_DIR}"
  echo "请先运行 Maven 测试"
  exit 1
fi
FAILED=0
for TEST_CLASS in "${CRITICAL_TESTS[@]}"; do
  echo ""
  echo "检查: ${TEST_CLASS}"
  # Locate the per-class surefire report (prefer .txt, fall back to .xml).
  REPORT_FILE=""
  for ext in txt xml; do
    candidate="${REPORT_DIR}/${TEST_CLASS}.${ext}"
    if [[ -f "${candidate}" ]]; then
      REPORT_FILE="${candidate}"
      break
    fi
  done
  if [[ -z "${REPORT_FILE}" ]]; then
    echo " ERROR: 未找到测试报告文件: ${TEST_CLASS}.{txt,xml}"
    echo " 目录内容:"
    ls -la "${REPORT_DIR}" | head -20 || true
    FAILED=1
    continue
  fi
  echo " 报告: ${REPORT_FILE}"
  # Extract the Skipped count from the XML report when available.
  if [[ "${REPORT_FILE}" == *.xml ]]; then
    if grep -q 'failures="[^"]*" errors="[^"]*" skipped="[^"]*"' "${REPORT_FILE}"; then
      # Portable extraction via sed (replaces the GNU-only `grep -oP`).
      SKIPPED=$(sed -n 's/.*skipped="\([0-9][0-9]*\)".*/\1/p' "${REPORT_FILE}" | head -1)
      # Guard against a non-numeric/empty skipped attribute so -gt cannot error.
      SKIPPED="${SKIPPED:-0}"
      if [[ "${SKIPPED}" -gt 0 ]]; then
        echo " ERROR: ${TEST_CLASS}${SKIPPED} 个被跳过!"
        echo " 在CI严格模式下关键测试必须执行。"
        echo " 质量门禁失败Skipped 数量必须为0"
        FAILED=1
      else
        echo " PASS: ${TEST_CLASS} 跳过数量为0${SKIPPED}"
      fi
    else
      # No skipped attribute — check whether the class was executed at all.
      if grep -q 'tests="0"' "${REPORT_FILE}"; then
        echo " ERROR: ${TEST_CLASS} 未被执行tests=\"0\""
        FAILED=1
      else
        echo " INFO: 无法从XML中解析skipped数量假设通过"
      fi
    fi
  elif [[ "${REPORT_FILE}" == *.txt ]]; then
    # Text report: anchor the digit to the "Skipped:" field. The previous
    # pattern `Skipped.*[1-9]` false-positived on digits later in the line
    # (e.g. "Skipped: 0, Time elapsed: 1.234 s"); -q keeps output clean.
    if grep -q "Skipped" "${REPORT_FILE}"; then
      if grep -Eq 'Skipped: *[1-9]' "${REPORT_FILE}"; then
        echo " ERROR: ${TEST_CLASS} 有跳过的用例!"
        FAILED=1
      fi
    fi
    echo " INFO: 文本报告格式,跳过详细检查"
  fi
done
echo ""
if [[ "${FAILED}" -eq 1 ]]; then
  echo "===== 关键测试跳过断言失败 ====="
  exit 1
fi
echo "===== 关键测试跳过断言通过 ====="
exit 0

62
scripts/ci/backend-verify.sh Executable file
View File

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Strict-mode backend verification.
# Boots a rootless podman API service so Testcontainers-based tests can reach
# a Docker-compatible socket via DOCKER_HOST, runs `mvn clean verify` in
# strict migration mode, then explicitly runs integration tests that the
# default build excludes.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
TMP_DIR="${ROOT_DIR}/tmp"
JNA_TMP_DIR="${TMP_DIR}/jna"      # scratch dir for JNA native library extraction
JAVA_TMP_DIR="${TMP_DIR}/java"    # scratch dir used as java.io.tmpdir
PODMAN_LOG="${TMP_DIR}/podman-service.log"
PODMAN_SOCK_PATH="/run/user/$(id -u)/podman/podman.sock"
PODMAN_SOCK="unix://${PODMAN_SOCK_PATH}"
PODMAN_PID=""
if ! command -v podman >/dev/null 2>&1; then
  echo "ERROR: podman 未安装,无法执行严格模式迁移验证。" >&2
  exit 1
fi
mkdir -p "${JNA_TMP_DIR}" "${JAVA_TMP_DIR}"
# Stop the background podman service on any exit path (EXIT trap below).
cleanup() {
  if [[ -n "${PODMAN_PID}" ]] && kill -0 "${PODMAN_PID}" >/dev/null 2>&1; then
    kill "${PODMAN_PID}" >/dev/null 2>&1 || true
    wait "${PODMAN_PID}" >/dev/null 2>&1 || true
  fi
}
trap cleanup EXIT
cd "${ROOT_DIR}"
# Start the podman API service in the background (--time=0 = no idle timeout).
podman system service --time=0 "${PODMAN_SOCK}" > "${PODMAN_LOG}" 2>&1 &
PODMAN_PID=$!
# Wait up to ~30s for the socket to appear and answer `podman info`.
for _ in {1..30}; do
  if [[ -S "${PODMAN_SOCK_PATH}" ]] && podman --url "${PODMAN_SOCK}" info >/dev/null 2>&1; then
    break
  fi
  sleep 1
done
# Fail fast (with the service log tail) if the socket never became ready.
if ! [[ -S "${PODMAN_SOCK_PATH}" ]] || ! podman --url "${PODMAN_SOCK}" info >/dev/null 2>&1; then
  echo "ERROR: podman service 未就绪,无法执行严格模式迁移验证。" >&2
  if [[ -f "${PODMAN_LOG}" ]]; then
    echo "----- podman service log (tail) -----" >&2
    tail -n 80 "${PODMAN_LOG}" >&2 || true
  fi
  exit 1
fi
# Point Testcontainers at the podman socket. NOTE(review): Ryuk (the resource
# reaper) is disabled — presumably because it misbehaves under rootless
# podman; confirm containers are cleaned up some other way.
export DOCKER_HOST="${PODMAN_SOCK}"
export TESTCONTAINERS_RYUK_DISABLED="true"
mvn -B -DskipTests=false -Dmigration.test.strict=true \
  -Djna.tmpdir="${JNA_TMP_DIR}" \
  -Djava.io.tmpdir="${JAVA_TMP_DIR}" \
  clean verify
# Explicitly run critical integration tests (excluded by the default build).
echo "=== 执行关键集成测试集合 ==="
mvn -B test -Dtest=UserOperationJourneyTest,CacheConfigIntegrationTest,SchemaVerificationTest \
  -DfailIfNoTests=false \
  -Djourney.test.enabled=true \
  -Djna.tmpdir="${JNA_TMP_DIR}" \
  -Djava.io.tmpdir="${JAVA_TMP_DIR}"

View File

@@ -0,0 +1,119 @@
#!/usr/bin/env bash
# Container runtime detection.
# Checks whether Docker and/or Podman are available before running
# strict-mode tests, and prints concrete remediation steps when neither is.
# Exit codes: 0 = a runtime is available, 1 = no runtime found.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
CHECK_DOCKER="${1:-false}"     # $1: "true" to check Docker
CHECK_PODMAN="${2:-false}"     # $2: "true" to check Podman
echo "===== 容器运行时检测 ====="
echo ""
# With no explicit selection, check both runtimes.
if [[ "${CHECK_DOCKER}" == "false" && "${CHECK_PODMAN}" == "false" ]]; then
  CHECK_DOCKER="true"
  CHECK_PODMAN="true"
fi
DOCKER_AVAILABLE="false"
PODMAN_AVAILABLE="false"
# ---- Docker detection ----
if [[ "${CHECK_DOCKER}" == "true" ]]; then
  echo "检测 Docker..."
  if command -v docker >/dev/null 2>&1; then
    if docker info >/dev/null 2>&1; then
      echo " ✓ Docker 已安装且运行中"
      DOCKER_AVAILABLE="true"
    else
      echo " ✗ Docker 已安装但无法连接可能需要启动Docker daemon"
      echo " 解决方案: sudo systemctl start docker"
    fi
  else
    echo " ✗ Docker 未安装"
  fi
fi
# ---- Podman detection ----
if [[ "${CHECK_PODMAN}" == "true" ]]; then
  echo ""
  echo "检测 Podman..."
  if command -v podman >/dev/null 2>&1; then
    if podman info >/dev/null 2>&1; then
      echo " ✓ Podman 已安装且运行中"
      PODMAN_AVAILABLE="true"
    else
      echo " ✗ Podman 已安装但无法连接"
      echo " 解决方案: podman system start"
    fi
  else
    echo " ✗ Podman 未安装"
  fi
fi
# ---- Docker socket presence (informational only) ----
echo ""
echo "检测 Docker Socket..."
DOCKER_SOCK="/var/run/docker.sock"
if [[ -S "${DOCKER_SOCK}" ]]; then
  echo "${DOCKER_SOCK} 存在"
else
  echo "${DOCKER_SOCK} 不存在或不是socket"
fi
# ---- Podman socket probe across plausible UIDs ----
# NOTE(review): a present socket flips PODMAN_AVAILABLE to true even when
# `podman info` above failed — confirm that a live socket alone is sufficient.
echo ""
echo "检测 Podman Socket..."
for uid in $(id -u) 0 1000; do
  PODMAN_SOCK="/run/user/${uid}/podman/podman.sock"
  if [[ -S "${PODMAN_SOCK}" ]]; then
    echo "${PODMAN_SOCK} 存在"
    PODMAN_AVAILABLE="true"
    break
  fi
done
if [[ "${PODMAN_AVAILABLE}" != "true" ]]; then
  echo " ✗ 未找到可用的Podman socket"
fi
# ---- Verdict ----
echo ""
echo "===== 检测结果 ====="
if [[ "${DOCKER_AVAILABLE}" == "true" || "${PODMAN_AVAILABLE}" == "true" ]]; then
  echo "✓ 容器运行时可用"
  # NOTE(review): these exports only affect this process — when the script is
  # executed (not sourced), DOCKER_HOST does not propagate to the caller.
  # If both runtimes are available, the Podman socket wins.
  if [[ "${DOCKER_AVAILABLE}" == "true" ]]; then
    echo " - Docker: 可用"
    export DOCKER_HOST="unix://${DOCKER_SOCK}"
  fi
  if [[ "${PODMAN_AVAILABLE}" == "true" ]]; then
    echo " - Podman: 可用"
    export DOCKER_HOST="unix:///run/user/$(id -u)/podman/podman.sock"
  fi
  echo ""
  echo "可以执行 strict 模式测试"
  exit 0
else
  echo "✗ 无可用的容器运行时"
  echo ""
  echo "===== 修复指令 ====="
  echo "严格模式测试需要Docker或Podman来启动PostgreSQL容器。"
  echo ""
  echo "方案1: 使用Docker"
  echo " 1. 安装Docker: sudo apt install docker.io"
  echo " 2. 启动Docker daemon: sudo systemctl start docker"
  echo " 3. 将当前用户加入docker组: sudo usermod -aG docker \$(whoami)"
  echo " 4. 重新登录或执行: newgrp docker"
  echo ""
  echo "方案2: 使用Podman推荐无需root"
  echo " 1. 安装Podman: sudo apt install podman"
  echo " 2. 启动Podman服务: podman system start"
  echo ""
  echo "方案3: 跳过strict模式测试"
  echo " 使用普通Maven测试命令不带 -Dmigration.test.strict=true"
  echo " 注意:这将导致关键安全测试被跳过,不推荐用于生产环境"
  echo ""
  exit 1
fi

237
scripts/ci/clean-artifacts.sh Executable file
View File

@@ -0,0 +1,237 @@
#!/usr/bin/env bash
# Repository artifact cleanup.
# Archives (default) or deletes test artifacts, optional build outputs,
# report files spilled into the repo root, and stray .attach_pid files.
# Dry-run by default; pass --apply to execute. Git-tracked paths are skipped
# unless --include-tracked is given.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
ARCHIVE_BASE_DEFAULT="/tmp/mosquito-archives"
ARCHIVE_TAG_DEFAULT="$(date +%Y%m%d_%H%M%S)"
MODE="archive" # archive | delete
APPLY="false" # false => dry-run
INCLUDE_TRACKED="false" # true => include git tracked paths
FAIL_ON_FOUND="false" # true => exit non-zero when cleanup candidates exist
INCLUDE_BUILD_OUTPUTS="false" # true => include dist/target build outputs
ARCHIVE_BASE="${ARCHIVE_BASE_DEFAULT}"
ARCHIVE_TAG="${ARCHIVE_TAG_DEFAULT}"
# Test-artifact directories (relative to ROOT_DIR), always candidates.
ARTIFACT_PATHS=(
  "frontend/admin/test-results"
  "frontend/e2e/e2e-results"
  "frontend/e2e/e2e-report"
  "frontend/e2e/playwright-report"
  "frontend/e2e/test-results"
  "frontend/e2e-admin/test-results"
  "frontend/e2e-results"
)
# Build outputs, only candidates with --include-build-outputs.
BUILD_OUTPUT_PATHS=(
  "target"
  "frontend/dist"
  "frontend/admin/dist"
  "frontend/h5/dist"
)
# Report files that occasionally land in the repository root.
ROOT_SPILLOVER_GLOBS=(
  "e2e-test-report-*.md"
  "E2E_TEST*.md"
  "TEST_E2E_*.md"
  "COVERAGE_*.md"
)
# Print CLI help.
usage() {
cat <<'EOF'
Usage:
./scripts/ci/clean-artifacts.sh [--apply] [--mode archive|delete] [--archive-base DIR] [--archive-tag TAG] [--include-tracked] [--include-build-outputs] [--fail-on-found]
Options:
--apply Execute cleanup. Without this flag, script runs in dry-run mode.
--mode MODE archive (default) | delete
--archive-base DIR Archive base dir for archive mode. Default: /tmp/mosquito-archives
--archive-tag TAG Archive subdir tag. Default: current timestamp
--include-tracked Include git tracked paths (default: skip tracked)
--include-build-outputs
Include build output paths (target, frontend/*/dist)
--fail-on-found Exit non-zero when cleanup candidates are found (useful for CI dry-run)
-h, --help Show help
Examples:
./scripts/ci/clean-artifacts.sh
./scripts/ci/clean-artifacts.sh --apply
./scripts/ci/clean-artifacts.sh --apply --mode archive --archive-tag manual_cleanup
./scripts/ci/clean-artifacts.sh --include-build-outputs --apply --mode archive --archive-tag weekly_cleanup
./scripts/ci/clean-artifacts.sh --fail-on-found
EOF
}
# Uniform log prefix for all diagnostics.
log() { echo "[clean-artifacts] $*"; }
# Returns success when the given repo-relative path is tracked by git.
is_git_tracked() {
  local rel="$1"
  local out
  out="$(git -C "${ROOT_DIR}" ls-files -- "${rel}" 2>/dev/null || true)"
  [[ -n "${out}" ]]
}
# Execute the given command in --apply mode; otherwise just print it.
run_cmd() {
  if [[ "${APPLY}" == "true" ]]; then
    "$@"
  else
    log "DRY-RUN: $*"
  fi
}
# ---- argument parsing ----
while [[ $# -gt 0 ]]; do
  case "$1" in
    --apply)
      APPLY="true"
      shift
      ;;
    --mode)
      MODE="${2:-}"
      shift 2
      ;;
    --archive-base)
      ARCHIVE_BASE="${2:-}"
      shift 2
      ;;
    --archive-tag)
      ARCHIVE_TAG="${2:-}"
      shift 2
      ;;
    --include-tracked)
      INCLUDE_TRACKED="true"
      shift
      ;;
    --include-build-outputs)
      INCLUDE_BUILD_OUTPUTS="true"
      shift
      ;;
    --fail-on-found)
      FAIL_ON_FOUND="true"
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "Unknown option: $1" >&2
      usage
      exit 1
      ;;
  esac
done
if [[ "${MODE}" != "archive" && "${MODE}" != "delete" ]]; then
  echo "Invalid --mode: ${MODE}" >&2
  exit 1
fi
ARCHIVE_DIR="${ARCHIVE_BASE}/${ARCHIVE_TAG}"
log "root=${ROOT_DIR}"
log "mode=${MODE} apply=${APPLY} include_tracked=${INCLUDE_TRACKED} include_build_outputs=${INCLUDE_BUILD_OUTPUTS} fail_on_found=${FAIL_ON_FOUND}"
# Warn if dev/e2e processes are running: they may immediately recreate the
# build outputs we are about to remove.
if [[ "${INCLUDE_BUILD_OUTPUTS}" == "true" ]] && pgrep -af "spring-boot:run|e2e_continuous_runner.sh|vite" >/dev/null 2>&1; then
  log "WARN: detected active dev/e2e processes; build outputs may be recreated immediately."
fi
if [[ "${MODE}" == "archive" ]]; then
  log "archive_dir=${ARCHIVE_DIR}"
  if [[ "${APPLY}" == "true" ]]; then
    mkdir -p "${ARCHIVE_DIR}"
  fi
fi
cleaned_count=0
skipped_count=0
found_count=0
# Candidate set = artifacts, plus build outputs when requested.
CANDIDATE_PATHS=("${ARTIFACT_PATHS[@]}")
if [[ "${INCLUDE_BUILD_OUTPUTS}" == "true" ]]; then
  CANDIDATE_PATHS+=("${BUILD_OUTPUT_PATHS[@]}")
fi
# Pass 1: directory/path candidates.
for rel in "${CANDIDATE_PATHS[@]}"; do
  abs="${ROOT_DIR}/${rel}"
  if [[ ! -e "${abs}" ]]; then
    continue
  fi
  if [[ "${INCLUDE_TRACKED}" != "true" ]] && is_git_tracked "${rel}"; then
    log "SKIP tracked path: ${rel}"
    skipped_count=$((skipped_count + 1))
    continue
  fi
  if [[ "${MODE}" == "archive" ]]; then
    dest="${ARCHIVE_DIR}/${rel}"
    run_cmd mkdir -p "$(dirname "${dest}")"
    run_cmd mv "${abs}" "${dest}"
    log "ARCHIVE ${rel} -> ${dest}"
  else
    run_cmd rm -rf "${abs}"
    log "DELETE ${rel}"
  fi
  found_count=$((found_count + 1))
  cleaned_count=$((cleaned_count + 1))
done
# Pass 2: root report spillover (files in repository root).
shopt -s nullglob
for pattern in "${ROOT_SPILLOVER_GLOBS[@]}"; do
  # ${pattern} intentionally unquoted so the shell expands the glob.
  for file in "${ROOT_DIR}"/${pattern}; do
    [[ -f "${file}" ]] || continue
    rel="$(basename "${file}")"
    if [[ "${INCLUDE_TRACKED}" != "true" ]] && is_git_tracked "${rel}"; then
      log "SKIP tracked root file: ${rel}"
      skipped_count=$((skipped_count + 1))
      continue
    fi
    if [[ "${MODE}" == "archive" ]]; then
      dest="${ARCHIVE_DIR}/root-spillover/${rel}"
      run_cmd mkdir -p "$(dirname "${dest}")"
      run_cmd mv "${file}" "${dest}"
      log "ARCHIVE ${rel} -> ${dest}"
    else
      run_cmd rm -f "${file}"
      log "DELETE ${rel}"
    fi
    found_count=$((found_count + 1))
    cleaned_count=$((cleaned_count + 1))
  done
done
shopt -u nullglob
# Pass 3: move root .attach_pid* files (JVM attach mechanism) into tmp/pids.
pids_dir="${ROOT_DIR}/tmp/pids"
if compgen -G "${ROOT_DIR}/.attach_pid*" > /dev/null; then
  run_cmd mkdir -p "${pids_dir}"
  while IFS= read -r pid_file; do
    rel_pid="$(basename "${pid_file}")"
    if [[ "${MODE}" == "archive" ]]; then
      if [[ "${APPLY}" == "true" ]]; then
        mv "${pid_file}" "${pids_dir}/${rel_pid}"
      else
        log "DRY-RUN: mv ${pid_file} ${pids_dir}/${rel_pid}"
      fi
      log "MOVE ${rel_pid} -> tmp/pids/"
    else
      run_cmd rm -f "${pid_file}"
      log "DELETE ${rel_pid}"
    fi
    found_count=$((found_count + 1))
    cleaned_count=$((cleaned_count + 1))
  done < <(find "${ROOT_DIR}" -maxdepth 1 -type f -name ".attach_pid*" | sort)
fi
log "done: found=${found_count}, cleaned=${cleaned_count}, skipped=${skipped_count}"
if [[ "${APPLY}" != "true" ]]; then
  log "dry-run completed. Use --apply to execute."
fi
# In CI dry-run mode, surface leftover candidates as a failure.
if [[ "${FAIL_ON_FOUND}" == "true" && "${found_count}" -gt 0 ]]; then
  log "fail-on-found enabled: detected ${found_count} cleanup candidates."
  exit 2
fi

70
scripts/ci/logs-health-check.sh Executable file
View File

@@ -0,0 +1,70 @@
#!/usr/bin/env bash
# logs-health-check.sh -- report the size of the logs/ directory and the
# number of archive candidates, warning when configurable thresholds are hit.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
LOGS_DIR="${ROOT_DIR}/logs"
# Tunables (override via environment):
OLDER_THAN_DAYS="${OLDER_THAN_DAYS:-1}"              # age before a file counts as an archive candidate
WARN_TOTAL_MB="${WARN_TOTAL_MB:-300}"                # warn when logs/ exceeds this size
WARN_CANDIDATE_FILES="${WARN_CANDIDATE_FILES:-500}"  # warn when candidate count exceeds this
# Keep in sync with archive-logs.sh patterns.
PATTERN_PATHS=(
"logs/e2e-automation/run_*.log"
"logs/e2e-automation/report_*.md"
"logs/prd-review/review_*.md"
"logs/prd-review/claude_apply_*.md"
"logs/prd-review/execution_report_*.md"
"logs/prd-review/optimization_report_*.md"
)
# Prefixed logger so output is attributable in CI logs.
log() { echo "[logs-health] $*"; }
# Convert a byte count to megabytes with two decimal places. awk does the
# floating-point math since bash arithmetic is integer-only.
to_mb() {
  local byte_count
  byte_count="$1"
  awk -v n="${byte_count}" 'BEGIN { printf "%.2f", n / (1024 * 1024) }'
}
# Nothing to do when the project has no logs directory yet.
if [[ ! -d "${LOGS_DIR}" ]]; then
  log "logs directory not found, skip."
  exit 0
fi
# GNU date/du/stat/find are assumed (Linux CI).
cutoff_epoch="$(date -d "${OLDER_THAN_DAYS} days ago" +%s)"
total_bytes="$(du -sb "${LOGS_DIR}" | awk '{print $1}')"
total_mb="$(to_mb "${total_bytes}")"
candidate_files=0
shopt -s nullglob
for pattern in "${PATTERN_PATHS[@]}"; do
  # ${pattern} is unquoted on purpose so the glob expands.
  for abs in "${ROOT_DIR}"/${pattern}; do
    [[ -f "${abs}" ]] || continue
    mtime_epoch="$(stat -c %Y "${abs}")"
    # Count files older than the cutoff as archive candidates.
    if [[ "${mtime_epoch}" -lt "${cutoff_epoch}" ]]; then
      candidate_files=$((candidate_files + 1))
    fi
  done
done
shopt -u nullglob
log "total_size_mb=${total_mb} (threshold=${WARN_TOTAL_MB})"
log "archive_candidates_older_than_${OLDER_THAN_DAYS}d=${candidate_files} (threshold=${WARN_CANDIDATE_FILES})"
# Float comparison via awk ([[ -gt ]] is integer-only).
if awk -v a="${total_mb}" -v b="${WARN_TOTAL_MB}" 'BEGIN { exit !(a > b) }'; then
  log "WARN: logs directory is large. Consider: npm run logs:archive:apply"
fi
if [[ "${candidate_files}" -gt "${WARN_CANDIDATE_FILES}" ]]; then
  log "WARN: many archive candidates detected. Consider archiving historical logs."
fi
# Top-5 largest files, formatted as "- X.XX MB path".
log "top 5 largest log files:"
find "${LOGS_DIR}" -type f -printf '%s %p\n' | sort -nr | sed -n '1,5p' | awk '
{
mb = $1 / 1024 / 1024
$1 = ""
sub(/^ /, "", $0)
printf " - %.2f MB %s\n", mb, $0
}
'
exit 0

239
scripts/ci/prd-gap-check.sh Executable file
View File

@@ -0,0 +1,239 @@
#!/usr/bin/env bash
#
# PRD-vs-implementation gap check.
# Generates a readable PRD gap report with failed items and evidence paths.
#
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(cd "${SCRIPT_DIR}/../.." && pwd)"
TMP_DIR="${ROOT_DIR}/tmp/prd-gap-report"
REPORT_FILE="${TMP_DIR}/prd-gap-report-$(date +%Y%m%d_%H%M%S).md"
JAVA_TMP_DIR="${TMP_DIR}/java"
JNA_TMP_DIR="${TMP_DIR}/jna"
# Rootless podman API socket, exported later as DOCKER_HOST for
# container-backed tests.
PODMAN_SOCK_PATH="/run/user/$(id -u)/podman/podman.sock"
PODMAN_SOCK="unix://${PODMAN_SOCK_PATH}"
PODMAN_LOG="${TMP_DIR}/podman-service.log"
PODMAN_PID=""
# ANSI color codes for terminal output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
mkdir -p "${TMP_DIR}" "${JAVA_TMP_DIR}" "${JNA_TMP_DIR}"
# Stop the background podman service (if we started one) on any exit path.
cleanup() {
  [[ -n "${PODMAN_PID}" ]] || return 0
  if kill -0 "${PODMAN_PID}" >/dev/null 2>&1; then
    kill "${PODMAN_PID}" >/dev/null 2>&1 || true
    wait "${PODMAN_PID}" >/dev/null 2>&1 || true
  fi
}
trap cleanup EXIT
# Initialize podman when available: start a rootless API service and wait
# for the socket to accept connections. Returns non-zero when podman is
# missing or the service never becomes ready; callers then skip
# container-backed tests.
init_podman() {
  if ! command -v podman >/dev/null 2>&1; then
    echo -e "${YELLOW}WARNING: podman 未安装,跳过容器测试${NC}" >&2
    return 1
  fi
  mkdir -p "$(dirname "${PODMAN_SOCK_PATH}")"
  # --time=0 keeps the API service alive until cleanup() kills it.
  podman system service --time=0 "${PODMAN_SOCK}" > "${PODMAN_LOG}" 2>&1 &
  PODMAN_PID=$!
  # Poll up to ~30s for the socket to appear and answer `info`.
  for _ in {1..30}; do
    if [[ -S "${PODMAN_SOCK_PATH}" ]] && podman --url "${PODMAN_SOCK}" info >/dev/null 2>&1; then
      echo -e "${GREEN}Podman service 就绪${NC}"
      return 0
    fi
    sleep 1
  done
  echo -e "${RED}ERROR: podman service 未就绪${NC}" >&2
  return 1
}
# Write the report header: title, metadata (timestamp / branch / commit) and
# the summary-table header. Overwrites REPORT_FILE.
# Fix: the previous placeholder+sed approach used '/' as the sed delimiter,
# which broke -- and aborted the whole script under `set -e` -- whenever the
# branch name contained a slash (e.g. "feature/foo"). Expanding the values
# directly in the here-doc avoids sed entirely.
write_report_header() {
  local ts branch commit
  ts="$(date '+%Y-%m-%d %H:%M:%S')"
  branch="$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo 'unknown')"
  commit="$(git rev-parse HEAD 2>/dev/null || echo 'unknown')"
  cat > "${REPORT_FILE}" << EOF
# PRD-实现差距报告
> 自动生成时间: ${ts}
> 分支: ${branch}
> 提交: ${commit}
## 执行摘要
| 检查项 | 状态 | 证据路径 |
|--------|------|----------|
EOF
}
# Append one check-result row to the summary table in REPORT_FILE, plus an
# optional collapsible <details> section with raw output.
#   $1 name, $2 status (PASS/FAIL/other), $3 evidence path, $4 details text
add_check_result() {
  local name="$1"
  local status="$2"
  local evidence="$3"
  local details="$4"
  local status_icon
  if [[ "${status}" == "PASS" ]]; then
    status_icon="✅"
  elif [[ "${status}" == "FAIL" ]]; then
    status_icon="❌"
  else
    status_icon="⚠️"
  fi
  # Fix: this here-doc line previously ended with a stray "|", which made
  # bash treat the table row as a pipeline continuation and broke the whole
  # script with a syntax error at parse time.
  cat >> "${REPORT_FILE}" << EOF
| ${name} | ${status_icon} ${status} | ${evidence} |
EOF
  if [[ -n "${details}" ]]; then
    cat >> "${REPORT_FILE}" << EOF
<details>
<summary>详细信息</summary>
\`\`\`
${details}
\`\`\`
</details>
EOF
  fi
}
# Run a single test class (optionally "Class#method") and capture its output
# to an evidence file. Prints ONLY the evidence file path on stdout -- the
# caller captures it with $(...) -- while all progress and mvn output goes
# to stderr.
# Fix: the progress echoes and the `mvn | tee` stream previously went to
# stdout, so the caller's captured "evidence path" was a multi-line blob
# (contradicting the original comment "只输出证据路径到stdout").
run_test() {
  local test_name="$1"
  local test_class="$2"
  # Strip "#method" so the evidence file is named after the class only.
  local clean_class="${test_class%%#*}"
  local evidence_path="${TMP_DIR}/test-results/${clean_class}.txt"
  mkdir -p "$(dirname "${evidence_path}")"
  echo -e "\n${YELLOW}运行测试: ${test_name}${NC}" >&2
  local start_time
  start_time=$(date +%s)
  local exit_code=0
  # Route Testcontainers through podman when the socket is live.
  if [[ -n "${PODMAN_SOCK}" ]] && [[ -S "${PODMAN_SOCK_PATH}" ]]; then
    export DOCKER_HOST="${PODMAN_SOCK}"
  fi
  export TESTCONTAINERS_RYUK_DISABLED="true"
  export JNA_TMPDIR="${JNA_TMP_DIR}"
  export JAVA_IO_TMPDIR="${JAVA_TMP_DIR}"
  # tee into the evidence file; the mirrored stream goes to stderr so stdout
  # stays clean for the evidence path below.
  mvn -B test -Dtest="${test_class}" \
    -Djna.tmpdir="${JNA_TMP_DIR}" \
    -Djava.io.tmpdir="${JAVA_TMP_DIR}" \
    -Dmigration.test.strict=true \
    -Dsurefire.failIfNoSpecifiedTests=true \
    2>&1 | tee "${evidence_path}" >&2 || exit_code=$?
  local end_time
  end_time=$(date +%s)
  local duration=$((end_time - start_time))
  local result
  if [[ ${exit_code} -eq 0 ]]; then
    result="PASS"
  else
    result="FAIL"
  fi
  echo -e "${result}: ${test_name} (${duration}s)" >&2
  # Only the evidence path goes to stdout.
  echo "${evidence_path}"
}
# Main flow: optionally start podman, run the PRD-critical tests plus a
# backend build check, and write the aggregated markdown report. Exits 1
# when any check failed.
main() {
  echo -e "${GREEN}====== PRD-实现差距检查 ======${NC}"
  echo "报告输出目录: ${TMP_DIR}"
  # Initialize podman; container-backed tests degrade when unavailable.
  if init_podman; then
    export DOCKER_HOST="${PODMAN_SOCK}"
  fi
  # Report header.
  write_report_header
  # PRD-critical test classes/methods to run.
  declare -a TEST_CLASSES=(
    "AuditLogImmutabilityIntegrationTest"
    "PermissionCanonicalMigrationTest#shouldValidateCanonicalPermissionsAgainstBaseline"
    "PermissionCanonicalMigrationTest#shouldHaveZeroLegacyPermissionCodes"
  )
  local failed_count=0
  local passed_count=0
  for test_spec in "${TEST_CLASSES[@]}"; do
    IFS='#' read -r test_class test_method <<< "${test_spec}"
    local evidence_path
    if [[ -n "${test_method}" ]]; then
      evidence_path=$(run_test "${test_spec}" "${test_class}#${test_method}")
    else
      evidence_path=$(run_test "${test_spec}" "${test_class}")
    fi
    if grep -q "BUILD SUCCESS" "${evidence_path}" 2>/dev/null; then
      add_check_result "${test_spec}" "PASS" "${evidence_path}" ""
      # Fix: ((var++)) returns status 1 when the pre-increment value is 0,
      # which aborted the script under `set -e` on the very first pass.
      passed_count=$((passed_count + 1))
    else
      local details
      details=$(tail -50 "${evidence_path}" 2>/dev/null || echo "无日志")
      add_check_result "${test_spec}" "FAIL" "${evidence_path}" "${details}"
      failed_count=$((failed_count + 1))
    fi
  done
  # Backend build check.
  echo -e "\n${YELLOW}运行后端构建检查${NC}"
  local build_exit_code=0
  local build_log="${TMP_DIR}/maven-build.txt"
  mvn -B clean compile -DskipTests 2>&1 | tee "${build_log}" || build_exit_code=$?
  if [[ ${build_exit_code} -eq 0 ]]; then
    add_check_result "Maven构建" "PASS" "${build_log}" ""
    passed_count=$((passed_count + 1))
  else
    local details
    details=$(tail -50 "${build_log}" 2>/dev/null || echo "无日志")
    add_check_result "Maven构建" "FAIL" "${build_log}" "${details}"
    failed_count=$((failed_count + 1))
  fi
  # Summary section.
  cat >> "${REPORT_FILE}" << EOF
## 总结
- 通过: ${passed_count}
- 失败: ${failed_count}
- 生成时间: $(date '+%Y-%m-%d %H:%M:%S')
EOF
  echo -e "\n${GREEN}====== 检查完成 ======${NC}"
  echo -e "通过: ${GREEN}${passed_count}${NC}"
  echo -e "失败: ${RED}${failed_count}${NC}"
  echo -e "报告: ${REPORT_FILE}"
  if [[ ${failed_count} -gt 0 ]]; then
    exit 1
  fi
  exit 0
}
main "$@"

View File

@@ -0,0 +1,93 @@
#!/usr/bin/env bash
# update-log-archive-index.sh -- regenerate logs/archive/README.md with a
# per-batch overview of the archived log files.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
ARCHIVE_ROOT="${ROOT_DIR}/logs/archive"
OUTPUT_FILE="${ARCHIVE_ROOT}/README.md"
GENERATED_AT="$(date '+%Y-%m-%d %H:%M:%S %Z')"
# Render a Unix epoch (seconds) as "YYYY-MM-DD HH:MM:SS" (GNU date -d @).
fmt_epoch() {
  local ts="$1"
  date -d "@${ts}" '+%Y-%m-%d %H:%M:%S'
}
# Emit the archive batch directory names, one per line, in reverse
# lexicographic order (newest first for timestamp-named batches).
list_batches() {
  find "${ARCHIVE_ROOT}" -mindepth 1 -maxdepth 1 -type d -printf '%f\n' \
    | sort -r
}
# Ensure the archive root exists even on the very first run.
if [[ ! -d "${ARCHIVE_ROOT}" ]]; then
  mkdir -p "${ARCHIVE_ROOT}"
fi
# The whole report is produced inside one group whose stdout is redirected
# to OUTPUT_FILE. NB: the early `exit 0` below ends the script with only the
# "no batches" notice written to the index (the final status echo is never
# reached in that case).
{
  echo "# 日志归档索引"
  echo
  echo "本文件由 \`scripts/ci/update-log-archive-index.sh\` 自动生成。"
  echo
  echo "- 生成时间: ${GENERATED_AT}"
  echo "- 归档根目录: \`logs/archive/\`"
  echo
  mapfile -t batches < <(list_batches)
  if [[ "${#batches[@]}" -eq 0 ]]; then
    echo "当前没有可用归档批次。"
    exit 0
  fi
  echo "## 批次总览"
  echo
  echo "| 批次 | 文件数 | 体积 | 最早文件时间 | 最晚文件时间 |"
  echo "|---|---:|---:|---|---|"
  for batch in "${batches[@]}"; do
    batch_dir="${ARCHIVE_ROOT}/${batch}"
    file_count="$(find "${batch_dir}" -type f | wc -l)"
    size="$(du -sh "${batch_dir}" | awk '{print $1}')"
    if [[ "${file_count}" -eq 0 ]]; then
      earliest="-"
      latest="-"
    else
      # Min/max mtime over all files of the batch in one find+awk pass.
      read -r earliest_epoch latest_epoch < <(
        find "${batch_dir}" -type f -printf '%T@\n' | awk '
NR == 1 { min = $1; max = $1 }
{ if ($1 < min) min = $1; if ($1 > max) max = $1 }
END { printf "%d %d\n", min, max }
'
      )
      earliest="$(fmt_epoch "${earliest_epoch}")"
      latest="$(fmt_epoch "${latest_epoch}")"
    fi
    echo "| \`${batch}\` | ${file_count} | ${size} | ${earliest} | ${latest} |"
  done
  echo
  echo "## 子系统明细"
  echo
  for batch in "${batches[@]}"; do
    batch_dir="${ARCHIVE_ROOT}/${batch}"
    logs_dir="${batch_dir}/logs"
    echo "### ${batch}"
    echo
    if [[ ! -d "${logs_dir}" ]]; then
      echo "_未发现 \`logs/\` 子目录_"
      echo
      continue
    fi
    echo "| 子系统目录 | 文件数 | 体积 |"
    echo "|---|---:|---:|"
    # One row per first-level subsystem directory under <batch>/logs/.
    while IFS= read -r subdir; do
      sub_name="${subdir#${logs_dir}/}"
      sub_count="$(find "${subdir}" -type f | wc -l)"
      sub_size="$(du -sh "${subdir}" | awk '{print $1}')"
      echo "| \`${sub_name}\` | ${sub_count} | ${sub_size} |"
    done < <(find "${logs_dir}" -mindepth 1 -maxdepth 1 -type d | sort)
    echo
  done
} > "${OUTPUT_FILE}"
echo "[update-log-archive-index] generated ${OUTPUT_FILE}"

View File

@@ -0,0 +1,59 @@
#!/usr/bin/env bash
# e2e_continuous_runner.sh -- one iteration of the autonomous E2E loop:
# preflight the data contracts, then let the claude CLI run/fix the E2E
# suite and write a markdown report; sets done.flag when the report claims
# a full pass.
set -euo pipefail
PROJECT_DIR="/home/long/project/蚊子"
STATE_DIR="$PROJECT_DIR/logs/e2e-automation"
# Claude CLI location, overridable via $CLAUDE_BIN.
CLAUDE_BIN="${CLAUDE_BIN:-$HOME/.cursor/extensions/anthropic.claude-code-2.1.15-linux-x64/resources/native-binary/claude}"
CONTRACT_CHECK="$PROJECT_DIR/scripts/validate_test_contracts.sh"
mkdir -p "$STATE_DIR"
TS="$(date '+%Y%m%d_%H%M%S')"
RUN_LOG="$STATE_DIR/run_${TS}.log"
REPORT_FILE="$STATE_DIR/report_${TS}.md"
LATEST_LINK="$STATE_DIR/latest_report.md"
cd "$PROJECT_DIR"
if [ ! -x "$CONTRACT_CHECK" ]; then
  echo "[$(date '+%F %T')] [data-contract] missing executable: $CONTRACT_CHECK" | tee -a "$RUN_LOG"
  exit 2
fi
export SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE:-e2e}"
# Hard gate: do not run E2E at all when the data contracts are violated.
if ! "$CONTRACT_CHECK" "$PROJECT_DIR" runner >> "$RUN_LOG" 2>&1; then
  echo "[$(date '+%F %T')] [data-contract] preflight failed, aborting run" | tee -a "$RUN_LOG"
  exit 2
fi
echo "[$(date '+%F %T')] runner start" | tee -a "$RUN_LOG"
# Quoted here-doc: the prompt is passed to claude verbatim, no expansion.
PROMPT=$(cat <<'EOF'
你现在负责 /home/long/project/蚊子 的端到端测试优化闭环。
要求:
1) 运行并修复端到端测试,直到全部通过(若仓库存在前后端测试,也一并回归)
2) 你可以直接修改代码并执行必要命令
3) 如遇失败,持续迭代修复,不要停在计划阶段
4) 输出最终报告中文Markdown必须包含
- 是否“全部通过”(是/否)
- 执行命令清单
- 修改文件清单
- 测试结果摘要(通过/失败数量)
- 若未全部通过,明确阻塞项和下一步
EOF
)
"$CLAUDE_BIN" -p \
  --permission-mode dontAsk \
  --dangerously-skip-permissions \
  "$PROMPT" > "$REPORT_FILE" 2>> "$RUN_LOG"
cp -f "$REPORT_FILE" "$LATEST_LINK"
# The report must explicitly claim "all passed" before done.flag is set;
# the pattern covers the phrasings observed in past reports.
if grep -Eq '全部通过[: ]*是|是否“全部通过”[: ]*是|全部通过\s*\(是\)' "$REPORT_FILE"; then
  touch "$STATE_DIR/done.flag"
  echo "[$(date '+%F %T')] done flag set" | tee -a "$RUN_LOG"
else
  echo "[$(date '+%F %T')] run finished but not fully passed" | tee -a "$RUN_LOG"
fi
echo "[$(date '+%F %T')] runner end" | tee -a "$RUN_LOG"

41
scripts/e2e_kick.sh Executable file
View File

@@ -0,0 +1,41 @@
#!/usr/bin/env bash
# e2e_kick.sh -- cron entry point: preflight the data contracts, then start
# the continuous E2E runner in the background unless it is already running.
set -euo pipefail
PROJECT_DIR="/home/long/project/蚊子"
STATE_DIR="$PROJECT_DIR/logs/e2e-automation"
PID_FILE="$STATE_DIR/runner.pid"
WATCHDOG_LOG="$STATE_DIR/watchdog.log"
RUNNER="$PROJECT_DIR/scripts/e2e_continuous_runner.sh"
CONTRACT_CHECK="$PROJECT_DIR/scripts/validate_test_contracts.sh"
mkdir -p "$STATE_DIR"
log(){ echo "[$(date '+%F %T')] [kick] $*" >> "$WATCHDOG_LOG"; }
if [ ! -x "$CONTRACT_CHECK" ]; then
  log "data-contract checker missing: $CONTRACT_CHECK"
  exit 2
fi
# Hard gate: never kick the runner when the data contracts are violated.
if ! SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE:-e2e}" "$CONTRACT_CHECK" "$PROJECT_DIR" preflight >> "$WATCHDOG_LOG" 2>&1; then
  log "data-contract preflight failed; skip kick"
  exit 2
fi
# Allow periodic re-runs: clear the completion flag so a fresh regression
# round is triggered.
if [ -f "$STATE_DIR/done.flag" ]; then
  rm -f "$STATE_DIR/done.flag"
  log "removed done.flag for scheduled rerun"
fi
# Do not start a second runner while one is still alive.
if [ -f "$PID_FILE" ]; then
  pid="$(cat "$PID_FILE" 2>/dev/null || true)"
  if [ -n "${pid:-}" ] && kill -0 "$pid" 2>/dev/null; then
    log "runner already running pid=$pid"
    exit 0
  fi
fi
nohup "$RUNNER" > "$STATE_DIR/nohup.out" 2>&1 &
new_pid=$!
echo "$new_pid" > "$PID_FILE"
log "runner kicked pid=$new_pid"

60
scripts/e2e_watchdog.sh Executable file
View File

@@ -0,0 +1,60 @@
#!/usr/bin/env bash
# e2e_watchdog.sh -- keep the continuous E2E runner alive: start it when
# absent, restart it when its newest run log has been idle for >30 minutes.
set -euo pipefail
PROJECT_DIR="/home/long/project/蚊子"
STATE_DIR="$PROJECT_DIR/logs/e2e-automation"
PID_FILE="$STATE_DIR/runner.pid"
WATCHDOG_LOG="$STATE_DIR/watchdog.log"
RUNNER="$PROJECT_DIR/scripts/e2e_continuous_runner.sh"
CONTRACT_CHECK="$PROJECT_DIR/scripts/validate_test_contracts.sh"
mkdir -p "$STATE_DIR"
log() {
  echo "[$(date '+%F %T')] $*" >> "$WATCHDOG_LOG"
}
if [ ! -x "$CONTRACT_CHECK" ]; then
  log "data-contract checker missing: $CONTRACT_CHECK"
  exit 2
fi
# Hard gate: never (re)start the runner while the data contract is broken.
if ! SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE:-e2e}" "$CONTRACT_CHECK" "$PROJECT_DIR" preflight >> "$WATCHDOG_LOG" 2>&1; then
  log "data-contract preflight failed; watchdog will not start/restart runner"
  exit 2
fi
# done.flag means the previous round fully passed; e2e_kick.sh removes it
# when a new round is due.
if [ -f "$STATE_DIR/done.flag" ]; then
  log "done flag exists, watchdog idle"
  exit 0
fi
if [ -f "$PID_FILE" ]; then
  pid="$(cat "$PID_FILE" || true)"
  if [ -n "${pid:-}" ] && kill -0 "$pid" 2>/dev/null; then
    # Runner is alive: use the mtime of the newest run log as a liveness
    # heartbeat and restart when idle for more than 1800s.
    latest_log="$(ls -1t "$STATE_DIR"/run_*.log 2>/dev/null | head -n1 || true)"
    if [ -n "$latest_log" ]; then
      now=$(date +%s)
      mtime=$(stat -c %Y "$latest_log" 2>/dev/null || echo "$now")
      idle=$((now - mtime))
      if [ "$idle" -gt 1800 ]; then
        log "runner appears stuck (idle ${idle}s), restarting pid=$pid"
        kill "$pid" 2>/dev/null || true
        sleep 2
      else
        log "runner healthy pid=$pid idle=${idle}s"
        exit 0
      fi
    else
      log "runner pid=$pid, no run log yet"
      exit 0
    fi
  else
    log "stale pid file or process gone"
  fi
fi
nohup "$RUNNER" > "$STATE_DIR/nohup.out" 2>&1 &
new_pid=$!
echo "$new_pid" > "$PID_FILE"
log "runner started pid=$new_pid"

View File

@@ -0,0 +1,85 @@
#!/usr/bin/env bash
# optimization_supervisor.sh -- cron-driven supervisor: re-triggers the PRD
# review cycle and the E2E runner when their latest artifacts go stale
# (>15000s), and runs the E2E consistency check each round. Single-instance
# via flock.
set -euo pipefail
PROJECT_DIR="/home/long/project/蚊子"
LOG_DIR="$PROJECT_DIR/logs"
SUP_LOG="$LOG_DIR/optimization_supervisor.log"
LOCK_FILE="/tmp/mosquito_optimization_supervisor.lock"
PRD_SCRIPT="$PROJECT_DIR/scripts/prd_review_cycle.sh"
E2E_KICK_SCRIPT="$PROJECT_DIR/scripts/e2e_kick.sh"
E2E_CONSISTENCY_SCRIPT="$PROJECT_DIR/scripts/check_e2e_consistency.sh"
DATA_CONTRACT_SCRIPT="$PROJECT_DIR/scripts/validate_test_contracts.sh"
PRD_REVIEW_DIR="$LOG_DIR/prd-review"
E2E_DIR="$LOG_DIR/e2e-automation"
mkdir -p "$LOG_DIR"
# Non-blocking lock on fd 9: exit quietly when another instance is active.
exec 9>"$LOCK_FILE"
if ! flock -n 9; then
  echo "[$(date '+%F %T')] skip: supervisor already running" >> "$SUP_LOG"
  exit 0
fi
log() { echo "[$(date '+%F %T')] $*" >> "$SUP_LOG"; }
now=$(date +%s)
# Data-contract preflight. A failure only suppresses the E2E kick below;
# the PRD cycle is still allowed to run.
if [ ! -x "$DATA_CONTRACT_SCRIPT" ]; then
  log "data-contract checker missing: $DATA_CONTRACT_SCRIPT"
else
  if SPRING_PROFILES_ACTIVE="${SPRING_PROFILES_ACTIVE:-e2e}" "$DATA_CONTRACT_SCRIPT" "$PROJECT_DIR" preflight >> "$SUP_LOG" 2>&1; then
    log "data-contract preflight PASS"
  else
    log "data-contract preflight FAIL; skip e2e kick this round"
    SKIP_E2E_KICK=1
  fi
fi
# Re-trigger the PRD review cycle when its newest apply report is older
# than 15000s (or missing entirely).
latest_prd_apply="$(ls -1t "$PRD_REVIEW_DIR"/claude_apply_*.md 2>/dev/null | head -n1 || true)"
if [ -n "$latest_prd_apply" ]; then
  prd_mtime=$(stat -c %Y "$latest_prd_apply" 2>/dev/null || echo 0)
  prd_age=$((now - prd_mtime))
  log "prd latest apply=$(basename "$latest_prd_apply") age=${prd_age}s"
  if [ "$prd_age" -gt 15000 ]; then
    log "prd stale (>15000s), triggering prd_review_cycle.sh"
    nohup "$PRD_SCRIPT" >> "$LOG_DIR/prd_review_cycle.cron.log" 2>&1 &
  fi
else
  log "prd apply report missing, triggering first prd_review_cycle.sh"
  nohup "$PRD_SCRIPT" >> "$LOG_DIR/prd_review_cycle.cron.log" 2>&1 &
fi
# Same staleness logic for the E2E runner's report.
latest_e2e_report="$(ls -1t "$E2E_DIR"/report_*.md 2>/dev/null | head -n1 || true)"
if [ -n "$latest_e2e_report" ]; then
  e2e_mtime=$(stat -c %Y "$latest_e2e_report" 2>/dev/null || echo 0)
  e2e_age=$((now - e2e_mtime))
  log "e2e latest report=$(basename "$latest_e2e_report") age=${e2e_age}s"
  if [ "$e2e_age" -gt 15000 ]; then
    log "e2e stale (>15000s), triggering e2e_kick.sh"
    if [ "${SKIP_E2E_KICK:-0}" = "1" ]; then
      log "skip e2e kick due to failed data-contract preflight"
    else
      "$E2E_KICK_SCRIPT" || true
    fi
  fi
else
  log "e2e report missing, triggering e2e_kick.sh"
  if [ "${SKIP_E2E_KICK:-0}" = "1" ]; then
    log "skip e2e kick due to failed data-contract preflight"
  else
    "$E2E_KICK_SCRIPT" || true
  fi
fi
# Best-effort consistency check; result is logged, never fatal.
if [ -x "$E2E_CONSISTENCY_SCRIPT" ]; then
  CONSISTENCY_OUT="$E2E_DIR/consistency_latest.md"
  if "$E2E_CONSISTENCY_SCRIPT" "$CONSISTENCY_OUT"; then
    log "e2e consistency PASS: $CONSISTENCY_OUT"
  else
    log "e2e consistency FAIL: $CONSISTENCY_OUT"
  fi
else
  log "e2e consistency script missing: $E2E_CONSISTENCY_SCRIPT"
fi
log "supervisor done"

View File

@@ -0,0 +1,48 @@
# 权限码一致性校验报告
生成时间: 2026-03-22 21:33:26
## 四维统计
| 来源 | 权限码数量 |
|------|------------|
| Canonical基线 | 90 |
| 前端 | 94 |
| 数据库 | 90 |
| 后端 | 94 |
## Canonical基线覆盖率
| 维度 | 缺失数量 | 说明 |
|------|----------|------|
| 前端缺失 | 0 | Canonical基线在前端未定义 |
| 数据库缺失 | 0 | Canonical基线在数据库未导入 |
| 后端缺失 | 0 | Canonical基线在后端未使用 |
## 额外权限码分析(不在Canonical基线中)
### 前端独有权限码 (不在Canonical基线中): 4
user.points.adjust.ALL
user.points.view.ALL
user.whitelist.add.ALL
user.whitelist.remove.ALL
### 数据库独有权限码 (不在Canonical基线中): 0
### 后端独有权限码 (不在Canonical基线中): 4
user.points.adjust.ALL
user.points.view.ALL
user.whitelist.add.ALL
user.whitelist.remove.ALL
## Canonical基线缺失项
### 前端未覆盖Canonical基线 (0)
### 数据库未导入Canonical基线 (0)
### 后端未实现Canonical基线 (0)

156
scripts/prd_review_cycle.sh Executable file
View File

@@ -0,0 +1,156 @@
#!/usr/bin/env bash
# prd_review_cycle.sh -- one review cycle: codex writes a PRD gap report,
# claude applies the fixes, then the evidence gate validates the summary.
set -euo pipefail
PROJECT_DIR="/home/long/project/蚊子"
REPORT_DIR="$PROJECT_DIR/logs/prd-review"
RUN_LOG="$PROJECT_DIR/logs/prd_review_cycle.log"
LOCK_FILE="/tmp/mosquito_prd_review_cycle.lock"
PID_FILE="$PROJECT_DIR/logs/prd-review/cycle.pid"
FORCE_RERUN="${FORCE_RERUN:-0}"
STALE_SECONDS="${STALE_SECONDS:-10800}"
# Fix: allow overriding via $CLAUDE_BIN, consistent with
# e2e_continuous_runner.sh and validate_test_contracts.sh (this script was
# the only one hard-coding the path).
CLAUDE_BIN="${CLAUDE_BIN:-$HOME/.cursor/extensions/anthropic.claude-code-2.1.15-linux-x64/resources/native-binary/claude}"
EVIDENCE_CHECK_SCRIPT="$PROJECT_DIR/scripts/verify_review_evidence.sh"
# cron PATH is usually minimal; add nvm/codex locations explicitly.
export HOME="${HOME:-/home/long}"
export NVM_DIR="${NVM_DIR:-$HOME/.nvm}"
export PATH="$HOME/.local/bin:$HOME/bin:/usr/local/bin:/usr/bin:/bin:$PATH"
if ! command -v codex >/dev/null 2>&1; then
  if [ -s "$NVM_DIR/nvm.sh" ]; then
    # shellcheck disable=SC1090
    . "$NVM_DIR/nvm.sh" >/dev/null 2>&1 || true
    nvm use --silent 20 >/dev/null 2>&1 || true
    nvm use --silent default >/dev/null 2>&1 || true
  fi
fi
CODEX_BIN="$(command -v codex || true)"
# Last-resort fallback to a known nvm-installed codex location.
if [ -z "$CODEX_BIN" ] && [ -x "$HOME/.nvm/versions/node/v20.19.0/bin/codex" ]; then
  CODEX_BIN="$HOME/.nvm/versions/node/v20.19.0/bin/codex"
fi
mkdir -p "$REPORT_DIR" "$PROJECT_DIR/logs"
# Kill a previous cycle when forced or when its pid file looks stale.
if [ -f "$PID_FILE" ]; then
  prev_pid="$(cat "$PID_FILE" 2>/dev/null || true)"
  if [ -n "${prev_pid:-}" ] && kill -0 "$prev_pid" 2>/dev/null; then
    now_ts=$(date +%s)
    pid_age=$((now_ts - $(stat -c %Y "$PID_FILE" 2>/dev/null || echo "$now_ts")))
    if [ "$FORCE_RERUN" = "1" ] || [ "$pid_age" -gt "$STALE_SECONDS" ]; then
      echo "[$(date '+%F %T')] force/stale rerun: killing previous pid=$prev_pid age=${pid_age}s" >> "$RUN_LOG"
      kill "$prev_pid" 2>/dev/null || true
      sleep 2
    fi
  fi
fi
# Single-instance guard via non-blocking flock on fd 9.
exec 9>"$LOCK_FILE"
if ! flock -n 9; then
  echo "[$(date '+%F %T')] skip: previous cycle still running" >> "$RUN_LOG"
  exit 0
fi
echo $$ > "$PID_FILE"
trap 'rm -f "$PID_FILE"' EXIT
cd "$PROJECT_DIR"
TIMESTAMP="$(date '+%Y%m%d_%H%M%S')"
REPORT_FILE="$REPORT_DIR/review_${TIMESTAMP}.md"
LATEST_REPORT="$REPORT_DIR/latest_review.md"
CLAUDE_SUMMARY="$REPORT_DIR/claude_apply_${TIMESTAMP}.md"
# Collect PRD inputs: docs/PRD.md plus everything under docs/prd/.
PRD_FILES=()
for f in "$PROJECT_DIR/docs/PRD.md" "$PROJECT_DIR/docs/prd"/*.md; do
  if [ -f "$f" ]; then
    PRD_FILES+=("$f")
  fi
done
if [ ${#PRD_FILES[@]} -eq 0 ]; then
  echo "[$(date '+%F %T')] error: no PRD files found" >> "$RUN_LOG"
  exit 1
fi
PRD_LIST=$(printf '%s\n' "${PRD_FILES[@]}")
# Unquoted here-doc: $PROJECT_DIR and $PRD_LIST expand into the prompt.
CODEX_PROMPT=$(cat <<EOF
你是严格的项目评审官。请在仓库 $PROJECT_DIR 执行全面review并以 PRD 为基准进行差距分析。
PRD文件如下
$PRD_LIST
输出要求中文Markdown
1. 总体结论是否满足最新PRD
2. PRD事项对照矩阵事项/当前状态/证据文件路径/优先级)
3. 未完成清单P0/P1/P2
4. 给Claude可直接执行的优化任务清单按顺序包含具体文件与验收标准
5. 建议执行命令构建、测试、e2e
请务必面向“可执行落地”,不要空泛建议。
EOF
)
if [ -z "$CODEX_BIN" ]; then
  echo "[$(date '+%F %T')] error: codex binary not found (PATH=$PATH)" >> "$RUN_LOG"
  exit 1
fi
echo "[$(date '+%F %T')] cycle start: generating codex report with $CODEX_BIN" >> "$RUN_LOG"
# Phase 1: codex writes the gap-analysis report.
"$CODEX_BIN" exec \
  --cd "$PROJECT_DIR" \
  --dangerously-bypass-approvals-and-sandbox \
  --output-last-message "$REPORT_FILE" \
  "$CODEX_PROMPT" >> "$RUN_LOG" 2>&1
if [ ! -s "$REPORT_FILE" ]; then
  echo "[$(date '+%F %T')] error: codex report is empty: $REPORT_FILE" >> "$RUN_LOG"
  exit 1
fi
cp -f "$REPORT_FILE" "$LATEST_REPORT"
echo "[$(date '+%F %T')] codex report ready: $REPORT_FILE" >> "$RUN_LOG"
# Unquoted here-doc: $REPORT_FILE expands into the prompt.
CLAUDE_PROMPT=$(cat <<EOF
请读取并执行该评审报告中的优化任务:
$REPORT_FILE
要求:
1) 按优先级执行修复优先完成P0/P1
2) 可以直接修改代码并运行必要命令(含后端测试、前端测试、端到端测试)
3) 若修复后仍有失败,继续迭代直到通过或给出明确阻塞原因
4) 输出最终摘要执行命令、修改文件、测试结果、剩余未完成PRD项
5) 必须输出“证据矩阵”,每条包含:命令 | 退出码 | 原始日志路径 | 结果结论
6) 所有结论都要附可追溯日志路径(如 logs/*.log、target/surefire-reports/*.xml
若无法提供可追溯证据,必须明确写“证据不足-本轮无效”。
请直接开始执行,不要只给计划。
EOF
)
echo "[$(date '+%F %T')] cycle continue: running claude apply" >> "$RUN_LOG"
# Phase 2: claude applies the fixes from the report.
"$CLAUDE_BIN" -p \
  --permission-mode dontAsk \
  --dangerously-skip-permissions \
  "$CLAUDE_PROMPT" > "$CLAUDE_SUMMARY" 2>> "$RUN_LOG"
if [ ! -s "$CLAUDE_SUMMARY" ]; then
  echo "[$(date '+%F %T')] error: claude summary is empty: $CLAUDE_SUMMARY" >> "$RUN_LOG"
  exit 1
fi
# Phase 3: evidence gate -- the summary must carry verifiable evidence,
# otherwise the whole cycle is marked invalid.
if [ -x "$EVIDENCE_CHECK_SCRIPT" ]; then
  if "$EVIDENCE_CHECK_SCRIPT" "$CLAUDE_SUMMARY" "$PROJECT_DIR" >> "$RUN_LOG" 2>&1; then
    echo "[$(date '+%F %T')] evidence gate pass" >> "$RUN_LOG"
  else
    echo "[$(date '+%F %T')] error: evidence gate failed, mark cycle invalid" >> "$RUN_LOG"
    exit 1
  fi
else
  echo "[$(date '+%F %T')] warn: evidence checker missing: $EVIDENCE_CHECK_SCRIPT" >> "$RUN_LOG"
  exit 1
fi
echo "[$(date '+%F %T')] cycle done: $CLAUDE_SUMMARY" >> "$RUN_LOG"

View File

@@ -0,0 +1,164 @@
#!/usr/bin/env bash
# validate_test_contracts.sh -- static "data contract" preflight for the
# test/E2E environment: required config files, seed migrations, safe
# profiles, and strict E2E assertions.
# Usage: validate_test_contracts.sh [PROJECT_DIR] [MODE]
#   MODE: preflight (default) | runner (additionally checks CLAUDE_BIN)
set -euo pipefail
PROJECT_DIR="${1:-/home/long/project/蚊子}"
MODE="${2:-preflight}"
failures=()   # hard violations -> exit 2 at the end
warnings=()   # advisory findings -> printed, never fatal
# Record a hard failure.
fail() {
  failures+=("$*")
}
# Record an advisory warning.
warn() {
  warnings+=("$*")
}
# Record a failure when the given file does not exist.
#   $1 path, $2 human-readable hint appended to the message
require_file() {
  local path="$1" hint="$2"
  [ -f "$path" ] || fail "缺少文件: $path$hint"
}
# Record a failure when the file does not match the extended regex
# (a missing file also counts as a violation, since grep then fails).
#   $1 file, $2 ERE pattern, $3 hint appended to the message
require_contains() {
  local file="$1" regex="$2" hint="$3"
  grep -Eq "$regex" "$file" || fail "文件契约不满足: $file$hint"
}
# Record a failure when "key=value" is absent or its value is empty
# (spaces-only counts as empty). The LAST occurrence of the key wins.
#   $1 file, $2 key (used as an ERE prefix), $3 hint appended to the message
require_non_empty_kv() {
  local file="$1" key="$2" hint="$3"
  local entry value
  entry="$(grep -E "^${key}=" "$file" | tail -n1 || true)"
  if [ -z "$entry" ]; then
    fail "缺少配置项: ${key}$hint"
    return
  fi
  value="${entry#*=}"
  [ -n "${value// /}" ] || fail "配置项为空: ${key}$hint"
}
# Paths of the configuration files and migrations under contract.
E2E_PROPS="$PROJECT_DIR/src/main/resources/application-e2e.properties"
TEST_PROPS="$PROJECT_DIR/src/test/resources/application.properties"
TEST_YML="$PROJECT_DIR/src/main/resources/application-test.yml"
MIGRATION_DIR="$PROJECT_DIR/src/main/resources/db/migration"
STATE_DIR="$PROJECT_DIR/logs/e2e-automation"
# Required files.
require_file "$E2E_PROPS" "E2E环境配置"
require_file "$TEST_PROPS" "测试资源配置"
require_file "$TEST_YML" "test profile配置"
require_file "$MIGRATION_DIR/V26__Seed_roles_permissions.sql" "权限seed必须存在"
require_file "$MIGRATION_DIR/V37__Seed_user_roles.sql" "用户角色seed必须存在"
# E2E profile contract: in-memory DB, Flyway off, CSRF off, localhost
# callbacks allowed, JWT secret present.
if [ -f "$E2E_PROPS" ]; then
  require_contains "$E2E_PROPS" '^spring\.datasource\.url=jdbc:h2:mem:' 'E2E必须使用内存库避免污染真实环境'
  require_contains "$E2E_PROPS" '^spring\.flyway\.enabled=false' 'E2E依赖JPA自动建表时需关闭Flyway'
  require_contains "$E2E_PROPS" '^mosquito\.security\.csrf\.enabled=false' 'E2E接口测试需要关闭CSRF'
  require_contains "$E2E_PROPS" '^mosquito\.callback\.allow-localhost=true' 'E2E回调需允许localhost'
  require_non_empty_kv "$E2E_PROPS" 'mosquito.security.jwt.secret' '鉴权token签名密钥必填'
fi
# Unit-test profile contract: H2 in-memory DB with create-drop DDL.
if [ -f "$TEST_PROPS" ]; then
  require_contains "$TEST_PROPS" '^spring\.datasource\.url=jdbc:h2:mem:' '单测必须使用H2内存库'
  require_contains "$TEST_PROPS" '^spring\.jpa\.hibernate\.ddl-auto=create-drop' '测试环境要求自动建删表'
fi
# Seed migrations must actually insert the rows they claim to seed.
if [ -f "$MIGRATION_DIR/V37__Seed_user_roles.sql" ]; then
  require_contains "$MIGRATION_DIR/V37__Seed_user_roles.sql" 'INSERT INTO sys_user_role' '必须初始化用户角色关联,避免鉴权假绿'
fi
if [ -f "$MIGRATION_DIR/V26__Seed_roles_permissions.sql" ]; then
  require_contains "$MIGRATION_DIR/V26__Seed_roles_permissions.sql" 'INSERT INTO sys_role ' '必须初始化角色数据'
  require_contains "$MIGRATION_DIR/V26__Seed_roles_permissions.sql" 'INSERT INTO sys_permission ' '必须初始化权限数据'
fi
# Refuse to run the test chain against a production profile.
if [ -n "${SPRING_PROFILES_ACTIVE:-}" ]; then
  if echo "$SPRING_PROFILES_ACTIVE" | grep -Eqi '(^|,)(prod|production)(,|$)'; then
    fail "检测到危险profile: SPRING_PROFILES_ACTIVE=$SPRING_PROFILES_ACTIVE禁止在测试链路使用prod"
  fi
else
  warn "未设置 SPRING_PROFILES_ACTIVE建议runner显式设为e2e"
fi
# In runner mode the claude CLI must be executable, or no report can be
# produced.
CLAUDE_BIN_DEFAULT="$HOME/.cursor/extensions/anthropic.claude-code-2.1.15-linux-x64/resources/native-binary/claude"
CLAUDE_BIN_EFFECTIVE="${CLAUDE_BIN:-$CLAUDE_BIN_DEFAULT}"
if [ "$MODE" = "runner" ]; then
  if [ ! -x "$CLAUDE_BIN_EFFECTIVE" ]; then
    fail "CLAUDE_BIN不可执行: $CLAUDE_BIN_EFFECTIVErunner无法产出报告"
  fi
fi
# The automation state directory must exist and be writable.
mkdir -p "$STATE_DIR" 2>/dev/null || fail "无法创建状态目录: $STATE_DIR"
if [ ! -w "$STATE_DIR" ]; then
  fail "状态目录不可写: $STATE_DIR"
fi
# ============================================================
# E2E strict-assertion check (MOSQ-P1-001).
# Flags lenient assertion patterns in user-journey*.spec.ts.
# ============================================================
E2E_TEST_DIR="$PROJECT_DIR/frontend/e2e/tests"
check_e2e_strict_assertions() {
  local spec_files=("$E2E_TEST_DIR"/user-journey*.spec.ts)
  local found_issues=0
  for spec_file in "${spec_files[@]}"; do
    if [ ! -f "$spec_file" ]; then
      continue
    fi
    # Lenient patterns like expect([200, 401, 403]) are violations; strict
    # specs should assert a concrete status (or a proper range check).
    if grep -q 'expect(\[200, 401' "$spec_file" 2>/dev/null; then
      fail "E2E严格断言违规: $spec_file 包含宽松断言 expect([200, 401, 403])"
      found_issues=$((found_issues + 1))
    fi
    if grep -q 'expect(\[200, 201, 401' "$spec_file" 2>/dev/null; then
      fail "E2E严格断言违规: $spec_file 包含宽松断言 expect([200, 201, 401, 403])"
      found_issues=$((found_issues + 1))
    fi
    # Advisory: specs should gate on real API credentials ...
    if ! grep -q 'hasRealApiCredentials' "$spec_file" 2>/dev/null; then
      warn "E2E建议: $spec_file 未使用 hasRealApiCredentials 函数"
    fi
    # ... and use test.skip for conditional skipping.
    if ! grep -q 'test\.skip' "$spec_file" 2>/dev/null; then
      warn "E2E建议: $spec_file 未使用 test.skip 进行条件跳过"
    fi
  done
  return $found_issues
}
if [ -d "$E2E_TEST_DIR" ]; then
  # `|| true`: a non-zero issue count must not abort under set -e; the
  # recorded failures decide the exit code below.
  check_e2e_strict_assertions || true
fi
# Report warnings first, then failures; exit 2 on any hard violation.
if [ ${#warnings[@]} -gt 0 ]; then
  for w in "${warnings[@]}"; do
    echo "[WARN] $w"
  done
fi
if [ ${#failures[@]} -gt 0 ]; then
  echo "[DATA-CONTRACT] FAIL (${#failures[@]}项)"
  for f in "${failures[@]}"; do
    echo " - $f"
  done
  exit 2
fi
echo "[DATA-CONTRACT] PASS mode=$MODE"

View File

@@ -0,0 +1,42 @@
#!/usr/bin/env bash
set -euo pipefail
SUMMARY_FILE="${1:?summary file required}"
PROJECT_DIR="${2:-/home/long/project/蚊子}"
[ -s "$SUMMARY_FILE" ] || { echo "summary missing/empty" >&2; exit 2; }
# 1) 必须包含结构化字段
for k in "执行命令" "修改文件" "测试结果" "剩余未完成"; do
grep -q "$k" "$SUMMARY_FILE" || { echo "missing section: $k" >&2; exit 3; }
done
# 2) 至少有3条命令证据bash代码块内或行内命令
cmd_count=$(grep -E "(mvn|npm|npx|pnpm|yarn|pytest|go test|gradle|playwright)" "$SUMMARY_FILE" | wc -l | tr -d ' ')
if [ "${cmd_count:-0}" -lt 3 ]; then
echo "insufficient command evidence: $cmd_count" >&2
exit 4
fi
# 3) 提取日志路径并验证至少2个真实存在
# 允许绝对路径,或相对 logs/ target/ frontend/**/test-results/
mapfile -t paths < <(grep -Eo '/[^ )`"]+\.(log|txt|md|xml|json)|((logs|target|frontend)/[^ )`"]+\.(log|txt|md|xml|json))' "$SUMMARY_FILE" | sort -u)
exists=0
for p in "${paths[@]:-}"; do
if [[ "$p" = /* ]]; then
fp="$p"
else
fp="$PROJECT_DIR/$p"
fi
if [ -f "$fp" ]; then
exists=$((exists+1))
fi
done
if [ "$exists" -lt 2 ]; then
echo "insufficient raw log path evidence: $exists" >&2
exit 5
fi
echo "evidence ok: cmd_count=$cmd_count existing_logs=$exists"