// user-system/internal/service/scale_test.go
package service
import (
"context"
"fmt"
"sort"
"sync"
"testing"
"time"
"github.com/user-management-system/internal/domain"
"github.com/user-management-system/internal/pagination"
"github.com/user-management-system/internal/repository"
gormsqlite "gorm.io/driver/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/logger"
)
// =============================================================================
// Real Data Scale Tests (方案二:真实数据规模测试) — 优化版
//
// 改进点v2 → v3
// 1. 每个测试完全隔离独立内存数据库cache=shared无交叉污染
// 2. WAL 模式 + busy_timeout优化并发读写性能
// 3. P99/P95 延迟统计:关键查询多次采样,输出百分位指标
// 4. 新增并发压测模拟真实多用户并发场景CONC_SCALE_001~003
// 5. 双阈值体系SQLite 本地阈值(宽松)+ PostgreSQL 生产目标阈值(严格)
//
// 行业最佳实践数据量参考:
// - 用户规模:中型企业 10万~100万用户这里取 10万留 100个分页
// - 登录日志1000并发用户每用户每天 20次登录含重试/多设备/多会话),
// 保留 90天 = 1000×20×90 = 1,800,000 条(留 100个分页
// 保留 180天 = 3,600,000 条(留 200个分页
// - 设备日志:每用户平均 3台设备10万用户 = 30万台设备
// - 操作日志:中等规模系统每天约 5,000~20,000 条操作记录
//
// SLA 阈值体系:
// ┌────────────────────┬──────────────────┬──────────────────┐
// │ 操作 │ SQLite 本地阈值 │ PG 生产目标阈值 │
// ├────────────────────┼──────────────────┼──────────────────┤
// │ 分页查询(20条) │ P99 < 500ms │ P99 < 50ms │
// │ Count 聚合 │ P99 < 200ms │ P99 < 10ms │
// │ 时间范围查询(100) │ P99 < 3s │ P99 < 200ms │
// │ 批量插入(1000条) │ 总计 < 5s │ 总计 < 1s │
// │ 索引条件查询 │ P99 < 100ms │ P99 < 5ms │
// └────────────────────┴──────────────────┴──────────────────┘
// =============================================================================
// =============================================================================
// ⚡ 延迟统计采集器 — P50/P95/P99 百分位统计
// =============================================================================
// LatencyStats accumulates latency samples and exposes percentile statistics.
type LatencyStats struct {
	Samples      int             // number of recorded samples
	Min          time.Duration   // smallest observed latency
	Max          time.Duration   // largest observed latency
	Mean         time.Duration   // arithmetic mean of all samples
	P50          time.Duration   // median
	P95          time.Duration   // 95th percentile
	P99          time.Duration   // 99th percentile
	rawDurations []time.Duration // raw samples (internal)
}

// NewLatencyCollector returns an empty collector ready to Record samples.
func NewLatencyCollector() *LatencyStats {
	collector := &LatencyStats{}
	collector.rawDurations = make([]time.Duration, 0, 100)
	return collector
}

// Record appends one latency sample to the collector.
func (s *LatencyStats) Record(d time.Duration) {
	s.rawDurations = append(s.rawDurations, d)
}

// Compute sorts the recorded samples and returns the percentile statistics.
// Record enough samples before calling; with zero samples the receiver is
// returned unchanged.
func (s *LatencyStats) Compute() LatencyStats {
	n := len(s.rawDurations)
	if n == 0 {
		return *s
	}
	sorted := append([]time.Duration(nil), s.rawDurations...)
	sort.Slice(sorted, func(a, b int) bool { return sorted[a] < sorted[b] })
	var total time.Duration
	for _, d := range sorted {
		total += d
	}
	return LatencyStats{
		Samples: n,
		Min:     sorted[0],
		Max:     sorted[n-1],
		Mean:    total / time.Duration(n),
		P50:     sorted[n*50/100],
		P95:     sorted[n*95/100],
		P99:     sorted[n*99/100],
	}
}

// String renders the statistics as a single-line report.
func (s LatencyStats) String() string {
	return fmt.Sprintf("n=%d min=%v max=%v mean=%v p50=%v p95=%v p99=%v",
		s.Samples, s.Min, s.Max, s.Mean, s.P50, s.P95, s.P99)
}

// AssertSLA fails the test via t.Error when P99 exceeds the given threshold;
// otherwise it logs a pass line.
func (s LatencyStats) AssertSLA(t *testing.T, sla time.Duration, label string) {
	t.Helper()
	if s.P99 <= sla {
		t.Logf("✅ PASS [%s]: P99=%v ≤ threshold %v | %s", label, s.P99, sla, s.String())
		return
	}
	t.Errorf("SLA BREACH [%s]: P99=%v exceeds threshold %v | %s", label, s.P99, sla, s.String())
}
// =============================================================================
// ⚡ 隔离测试数据库(与 business_logic_test.go 共用模式)
// =============================================================================
// newIsolatedDB creates a dedicated in-memory SQLite database for one scale test.
// Each test gets a unique database name (sanitized test name + nanosecond
// timestamp). cache=shared keeps every pooled connection attached to the SAME
// named in-memory database; uniqueness of the name — not a private cache — is
// what prevents cross-test data pollution. (An earlier comment claimed
// cache=private; the code has always used cache=shared, which is required so
// the sql.DB connection pool sees one database.)
// Skips the test when the SQLite driver is unavailable.
func newIsolatedDB(t *testing.T) *gorm.DB {
	t.Helper()
	dsn := fmt.Sprintf("file:scale_%s_%d?mode=memory&cache=shared",
		sanitizeScaleName(t.Name()), time.Now().UnixNano())
	db, err := gorm.Open(gormsqlite.New(gormsqlite.Config{
		DriverName: "sqlite",
		DSN:        dsn,
	}), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Silent),
	})
	if err != nil {
		t.Skipf("skipping test (SQLite unavailable): %v", err)
		return nil
	}
	// Concurrency-friendly pragmas. NOTE(review): journal_mode=WAL is
	// effectively a no-op for in-memory databases (SQLite keeps a "memory"
	// journal), but it is harmless and would matter if the DSN ever pointed
	// at a file.
	db.Exec("PRAGMA journal_mode=WAL")
	db.Exec("PRAGMA synchronous=NORMAL")
	db.Exec("PRAGMA busy_timeout=5000")
	if err := db.AutoMigrate(
		&domain.User{},
		&domain.Role{},
		&domain.Permission{},
		&domain.UserRole{},
		&domain.RolePermission{},
		&domain.Device{},
		&domain.LoginLog{},
		&domain.OperationLog{},
	); err != nil {
		t.Fatalf("db migration failed: %v", err)
	}
	// Close the underlying pool when the test ends so the named in-memory
	// database is released.
	t.Cleanup(func() {
		if sqlDB, err := db.DB(); err == nil {
			sqlDB.Close()
		}
	})
	return db
}
// sanitizeScaleName converts a test name into a legal SQLite database
// identifier: ASCII letters and digits pass through, every other byte becomes
// an underscore, and the result is truncated to 30 bytes.
func sanitizeScaleName(name string) string {
	const maxLen = 30
	n := len(name)
	if n > maxLen {
		n = maxLen
	}
	out := make([]byte, n)
	for i := 0; i < n; i++ {
		switch c := name[i]; {
		case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9':
			out[i] = c
		default:
			out[i] = '_'
		}
	}
	return string(out)
}
// TestScale_UL_001_100KUsersPagination exercises user-list pagination at 100K
// user scale (industry reference: mid-size enterprises run 100K-1M users).
// It samples four page depths, five rounds each, and asserts the aggregated
// P99 against the local SQLite threshold (500ms; PG production target: 50ms).
func TestScale_UL_001_100KUsersPagination(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	// Industry baseline: 100K users for a mid-size enterprise.
	const userCount = 100000
	t.Logf("Generating %d users for pagination test (industry: 100K-1M for mid-size enterprise)...", userCount)
	userRepo := repository.NewUserRepository(db)
	users := generateTestUsers(userCount)
	start := time.Now()
	// Insert in batches of 50 rows (keeps bind variables under SQLite's
	// MAX_VARIABLE_NUMBER limit).
	if err := db.CreateInBatches(users, 50).Error; err != nil {
		t.Fatalf("failed to create users: %v", err)
	}
	t.Logf("Created %d users in %v", userCount, time.Since(start))
	// Pagination SLA — P99 < 500ms (SQLite) / P99 < 50ms (PG production target).
	ctx := context.Background()
	// NOTE(review): the per-case sla and label fields are informational only;
	// the final assertion applies one aggregated 500ms threshold.
	testCases := []struct {
		page     int
		pageSize int
		sla      time.Duration
		label    string
	}{
		{1, 20, 500 * time.Millisecond, "首页"},
		{100, 20, 500 * time.Millisecond, "早期分页"},
		{1000, 20, 500 * time.Millisecond, "中部分页(offset=20000)"},
		{5000, 20, 500 * time.Millisecond, "深层分页(offset=100000)"},
	}
	pagStats := NewLatencyCollector()
	for round := 0; round < 5; round++ {
		for _, tc := range testCases {
			start := time.Now()
			offset := (tc.page - 1) * tc.pageSize
			result, total, err := userRepo.List(ctx, offset, tc.pageSize)
			elapsed := time.Since(start)
			pagStats.Record(elapsed)
			if err != nil {
				t.Errorf("List page %d failed: %v", tc.page, err)
				continue
			}
			if total != int64(userCount) {
				t.Errorf("expected total %d, got %d", userCount, total)
			}
			// A short page is only acceptable when the offset runs past the
			// end of the data set.
			if len(result) != tc.pageSize && int(total) >= tc.page*tc.pageSize {
				t.Errorf("page %d: expected %d results, got %d", tc.page, tc.pageSize, len(result))
			}
		}
	}
	stats := pagStats.Compute()
	t.Logf("Pagination P99 stats: %s", stats.String())
	stats.AssertSLA(t, 500*time.Millisecond, "UL_001_Pagination_P99(SQLite)")
}
// TestScale_UL_002_KeywordSearch tests keyword search at 100K-user scale.
// Industry baseline: a LIKE-style search over 100K users should answer in
// < 2s locally on SQLite (PG production target with indexes: 200ms).
func TestScale_UL_002_KeywordSearch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	const userCount = 100000
	userRepo := repository.NewUserRepository(db)
	// Create users (the hyphenated name format avoids LIKE-escaping issues).
	users := make([]*domain.User, userCount)
	for i := 0; i < userCount; i++ {
		users[i] = &domain.User{
			Username: fmt.Sprintf("searchuser-%08d", i),
			Email:    ptrString(fmt.Sprintf("searchuser-%08d@test.com", i)),
			Password: "$2a$10$dummy",
			Status:   domain.UserStatusActive,
		}
	}
	start := time.Now()
	if err := db.CreateInBatches(users, 50).Error; err != nil {
		t.Fatalf("failed to create users: %v", err)
	}
	t.Logf("Created %d users in %v", userCount, time.Since(start))
	// Search SLA — P99 < 2s (SQLite) / P99 < 200ms (PG production target).
	ctx := context.Background()
	testCases := []struct {
		keyword  string
		expected int // expected exact-hit count (only presence/absence is asserted below)
		sla      time.Duration
		label    string
	}{
		{"searchuser-00002500", 1, 2 * time.Second, "精确搜索"},
		{"searchuser-0000", 100, 2 * time.Second, "前缀搜索"},
		{"notexist-99999999", 0, 2 * time.Second, "无结果搜索"},
	}
	searchStats := NewLatencyCollector()
	for round := 0; round < 5; round++ {
		for _, tc := range testCases {
			start := time.Now()
			results, _, err := userRepo.Search(ctx, tc.keyword, 0, 100)
			elapsed := time.Since(start)
			searchStats.Record(elapsed)
			if err != nil {
				t.Errorf("Search '%s' failed: %v", tc.keyword, err)
				continue
			}
			// Presence/absence checks only; exact counts depend on the
			// repository's matching semantics.
			if tc.expected > 0 && len(results) == 0 {
				t.Errorf("Search '%s': expected results but got none", tc.keyword)
			}
			if tc.expected == 0 && len(results) > 0 {
				t.Errorf("Search '%s': expected no results but got %d", tc.keyword, len(results))
			}
		}
	}
	stats := searchStats.Compute()
	t.Logf("Keyword Search P99 stats: %s", stats.String())
	stats.AssertSLA(t, 2*time.Second, "UL_002_KeywordSearch_P99(SQLite)")
}
// TestScale_LL_001_180DayLoginLogRetention tests login-log pagination at
// 180-day retention scale.
// Industry baseline: 1000 concurrent users × 20 logins/day (retries, multiple
// devices, session refreshes) kept for 180 days = 3,600,000 rows; the 90-day
// figure (1,800,000) is too small to surface annual-archive behavior.
// For runtime reasons this test is trimmed to 500,000 rows (~25 days of
// volume, still 10× the previous 50K fixture).
func TestScale_LL_001_180DayLoginLogRetention(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	// Industry baseline: 1000 users × 20/day × 180 days = 3,600,000 rows;
	// trimmed to 500,000 (see function comment).
	const logCount = 500000
	loginLogRepo := repository.NewLoginLogRepository(db)
	// Users the logs will reference.
	users := make([]*domain.User, 1000)
	for i := range users {
		users[i] = &domain.User{
			Username: fmt.Sprintf("lluser-%d", i),
			Email:    ptrString(fmt.Sprintf("lluser-%d@test.com", i)),
			Password: "$2a$10$dummy",
			Status:   domain.UserStatusActive,
		}
	}
	if err := db.CreateInBatches(users, 500).Error; err != nil {
		t.Fatalf("failed to create users: %v", err)
	}
	t.Logf("Generating %d login logs (1000 users × ~500 logs × 180-day retention scenario)...", logCount)
	start := time.Now()
	// Generate logs across the retention window: 90% success, 10% failure.
	batchSize := 10000
	for i := 0; i < logCount; i += batchSize {
		logs := make([]*domain.LoginLog, 0, batchSize)
		for j := 0; j < batchSize && (i+j) < logCount; j++ {
			idx := i + j
			status := 1
			if idx%10 == 0 {
				status = 0 // 10% failure rate
			}
			userID := int64(idx % 1000)
			logs = append(logs, &domain.LoginLog{
				UserID:    &users[userID].ID,
				LoginType: int(domain.LoginTypePassword),
				IP:        fmt.Sprintf("192.168.%d.%d", idx%256, (idx+100)%256),
				Location:  "测试城市",
				Status:    status,
				CreatedAt: time.Now().Add(-time.Duration(idx%180) * 24 * time.Hour), // spread evenly across 180 days
			})
		}
		if err := db.CreateInBatches(logs, 2000).Error; err != nil {
			t.Fatalf("failed to create logs: %v", err)
		}
		if i%50000 == 0 {
			t.Logf(" progress: %d / %d logs created", i, logCount)
		}
	}
	t.Logf("Created %d login logs in %v (%.2f logs/sec)",
		logCount, time.Since(start), float64(logCount)/time.Since(start).Seconds())
	// Pagination SLA — shallow pages < 500ms, very deep offsets up to 3s;
	// the aggregate assertion below uses 2s.
	ctx := context.Background()
	// NOTE(review): per-case sla/label are informational only; the final
	// assertion applies a single aggregated 2s threshold.
	testCases := []struct {
		page     int
		pageSize int
		sla      time.Duration
		label    string
	}{
		{1, 50, 500 * time.Millisecond, "首页"},
		{100, 50, 500 * time.Millisecond, "早期分页"},
		{1000, 50, 2 * time.Second, "深分页(offset=49950)"},
		{2000, 50, 3 * time.Second, "超深分页(offset=99950)"},
	}
	pageStats := NewLatencyCollector()
	for round := 0; round < 5; round++ {
		for _, tc := range testCases {
			start := time.Now()
			offset := (tc.page - 1) * tc.pageSize
			_, total, err := loginLogRepo.List(ctx, offset, tc.pageSize)
			elapsed := time.Since(start)
			pageStats.Record(elapsed)
			if err != nil {
				t.Errorf("List page %d failed: %v", tc.page, err)
				continue
			}
			if total != int64(logCount) {
				t.Errorf("expected total %d, got %d", logCount, total)
			}
		}
	}
	stats := pageStats.Compute()
	t.Logf("LoginLog Pagination P99 stats: %s", stats.String())
	stats.AssertSLA(t, 2*time.Second, "LL_001_LoginLogPagination_P99(SQLite)")
}
// TestScale_LL_001C_CursorPagination benchmarks cursor-based (keyset) pagination
// against the traditional offset-based approach for deep pagination.
// Key expectation: Cursor P99 should be < 50ms even at page 10000, while offset
// at the same depth takes > 1s due to O(offset) scanning.
func TestScale_LL_001C_CursorPagination(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	const logCount = 100000
	loginLogRepo := repository.NewLoginLogRepository(db)
	// Create the users the logs will reference.
	users := make([]*domain.User, 1000)
	for i := range users {
		users[i] = &domain.User{
			Username: fmt.Sprintf("llcuser-%d", i),
			Email:    ptrString(fmt.Sprintf("llcuser-%d@test.com", i)),
			Password: "$2a$10$dummy",
			Status:   domain.UserStatusActive,
		}
	}
	if err := db.CreateInBatches(users, 500).Error; err != nil {
		t.Fatalf("failed to create users: %v", err)
	}
	// Generate login logs with a time distribution (one per second backwards).
	t.Logf("Generating %d logs for cursor benchmark...", logCount)
	batchSize := 10000
	for i := 0; i < logCount; i += batchSize {
		logs := make([]*domain.LoginLog, 0, batchSize)
		for j := 0; j < batchSize && (i+j) < logCount; j++ {
			idx := i + j
			logs = append(logs, &domain.LoginLog{
				UserID:    ptrInt64(int64(idx % 1000)),
				LoginType: 1,
				IP:        "127.0.0.1",
				Status:    1,
				CreatedAt: time.Now().Add(-time.Duration(idx) * time.Second),
			})
		}
		if err := db.CreateInBatches(logs, 50).Error; err != nil {
			t.Fatalf("batch insert failed at %d: %v", i, err)
		}
	}
	t.Logf("Created %d logs", logCount)
	ctx := context.Background()
	limit := 50
	// Phase 1: walk through ALL pages via cursor pagination, timing each fetch.
	cursorStats := NewLatencyCollector()
	var currentCursor *pagination.Cursor
	pageCount := 0
	totalFetched := 0
	for {
		start := time.Now()
		logs, hasMore, err := loginLogRepo.ListCursor(ctx, limit, currentCursor)
		elapsed := time.Since(start)
		cursorStats.Record(elapsed)
		if err != nil {
			t.Fatalf("ListCursor failed at page %d: %v", pageCount+1, err)
		}
		totalFetched += len(logs)
		pageCount++
		if !hasMore || len(logs) == 0 {
			break
		}
		// Keyset cursor: continue strictly after the last row of this page.
		last := logs[len(logs)-1]
		currentCursor = &pagination.Cursor{LastID: last.ID, LastValue: last.CreatedAt}
	}
	cursorStatsComputed := cursorStats.Compute()
	t.Logf("=== CURSOR PAGINATION RESULTS ===")
	t.Logf("Total pages fetched: %d, total items: %d", pageCount, totalFetched)
	t.Logf("Cursor P99 stats: %s", cursorStatsComputed.String())
	// The key SLA: cursor P99 should be dramatically better than offset-based.
	// Target: P99 < 50ms (vs offset's ~1740ms at deep pages).
	cursorStatsComputed.AssertSLA(t, 100*time.Millisecond, "LL_001C_CursorPagination_P99")
	// Phase 2: compare with a few offset-based queries at equivalent depth.
	offsetStats := NewLatencyCollector()
	offsetTestPages := []int{1, 100, 1000, 2000} // equivalent to different depths
	for _, pg := range offsetTestPages {
		start := time.Now()
		_, _, err := loginLogRepo.List(ctx, (pg-1)*limit, limit)
		elapsed := time.Since(start)
		offsetStats.Record(elapsed)
		if err != nil {
			t.Errorf("Offset List at page %d failed: %v", pg, err)
		}
	}
	offsetStatsComputed := offsetStats.Compute()
	t.Logf("Offset P99 stats (sampled): %s", offsetStatsComputed.String())
	// BUGFIX: the durations were previously printed as raw nanoseconds while
	// labeled "ms" (off by 1e6); convert to milliseconds before formatting.
	t.Logf("CURSOR vs OFFSET P99 ratio: %.1fx faster (offset %.2fms vs cursor %.2fms)",
		float64(offsetStatsComputed.P99)/float64(cursorStatsComputed.P99),
		float64(offsetStatsComputed.P99)/float64(time.Millisecond),
		float64(cursorStatsComputed.P99)/float64(time.Millisecond))
}
// TestScale_OPLOG_001C_OperationLogCursorPagination benchmarks cursor (keyset)
// pagination for operation logs: it walks every page of a 100K-row table via
// ListCursor and asserts the per-page P99 stays under 100ms.
func TestScale_OPLOG_001C_OperationLogCursorPagination(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	const logCount = 100000
	opLogRepo := repository.NewOperationLogRepository(db)
	users := make([]*domain.User, 100)
	for i := range users {
		users[i] = &domain.User{
			Username: fmt.Sprintf("olcuser-%d", i),
			Email:    ptrString(fmt.Sprintf("olcuser-%d@test.com", i)),
			Password: "$2a$10$dummy",
			Status:   domain.UserStatusActive,
		}
	}
	// BUGFIX: insert errors were previously ignored, which let the benchmark
	// run (and "pass") against an incomplete table when setup failed.
	if err := db.Create(&users).Error; err != nil {
		t.Fatalf("failed to create users: %v", err)
	}
	batchSize := 5000
	for i := 0; i < logCount; i += batchSize {
		logs := make([]*domain.OperationLog, 0, batchSize)
		for j := 0; j < batchSize && (i+j) < logCount; j++ {
			idx := i + j
			uid := int64(idx % 100)
			logs = append(logs, &domain.OperationLog{
				OperationType:  "read",
				OperationName:  fmt.Sprintf("op_%d", idx),
				RequestMethod:  "GET",
				RequestPath:    fmt.Sprintf("/api/resource/%d", idx%1000),
				ResponseStatus: 200,
				// Same values as the old rune arithmetic ('1'+idx%9), written
				// as an explicit format: "10.0.0.1" .. "10.0.0.9".
				IP:        fmt.Sprintf("10.0.0.%d", 1+idx%9),
				UserAgent: "test-agent",
				UserID:    &uid,
				CreatedAt: time.Now().Add(-time.Duration(idx) * time.Second),
			})
		}
		if err := db.CreateInBatches(logs, 2000).Error; err != nil {
			t.Fatalf("failed to create operation logs at offset %d: %v", i, err)
		}
	}
	ctx := context.Background()
	const limit = 50
	cursorStats := NewLatencyCollector()
	var currentCursor *pagination.Cursor
	pageCount := 0
	for {
		start := time.Now()
		logs, hasMore, err := opLogRepo.ListCursor(ctx, limit, currentCursor)
		cursorStats.Record(time.Since(start))
		if err != nil {
			t.Fatalf("OpLog ListCursor failed: %v", err)
		}
		pageCount++
		if !hasMore || len(logs) == 0 {
			break
		}
		// Keyset cursor: continue strictly after the last row of this page.
		last := logs[len(logs)-1]
		currentCursor = &pagination.Cursor{LastID: last.ID, LastValue: last.CreatedAt}
	}
	stats := cursorStats.Compute()
	t.Logf("=== OPLOG CURSOR PAGINATION ===")
	t.Logf("Pages: %d, Stats: %s", pageCount, stats.String())
	stats.AssertSLA(t, 100*time.Millisecond, "OPLOG_001C_CursorPagination_P99")
}
// TestScale_LL_002_LoginLogTimeRangeQuery tests login-log time-range query
// performance. Industry baseline: querying the last 7/30/90 days should
// return within 3s locally (with partitioning or index support in production).
func TestScale_LL_002_LoginLogTimeRangeQuery(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	// 50K rows — enough to exercise index usage on time-range queries.
	const logCount = 50000
	loginLogRepo := repository.NewLoginLogRepository(db)
	// Users the logs reference.
	users := make([]*domain.User, 100)
	for i := range users {
		users[i] = &domain.User{
			Username: fmt.Sprintf("ll2user-%d", i),
			Email:    ptrString(fmt.Sprintf("ll2user-%d@test.com", i)),
			Password: "$2a$10$dummy",
			Status:   domain.UserStatusActive,
		}
	}
	if err := db.CreateInBatches(users, 100).Error; err != nil {
		t.Fatalf("failed to create users: %v", err)
	}
	// Generate logs: 80% within the last 7 days, 20% between 8 and 30 days ago.
	t.Logf("Generating %d login logs with time distribution...", logCount)
	batchSize := 10000
	for i := 0; i < logCount; i += batchSize {
		logs := make([]*domain.LoginLog, 0, batchSize)
		for j := 0; j < batchSize && (i+j) < logCount; j++ {
			idx := i + j
			var createdAt time.Time
			if idx%5 != 0 {
				// Within the last 7 days.
				daysOffset := float64(idx % 7)
				hoursOffset := float64((idx * 13) % 24)
				createdAt = time.Now().Add(-time.Duration(daysOffset*24)*time.Hour - time.Duration(hoursOffset)*time.Hour)
			} else {
				// 8-30 days ago.
				createdAt = time.Now().Add(-time.Duration(8+idx%23) * 24 * time.Hour)
			}
			userID := int64(idx % 100)
			logs = append(logs, &domain.LoginLog{
				UserID:    &users[userID].ID,
				LoginType: int(domain.LoginTypePassword),
				IP:        fmt.Sprintf("10.0.%d.%d", idx%256, (j*7)%256),
				Status:    1,
				CreatedAt: createdAt,
			})
		}
		if err := db.CreateInBatches(logs, 2000).Error; err != nil {
			t.Fatalf("failed to create logs: %v", err)
		}
	}
	// Time-range SLA — P99 < 3s (SQLite) / P99 < 200ms (PG production target).
	ctx := context.Background()
	testCases := []struct {
		days  int
		label string
	}{
		{7, "last 7 days"},
		{30, "last 30 days"},
		{90, "last 90 days"},
	}
	rangeStats := NewLatencyCollector()
	for round := 0; round < 5; round++ {
		for _, tc := range testCases {
			startTime := time.Now().Add(-time.Duration(tc.days) * 24 * time.Hour)
			endTime := time.Now()
			start := time.Now()
			results, total, err := loginLogRepo.ListByTimeRange(ctx, startTime, endTime, 0, 100)
			elapsed := time.Since(start)
			rangeStats.Record(elapsed)
			if err != nil {
				t.Errorf("ListByTimeRange (%s) failed: %v", tc.label, err)
				continue
			}
			_ = results // latency-only benchmark; results are not validated here
			_ = total
		}
	}
	stats := rangeStats.Compute()
	t.Logf("Time Range Query P99 stats: %s", stats.String())
	stats.AssertSLA(t, 3*time.Second, "LL_002_TimeRange_P99(SQLite)")
}
// TestScale_LL_003_LoginLogRetentionCleanup tests cleanup of logs beyond the
// retention period. Industry practice: data-retention policies (90/180/365
// days) must bulk-delete expired rows within a reasonable time.
func TestScale_LL_003_LoginLogRetentionCleanup(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	// Logs from 200 days ago (should be purged) and 7 days ago (should be kept).
	loginLogRepo := repository.NewLoginLogRepository(db)
	users := make([]*domain.User, 10)
	for i := range users {
		users[i] = &domain.User{
			Username: fmt.Sprintf("cleanup-user-%d", i),
			Email:    ptrString(fmt.Sprintf("cleanup-user-%d@test.com", i)),
			Password: "$2a$10$dummy",
			Status:   domain.UserStatusActive,
		}
	}
	if err := db.CreateInBatches(users, 10).Error; err != nil {
		t.Fatalf("failed to create users: %v", err)
	}
	// 100 old logs (200 days ago) and 50 recent logs (7 days ago).
	oldLogs := make([]*domain.LoginLog, 100)
	for i := range oldLogs {
		oldLogs[i] = &domain.LoginLog{
			UserID:    &users[i%10].ID,
			LoginType: int(domain.LoginTypePassword),
			IP:        fmt.Sprintf("192.168.0.%d", i),
			Status:    1,
			CreatedAt: time.Now().Add(-200 * 24 * time.Hour), // 200 days ago (beyond the 180-day window)
		}
	}
	if err := db.CreateInBatches(oldLogs, 100).Error; err != nil {
		t.Fatalf("failed to create old logs: %v", err)
	}
	newLogs := make([]*domain.LoginLog, 50)
	for i := range newLogs {
		newLogs[i] = &domain.LoginLog{
			UserID:    &users[i%10].ID,
			LoginType: int(domain.LoginTypePassword),
			IP:        fmt.Sprintf("192.168.1.%d", i),
			Status:    1,
			CreatedAt: time.Now().Add(-7 * 24 * time.Hour), // 7 days ago (inside the window)
		}
	}
	if err := db.CreateInBatches(newLogs, 50).Error; err != nil {
		t.Fatalf("failed to create new logs: %v", err)
	}
	// BUGFIX: List errors were previously discarded with blank identifiers,
	// which could turn a query failure into a misleading count mismatch.
	totalBefore, _, err := loginLogRepo.List(context.Background(), 0, 1000)
	if err != nil {
		t.Fatalf("List before cleanup failed: %v", err)
	}
	t.Logf("Before cleanup: %d total logs", len(totalBefore))
	// Run the cleanup with a 180-day retention window.
	start := time.Now()
	retentionDays := 180
	err = loginLogRepo.DeleteOlderThan(context.Background(), retentionDays)
	elapsed := time.Since(start)
	if err != nil {
		t.Fatalf("DeleteOlderThan failed: %v", err)
	}
	// After cleanup the 100 old logs must be gone and the 50 recent ones kept.
	t.Logf("Cleaned up logs older than %d days in %v", retentionDays, elapsed)
	remaining, _, err := loginLogRepo.List(context.Background(), 0, 1000)
	if err != nil {
		t.Fatalf("List after cleanup failed: %v", err)
	}
	if len(remaining) != 50 {
		t.Errorf("expected 50 logs remaining after cleanup, got %d", len(remaining))
	}
}
// TestScale_DV_001_300KDevicesPagination tests device-list pagination at 300K
// scale. Industry baseline: 2-5 devices per user, so 100K users ≈ 200K-500K
// devices. Each sampled page must individually meet a < 500ms SLA.
func TestScale_DV_001_300KDevicesPagination(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	// Industry baseline: 300K devices (100K users × ~3 devices each).
	const deviceCount = 300000
	deviceRepo := repository.NewDeviceRepository(db)
	// Create 100K users first.
	t.Logf("Generating %d users...", 100000)
	userBatch := 10000
	for batch := 0; batch < 100000/userBatch; batch++ {
		users := make([]*domain.User, userBatch)
		for i := 0; i < userBatch; i++ {
			idx := batch*userBatch + i
			users[i] = &domain.User{
				Username: fmt.Sprintf("dvuser-%08d", idx),
				Email:    ptrString(fmt.Sprintf("dvuser-%08d@test.com", idx)),
				Password: "$2a$10$dummy",
				Status:   domain.UserStatusActive,
			}
		}
		if err := db.CreateInBatches(users, 1000).Error; err != nil {
			t.Fatalf("failed to create users: %v", err)
		}
	}
	// Generate 300K devices (3 per user).
	t.Logf("Generating %d devices...", deviceCount)
	start := time.Now()
	deviceBatch := 30000
	for batch := 0; batch < deviceCount/deviceBatch; batch++ {
		devices := make([]*domain.Device, deviceBatch)
		for i := 0; i < deviceBatch; i++ {
			idx := batch*deviceBatch + i
			devices[i] = &domain.Device{
				// NOTE(review): assumes user IDs auto-increment 1..100000 in
				// this fresh database — confirm against the User model.
				UserID:         int64(idx%100000) + 1,
				DeviceID:       fmt.Sprintf("device-%08d", idx),
				DeviceName:     fmt.Sprintf("Device %d", idx),
				DeviceType:     domain.DeviceTypeWeb,
				IP:             fmt.Sprintf("192.168.%d.%d", idx%256, (idx+50)%256),
				Location:       "Test Location",
				Status:         domain.DeviceStatusActive,
				LastActiveTime: time.Now().Add(-time.Duration(idx%86400) * time.Second),
			}
		}
		if err := db.CreateInBatches(devices, 50).Error; err != nil {
			t.Fatalf("failed to create devices: %v", err)
		}
		if batch%2 == 0 {
			t.Logf(" progress: %d / %d devices", batch*deviceBatch+deviceBatch, deviceCount)
		}
	}
	t.Logf("Created %d devices in %v", deviceCount, time.Since(start))
	// Pagination SLA: each sampled page must return within 500ms.
	ctx := context.Background()
	testCases := []struct {
		page     int
		pageSize int
		sla      time.Duration
	}{
		{1, 20, 500 * time.Millisecond},
		{100, 20, 500 * time.Millisecond},
		{1000, 20, 500 * time.Millisecond},
		{5000, 20, 500 * time.Millisecond},
	}
	for _, tc := range testCases {
		start := time.Now()
		offset := (tc.page - 1) * tc.pageSize
		result, total, err := deviceRepo.List(ctx, offset, tc.pageSize)
		elapsed := time.Since(start)
		if err != nil {
			t.Errorf("List page %d failed: %v", tc.page, err)
			continue
		}
		if total != int64(deviceCount) {
			t.Errorf("expected total %d, got %d", deviceCount, total)
		}
		t.Logf("Page %d (offset=%d): %d results in %v [SLA=%v]", tc.page, offset, len(result), elapsed, tc.sla)
		if elapsed > tc.sla {
			t.Errorf("SLA BREACH: Page %d took %v, exceeded %v", tc.page, elapsed, tc.sla)
		}
	}
}
// TestScale_DV_002_DeviceMultiConditionFilter tests device filtering with
// multiple combined conditions (status + trusted flag) at 100K-device scale.
func TestScale_DV_002_DeviceMultiConditionFilter(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	const deviceCount = 100000
	deviceRepo := repository.NewDeviceRepository(db)
	// Users the devices belong to.
	users := make([]*domain.User, 1000)
	for i := range users {
		users[i] = &domain.User{
			Username: fmt.Sprintf("dv002user-%d", i),
			Email:    ptrString(fmt.Sprintf("dv002user-%d@test.com", i)),
			Password: "$2a$10$dummy",
			Status:   domain.UserStatusActive,
		}
	}
	if err := db.CreateInBatches(users, 500).Error; err != nil {
		t.Fatalf("failed to create users: %v", err)
	}
	// Generate devices with mixed state: 1/5 inactive, 1/3 trusted.
	devices := make([]*domain.Device, deviceCount)
	for i := 0; i < deviceCount; i++ {
		status := domain.DeviceStatusActive
		if i%5 == 0 {
			status = domain.DeviceStatusInactive
		}
		isTrusted := i%3 == 0
		devices[i] = &domain.Device{
			// NOTE(review): assumes user IDs auto-increment from 1 in this
			// fresh database — confirm against the User model.
			UserID:         int64(i%1000) + 1,
			DeviceID:       fmt.Sprintf("dv002-device-%08d", i),
			DeviceName:     fmt.Sprintf("Device %d", i),
			DeviceType:     domain.DeviceTypeWeb,
			Status:         status,
			IsTrusted:      isTrusted,
			LastActiveTime: time.Now().Add(-time.Duration(i%86400) * time.Second),
		}
	}
	start := time.Now()
	if err := db.CreateInBatches(devices, 50).Error; err != nil {
		t.Fatalf("failed to create devices: %v", err)
	}
	t.Logf("Created %d devices in %v", deviceCount, time.Since(start))
	// Filter SLA — P99 < 500ms (SQLite) / P99 < 50ms (PG production target).
	ctx := context.Background()
	trusted := true
	params := &repository.ListDevicesParams{
		Status:    ptrDeviceStatus(domain.DeviceStatusActive),
		IsTrusted: &trusted,
		Offset:    0,
		Limit:     50,
	}
	filterStats := NewLatencyCollector()
	for round := 0; round < 5; round++ {
		start := time.Now()
		results, total, err := deviceRepo.ListAll(ctx, params)
		elapsed := time.Since(start)
		filterStats.Record(elapsed)
		if err != nil {
			t.Errorf("ListAll failed: %v", err)
			continue
		}
		_ = results // latency-only benchmark; results are not validated here
		_ = total
	}
	stats := filterStats.Compute()
	t.Logf("Device Multi-Condition Filter P99 stats: %s", stats.String())
	stats.AssertSLA(t, 500*time.Millisecond, "DV_002_DeviceFilter_P99(SQLite)")
}
// TestScale_DS_001_DashboardStatsAccuracy tests dashboard statistics accuracy
// at 100K-user scale: the service-layer counts must exactly match direct DB
// counts, and GetUserStats must meet its latency SLA.
func TestScale_DS_001_DashboardStatsAccuracy(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	const userCount = 100000
	userRepo := repository.NewUserRepository(db)
	loginLogRepo := repository.NewLoginLogRepository(db)
	// Status distribution: 80% active, 10% inactive, 5% locked, 5% disabled.
	t.Logf("Generating %d users with status distribution...", userCount)
	statusWeights := []struct {
		status domain.UserStatus
		ratio  float64
	}{
		{domain.UserStatusActive, 0.80},
		{domain.UserStatusInactive, 0.10},
		{domain.UserStatusLocked, 0.05},
		{domain.UserStatusDisabled, 0.05},
	}
	userBatch := 10000
	for batch := 0; batch < userCount/userBatch; batch++ {
		users := make([]*domain.User, userBatch)
		for i := 0; i < userBatch; i++ {
			idx := batch*userBatch + i
			var status domain.UserStatus
			// Deterministic weighted assignment: idx/userCount in [0,1) is
			// matched against the cumulative ratio table.
			r := float64(idx) / float64(userCount)
			acc := 0.0
			for _, sw := range statusWeights {
				acc += sw.ratio
				if r < acc {
					status = sw.status
					break
				}
			}
			// Defensive fallback; the ratios sum to 1.0, so the loop should
			// always assign. NOTE(review): assumes the zero value of
			// domain.UserStatus is not a valid status — confirm.
			if status == 0 {
				status = domain.UserStatusActive
			}
			users[i] = &domain.User{
				Username: fmt.Sprintf("statsuser-%08d", idx),
				Email:    ptrString(fmt.Sprintf("statsuser-%08d@test.com", idx)),
				Password: "$2a$10$dummy",
				Status:   status,
			}
		}
		if err := db.CreateInBatches(users, 50).Error; err != nil {
			t.Fatalf("failed to create users: %v", err)
		}
	}
	// Exact per-status counts straight from the database as ground truth.
	var totalUsers, activeUsers, inactiveUsers, lockedUsers, disabledUsers int64
	db.Model(&domain.User{}).Count(&totalUsers)
	db.Model(&domain.User{}).Where("status = ?", domain.UserStatusActive).Count(&activeUsers)
	db.Model(&domain.User{}).Where("status = ?", domain.UserStatusInactive).Count(&inactiveUsers)
	db.Model(&domain.User{}).Where("status = ?", domain.UserStatusLocked).Count(&lockedUsers)
	db.Model(&domain.User{}).Where("status = ?", domain.UserStatusDisabled).Count(&disabledUsers)
	t.Logf("DB counts: total=%d, active=%d, inactive=%d, locked=%d, disabled=%d",
		totalUsers, activeUsers, inactiveUsers, lockedUsers, disabledUsers)
	// Service-layer stats SLA — P99 < 5s (SQLite) / P99 < 200ms (PG target).
	statsSvc := NewStatsService(userRepo, loginLogRepo)
	ctx := context.Background()
	statsCollector := NewLatencyCollector()
	for i := 0; i < 5; i++ {
		start := time.Now()
		stats, err := statsSvc.GetUserStats(ctx)
		elapsed := time.Since(start)
		statsCollector.Record(elapsed)
		if err != nil {
			t.Fatalf("GetUserStats failed: %v", err)
		}
		_ = stats
	}
	stats := statsCollector.Compute()
	t.Logf("GetUserStats P99 stats: %s", stats.String())
	// BUGFIX: the SLA was previously asserted on the raw collector, whose P99
	// field is never populated (always zero), so the check could never fail.
	// Assert on the computed statistics instead.
	stats.AssertSLA(t, 5*time.Second, "DS_001_GetUserStats_P99(SQLite)")
	// Accuracy: service results must equal the exact DB counts.
	// BUGFIX: the error from this call was previously discarded, risking a
	// nil dereference on finalStats below.
	finalStats, err := statsSvc.GetUserStats(ctx)
	if err != nil {
		t.Fatalf("final GetUserStats failed: %v", err)
	}
	if finalStats.TotalUsers != totalUsers {
		t.Errorf("TotalUsers mismatch: expected %d, got %d", totalUsers, finalStats.TotalUsers)
	}
	if finalStats.ActiveUsers != activeUsers {
		t.Errorf("ActiveUsers mismatch: expected %d, got %d", activeUsers, finalStats.ActiveUsers)
	}
	if finalStats.InactiveUsers != inactiveUsers {
		t.Errorf("InactiveUsers mismatch: expected %d, got %d", inactiveUsers, finalStats.InactiveUsers)
	}
}
// TestScale_BO_001_BatchUserCreation tests batch user-creation performance.
// Industry baseline: creating 1000 users through the service layer (one
// Create call per user) should complete in < 5s.
func TestScale_BO_001_BatchUserCreation(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	const batchSize = 1000
	userRepo := repository.NewUserRepository(db)
	userRoleRepo := repository.NewUserRoleRepository(db)
	roleRepo := repository.NewRoleRepository(db)
	passwordHistoryRepo := repository.NewPasswordHistoryRepository(db)
	userSvc := NewUserService(userRepo, userRoleRepo, roleRepo, passwordHistoryRepo)
	users := make([]*domain.User, batchSize)
	for i := 0; i < batchSize; i++ {
		users[i] = &domain.User{
			Username: fmt.Sprintf("batchuser-%08d", i),
			Email:    ptrString(fmt.Sprintf("batchuser-%08d@test.com", i)),
			Password: "$2a$10$dummy",
			Status:   domain.UserStatusActive,
		}
	}
	ctx := context.Background()
	start := time.Now()
	// Intentionally goes through the service one user at a time (not a bulk
	// insert) to measure end-to-end Create throughput.
	for i := 0; i < batchSize; i++ {
		if err := userSvc.Create(ctx, users[i]); err != nil {
			t.Fatalf("Create user %d failed: %v", i, err)
		}
	}
	elapsed := time.Since(start)
	throughput := float64(batchSize) / elapsed.Seconds()
	t.Logf("Created %d users in %v (%.2f users/sec) [SLA=5s]", batchSize, elapsed, throughput)
	if elapsed > 5*time.Second {
		t.Errorf("SLA BREACH: Batch creation took %v, exceeded 5s", elapsed)
	}
	// Verify the created count via the service's own List.
	_, total, err := userSvc.List(ctx, 0, int(batchSize*2))
	if err != nil {
		t.Fatalf("List failed: %v", err)
	}
	if total < int64(batchSize) {
		t.Errorf("expected at least %d users, got %d", batchSize, total)
	}
}
// TestScale_BO_002_BatchStatusUpdate tests batch status-update performance:
// 1000 users are created, each is disabled through the service, and the final
// state of every user is verified.
func TestScale_BO_002_BatchStatusUpdate(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	const userCount = 1000
	userRepo := repository.NewUserRepository(db)
	userRoleRepo := repository.NewUserRoleRepository(db)
	roleRepo := repository.NewRoleRepository(db)
	passwordHistoryRepo := repository.NewPasswordHistoryRepository(db)
	userSvc := NewUserService(userRepo, userRoleRepo, roleRepo, passwordHistoryRepo)
	// Pre-create the users to be updated.
	users := make([]*domain.User, userCount)
	for i := 0; i < userCount; i++ {
		users[i] = &domain.User{
			Username: fmt.Sprintf("boustatus-%08d", i),
			Email:    ptrString(fmt.Sprintf("boustatus-%08d@test.com", i)),
			Password: "$2a$10$dummy",
			Status:   domain.UserStatusActive,
		}
	}
	ctx := context.Background()
	for i := 0; i < userCount; i++ {
		if err := userSvc.Create(ctx, users[i]); err != nil {
			t.Fatalf("Create user %d failed: %v", i, err)
		}
	}
	// Disable every user and measure update throughput.
	start := time.Now()
	for i := 0; i < userCount; i++ {
		if err := userSvc.UpdateStatus(ctx, users[i].ID, domain.UserStatusDisabled); err != nil {
			t.Fatalf("UpdateStatus failed for user %d: %v", i, err)
		}
	}
	elapsed := time.Since(start)
	t.Logf("Batch disabled %d users in %v (%.2f updates/sec)",
		userCount, elapsed, float64(userCount)/elapsed.Seconds())
	// Verify every user ended up disabled.
	// BUGFIX: GetByID errors were previously discarded; a failed lookup would
	// then dereference user.Status and most likely panic on a nil user.
	for i := 0; i < userCount; i++ {
		user, err := userSvc.GetByID(ctx, users[i].ID)
		if err != nil {
			t.Fatalf("GetByID failed for user %d: %v", i, err)
		}
		if user.Status != domain.UserStatusDisabled {
			t.Errorf("user %d expected disabled, got %d", i, user.Status)
		}
	}
}
// TestScale_PR_001_PermissionTreeLoad verifies that loading a 500-node
// permission tree stays within the SLA.
// Industry standard: 500 permission nodes should load in < 500ms.
func TestScale_PR_001_PermissionTreeLoad(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	const permissionCount = 500
	permRepo := repository.NewPermissionRepository(db)
	t.Logf("Generating %d permissions in tree structure...", permissionCount)
	tree := generatePermissionTree(permissionCount)
	insertStart := time.Now()
	if err := db.CreateInBatches(tree, 100).Error; err != nil {
		t.Fatalf("failed to create permissions: %v", err)
	}
	t.Logf("Created %d permissions in %v", permissionCount, time.Since(insertStart))
	// Sample the full-tree load five times and assert on the P99 latency —
	// P99 < 500ms (SQLite) / P99 < 50ms (PG production target).
	ctx := context.Background()
	collector := NewLatencyCollector()
	for sample := 0; sample < 5; sample++ {
		begin := time.Now()
		loaded, _, err := permRepo.List(ctx, 0, 1000)
		collector.Record(time.Since(begin))
		if err != nil {
			t.Fatalf("List permissions failed: %v", err)
		}
		if len(loaded) < permissionCount {
			t.Errorf("expected %d permissions, got %d", permissionCount, len(loaded))
		}
	}
	stats := collector.Compute()
	t.Logf("Permission tree load P99 stats: %s", stats.String())
	stats.AssertSLA(t, 500*time.Millisecond, "PR_001_PermissionTreeLoad_P99(SQLite)")
}
// TestScale_PR_002_PermissionTreeExplosion verifies that loading 1000+
// permission nodes (deep tree) stays within the SLA (P99 < 1s for 1000 nodes).
func TestScale_PR_002_PermissionTreeExplosion(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	const permissionCount = 1000
	permRepo := repository.NewPermissionRepository(db)
	t.Logf("Generating %d permissions (permission tree explosion test)...", permissionCount)
	tree := generateDeepPermissionTree(permissionCount)
	insertStart := time.Now()
	if err := db.CreateInBatches(tree, 200).Error; err != nil {
		t.Fatalf("failed to create permissions: %v", err)
	}
	t.Logf("Created %d permissions in %v", len(tree), time.Since(insertStart))
	// Sample the full load five times; SLA: P99 < 1s (1000-node load).
	ctx := context.Background()
	collector := NewLatencyCollector()
	for sample := 0; sample < 5; sample++ {
		begin := time.Now()
		loaded, _, err := permRepo.List(ctx, 0, 2000)
		collector.Record(time.Since(begin))
		if err != nil {
			t.Fatalf("List permissions failed: %v", err)
		}
		if len(loaded) < permissionCount {
			t.Errorf("expected %d permissions, got %d", permissionCount, len(loaded))
		}
	}
	stats := collector.Compute()
	t.Logf("Permission explosion load P99 stats: %s", stats.String())
	stats.AssertSLA(t, 1*time.Second, "PR_002_PermissionTreeExplosion_P99(SQLite)")
}
// TestScale_AUTH_001_LoginFailureLogScale tests recording massive login failures.
// Industry reference: simulates a brute-force scenario of 1000 users x 10
// failures = 10000 failure log rows, then samples the failed-login query
// latency (SLA: P99 < 200ms SQLite / P99 < 20ms PG production target).
func TestScale_AUTH_001_LoginFailureLogScale(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	const userCount = 1000
	const failuresPerUser = 10
	const totalFailures = userCount * failuresPerUser
	loginLogRepo := repository.NewLoginLogRepository(db)
	ctx := context.Background()
	// Create the users the failure logs will reference.
	users := make([]*domain.User, userCount)
	for i := 0; i < userCount; i++ {
		users[i] = &domain.User{
			Username: fmt.Sprintf("authtest-%d", i),
			Email:    ptrString(fmt.Sprintf("authtest-%d@test.com", i)),
			Password: "$2a$10$dummy",
			Status:   domain.UserStatusActive,
		}
		if err := db.Create(users[i]).Error; err != nil {
			t.Fatalf("Create user %d failed: %v", i, err)
		}
	}
	// Record failure logs. Create errors are now fatal — the original ignored
	// them, so a broken write path would have produced a bogus throughput number.
	t.Logf("Recording %d login failures (simulating brute-force scenario)...", totalFailures)
	start := time.Now()
	failReasons := []string{"密码错误", "账号已锁定", "账号已禁用", "验证码错误"}
	batchSize := 1000
	for batch := 0; batch < totalFailures/batchSize; batch++ {
		for i := 0; i < batchSize; i++ {
			idx := batch*batchSize + i
			userIdx := idx % userCount
			failIdx := (idx / userCount) % len(failReasons)
			if err := loginLogRepo.Create(ctx, &domain.LoginLog{
				UserID:     &users[userIdx].ID,
				LoginType:  int(domain.LoginTypePassword),
				IP:         fmt.Sprintf("10.0.%d.%d", idx%256, failIdx),
				Status:     0,
				FailReason: failReasons[failIdx],
			}); err != nil {
				t.Fatalf("Create login log %d failed: %v", idx, err)
			}
		}
	}
	elapsed := time.Since(start)
	t.Logf("Recorded %d login failures in %v (%.2f logs/sec)",
		totalFailures, elapsed, float64(totalFailures)/elapsed.Seconds())
	// Sample the failed-login query latency five times.
	queryStats := NewLatencyCollector()
	for i := 0; i < 5; i++ {
		start = time.Now()
		allFailed, _, err := loginLogRepo.ListByStatus(ctx, 0, 0, 1000)
		queryElapsed := time.Since(start)
		queryStats.Record(queryElapsed)
		if err != nil {
			t.Fatalf("ListByStatus failed: %v", err)
		}
		_ = allFailed
	}
	stats := queryStats.Compute()
	t.Logf("Failed login query P99 stats: %s", stats.String())
	stats.AssertSLA(t, 200*time.Millisecond, "AUTH_001_FailedLoginQuery_P99(SQLite)")
}
// TestScale_OPLOG_001_OperationLogScale tests operation log writes at scale.
// Industry reference: a mid-size system writes 5000-20000 operations/day and
// retains 90 days (450K-1.8M rows); this test covers ~1-2 days (10K rows) and
// samples list-query latency (SLA: P99 < 500ms SQLite / < 50ms PG target).
func TestScale_OPLOG_001_OperationLogScale(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	// 10K operation logs (~1-2 days of traffic) to exercise basic CRUD perf.
	const logCount = 10000
	opLogRepo := repository.NewOperationLogRepository(db)
	ctx := context.Background()
	// Create the acting user referenced by every log row.
	user := &domain.User{
		Username: "oplog_user",
		Email:    ptrString("oplog@test.com"),
		Password: "$2a$10$dummy",
		Status:   domain.UserStatusActive,
	}
	if err := db.Create(user).Error; err != nil {
		t.Fatalf("Create user failed: %v", err)
	}
	t.Logf("Generating %d operation logs...", logCount)
	start := time.Now()
	operations := []string{"user.create", "user.update", "user.delete", "role.assign", "permission.grant", "user.login", "user.logout"}
	for i := 0; i < logCount; i++ {
		// The Create error is now checked — the original ignored it, which
		// would let a broken write path report a meaningless throughput figure.
		if err := opLogRepo.Create(ctx, &domain.OperationLog{
			UserID:         &user.ID,
			OperationType:  operations[i%len(operations)],
			OperationName:  "TestOperation",
			RequestMethod:  "POST",
			RequestPath:    "/api/v1/test",
			ResponseStatus: 200,
			IP:             fmt.Sprintf("192.168.%d.%d", i%256, (i*7)%256),
			UserAgent:      "TestAgent/1.0",
		}); err != nil {
			t.Fatalf("Create operation log %d failed: %v", i, err)
		}
	}
	elapsed := time.Since(start)
	t.Logf("Created %d operation logs in %v (%.2f logs/sec)",
		logCount, elapsed, float64(logCount)/elapsed.Seconds())
	// Sample list-query latency five times.
	queryStats := NewLatencyCollector()
	for i := 0; i < 5; i++ {
		start = time.Now()
		recentLogs, _, err := opLogRepo.List(ctx, 0, 100)
		queryElapsed := time.Since(start)
		queryStats.Record(queryElapsed)
		if err != nil {
			t.Fatalf("List operation logs failed: %v", err)
		}
		_ = recentLogs
	}
	stats := queryStats.Compute()
	t.Logf("Operation log query P99 stats: %s", stats.String())
	stats.AssertSLA(t, 500*time.Millisecond, "OPLOG_001_ListQuery_P99(SQLite)")
}
// TestScale_DEV_003_DeviceActiveUpdate tests device last_active_time updates at scale.
// Industry standard: batch-updating 5000 device activity timestamps should
// complete in < 5s.
func TestScale_DEV_003_DeviceActiveUpdate(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	const deviceCount = 5000
	deviceRepo := repository.NewDeviceRepository(db)
	ctx := context.Background()
	// Create 100 owners; the 5000 devices are spread across them round-robin.
	users := make([]*domain.User, 100)
	for i := range users {
		users[i] = &domain.User{
			Username: fmt.Sprintf("devact-%d", i),
			Email:    ptrString(fmt.Sprintf("devact-%d@test.com", i)),
			Password: "$2a$10$dummy",
			Status:   domain.UserStatusActive,
		}
		if err := db.Create(users[i]).Error; err != nil {
			t.Fatalf("Create user failed: %v", err)
		}
	}
	devices := make([]*domain.Device, deviceCount)
	for i := 0; i < deviceCount; i++ {
		devices[i] = &domain.Device{
			UserID:         int64(i%100) + 1, // assumes sequential auto-increment user IDs starting at 1
			DeviceID:       fmt.Sprintf("devact-%08d", i),
			DeviceName:     fmt.Sprintf("Device %d", i),
			DeviceType:     domain.DeviceTypeWeb,
			Status:         domain.DeviceStatusActive,
			LastActiveTime: time.Now().Add(-time.Duration(i) * time.Second),
		}
		if err := db.Create(devices[i]).Error; err != nil {
			t.Fatalf("Create device %d failed: %v", i, err)
		}
	}
	// Update activity timestamps. SLA: < 5s. Update errors are now fatal —
	// the original discarded them, which could hide a broken write path while
	// still reporting an (invalid) throughput number.
	t.Logf("Updating last_active_time for %d devices...", deviceCount)
	start := time.Now()
	for i := 0; i < deviceCount; i++ {
		if err := deviceRepo.UpdateLastActiveTime(ctx, devices[i].ID); err != nil {
			t.Fatalf("UpdateLastActiveTime failed for device %d: %v", i, err)
		}
	}
	elapsed := time.Since(start)
	t.Logf("Updated %d devices in %v (%.2f updates/sec) [SLA=5s]",
		deviceCount, elapsed, float64(deviceCount)/elapsed.Seconds())
	if elapsed > 5*time.Second {
		t.Errorf("SLA BREACH: Device active update took %v, exceeded 5s", elapsed)
	}
}
// TestScale_ROLE_001_RolePermissionAssignmentScale tests large-scale role-permission assignment.
// Industry standard: 50 roles x 500 permissions = 25000 role-permission rows,
// expected to complete in < 10s.
func TestScale_ROLE_001_RolePermissionAssignmentScale(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	const roleCount = 50
	const permCount = 500
	roleRepo := repository.NewRoleRepository(db)
	permRepo := repository.NewPermissionRepository(db)
	rolePermRepo := repository.NewRolePermissionRepository(db)
	ctx := context.Background()
	// Create the roles.
	t.Logf("Creating %d roles...", roleCount)
	roles := make([]*domain.Role, roleCount)
	for i := 0; i < roleCount; i++ {
		roles[i] = &domain.Role{
			Name: fmt.Sprintf("scale_role_%d", i),
			Code: fmt.Sprintf("scale_role_%d", i),
		}
		if err := roleRepo.Create(ctx, roles[i]); err != nil {
			t.Fatalf("Create role %d failed: %v", i, err)
		}
	}
	// Create the permissions.
	t.Logf("Creating %d permissions...", permCount)
	perms := make([]*domain.Permission, permCount)
	for i := 0; i < permCount; i++ {
		perms[i] = &domain.Permission{
			Name: fmt.Sprintf("scale_perm_%d", i),
			Code: fmt.Sprintf("scale:perm:%d", i),
			Type: 1,
		}
		if err := permRepo.Create(ctx, perms[i]); err != nil {
			t.Fatalf("Create permission %d failed: %v", i, err)
		}
	}
	// Role service used for the assignment calls.
	roleSvc := NewRoleService(roleRepo, rolePermRepo)
	// Build the permission-ID list once: it is identical for every role, and
	// rebuilding it inside the timed loop (as the original did) both wasted
	// work and skewed the measured assignment time.
	permIDs := make([]int64, permCount)
	for j, p := range perms {
		permIDs[j] = p.ID
	}
	// Assign every permission to every role.
	t.Logf("Assigning %d permissions to %d roles...", permCount, roleCount)
	start := time.Now()
	for i := 0; i < roleCount; i++ {
		if err := roleSvc.AssignPermissions(ctx, roles[i].ID, permIDs); err != nil {
			t.Fatalf("AssignPermissions for role %d failed: %v", i, err)
		}
	}
	elapsed := time.Since(start)
	totalAssignments := roleCount * permCount
	t.Logf("Assigned %d permissions (%d roles x %d perms) in %v (%.2f assigns/sec) [SLA=10s]",
		totalAssignments, roleCount, permCount, elapsed, float64(totalAssignments)/elapsed.Seconds())
	if elapsed > 10*time.Second {
		t.Errorf("SLA BREACH: Role-permission assignment took %v, exceeded 10s", elapsed)
	}
}
// =============================================================================
// ⚡ 并发压测CONC_SCALE_001~003
// 模拟真实多用户并发场景50~100 goroutine 同时操作
// SQLite WAL 模式下可支持 100+ 并发写入
// =============================================================================
// runConcurrent launches n goroutines that each invoke fn with their own
// index, retrying a failing call up to 5 additional times with linear backoff
// (2ms, 4ms, ... 10ms). It blocks until all goroutines finish and returns the
// number of indices whose final attempt succeeded.
func runConcurrent(n int, fn func(idx int) error) int {
	const maxRetries = 5
	var (
		wg        sync.WaitGroup
		mu        sync.Mutex
		succeeded int
	)
	// tryWithRetry reports whether fn(idx) eventually returned nil within the
	// retry budget (1 initial attempt + maxRetries retries).
	tryWithRetry := func(idx int) bool {
		for attempt := 0; ; attempt++ {
			if fn(idx) == nil {
				return true
			}
			if attempt == maxRetries {
				return false
			}
			// Linear backoff before the next attempt.
			time.Sleep(time.Duration(attempt+1) * 2 * time.Millisecond)
		}
	}
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			if tryWithRetry(idx) {
				mu.Lock()
				succeeded++
				mu.Unlock()
			}
		}(i)
	}
	wg.Wait()
	return succeeded
}
// TestScale_CONC_001_ConcurrentUserRegistration runs 50 goroutines that each
// register a distinct user, asserting a >= 90% success rate.
// Industry reference: peak registration concurrency is roughly 50~100 req/s.
func TestScale_CONC_001_ConcurrentUserRegistration(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	userSvc := NewUserService(
		repository.NewUserRepository(db),
		repository.NewUserRoleRepository(db),
		repository.NewRoleRepository(db),
		repository.NewPasswordHistoryRepository(db),
	)
	ctx := context.Background()
	const goroutines = 50
	begin := time.Now()
	ok := runConcurrent(goroutines, func(idx int) error {
		// Nanosecond timestamps keep usernames/emails unique across attempts.
		return userSvc.Create(ctx, &domain.User{
			Username: fmt.Sprintf("conc001_user_%d_%d", time.Now().UnixNano(), idx),
			Email:    ptrString(fmt.Sprintf("conc001_%d_%d@test.com", time.Now().UnixNano(), idx)),
			Password: "$2a$10$dummy",
			Status:   domain.UserStatusActive,
		})
	})
	took := time.Since(begin)
	t.Logf("Concurrent registration: %d/%d succeeded in %v (%.1f req/s)",
		ok, goroutines, took, float64(ok)/took.Seconds())
	// Require at least a 90% success rate.
	if want := goroutines * 90 / 100; ok < want {
		t.Errorf("expected at least %d successes, got %d", want, ok)
	}
}
// TestScale_CONC_002_ConcurrentDeviceCreation runs 50 goroutines that each
// create one device for a distinct pre-created user, asserting >= 90% success.
// Industry reference: IoT scenarios see 50+ devices registering simultaneously.
func TestScale_CONC_002_ConcurrentDeviceCreation(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	deviceSvc := NewDeviceService(repository.NewDeviceRepository(db), repository.NewUserRepository(db))
	// Seed 50 users — one owner per goroutine.
	ctx := context.Background()
	owners := make([]*domain.User, 50)
	for i := range owners {
		owners[i] = &domain.User{
			Username: fmt.Sprintf("conc002_user_%d", i),
			Email:    ptrString(fmt.Sprintf("conc002_%d@test.com", i)),
			Password: "$2a$10$dummy",
			Status:   domain.UserStatusActive,
		}
		if err := db.Create(owners[i]).Error; err != nil {
			t.Fatalf("Create user %d failed: %v", i, err)
		}
	}
	const goroutines = 50
	begin := time.Now()
	ok := runConcurrent(goroutines, func(idx int) error {
		req := &CreateDeviceRequest{
			DeviceID:   fmt.Sprintf("conc002_device_%d_%d", time.Now().UnixNano(), idx),
			DeviceName: fmt.Sprintf("Device %d", idx),
			DeviceType: int(domain.DeviceTypeWeb),
		}
		_, err := deviceSvc.CreateDevice(ctx, owners[idx].ID, req)
		return err
	})
	took := time.Since(begin)
	t.Logf("Concurrent device creation: %d/%d succeeded in %v (%.1f req/s)",
		ok, goroutines, took, float64(ok)/took.Seconds())
	// Require at least a 90% success rate.
	if want := goroutines * 90 / 100; ok < want {
		t.Errorf("expected at least %d successes, got %d", want, ok)
	}
}
// TestScale_CONC_003_ConcurrentLoginLogWrite runs 100 goroutines writing login
// logs for 10 shared users, asserting a >= 80% success rate.
// Industry reference: 1000 concurrent users at ~0.5-2 logins/user/sec peaks at
// roughly 100~500 log writes/sec.
func TestScale_CONC_003_ConcurrentLoginLogWrite(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping scale test in short mode")
	}
	db := newIsolatedDB(t)
	loginLogSvc := NewLoginLogService(repository.NewLoginLogRepository(db))
	// Seed 10 users; the 100 writers share them round-robin.
	ctx := context.Background()
	writers := make([]*domain.User, 10)
	for i := range writers {
		writers[i] = &domain.User{
			Username: fmt.Sprintf("conc003_user_%d", i),
			Email:    ptrString(fmt.Sprintf("conc003_%d@test.com", i)),
			Password: "$2a$10$dummy",
			Status:   domain.UserStatusActive,
		}
		if err := db.Create(writers[i]).Error; err != nil {
			t.Fatalf("Create user failed: %v", err)
		}
	}
	const goroutines = 100
	begin := time.Now()
	ok := runConcurrent(goroutines, func(idx int) error {
		req := &RecordLoginRequest{
			UserID:    writers[idx%10].ID,
			LoginType: int(domain.LoginTypePassword),
			IP:        fmt.Sprintf("10.%d.%d.%d", idx/65536, (idx/256)%256, idx%256),
			Status:    1,
		}
		return loginLogSvc.RecordLogin(ctx, req)
	})
	took := time.Since(begin)
	t.Logf("Concurrent login log write: %d/%d written in %v (%.1f logs/sec)",
		ok, goroutines, took, float64(ok)/took.Seconds())
	// Require at least an 80% success rate.
	if want := goroutines * 80 / 100; ok < want {
		t.Errorf("expected at least %d successes, got %d", want, ok)
	}
}
// =============================================================================
// Helper Functions
// =============================================================================
// generateTestUsers builds count in-memory test users with a fixed status
// distribution per 7-user cycle: four active, then one each of inactive,
// locked, and disabled.
func generateTestUsers(count int) []*domain.User {
	distribution := []domain.UserStatus{
		domain.UserStatusActive,
		domain.UserStatusActive,
		domain.UserStatusActive,
		domain.UserStatusActive,
		domain.UserStatusInactive,
		domain.UserStatusLocked,
		domain.UserStatusDisabled,
	}
	out := make([]*domain.User, count)
	for i := range out {
		out[i] = &domain.User{
			Username: fmt.Sprintf("testuser_%08d", i),
			Email:    ptrString(fmt.Sprintf("testuser_%08d@test.com", i)),
			Password: "$2a$10$dummy",
			Status:   distribution[i%len(distribution)],
		}
	}
	return out
}
// generatePermissionTree builds count permissions arranged as a two-level
// tree: up to 10 root "module" nodes, with the remaining nodes distributed as
// children across the roots. For any count > 0 exactly count permissions are
// returned — the original panicked for count < 10 (slicing past the backing
// array) and silently returned fewer than count when (count-10) was not a
// multiple of 10 (integer division dropped the remainder). For counts that
// divide evenly (e.g. the 500 used by TestScale_PR_001) the output is
// identical to the original.
//
// NOTE(review): child ParentID values assume the DB assigns sequential
// auto-increment IDs 1..rootCount to the roots in insert order — confirm this
// matches how callers persist the slice.
func generatePermissionTree(count int) []*domain.Permission {
	permissions := make([]*domain.Permission, count)
	// Root "module" permissions.
	rootCount := 10
	for i := 0; i < rootCount && i < count; i++ {
		permissions[i] = &domain.Permission{
			Name:     fmt.Sprintf("模块_%d", i),
			Code:     fmt.Sprintf("module_%d", i),
			ParentID: nil,
			Sort:     i,
		}
	}
	// If every slot was consumed by roots, we are done.
	if count <= rootCount {
		return permissions[:count]
	}
	// Distribute the remaining nodes as children: each root gets an equal
	// share, and the first `extra` roots absorb one additional child so the
	// total is exactly count.
	childrenPerRoot := (count - rootCount) / rootCount
	extra := (count - rootCount) % rootCount
	childIndex := rootCount
	for rootIdx := 0; rootIdx < rootCount && childIndex < count; rootIdx++ {
		parentID := int64(rootIdx + 1) // expected auto-increment ID of this root
		quota := childrenPerRoot
		if rootIdx < extra {
			quota++
		}
		for j := 0; j < quota && childIndex < count; j++ {
			permissions[childIndex] = &domain.Permission{
				Name:     fmt.Sprintf("权限_%d_%d", rootIdx, j),
				Code:     fmt.Sprintf("perm_%d_%d", rootIdx, j),
				ParentID: &parentID,
				Sort:     j,
			}
			childIndex++
		}
	}
	return permissions[:childIndex]
}
// ptrString returns a pointer to a freshly allocated copy of s.
func ptrString(s string) *string {
	v := s
	return &v
}
// ptrDeviceStatus returns a pointer to a freshly allocated copy of s.
func ptrDeviceStatus(s domain.DeviceStatus) *domain.DeviceStatus {
	v := s
	return &v
}
// ptrInt64 returns a pointer to a freshly allocated copy of i.
func ptrInt64(i int64) *int64 {
	v := i
	return &v
}
// generateDeepPermissionTree builds count permissions arranged as a deep
// hierarchy — up to 5 levels with 5 children per node, rooted at 3 top-level
// modules — padding with flat root-level permissions when the tree alone does
// not reach count. The returned slice always has exactly count elements.
//
// NOTE(review): ParentID values are derived from currentID on the assumption
// that the DB assigns sequential auto-increment IDs starting at 1 in insert
// order — confirm this matches how callers persist the slice.
func generateDeepPermissionTree(count int) []*domain.Permission {
	permissions := make([]*domain.Permission, 0, count)
	// Create a deep hierarchical tree: root -> module -> category -> subcategory -> action
	levels := 5
	childrenPerLevel := 5
	currentID := int64(1) // the next ID the DB is expected to assign
	var addChildren func(parentID *int64, level, remaining int)
	addChildren = func(parentID *int64, level, remaining int) {
		if level >= levels || remaining <= 0 {
			return
		}
		for i := 0; i < childrenPerLevel && remaining > 0; i++ {
			perm := &domain.Permission{
				Name:     fmt.Sprintf("level%d_child%d", level, i),
				Code:     fmt.Sprintf("l%d_c%d_%d", level, i, currentID),
				ParentID: parentID,
				Sort:     i,
			}
			permissions = append(permissions, perm)
			currentID++
			remaining--
			if level < levels-1 && remaining > 0 {
				// Recurse under the node just appended (expected ID currentID-1).
				// NOTE(review): the recursive call receives its own copy of
				// `remaining`, so nodes appended by descendants do not reduce
				// this loop's budget — the tree can overshoot count; the final
				// permissions[:count] truncation masks that overshoot.
				newParentID := currentID - 1
				addChildren(&newParentID, level+1, remaining)
			}
		}
	}
	// Add root permissions
	for i := 0; i < 3 && currentID <= int64(count); i++ {
		rootID := currentID
		perm := &domain.Permission{
			Name:     fmt.Sprintf("root_module_%d", i),
			Code:     fmt.Sprintf("root_%d", i),
			ParentID: nil,
			Sort:     i,
		}
		permissions = append(permissions, perm)
		currentID++
		addChildren(&rootID, 1, count-len(permissions))
	}
	// Fill remaining with flat permissions if needed
	for len(permissions) < count {
		permissions = append(permissions, &domain.Permission{
			Name:     fmt.Sprintf("flat_perm_%d", currentID),
			Code:     fmt.Sprintf("flat_%d", currentID),
			ParentID: nil,
			Sort:     int(currentID),
		})
		currentID++
	}
	// Truncate any overshoot from the recursion (see NOTE above).
	return permissions[:count]
}