// Golden-parity tests for the daily and monthly aggregation rewrite: each test
// seeds identical data into the legacy SQL-union path and the new Go
// aggregation path, then asserts the two summary tables agree row-for-row.
//
// NOTE(review): SOURCE arrived as an unapplied unified diff collapsed onto a
// few physical lines; this body is the reconstructed file content with
// documentation added. Code tokens are unchanged from the diff payload.
package tasks

import (
	"context"
	"fmt"
	"io"
	"log/slog"
	"math"
	"testing"
	"time"
	"vctp/db"

	"github.com/jmoiron/sqlx"
)

// tasksTestDatabase is a minimal test double for the database dependency of
// CronTask. It wraps a real *sqlx.DB connection but returns nil for Queries()
// (the tests under this file exercise raw SQL paths only).
type tasksTestDatabase struct {
	dbConn *sqlx.DB
	logger *slog.Logger
}

func (d *tasksTestDatabase) DB() *sqlx.DB        { return d.dbConn }
func (d *tasksTestDatabase) Queries() db.Querier { return nil }

// Logger returns the configured logger, or a discard logger so tests stay quiet.
func (d *tasksTestDatabase) Logger() *slog.Logger {
	if d.logger != nil {
		return d.logger
	}
	return slog.New(slog.NewTextHandler(io.Discard, nil))
}
func (d *tasksTestDatabase) Close() error { return d.dbConn.Close() }

// dailySummaryRow mirrors the columns read back from a daily summary table for
// parity comparison. Tags match the quoted column names selected below.
type dailySummaryRow struct {
	Name               string  `db:"Name"`
	Vcenter            string  `db:"Vcenter"`
	VmId               string  `db:"VmId"`
	VmUuid             string  `db:"VmUuid"`
	ResourcePool       string  `db:"ResourcePool"`
	CreationTime       int64   `db:"CreationTime"`
	DeletionTime       int64   `db:"DeletionTime"`
	SnapshotTime       int64   `db:"SnapshotTime"`
	SamplesPresent     int64   `db:"SamplesPresent"`
	AvgVcpuCount       float64 `db:"AvgVcpuCount"`
	AvgRamGB           float64 `db:"AvgRamGB"`
	AvgProvisionedDisk float64 `db:"AvgProvisionedDisk"`
	AvgIsPresent       float64 `db:"AvgIsPresent"`
	PoolTinPct         float64 `db:"PoolTinPct"`
	PoolBronzePct      float64 `db:"PoolBronzePct"`
	PoolSilverPct      float64 `db:"PoolSilverPct"`
	PoolGoldPct        float64 `db:"PoolGoldPct"`
}

// monthlySummaryRow is dailySummaryRow without SnapshotTime — the monthly
// summary comparison does not include a per-row snapshot timestamp.
type monthlySummaryRow struct {
	Name               string  `db:"Name"`
	Vcenter            string  `db:"Vcenter"`
	VmId               string  `db:"VmId"`
	VmUuid             string  `db:"VmUuid"`
	ResourcePool       string  `db:"ResourcePool"`
	CreationTime       int64   `db:"CreationTime"`
	DeletionTime       int64   `db:"DeletionTime"`
	SamplesPresent     int64   `db:"SamplesPresent"`
	AvgVcpuCount       float64 `db:"AvgVcpuCount"`
	AvgRamGB           float64 `db:"AvgRamGB"`
	AvgProvisionedDisk float64 `db:"AvgProvisionedDisk"`
	AvgIsPresent       float64 `db:"AvgIsPresent"`
	PoolTinPct         float64 `db:"PoolTinPct"`
	PoolBronzePct      float64 `db:"PoolBronzePct"`
	PoolSilverPct      float64 `db:"PoolSilverPct"`
	PoolGoldPct        float64 `db:"PoolGoldPct"`
}

// hourlySeedRow is one seeded hourly observation of a VM; it is inserted both
// into the vm_hourly_stats cache table and into a per-hour snapshot table so
// the old (SQL union) and new (Go scan) paths see the same data.
type hourlySeedRow struct {
	SnapshotTime    int64
	Name            string
	Vcenter         string
	VmID            string
	VmUUID          string
	ResourcePool    string
	Datacenter      string
	Cluster         string
	Folder          string
	ProvisionedDisk float64
	VcpuCount       int64
	RamGB           int64
	CreationTime    int64
	DeletionTime    int64
	// IsTemplate/PoweredOn/SrmPlaceholder are stored as "TRUE"/"FALSE" strings,
	// matching the schema the production tables use.
	IsTemplate     string
	PoweredOn      string
	SrmPlaceholder string
}

// dailySeedRow is one seeded daily aggregate for a VM. The first group of
// fields mirrors the daily summary table (fed to the SQL-union monthly path);
// the trailing Total/Sum/*Hits fields mirror the vm_daily_rollup table (fed to
// the Go rollup monthly path).
type dailySeedRow struct {
	SnapshotTime    int64
	Name            string
	Vcenter         string
	VmID            string
	VmUUID          string
	ResourcePool    string
	Datacenter      string
	Cluster         string
	Folder          string
	ProvisionedDisk float64
	VcpuCount       int64
	RamGB           int64
	CreationTime    int64
	DeletionTime    int64
	IsTemplate      string
	PoweredOn       string
	SrmPlaceholder  string
	SamplesPresent  int64
	AvgVcpuCount    float64
	AvgRamGB        float64
	AvgProvisionedDisk float64
	AvgIsPresent    float64
	PoolTinPct      float64
	PoolBronzePct   float64
	PoolSilverPct   float64
	PoolGoldPct     float64
	Tin             float64
	Bronze          float64
	Silver          float64
	Gold            float64
	TotalSamples    int64
	SumVcpu         int64
	SumRam          int64
	SumDisk         float64
	TinHits         int64
	BronzeHits      int64
	SilverHits      int64
	GoldHits        int64
}

// TestDailyGoldenParity_SQLUnionVsGoCanonical seeds three hourly snapshots of
// a small VM fleet (including a template VM that must be excluded), runs the
// legacy SQL UNION daily insert and the new scanHourlyCache/insertDailyAggregates
// path, and asserts the two resulting summary tables match. It then spot-checks
// a few derived values (presence ratio, latest creation time, last resource
// pool, pool-mix percentages) against hand-computed expectations.
func TestDailyGoldenParity_SQLUnionVsGoCanonical(t *testing.T) {
	ctx := context.Background()
	dbConn := newTasksTestDB(t)
	task := newTasksTestCronTask(dbConn)

	if err := db.EnsureVmHourlyStats(ctx, dbConn); err != nil {
		t.Fatalf("failed to ensure vm_hourly_stats: %v", err)
	}

	// Fixed day in the future so table names never collide with real data.
	dayStart := time.Date(2026, time.January, 15, 0, 0, 0, 0, time.UTC)
	dayEnd := dayStart.AddDate(0, 0, 1)
	t1 := dayStart.Add(1 * time.Hour).Unix()
	t2 := dayStart.Add(2 * time.Hour).Unix()
	t3 := dayStart.Add(3 * time.Hour).Unix()

	// vm-alpha: present at t1 and t3 only (2/3 samples), pool changes Tin->Gold,
	//   CreationTime appears only in the later sample.
	// vm-bravo: present only at t2 (1/3 samples), deleted mid-day.
	// vm-charlie: present at t1 and t3, stable.
	// vm-template: IsTemplate=TRUE, must be filtered out by templateExclusionFilter.
	rows := []hourlySeedRow{
		{SnapshotTime: t1, Name: "vm-alpha", Vcenter: "vc-a", VmID: "vm-1", VmUUID: "uuid-1", ResourcePool: "Tin", Datacenter: "dc-1", Cluster: "cluster-1", Folder: "/prod", ProvisionedDisk: 100, VcpuCount: 2, RamGB: 8, CreationTime: 0, IsTemplate: "FALSE", PoweredOn: "TRUE", SrmPlaceholder: "FALSE"},
		{SnapshotTime: t3, Name: "vm-alpha", Vcenter: "vc-a", VmID: "vm-1", VmUUID: "uuid-1", ResourcePool: "Gold", Datacenter: "dc-1", Cluster: "cluster-1", Folder: "/prod", ProvisionedDisk: 120, VcpuCount: 4, RamGB: 16, CreationTime: dayStart.Add(30 * time.Minute).Unix(), IsTemplate: "FALSE", PoweredOn: "TRUE", SrmPlaceholder: "FALSE"},
		{SnapshotTime: t2, Name: "vm-bravo", Vcenter: "vc-a", VmID: "vm-2", VmUUID: "uuid-2", ResourcePool: "Bronze", Datacenter: "dc-1", Cluster: "cluster-1", Folder: "/prod", ProvisionedDisk: 30, VcpuCount: 1, RamGB: 2, CreationTime: dayStart.Add(-2 * time.Hour).Unix(), DeletionTime: dayStart.Add(4 * time.Hour).Unix(), IsTemplate: "FALSE", PoweredOn: "TRUE", SrmPlaceholder: "FALSE"},
		{SnapshotTime: t1, Name: "vm-charlie", Vcenter: "vc-a", VmID: "vm-3", VmUUID: "uuid-3", ResourcePool: "Silver", Datacenter: "dc-1", Cluster: "cluster-2", Folder: "/prod2", ProvisionedDisk: 50, VcpuCount: 2, RamGB: 4, CreationTime: dayStart.Add(-5 * time.Hour).Unix(), IsTemplate: "FALSE", PoweredOn: "TRUE", SrmPlaceholder: "FALSE"},
		{SnapshotTime: t3, Name: "vm-charlie", Vcenter: "vc-a", VmID: "vm-3", VmUUID: "uuid-3", ResourcePool: "Silver", Datacenter: "dc-1", Cluster: "cluster-2", Folder: "/prod2", ProvisionedDisk: 50, VcpuCount: 2, RamGB: 4, CreationTime: dayStart.Add(-5 * time.Hour).Unix(), IsTemplate: "FALSE", PoweredOn: "TRUE", SrmPlaceholder: "FALSE"},
		{SnapshotTime: t3, Name: "vm-template", Vcenter: "vc-a", VmID: "vm-t", VmUUID: "uuid-t", ResourcePool: "Tin", Datacenter: "dc-1", Cluster: "cluster-3", Folder: "/templates", ProvisionedDisk: 500, VcpuCount: 16, RamGB: 64, CreationTime: dayStart.Add(-10 * time.Hour).Unix(), IsTemplate: "TRUE", PoweredOn: "FALSE", SrmPlaceholder: "FALSE"},
	}
	for _, row := range rows {
		if err := insertHourlyCacheSeedRow(ctx, dbConn, row); err != nil {
			t.Fatalf("failed to insert vm_hourly_stats row: %v", err)
		}
	}

	// Mirror the same rows into per-hour snapshot tables for the SQL-union path.
	hourlyTableTimes := []int64{t1, t2, t3}
	hourlyTables := make([]string, 0, len(hourlyTableTimes))
	for _, ts := range hourlyTableTimes {
		tableName, err := hourlyInventoryTableName(time.Unix(ts, 0).UTC())
		if err != nil {
			t.Fatalf("failed to build hourly table name: %v", err)
		}
		hourlyTables = append(hourlyTables, tableName)
		if err := db.EnsureSnapshotTable(ctx, dbConn, tableName); err != nil {
			t.Fatalf("failed to ensure snapshot table %s: %v", tableName, err)
		}
	}
	for _, row := range rows {
		tableName, err := hourlyInventoryTableName(time.Unix(row.SnapshotTime, 0).UTC())
		if err != nil {
			t.Fatalf("failed to build per-row hourly table name: %v", err)
		}
		if err := insertHourlySnapshotSeedRow(ctx, dbConn, tableName, row); err != nil {
			t.Fatalf("failed to insert snapshot row for table %s: %v", tableName, err)
		}
	}

	// Two output tables: one filled by the legacy SQL path, one by the Go path.
	oldSummaryTable, err := db.SafeTableName("test_daily_sql_union_summary")
	if err != nil {
		t.Fatalf("failed to build old summary table name: %v", err)
	}
	newSummaryTable, err := db.SafeTableName("test_daily_go_cache_summary")
	if err != nil {
		t.Fatalf("failed to build new summary table name: %v", err)
	}
	if err := db.EnsureSummaryTable(ctx, dbConn, oldSummaryTable); err != nil {
		t.Fatalf("failed to ensure old summary table: %v", err)
	}
	if err := db.EnsureSummaryTable(ctx, dbConn, newSummaryTable); err != nil {
		t.Fatalf("failed to ensure new summary table: %v", err)
	}

	// Legacy path: UNION over the per-hour tables, then a single INSERT.
	unionQuery, err := buildUnionQuery(hourlyTables, summaryUnionColumns, templateExclusionFilter())
	if err != nil {
		t.Fatalf("failed to build union query: %v", err)
	}
	insertSQL, err := db.BuildDailySummaryInsert(oldSummaryTable, unionQuery)
	if err != nil {
		t.Fatalf("failed to build daily sql insert: %v", err)
	}
	if _, err := dbConn.ExecContext(ctx, insertSQL); err != nil {
		t.Fatalf("failed to execute daily sql insert: %v", err)
	}

	// New path: scan the hourly cache in Go, then insert the aggregates.
	aggMap, snapTimes, err := task.scanHourlyCache(ctx, dayStart, dayEnd)
	if err != nil {
		t.Fatalf("scanHourlyCache failed: %v", err)
	}
	totalSamplesByVcenter := sampleCountsByVcenter(aggMap)
	if err := task.insertDailyAggregates(ctx, newSummaryTable, aggMap, len(snapTimes), totalSamplesByVcenter); err != nil {
		t.Fatalf("insertDailyAggregates failed: %v", err)
	}

	oldRows, err := loadDailySummaryRows(ctx, dbConn, oldSummaryTable)
	if err != nil {
		t.Fatalf("failed to load old daily rows: %v", err)
	}
	newRows, err := loadDailySummaryRows(ctx, dbConn, newSummaryTable)
	if err != nil {
		t.Fatalf("failed to load new daily rows: %v", err)
	}
	assertDailySummaryParity(t, oldRows, newRows)

	// Golden spot checks on the new path's output.
	byKey := mapRowsByKeyDaily(newRows)
	alpha := byKey["vc-a|vm-1|uuid-1|vm-alpha"]
	if !approxEqual(alpha.AvgIsPresent, 2.0/3.0, 1e-9) {
		t.Fatalf("unexpected alpha AvgIsPresent: got %.12f want %.12f", alpha.AvgIsPresent, 2.0/3.0)
	}
	if alpha.CreationTime != dayStart.Add(30*time.Minute).Unix() {
		t.Fatalf("unexpected alpha CreationTime: got %d want %d", alpha.CreationTime, dayStart.Add(30*time.Minute).Unix())
	}
	if alpha.ResourcePool != "Gold" {
		t.Fatalf("unexpected alpha ResourcePool: got %q want %q", alpha.ResourcePool, "Gold")
	}
	if alpha.SnapshotTime != t3 {
		t.Fatalf("unexpected alpha SnapshotTime: got %d want %d", alpha.SnapshotTime, t3)
	}
	if !approxEqual(alpha.PoolTinPct, 50.0, 1e-9) || !approxEqual(alpha.PoolGoldPct, 50.0, 1e-9) {
		t.Fatalf("unexpected alpha pool mix: tin=%.6f gold=%.6f", alpha.PoolTinPct, alpha.PoolGoldPct)
	}

	bravo := byKey["vc-a|vm-2|uuid-2|vm-bravo"]
	if bravo.DeletionTime != dayStart.Add(4*time.Hour).Unix() {
		t.Fatalf("unexpected bravo DeletionTime: got %d want %d", bravo.DeletionTime, dayStart.Add(4*time.Hour).Unix())
	}
	if !approxEqual(bravo.AvgIsPresent, 1.0/3.0, 1e-9) {
		t.Fatalf("unexpected bravo AvgIsPresent: got %.12f want %.12f", bravo.AvgIsPresent, 1.0/3.0)
	}
}

// TestMonthlyGoldenParity_SQLDailyUnionVsGoDailyRollup seeds two days of daily
// aggregates (into both the per-day summary tables and vm_daily_rollup), runs
// the legacy monthly SQL UNION insert and the new scanDailyRollup/
// insertMonthlyAggregates path, asserts parity, and spot-checks the averaged
// and pool-mix values for the seeded VM.
func TestMonthlyGoldenParity_SQLDailyUnionVsGoDailyRollup(t *testing.T) {
	ctx := context.Background()
	dbConn := newTasksTestDB(t)
	task := newTasksTestCronTask(dbConn)

	if err := db.EnsureVmDailyRollup(ctx, dbConn); err != nil {
		t.Fatalf("failed to ensure vm_daily_rollup: %v", err)
	}

	monthStart := time.Date(2026, time.February, 1, 0, 0, 0, 0, time.UTC)
	monthEnd := monthStart.AddDate(0, 1, 0)
	day1 := time.Date(2026, time.February, 3, 0, 0, 0, 0, time.UTC)
	day2 := day1.AddDate(0, 0, 1)

	day1Table, err := dailySummaryTableName(day1)
	if err != nil {
		t.Fatalf("failed to build day1 table name: %v", err)
	}
	day2Table, err := dailySummaryTableName(day2)
	if err != nil {
		t.Fatalf("failed to build day2 table name: %v", err)
	}
	for _, table := range []string{day1Table, day2Table} {
		if err := db.EnsureSummaryTable(ctx, dbConn, table); err != nil {
			t.Fatalf("failed to ensure daily summary table %s: %v", table, err)
		}
	}

	// Same VM on both days: Bronze day 1, Tin day 2, so the monthly pool mix
	// should come out 50/50 and averages should blend the two days.
	seeds := []dailySeedRow{
		{
			SnapshotTime: day1.Unix(), Name: "vm-alpha", Vcenter: "vc-a", VmID: "vm-1", VmUUID: "uuid-1",
			ResourcePool: "Bronze", Datacenter: "dc-1", Cluster: "cluster-1", Folder: "/prod",
			ProvisionedDisk: 100, VcpuCount: 4, RamGB: 8, CreationTime: monthStart.Add(-24 * time.Hour).Unix(), IsTemplate: "FALSE", PoweredOn: "TRUE", SrmPlaceholder: "FALSE",
			SamplesPresent: 2, AvgVcpuCount: 3, AvgRamGB: 6, AvgProvisionedDisk: 90, AvgIsPresent: 1.0,
			PoolBronzePct: 100, Bronze: 100,
			TotalSamples: 2, SumVcpu: 6, SumRam: 12, SumDisk: 180, BronzeHits: 2,
		},
		{
			SnapshotTime: day2.Unix(), Name: "vm-alpha", Vcenter: "vc-a", VmID: "vm-1", VmUUID: "uuid-1",
			ResourcePool: "Tin", Datacenter: "dc-1", Cluster: "cluster-1", Folder: "/prod",
			ProvisionedDisk: 110, VcpuCount: 2, RamGB: 8, CreationTime: monthStart.Add(-24 * time.Hour).Unix(), IsTemplate: "FALSE", PoweredOn: "TRUE", SrmPlaceholder: "FALSE",
			SamplesPresent: 2, AvgVcpuCount: 2, AvgRamGB: 8, AvgProvisionedDisk: 110, AvgIsPresent: 1.0,
			PoolTinPct: 100, Tin: 100,
			TotalSamples: 2, SumVcpu: 4, SumRam: 16, SumDisk: 220, TinHits: 2,
		},
	}

	// Each seed goes into its day's summary table (old path input) and into
	// vm_daily_rollup (new path input).
	for _, seed := range seeds {
		targetTable := day1Table
		if seed.SnapshotTime == day2.Unix() {
			targetTable = day2Table
		}
		if err := insertDailySummarySeedRow(ctx, dbConn, targetTable, seed); err != nil {
			t.Fatalf("failed to insert daily summary seed row: %v", err)
		}
		if err := insertDailyRollupSeedRow(ctx, dbConn, seed); err != nil {
			t.Fatalf("failed to insert daily rollup seed row: %v", err)
		}
	}

	oldMonthlyTable, err := db.SafeTableName("test_monthly_sql_union_summary")
	if err != nil {
		t.Fatalf("failed to build old monthly table name: %v", err)
	}
	newMonthlyTable, err := db.SafeTableName("test_monthly_go_rollup_summary")
	if err != nil {
		t.Fatalf("failed to build new monthly table name: %v", err)
	}
	if err := db.EnsureSummaryTable(ctx, dbConn, oldMonthlyTable); err != nil {
		t.Fatalf("failed to ensure old monthly table: %v", err)
	}
	if err := db.EnsureSummaryTable(ctx, dbConn, newMonthlyTable); err != nil {
		t.Fatalf("failed to ensure new monthly table: %v", err)
	}

	unionQuery, err := buildUnionQuery([]string{day1Table, day2Table}, monthlyUnionColumns, templateExclusionFilter())
	if err != nil {
		t.Fatalf("failed to build monthly union query: %v", err)
	}
	insertSQL, err := db.BuildMonthlySummaryInsert(oldMonthlyTable, unionQuery)
	if err != nil {
		t.Fatalf("failed to build monthly sql insert: %v", err)
	}
	if _, err := dbConn.ExecContext(ctx, insertSQL); err != nil {
		t.Fatalf("failed to execute monthly sql insert: %v", err)
	}

	aggMap, err := task.scanDailyRollup(ctx, monthStart, monthEnd)
	if err != nil {
		t.Fatalf("scanDailyRollup failed: %v", err)
	}
	if err := task.insertMonthlyAggregates(ctx, newMonthlyTable, aggMap); err != nil {
		t.Fatalf("insertMonthlyAggregates failed: %v", err)
	}

	oldRows, err := loadMonthlySummaryRows(ctx, dbConn, oldMonthlyTable)
	if err != nil {
		t.Fatalf("failed to load old monthly rows: %v", err)
	}
	newRows, err := loadMonthlySummaryRows(ctx, dbConn, newMonthlyTable)
	if err != nil {
		t.Fatalf("failed to load new monthly rows: %v", err)
	}
	assertMonthlySummaryParity(t, oldRows, newRows)

	// Golden spot checks: (3+2)/2 vCPU average, last pool wins, 50/50 pool mix.
	byKey := mapRowsByKeyMonthly(newRows)
	alpha := byKey["vc-a|vm-1|uuid-1|vm-alpha"]
	if !approxEqual(alpha.AvgVcpuCount, 2.5, 1e-9) {
		t.Fatalf("unexpected alpha AvgVcpuCount: got %.6f want %.6f", alpha.AvgVcpuCount, 2.5)
	}
	if !approxEqual(alpha.AvgIsPresent, 1.0, 1e-9) {
		t.Fatalf("unexpected alpha AvgIsPresent: got %.6f want %.6f", alpha.AvgIsPresent, 1.0)
	}
	if alpha.ResourcePool != "Tin" {
		t.Fatalf("unexpected alpha ResourcePool: got %q want %q", alpha.ResourcePool, "Tin")
	}
	if !approxEqual(alpha.PoolTinPct, 50.0, 1e-9) || !approxEqual(alpha.PoolBronzePct, 50.0, 1e-9) {
		t.Fatalf("unexpected alpha monthly pool mix: tin=%.6f bronze=%.6f", alpha.PoolTinPct, alpha.PoolBronzePct)
	}
}

// newTasksTestCronTask builds a CronTask wired to the given test DB with a
// discard logger.
func newTasksTestCronTask(dbConn *sqlx.DB) *CronTask {
	logger := slog.New(slog.NewTextHandler(io.Discard, nil))
	return &CronTask{
		Logger:   logger,
		Database: &tasksTestDatabase{dbConn: dbConn, logger: logger},
	}
}

// insertHourlyCacheSeedRow inserts one seed row into the vm_hourly_stats cache
// table (the new Go aggregation path's input).
func insertHourlyCacheSeedRow(ctx context.Context, dbConn *sqlx.DB, row hourlySeedRow) error {
	_, err := dbConn.ExecContext(ctx, `
INSERT INTO vm_hourly_stats (
  "SnapshotTime","Vcenter","VmId","VmUuid","Name","CreationTime","DeletionTime","ResourcePool",
  "Datacenter","Cluster","Folder","ProvisionedDisk","VcpuCount","RamGB","IsTemplate","PoweredOn","SrmPlaceholder"
) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
`,
		row.SnapshotTime, row.Vcenter, row.VmID, row.VmUUID, row.Name, row.CreationTime, row.DeletionTime, row.ResourcePool,
		row.Datacenter, row.Cluster, row.Folder, row.ProvisionedDisk, row.VcpuCount, row.RamGB, row.IsTemplate, row.PoweredOn, row.SrmPlaceholder,
	)
	return err
}

// insertHourlySnapshotSeedRow inserts the same observation into a per-hour
// snapshot table (the legacy SQL-union path's input). EventKey and CloudId are
// left NULL; table is pre-validated by the caller via hourlyInventoryTableName.
func insertHourlySnapshotSeedRow(ctx context.Context, dbConn *sqlx.DB, table string, row hourlySeedRow) error {
	sql := fmt.Sprintf(`
INSERT INTO %s (
  "Name","Vcenter","VmId","VmUuid","EventKey","CloudId","CreationTime","DeletionTime","ResourcePool",
  "Datacenter","Cluster","Folder","ProvisionedDisk","VcpuCount","RamGB","IsTemplate","PoweredOn","SrmPlaceholder","SnapshotTime"
) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
`, table)
	_, err := dbConn.ExecContext(ctx, sql,
		row.Name, row.Vcenter, row.VmID, row.VmUUID, nil, nil, row.CreationTime, row.DeletionTime, row.ResourcePool,
		row.Datacenter, row.Cluster, row.Folder, row.ProvisionedDisk, row.VcpuCount, row.RamGB, row.IsTemplate, row.PoweredOn, row.SrmPlaceholder, row.SnapshotTime,
	)
	return err
}

// insertDailySummarySeedRow inserts one seed row into a per-day summary table
// (the legacy monthly path's input). Table names come from dailySummaryTableName.
func insertDailySummarySeedRow(ctx context.Context, dbConn *sqlx.DB, table string, row dailySeedRow) error {
	sql := fmt.Sprintf(`
INSERT INTO %s (
  "Name","Vcenter","VmId","VmUuid","EventKey","CloudId","CreationTime","DeletionTime","ResourcePool",
  "Datacenter","Cluster","Folder","ProvisionedDisk","VcpuCount","RamGB","IsTemplate","PoweredOn","SrmPlaceholder",
  "SnapshotTime","SamplesPresent","AvgVcpuCount","AvgRamGB","AvgProvisionedDisk","AvgIsPresent",
  "PoolTinPct","PoolBronzePct","PoolSilverPct","PoolGoldPct","Tin","Bronze","Silver","Gold"
) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
`, table)
	_, err := dbConn.ExecContext(ctx, sql,
		row.Name, row.Vcenter, row.VmID, row.VmUUID, nil, nil, row.CreationTime, row.DeletionTime, row.ResourcePool,
		row.Datacenter, row.Cluster, row.Folder, row.ProvisionedDisk, row.VcpuCount, row.RamGB, row.IsTemplate, row.PoweredOn, row.SrmPlaceholder,
		row.SnapshotTime, row.SamplesPresent, row.AvgVcpuCount, row.AvgRamGB, row.AvgProvisionedDisk, row.AvgIsPresent,
		row.PoolTinPct, row.PoolBronzePct, row.PoolSilverPct, row.PoolGoldPct, row.Tin, row.Bronze, row.Silver, row.Gold,
	)
	return err
}

// insertDailyRollupSeedRow inserts one seed row into vm_daily_rollup (the new
// monthly path's input). The seed's SnapshotTime becomes the rollup "Date";
// the point-in-time fields map onto the "Last*" columns.
func insertDailyRollupSeedRow(ctx context.Context, dbConn *sqlx.DB, row dailySeedRow) error {
	_, err := dbConn.ExecContext(ctx, `
INSERT INTO vm_daily_rollup (
  "Date","Vcenter","VmId","VmUuid","Name","CreationTime","DeletionTime","SamplesPresent","TotalSamples",
  "SumVcpu","SumRam","SumDisk","TinHits","BronzeHits","SilverHits","GoldHits",
  "LastResourcePool","LastDatacenter","LastCluster","LastFolder","LastProvisionedDisk","LastVcpuCount","LastRamGB","IsTemplate","PoweredOn","SrmPlaceholder"
) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
`,
		row.SnapshotTime, row.Vcenter, row.VmID, row.VmUUID, row.Name, row.CreationTime, row.DeletionTime, row.SamplesPresent, row.TotalSamples,
		row.SumVcpu, row.SumRam, row.SumDisk, row.TinHits, row.BronzeHits, row.SilverHits, row.GoldHits,
		row.ResourcePool, row.Datacenter, row.Cluster, row.Folder, row.ProvisionedDisk, row.VcpuCount, row.RamGB, row.IsTemplate, row.PoweredOn, row.SrmPlaceholder,
	)
	return err
}

// loadDailySummaryRows reads back a daily summary table with NULLs coalesced
// to zero values and a deterministic ORDER BY so parity comparison is stable.
func loadDailySummaryRows(ctx context.Context, dbConn *sqlx.DB, table string) ([]dailySummaryRow, error) {
	sql := fmt.Sprintf(`
SELECT
  COALESCE("Name",'') AS "Name",
  COALESCE("Vcenter",'') AS "Vcenter",
  COALESCE("VmId",'') AS "VmId",
  COALESCE("VmUuid",'') AS "VmUuid",
  COALESCE("ResourcePool",'') AS "ResourcePool",
  COALESCE("CreationTime",0) AS "CreationTime",
  COALESCE("DeletionTime",0) AS "DeletionTime",
  COALESCE("SnapshotTime",0) AS "SnapshotTime",
  COALESCE("SamplesPresent",0) AS "SamplesPresent",
  COALESCE("AvgVcpuCount",0) AS "AvgVcpuCount",
  COALESCE("AvgRamGB",0) AS "AvgRamGB",
  COALESCE("AvgProvisionedDisk",0) AS "AvgProvisionedDisk",
  COALESCE("AvgIsPresent",0) AS "AvgIsPresent",
  COALESCE("PoolTinPct",0) AS "PoolTinPct",
  COALESCE("PoolBronzePct",0) AS "PoolBronzePct",
  COALESCE("PoolSilverPct",0) AS "PoolSilverPct",
  COALESCE("PoolGoldPct",0) AS "PoolGoldPct"
FROM %s
ORDER BY "Vcenter", "VmId", "VmUuid", "Name"
`, table)
	var out []dailySummaryRow
	return out, dbConn.SelectContext(ctx, &out, sql)
}

// loadMonthlySummaryRows is loadDailySummaryRows without SnapshotTime, for
// monthly summary tables.
func loadMonthlySummaryRows(ctx context.Context, dbConn *sqlx.DB, table string) ([]monthlySummaryRow, error) {
	sql := fmt.Sprintf(`
SELECT
  COALESCE("Name",'') AS "Name",
  COALESCE("Vcenter",'') AS "Vcenter",
  COALESCE("VmId",'') AS "VmId",
  COALESCE("VmUuid",'') AS "VmUuid",
  COALESCE("ResourcePool",'') AS "ResourcePool",
  COALESCE("CreationTime",0) AS "CreationTime",
  COALESCE("DeletionTime",0) AS "DeletionTime",
  COALESCE("SamplesPresent",0) AS "SamplesPresent",
  COALESCE("AvgVcpuCount",0) AS "AvgVcpuCount",
  COALESCE("AvgRamGB",0) AS "AvgRamGB",
  COALESCE("AvgProvisionedDisk",0) AS "AvgProvisionedDisk",
  COALESCE("AvgIsPresent",0) AS "AvgIsPresent",
  COALESCE("PoolTinPct",0) AS "PoolTinPct",
  COALESCE("PoolBronzePct",0) AS "PoolBronzePct",
  COALESCE("PoolSilverPct",0) AS "PoolSilverPct",
  COALESCE("PoolGoldPct",0) AS "PoolGoldPct"
FROM %s
ORDER BY "Vcenter", "VmId", "VmUuid", "Name"
`, table)
	var out []monthlySummaryRow
	return out, dbConn.SelectContext(ctx, &out, sql)
}

// mapRowsByKeyDaily indexes daily rows by their Vcenter|VmId|VmUuid|Name key.
func mapRowsByKeyDaily(rows []dailySummaryRow) map[string]dailySummaryRow {
	out := make(map[string]dailySummaryRow, len(rows))
	for _, row := range rows {
		out[dailyRowKey(row)] = row
	}
	return out
}

// mapRowsByKeyMonthly indexes monthly rows by their Vcenter|VmId|VmUuid|Name key.
func mapRowsByKeyMonthly(rows []monthlySummaryRow) map[string]monthlySummaryRow {
	out := make(map[string]monthlySummaryRow, len(rows))
	for _, row := range rows {
		out[monthlyRowKey(row)] = row
	}
	return out
}

// dailyRowKey builds the identity key used to pair old/new daily rows.
func dailyRowKey(r dailySummaryRow) string {
	return fmt.Sprintf("%s|%s|%s|%s", r.Vcenter, r.VmId, r.VmUuid, r.Name)
}

// monthlyRowKey builds the identity key used to pair old/new monthly rows.
func monthlyRowKey(r monthlySummaryRow) string {
	return fmt.Sprintf("%s|%s|%s|%s", r.Vcenter, r.VmId, r.VmUuid, r.Name)
}

// assertDailySummaryParity fails the test unless old and new daily outputs
// have identical row sets: exact equality for scalar columns, 1e-9 tolerance
// for the float averages and pool percentages.
func assertDailySummaryParity(t *testing.T, oldRows, newRows []dailySummaryRow) {
	t.Helper()
	if len(oldRows) != len(newRows) {
		t.Fatalf("daily row count mismatch: old=%d new=%d", len(oldRows), len(newRows))
	}
	oldByKey := mapRowsByKeyDaily(oldRows)
	newByKey := mapRowsByKeyDaily(newRows)
	for key, oldRow := range oldByKey {
		newRow, ok := newByKey[key]
		if !ok {
			t.Fatalf("missing key in new daily output: %s", key)
		}
		if oldRow.ResourcePool != newRow.ResourcePool ||
			oldRow.CreationTime != newRow.CreationTime ||
			oldRow.DeletionTime != newRow.DeletionTime ||
			oldRow.SnapshotTime != newRow.SnapshotTime ||
			oldRow.SamplesPresent != newRow.SamplesPresent {
			t.Fatalf("daily scalar mismatch key=%s old=%+v new=%+v", key, oldRow, newRow)
		}
		assertFloatClose(t, "AvgVcpuCount", key, oldRow.AvgVcpuCount, newRow.AvgVcpuCount, 1e-9)
		assertFloatClose(t, "AvgRamGB", key, oldRow.AvgRamGB, newRow.AvgRamGB, 1e-9)
		assertFloatClose(t, "AvgProvisionedDisk", key, oldRow.AvgProvisionedDisk, newRow.AvgProvisionedDisk, 1e-9)
		assertFloatClose(t, "AvgIsPresent", key, oldRow.AvgIsPresent, newRow.AvgIsPresent, 1e-9)
		assertFloatClose(t, "PoolTinPct", key, oldRow.PoolTinPct, newRow.PoolTinPct, 1e-9)
		assertFloatClose(t, "PoolBronzePct", key, oldRow.PoolBronzePct, newRow.PoolBronzePct, 1e-9)
		assertFloatClose(t, "PoolSilverPct", key, oldRow.PoolSilverPct, newRow.PoolSilverPct, 1e-9)
		assertFloatClose(t, "PoolGoldPct", key, oldRow.PoolGoldPct, newRow.PoolGoldPct, 1e-9)
	}
}

// assertMonthlySummaryParity is assertDailySummaryParity for monthly rows
// (no SnapshotTime column to compare).
func assertMonthlySummaryParity(t *testing.T, oldRows, newRows []monthlySummaryRow) {
	t.Helper()
	if len(oldRows) != len(newRows) {
		t.Fatalf("monthly row count mismatch: old=%d new=%d", len(oldRows), len(newRows))
	}
	oldByKey := mapRowsByKeyMonthly(oldRows)
	newByKey := mapRowsByKeyMonthly(newRows)
	for key, oldRow := range oldByKey {
		newRow, ok := newByKey[key]
		if !ok {
			t.Fatalf("missing key in new monthly output: %s", key)
		}
		if oldRow.ResourcePool != newRow.ResourcePool ||
			oldRow.CreationTime != newRow.CreationTime ||
			oldRow.DeletionTime != newRow.DeletionTime ||
			oldRow.SamplesPresent != newRow.SamplesPresent {
			t.Fatalf("monthly scalar mismatch key=%s old=%+v new=%+v", key, oldRow, newRow)
		}
		assertFloatClose(t, "AvgVcpuCount", key, oldRow.AvgVcpuCount, newRow.AvgVcpuCount, 1e-9)
		assertFloatClose(t, "AvgRamGB", key, oldRow.AvgRamGB, newRow.AvgRamGB, 1e-9)
		assertFloatClose(t, "AvgProvisionedDisk", key, oldRow.AvgProvisionedDisk, newRow.AvgProvisionedDisk, 1e-9)
		assertFloatClose(t, "AvgIsPresent", key, oldRow.AvgIsPresent, newRow.AvgIsPresent, 1e-9)
		assertFloatClose(t, "PoolTinPct", key, oldRow.PoolTinPct, newRow.PoolTinPct, 1e-9)
		assertFloatClose(t, "PoolBronzePct", key, oldRow.PoolBronzePct, newRow.PoolBronzePct, 1e-9)
		assertFloatClose(t, "PoolSilverPct", key, oldRow.PoolSilverPct, newRow.PoolSilverPct, 1e-9)
		assertFloatClose(t, "PoolGoldPct", key, oldRow.PoolGoldPct, newRow.PoolGoldPct, 1e-9)
	}
}

// assertFloatClose fails the test when two floats differ by more than eps.
func assertFloatClose(t *testing.T, field, key string, oldVal, newVal, eps float64) {
	t.Helper()
	if !approxEqual(oldVal, newVal, eps) {
		t.Fatalf("%s mismatch key=%s old=%.12f new=%.12f", field, key, oldVal, newVal)
	}
}

// approxEqual reports whether a and b are within eps of each other.
func approxEqual(a, b, eps float64) bool {
	return math.Abs(a-b) <= eps
}