Files
vctp2/db/helpers_cache_and_index_test.go
Nathan Coad c66679a71f
All checks were successful
continuous-integration/drone/push Build is passing
more index cleanups to optimise space
2026-02-08 15:40:42 +11:00

165 lines
5.4 KiB
Go

package db
import (
"context"
"database/sql"
"fmt"
"testing"
"time"
"github.com/jmoiron/sqlx"
_ "modernc.org/sqlite"
)
// newTestSQLiteDB opens a fresh in-memory SQLite database for a single test.
// The connection is registered with t.Cleanup so it is closed automatically
// when the test (and all of its subtests) finish.
func newTestSQLiteDB(t *testing.T) *sqlx.DB {
	t.Helper()
	conn, openErr := sqlx.Open("sqlite", ":memory:")
	if openErr != nil {
		t.Fatalf("failed to open sqlite test db: %v", openErr)
	}
	// Close errors are irrelevant for an in-memory test database.
	t.Cleanup(func() { _ = conn.Close() })
	return conn
}
// indexExists reports whether an index with the given name is recorded in
// sqlite_master for the supplied connection. Query failures abort the test.
func indexExists(t *testing.T, dbConn *sqlx.DB, name string) bool {
	t.Helper()
	var total int
	err := dbConn.Get(&total, `SELECT COUNT(1) FROM sqlite_master WHERE type='index' AND name=?`, name)
	if err != nil {
		t.Fatalf("failed to query index %s: %v", name, err)
	}
	return total > 0
}
// TestCleanupHourlySnapshotIndexesOlderThan verifies that the cleanup drops
// every index belonging to hourly snapshot tables whose embedded timestamp is
// older than the cutoff, while indexes on newer tables are left in place.
func TestCleanupHourlySnapshotIndexesOlderThan(t *testing.T) {
	ctx := context.Background()
	dbConn := newTestSQLiteDB(t)

	const (
		oldTable = "inventory_hourly_1700000000"
		newTable = "inventory_hourly_1800000000"
	)

	// Create both snapshot tables and give each one the two extra indexes
	// the cleanup is expected to manage alongside the built-in vm/vcenter one.
	for _, tbl := range []string{oldTable, newTable} {
		if err := EnsureSnapshotTable(ctx, dbConn, tbl); err != nil {
			t.Fatalf("failed to create snapshot table %s: %v", tbl, err)
		}
		stmt := fmt.Sprintf(`CREATE INDEX IF NOT EXISTS %s_snapshottime_idx ON %s ("SnapshotTime")`, tbl, tbl)
		if _, err := dbConn.ExecContext(ctx, stmt); err != nil {
			t.Fatalf("failed to create snapshottime index for %s: %v", tbl, err)
		}
		stmt = fmt.Sprintf(`CREATE INDEX IF NOT EXISTS %s_resourcepool_idx ON %s ("ResourcePool")`, tbl, tbl)
		if _, err := dbConn.ExecContext(ctx, stmt); err != nil {
			t.Fatalf("failed to create resourcepool index for %s: %v", tbl, err)
		}
	}

	// The cutoff sits between the two tables' embedded unix timestamps.
	dropped, err := CleanupHourlySnapshotIndexesOlderThan(ctx, dbConn, time.Unix(1750000000, 0))
	if err != nil {
		t.Fatalf("cleanup failed: %v", err)
	}
	if dropped != 3 {
		t.Fatalf("expected 3 old indexes dropped, got %d", dropped)
	}

	// Both tables carry the same three index name suffixes.
	suffixes := []string{"_vm_vcenter_idx", "_snapshottime_idx", "_resourcepool_idx"}
	for _, suffix := range suffixes {
		if idx := oldTable + suffix; indexExists(t, dbConn, idx) {
			t.Fatalf("expected old index %s to be removed", idx)
		}
	}
	for _, suffix := range suffixes {
		if idx := newTable + suffix; !indexExists(t, dbConn, idx) {
			t.Fatalf("expected recent index %s to remain", idx)
		}
	}
}
// TestFetchVmTraceAndLifecycleUseCacheTables verifies that VM trace and
// lifecycle lookups are served from the vm_hourly_stats and vm_lifecycle_cache
// tables: trace rows come back sorted by snapshot time, and lifecycle fields
// (first/last seen, creation, deletion) reflect the cached values.
func TestFetchVmTraceAndLifecycleUseCacheTables(t *testing.T) {
	ctx := context.Background()
	dbConn := newTestSQLiteDB(t)

	if err := EnsureVmHourlyStats(ctx, dbConn); err != nil {
		t.Fatalf("failed to ensure vm_hourly_stats: %v", err)
	}
	if err := EnsureVmLifecycleCache(ctx, dbConn); err != nil {
		t.Fatalf("failed to ensure vm_lifecycle_cache: %v", err)
	}

	const insertSQL = `
INSERT INTO vm_hourly_stats (
"SnapshotTime","Vcenter","VmId","VmUuid","Name","CreationTime","DeletionTime","ResourcePool",
"Datacenter","Cluster","Folder","ProvisionedDisk","VcpuCount","RamGB","IsTemplate","PoweredOn","SrmPlaceholder"
) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
`

	// Two hourly snapshots of the same VM, one hour apart, with a resource
	// pool and sizing change between them.
	hourlyRows := [][]interface{}{
		{int64(1000), "vc-a", "vm-1", "uuid-1", "demo-vm", int64(900), int64(0), "Tin", "dc", "cluster", "folder", 100.0, int64(2), int64(4), "FALSE", "TRUE", "FALSE"},
		{int64(2000), "vc-a", "vm-1", "uuid-1", "demo-vm", int64(900), int64(0), "Gold", "dc", "cluster", "folder", 150.0, int64(4), int64(8), "FALSE", "TRUE", "FALSE"},
	}
	for _, row := range hourlyRows {
		if _, err := dbConn.ExecContext(ctx, insertSQL, row...); err != nil {
			t.Fatalf("failed to insert hourly cache row: %v", err)
		}
	}

	// Seed the lifecycle cache with an exact creation time, then mark the VM
	// as deleted at a later timestamp.
	creation := sql.NullInt64{Int64: 900, Valid: true}
	if err := UpsertVmLifecycleCache(ctx, dbConn, "vc-a", "vm-1", "uuid-1", "demo-vm", "cluster", time.Unix(1000, 0), creation); err != nil {
		t.Fatalf("failed to upsert lifecycle cache: %v", err)
	}
	if err := MarkVmDeletedWithDetails(ctx, dbConn, "vc-a", "vm-1", "uuid-1", "demo-vm", "cluster", 2500); err != nil {
		t.Fatalf("failed to mark vm deleted: %v", err)
	}

	trace, err := FetchVmTrace(ctx, dbConn, "vm-1", "", "")
	if err != nil {
		t.Fatalf("FetchVmTrace failed: %v", err)
	}
	if len(trace) != 2 {
		t.Fatalf("expected 2 trace rows, got %d", len(trace))
	}
	if trace[0].SnapshotTime != 1000 || trace[1].SnapshotTime != 2000 {
		t.Fatalf("trace rows are not sorted by snapshot time: %#v", trace)
	}

	lifecycle, err := FetchVmLifecycle(ctx, dbConn, "vm-1", "", "")
	if err != nil {
		t.Fatalf("FetchVmLifecycle failed: %v", err)
	}
	if lifecycle.FirstSeen != 900 {
		t.Fatalf("expected FirstSeen=900 (earliest known from lifecycle cache), got %d", lifecycle.FirstSeen)
	}
	if lifecycle.LastSeen != 2000 {
		t.Fatalf("expected LastSeen=2000, got %d", lifecycle.LastSeen)
	}
	if lifecycle.CreationTime != 900 || lifecycle.CreationApprox {
		t.Fatalf("expected exact CreationTime=900, got time=%d approx=%v", lifecycle.CreationTime, lifecycle.CreationApprox)
	}
	if lifecycle.DeletionTime != 2500 {
		t.Fatalf("expected DeletionTime=2500 from lifecycle cache, got %d", lifecycle.DeletionTime)
	}
}
// TestParseHourlySnapshotUnix exercises table-name parsing: a well-formed
// hourly snapshot name yields its unix timestamp, while malformed or
// non-hourly names are rejected with a zero value.
func TestParseHourlySnapshotUnix(t *testing.T) {
	for _, tc := range []struct {
		table string
		ok    bool
		val   int64
	}{
		{table: "inventory_hourly_1700000000", ok: true, val: 1700000000},
		{table: "inventory_hourly_bad", ok: false, val: 0},
		{table: "inventory_daily_summary_20260101", ok: false, val: 0},
	} {
		val, ok := parseHourlySnapshotUnix(tc.table)
		if ok != tc.ok || val != tc.val {
			t.Fatalf("parseHourlySnapshotUnix(%q) = (%d,%v), expected (%d,%v)", tc.table, val, ok, tc.val, tc.ok)
		}
	}
}