Files
vctp2/internal/report/snapshots.go
Nathan Coad 1f2783fc86
All checks were successful
continuous-integration/drone/push Build is passing
fix
2026-01-28 13:14:05 +11:00

1688 lines
53 KiB
Go

package report
import (
"bytes"
"context"
"database/sql"
"fmt"
"log/slog"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"vctp/db"
"github.com/jmoiron/sqlx"
"github.com/xuri/excelize/v2"
)
// SnapshotRecord describes one registered inventory snapshot table: the
// physical table name, the instant the snapshot represents, its cadence,
// and the row count captured at registration time.
type SnapshotRecord struct {
	TableName     string    // physical table name, e.g. inventory_hourly_<epoch>
	SnapshotTime  time.Time // instant the snapshot represents
	SnapshotType  string    // cadence: "hourly", "daily", or "monthly"
	SnapshotCount int64     // row count at registration; -1 when unknown (name-derived records)
}
// SnapshotMigrationStats tallies the outcome of MigrateSnapshotRegistry:
// how many hourly tables were renamed to epoch-based names, how many
// snapshots of each cadence were registered, and how many tables failed.
type SnapshotMigrationStats struct {
	HourlyRenamed     int // legacy hourly tables renamed to inventory_hourly_<epoch>
	HourlyRegistered  int // hourly snapshots inserted into snapshot_registry
	DailyRegistered   int // daily summary snapshots registered
	MonthlyRegistered int // monthly summary snapshots registered
	Errors            int // tables skipped due to parse/rename/registration failures
}
// ListTablesByPrefix returns every table whose name begins with prefix,
// ordered by name descending. Only the sqlite and postgres/pgx drivers
// are supported; any other driver yields an error. Note that LIKE treats
// '_' as a single-character wildcard, so the match can be slightly wider
// than a literal prefix.
func ListTablesByPrefix(ctx context.Context, database db.Database, prefix string) ([]string, error) {
	conn := database.DB()
	driverName := strings.ToLower(conn.DriverName())
	like := prefix + "%"

	var query string
	switch driverName {
	case "sqlite":
		query = `
SELECT name
FROM sqlite_master
WHERE type = 'table'
AND name LIKE ?
ORDER BY name DESC
`
	case "pgx", "postgres":
		query = `
SELECT tablename
FROM pg_catalog.pg_tables
WHERE schemaname = 'public'
AND tablename LIKE $1
ORDER BY tablename DESC
`
	default:
		return nil, fmt.Errorf("unsupported driver for listing tables: %s", driverName)
	}

	rows, err := conn.QueryxContext(ctx, query, like)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	names := make([]string, 0)
	for rows.Next() {
		var tableName string
		if err := rows.Scan(&tableName); err != nil {
			return nil, err
		}
		names = append(names, tableName)
	}
	return names, rows.Err()
}
// EnsureSnapshotRegistry creates the snapshot_registry table and its
// indexes if they do not already exist, for the sqlite and postgres/pgx
// drivers. snapshot_time is stored as a unix-epoch BIGINT; table_name is
// UNIQUE so RegisterSnapshot can upsert on it.
//
// The ALTER TABLE upgrades registries created before snapshot_count
// existed; the driver-specific "column already exists" error text is
// matched and treated as success. Index-creation failures are logged
// but non-fatal.
func EnsureSnapshotRegistry(ctx context.Context, database db.Database) error {
	dbConn := database.DB()
	driver := strings.ToLower(dbConn.DriverName())
	switch driver {
	case "sqlite":
		_, err := dbConn.ExecContext(ctx, `
CREATE TABLE IF NOT EXISTS snapshot_registry (
id INTEGER PRIMARY KEY AUTOINCREMENT,
snapshot_type TEXT NOT NULL,
table_name TEXT NOT NULL UNIQUE,
snapshot_time BIGINT NOT NULL,
snapshot_count BIGINT NOT NULL DEFAULT 0
)
`)
		if err != nil {
			return err
		}
		// Legacy upgrade: SQLite has no ADD COLUMN IF NOT EXISTS, so the
		// "duplicate column name" error is the expected, benign outcome on an
		// up-to-date schema.
		_, err = dbConn.ExecContext(ctx, `ALTER TABLE snapshot_registry ADD COLUMN snapshot_count BIGINT NOT NULL DEFAULT 0`)
		if err != nil && !strings.Contains(strings.ToLower(err.Error()), "duplicate column name") {
			slog.Warn("failed to add snapshot_count column", "error", err)
			return err
		}
		if _, err := dbConn.ExecContext(ctx, `CREATE INDEX IF NOT EXISTS idx_snapshot_registry_type_time ON snapshot_registry (snapshot_type, snapshot_time)`); err != nil {
			slog.Warn("failed to create snapshot_registry index", "error", err)
		}
		if _, err := dbConn.ExecContext(ctx, `CREATE INDEX IF NOT EXISTS idx_snapshot_registry_table_name ON snapshot_registry (table_name)`); err != nil {
			slog.Warn("failed to create snapshot_registry index", "error", err)
		}
		return nil
	case "pgx", "postgres":
		_, err := dbConn.ExecContext(ctx, `
CREATE TABLE IF NOT EXISTS snapshot_registry (
id BIGSERIAL PRIMARY KEY,
snapshot_type TEXT NOT NULL,
table_name TEXT NOT NULL UNIQUE,
snapshot_time BIGINT NOT NULL,
snapshot_count BIGINT NOT NULL DEFAULT 0
)
`)
		if err != nil {
			return err
		}
		// Legacy upgrade: matches Postgres' exact "already exists" wording.
		// NOTE(review): ADD COLUMN IF NOT EXISTS would avoid the string match
		// on PostgreSQL 9.6+ — confirm the minimum supported server version.
		_, err = dbConn.ExecContext(ctx, `ALTER TABLE snapshot_registry ADD COLUMN snapshot_count BIGINT NOT NULL DEFAULT 0`)
		if err != nil && !strings.Contains(strings.ToLower(err.Error()), "column \"snapshot_count\" of relation \"snapshot_registry\" already exists") {
			slog.Warn("failed to add snapshot_count column", "error", err)
			return err
		}
		if _, err := dbConn.ExecContext(ctx, `CREATE INDEX IF NOT EXISTS idx_snapshot_registry_type_time ON snapshot_registry (snapshot_type, snapshot_time DESC)`); err != nil {
			slog.Warn("failed to create snapshot_registry index", "error", err)
		}
		if _, err := dbConn.ExecContext(ctx, `CREATE INDEX IF NOT EXISTS idx_snapshot_registry_table_name ON snapshot_registry (table_name)`); err != nil {
			slog.Warn("failed to create snapshot_registry index", "error", err)
		}
		return nil
	default:
		return fmt.Errorf("unsupported driver for snapshot registry: %s", driver)
	}
}
// MigrateSnapshotRegistry rebuilds the snapshot_registry from the
// inventory tables that actually exist. It clears the registry, then:
//   - renames legacy hourly tables (date-encoded suffixes) to the
//     canonical inventory_hourly_<unix-epoch> form and registers them;
//   - registers daily (inventory_daily_summary_YYYYMMDD) and monthly
//     (inventory_monthly_summary_YYYYMM) summary tables.
//
// Per-table failures increment stats.Errors and processing continues;
// a non-nil error is returned at the end when any occurred.
func MigrateSnapshotRegistry(ctx context.Context, database db.Database) (SnapshotMigrationStats, error) {
	stats := SnapshotMigrationStats{}
	if err := EnsureSnapshotRegistry(ctx, database); err != nil {
		return stats, err
	}
	dbConn := database.DB()
	// Start from a clean slate; every surviving table is re-registered below.
	if _, err := dbConn.ExecContext(ctx, `DELETE FROM snapshot_registry`); err != nil {
		return stats, fmt.Errorf("unable to clear snapshot registry: %w", err)
	}
	allTables, err := ListTablesByPrefix(ctx, database, "inventory_")
	if err != nil {
		return stats, err
	}
	// Track every existing inventory table so renames can detect collisions.
	tableSet := make(map[string]struct{}, len(allTables))
	for _, table := range allTables {
		tableSet[table] = struct{}{}
	}
	hourlyTables, err := ListTablesByPrefix(ctx, database, "inventory_hourly_")
	if err != nil {
		return stats, err
	}
	for _, table := range hourlyTables {
		// Prefer the snapshot time recorded inside the table itself.
		snapshotTime, err := latestSnapshotTime(ctx, dbConn, table)
		if err != nil {
			stats.Errors++
			continue
		}
		if snapshotTime.IsZero() {
			// Fall back to decoding the timestamp from the table-name suffix.
			// NOTE(review): these layouts parse in UTC (time.Parse) while the
			// name-based fallback in recordsFromTableNames uses time.Local —
			// confirm which zone legacy names were written in.
			suffix := strings.TrimPrefix(table, "inventory_hourly_")
			if parsed, parseErr := time.Parse("200601021504", suffix); parseErr == nil {
				// Name encoded with date+hour+minute (e.g., 15-minute cadence)
				snapshotTime = parsed
			} else if parsed, parseErr := time.Parse("2006010215", suffix); parseErr == nil {
				// Legacy hour-only encoding
				snapshotTime = parsed
			} else if epoch, parseErr := strconv.ParseInt(suffix, 10, 64); parseErr == nil {
				snapshotTime = time.Unix(epoch, 0)
			}
		}
		if snapshotTime.IsZero() {
			stats.Errors++
			continue
		}
		// Canonical name is epoch-based; rename when the current name differs.
		newName := fmt.Sprintf("inventory_hourly_%d", snapshotTime.Unix())
		if newName != table {
			if _, exists := tableSet[newName]; exists {
				// Target name already taken; leave the legacy table untouched.
				stats.Errors++
				continue
			}
			if err := renameTable(ctx, dbConn, table, newName); err != nil {
				stats.Errors++
				continue
			}
			delete(tableSet, table)
			tableSet[newName] = struct{}{}
			table = newName
			stats.HourlyRenamed++
		}
		// Row count is best-effort; the error is deliberately ignored.
		rowCount, _ := db.TableRowCount(ctx, dbConn, table)
		if err := RegisterSnapshot(ctx, database, "hourly", table, snapshotTime, rowCount); err != nil {
			stats.Errors++
			continue
		}
		stats.HourlyRegistered++
	}
	dailyTables, err := ListTablesByPrefix(ctx, database, "inventory_daily_summary_")
	if err != nil {
		return stats, err
	}
	for _, table := range dailyTables {
		// Daily summary names carry the date directly; no rename needed.
		suffix := strings.TrimPrefix(table, "inventory_daily_summary_")
		parsed, err := time.Parse("20060102", suffix)
		if err != nil {
			stats.Errors++
			continue
		}
		rowCount, _ := db.TableRowCount(ctx, dbConn, table)
		if err := RegisterSnapshot(ctx, database, "daily", table, parsed, rowCount); err != nil {
			stats.Errors++
			continue
		}
		stats.DailyRegistered++
	}
	monthlyTables, err := ListTablesByPrefix(ctx, database, "inventory_monthly_summary_")
	if err != nil {
		return stats, err
	}
	for _, table := range monthlyTables {
		suffix := strings.TrimPrefix(table, "inventory_monthly_summary_")
		parsed, err := time.Parse("200601", suffix)
		if err != nil {
			stats.Errors++
			continue
		}
		rowCount, _ := db.TableRowCount(ctx, dbConn, table)
		if err := RegisterSnapshot(ctx, database, "monthly", table, parsed, rowCount); err != nil {
			stats.Errors++
			continue
		}
		stats.MonthlyRegistered++
	}
	if stats.Errors > 0 {
		return stats, fmt.Errorf("migration completed with %d error(s)", stats.Errors)
	}
	return stats, nil
}
// RegisterSnapshot upserts one row into snapshot_registry, keyed on the
// unique table_name: re-registering an existing table refreshes its
// type, time, and row count. Both snapshot type and table name must be
// non-empty; only the sqlite and postgres/pgx drivers are supported.
func RegisterSnapshot(ctx context.Context, database db.Database, snapshotType string, tableName string, snapshotTime time.Time, snapshotCount int64) error {
	if snapshotType == "" || tableName == "" {
		return fmt.Errorf("snapshot type or table name is empty")
	}
	conn := database.DB()
	driverName := strings.ToLower(conn.DriverName())
	began := time.Now()
	slog.Debug("snapshot registry upsert start",
		"type", snapshotType,
		"table", tableName,
		"snapshot_time", snapshotTime.Unix(),
		"row_count", snapshotCount,
	)

	var query string
	switch driverName {
	case "sqlite":
		query = `
INSERT INTO snapshot_registry (snapshot_type, table_name, snapshot_time, snapshot_count)
VALUES (?, ?, ?, ?)
ON CONFLICT(table_name) DO UPDATE SET
snapshot_time = excluded.snapshot_time,
snapshot_type = excluded.snapshot_type,
snapshot_count = excluded.snapshot_count
`
	case "pgx", "postgres":
		query = `
INSERT INTO snapshot_registry (snapshot_type, table_name, snapshot_time, snapshot_count)
VALUES ($1, $2, $3, $4)
ON CONFLICT (table_name) DO UPDATE SET
snapshot_time = EXCLUDED.snapshot_time,
snapshot_type = EXCLUDED.snapshot_type,
snapshot_count = EXCLUDED.snapshot_count
`
	default:
		return fmt.Errorf("unsupported driver for snapshot registry: %s", driverName)
	}

	if _, execErr := conn.ExecContext(ctx, query, snapshotType, tableName, snapshotTime.Unix(), snapshotCount); execErr != nil {
		slog.Warn("snapshot registry upsert failed",
			"type", snapshotType,
			"table", tableName,
			"duration", time.Since(began),
			"error", execErr,
		)
		return execErr
	}
	slog.Debug("snapshot registry upsert complete",
		"type", snapshotType,
		"table", tableName,
		"duration", time.Since(began),
	)
	return nil
}
// DeleteSnapshotRecord removes the registry row for tableName, if any.
// An empty name is a no-op; deleting an unregistered name is not an
// error. Only the sqlite and postgres/pgx drivers are supported.
func DeleteSnapshotRecord(ctx context.Context, database db.Database, tableName string) error {
	if tableName == "" {
		return nil
	}
	conn := database.DB()
	driverName := strings.ToLower(conn.DriverName())

	var query string
	switch driverName {
	case "sqlite":
		query = `DELETE FROM snapshot_registry WHERE table_name = ?`
	case "pgx", "postgres":
		query = `DELETE FROM snapshot_registry WHERE table_name = $1`
	default:
		return fmt.Errorf("unsupported driver for snapshot registry: %s", driverName)
	}
	_, err := conn.ExecContext(ctx, query, tableName)
	return err
}
// ListSnapshots returns all registered snapshots of the given type,
// newest first (ties broken by table name, descending). Snapshot times
// are converted from the stored unix epoch into the local time zone.
func ListSnapshots(ctx context.Context, database db.Database, snapshotType string) ([]SnapshotRecord, error) {
	conn := database.DB()
	driverName := strings.ToLower(conn.DriverName())

	var query string
	switch driverName {
	case "sqlite":
		query = `
SELECT table_name, snapshot_time, snapshot_type, snapshot_count
FROM snapshot_registry
WHERE snapshot_type = ?
ORDER BY snapshot_time DESC, table_name DESC
`
	case "pgx", "postgres":
		query = `
SELECT table_name, snapshot_time, snapshot_type, snapshot_count
FROM snapshot_registry
WHERE snapshot_type = $1
ORDER BY snapshot_time DESC, table_name DESC
`
	default:
		return nil, fmt.Errorf("unsupported driver for listing snapshots: %s", driverName)
	}

	rows, err := conn.QueryxContext(ctx, query, snapshotType)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	out := make([]SnapshotRecord, 0)
	for rows.Next() {
		var (
			name     string
			epoch    int64
			recType  string
			rowCount int64
		)
		if err := rows.Scan(&name, &epoch, &recType, &rowCount); err != nil {
			return nil, err
		}
		out = append(out, SnapshotRecord{
			TableName:     name,
			SnapshotTime:  time.Unix(epoch, 0),
			SnapshotType:  recType,
			SnapshotCount: rowCount,
		})
	}
	return out, rows.Err()
}
// ListSnapshotsByRange returns snapshots of the given type whose time
// falls in the half-open interval [start, end), oldest first. Returned
// snapshot times are expressed in start's time zone.
func ListSnapshotsByRange(ctx context.Context, database db.Database, snapshotType string, start time.Time, end time.Time) ([]SnapshotRecord, error) {
	conn := database.DB()
	driverName := strings.ToLower(conn.DriverName())
	loc := start.Location()

	var query string
	switch driverName {
	case "sqlite":
		query = `
SELECT table_name, snapshot_time, snapshot_type, snapshot_count
FROM snapshot_registry
WHERE snapshot_type = ?
AND snapshot_time >= ?
AND snapshot_time < ?
ORDER BY snapshot_time ASC, table_name ASC
`
	case "pgx", "postgres":
		query = `
SELECT table_name, snapshot_time, snapshot_type, snapshot_count
FROM snapshot_registry
WHERE snapshot_type = $1
AND snapshot_time >= $2
AND snapshot_time < $3
ORDER BY snapshot_time ASC, table_name ASC
`
	default:
		return nil, fmt.Errorf("unsupported driver for listing snapshots: %s", driverName)
	}

	rows, err := conn.QueryxContext(ctx, query, snapshotType, start.Unix(), end.Unix())
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	out := make([]SnapshotRecord, 0)
	for rows.Next() {
		var (
			name     string
			epoch    int64
			recType  string
			rowCount int64
		)
		if err := rows.Scan(&name, &epoch, &recType, &rowCount); err != nil {
			return nil, err
		}
		out = append(out, SnapshotRecord{
			TableName:     name,
			SnapshotTime:  time.Unix(epoch, 0).In(loc),
			SnapshotType:  recType,
			SnapshotCount: rowCount,
		})
	}
	return out, rows.Err()
}
// SnapshotRecordsWithFallback looks snapshots up in the registry first
// and, when that yields nothing (or errors), reconstructs records from
// the table names themselves. When both paths fail, the registry error
// takes precedence; when both are empty, the registry result (and its
// error, possibly nil) is returned.
func SnapshotRecordsWithFallback(ctx context.Context, database db.Database, snapshotType, prefix, layout string, start, end time.Time) ([]SnapshotRecord, error) {
	primary, primaryErr := ListSnapshotsByRange(ctx, database, snapshotType, start, end)
	if primaryErr == nil && len(primary) > 0 {
		return primary, nil
	}
	derived, derivedErr := recordsFromTableNames(ctx, database, snapshotType, prefix, layout, start, end)
	switch {
	case derivedErr != nil && primaryErr != nil:
		return nil, primaryErr
	case derivedErr != nil:
		return nil, derivedErr
	case len(derived) > 0:
		return derived, nil
	}
	return primary, primaryErr
}
// recordsFromTableNames reconstructs snapshot records purely from table
// names when the registry is unavailable or empty. The timestamp is
// decoded from the name suffix: layout "epoch" means unix seconds, any
// other layout is parsed with time.ParseInLocation in the local zone.
// Only records with start <= ts < end are kept, sorted ascending by
// time; row counts are unknown and reported as -1.
func recordsFromTableNames(ctx context.Context, database db.Database, snapshotType, prefix, layout string, start, end time.Time) ([]SnapshotRecord, error) {
	tables, err := ListTablesByPrefix(ctx, database, prefix)
	if err != nil {
		return nil, err
	}
	out := make([]SnapshotRecord, 0, len(tables))
	for _, name := range tables {
		// LIKE can over-match ('_' is a wildcard); require a literal prefix.
		if !strings.HasPrefix(name, prefix) {
			continue
		}
		suffix := strings.TrimPrefix(name, prefix)

		var stamp time.Time
		if layout == "epoch" {
			epoch, convErr := strconv.ParseInt(suffix, 10, 64)
			if convErr != nil {
				continue
			}
			stamp = time.Unix(epoch, 0)
		} else {
			parsed, parseErr := time.ParseInLocation(layout, suffix, time.Local)
			if parseErr != nil {
				continue
			}
			stamp = parsed
		}

		if stamp.Before(start) || !stamp.Before(end) {
			continue
		}
		out = append(out, SnapshotRecord{
			TableName:    name,
			SnapshotTime: stamp,
			SnapshotType: snapshotType,
			// Row count is unknown without the registry.
			SnapshotCount: -1,
		})
	}
	sort.Slice(out, func(a, b int) bool {
		return out[a].SnapshotTime.Before(out[b].SnapshotTime)
	})
	return out, nil
}
// LatestSnapshotTime returns the most recent registered snapshot time
// for the given type, or the zero time when no snapshot of that type
// exists (or the stored epoch is non-positive).
func LatestSnapshotTime(ctx context.Context, database db.Database, snapshotType string) (time.Time, error) {
	conn := database.DB()
	driverName := strings.ToLower(conn.DriverName())

	var query string
	switch driverName {
	case "sqlite":
		query = `
SELECT MAX(snapshot_time)
FROM snapshot_registry
WHERE snapshot_type = ?
`
	case "pgx", "postgres":
		query = `
SELECT MAX(snapshot_time)
FROM snapshot_registry
WHERE snapshot_type = $1
`
	default:
		return time.Time{}, fmt.Errorf("unsupported driver for listing snapshots: %s", driverName)
	}

	// MAX over zero rows yields NULL, hence the NullInt64.
	var newest sql.NullInt64
	if err := conn.GetContext(ctx, &newest, query, snapshotType); err != nil {
		return time.Time{}, err
	}
	if !newest.Valid || newest.Int64 <= 0 {
		return time.Time{}, nil
	}
	return time.Unix(newest.Int64, 0), nil
}
func FormatSnapshotLabel(snapshotType string, snapshotTime time.Time, tableName string) string {
switch snapshotType {
case "hourly":
return snapshotTime.Format("2006-01-02 15:04")
case "daily":
return snapshotTime.Format("2006-01-02")
case "monthly":
return snapshotTime.Format("2006-01")
default:
return tableName
}
}
// CreateTableReport renders the contents of tableName as an .xlsx
// workbook and returns the encoded bytes.
//
// Snapshot/summary tables get a customized layout: internal ID columns
// are hidden, template VMs are filtered out, summary tables use the
// fixed summaryReportOrder layout with extra human-readable time
// columns and a metadata sheet, and daily/monthly summaries also get a
// totals chart sheet. The header row is bold, auto-filtered, and
// frozen, and column widths are auto-sized.
func CreateTableReport(logger *slog.Logger, Database db.Database, ctx context.Context, tableName string) ([]byte, error) {
	// tableName is spliced into SQL below, so validate it first.
	if err := db.ValidateTableName(tableName); err != nil {
		return nil, err
	}
	start := time.Now()
	logger.Debug("Create table report start", "table", tableName)
	dbConn := Database.DB()
	if strings.HasPrefix(tableName, "inventory_daily_summary_") || strings.HasPrefix(tableName, "inventory_monthly_summary_") {
		// Best-effort: back-fill any summary columns the table is missing so
		// the fixed report layout can select them.
		if err := ensureSummaryReportColumns(ctx, dbConn, tableName); err != nil {
			logger.Warn("Unable to ensure summary columns for report", "error", err, "table", tableName)
		}
	}
	columns, err := tableColumns(ctx, dbConn, tableName)
	if err != nil {
		logger.Warn("Failed to load report columns", "table", tableName, "error", err)
		return nil, err
	}
	if len(columns) == 0 {
		return nil, fmt.Errorf("no columns found for table %s", tableName)
	}
	logger.Debug("Report columns loaded", "table", tableName, "columns", len(columns))
	// Table-kind flags drive which columns are hidden or added below.
	isHourlySnapshot := strings.HasPrefix(tableName, "inventory_hourly_")
	isDailySummary := strings.HasPrefix(tableName, "inventory_daily_summary_")
	isMonthlySummary := strings.HasPrefix(tableName, "inventory_monthly_summary_")
	hideInventoryID := isHourlySnapshot || isDailySummary || isMonthlySummary
	hideRowID := isHourlySnapshot || isDailySummary || isMonthlySummary
	humanizeTimes := isDailySummary || isMonthlySummary
	applyTemplateFilter := isHourlySnapshot || isDailySummary || isMonthlySummary
	// columnSpec maps an output column to the index of the source column it
	// reads; SourceIndex -1 means "no source, emit blank cells".
	type columnSpec struct {
		Name string
		SourceIndex int
		Humanize bool
	}
	specs := make([]columnSpec, 0, len(columns)+2)
	columnIndex := make(map[string]int, len(columns))
	for i, columnName := range columns {
		columnIndex[strings.ToLower(columnName)] = i
	}
	used := make(map[string]struct{}, len(columns))
	// addSpec appends an output column unless it is hidden for this table
	// kind; for summaries it also appends a derived human-readable column
	// after each epoch time column.
	addSpec := func(columnName string, sourceIndex int) {
		if hideInventoryID && strings.EqualFold(columnName, "InventoryId") {
			return
		}
		if hideRowID && strings.EqualFold(columnName, "RowId") {
			return
		}
		// VmType is never exported to reports.
		if strings.EqualFold(columnName, "VmType") {
			return
		}
		if (isDailySummary || isMonthlySummary) && (strings.EqualFold(columnName, "EventKey") || strings.EqualFold(columnName, "CloudId")) {
			return
		}
		if (isDailySummary || isMonthlySummary) && strings.EqualFold(columnName, "Gold") {
			return
		}
		specs = append(specs, columnSpec{Name: columnName, SourceIndex: sourceIndex})
		if humanizeTimes && columnName == "CreationTime" {
			specs = append(specs, columnSpec{Name: "CreationTimeReadable", SourceIndex: sourceIndex, Humanize: true})
		}
		if humanizeTimes && columnName == "DeletionTime" {
			specs = append(specs, columnSpec{Name: "DeletionTimeReadable", SourceIndex: sourceIndex, Humanize: true})
		}
	}
	if isDailySummary || isMonthlySummary {
		// Summaries use the fixed layout; columns missing from the table
		// still appear (blank) so reports stay uniform.
		for _, columnName := range summaryReportOrder() {
			if idx, ok := columnIndex[strings.ToLower(columnName)]; ok {
				addSpec(columnName, idx)
				used[strings.ToLower(columnName)] = struct{}{}
			} else {
				logger.Warn("Summary report column missing from table", "table", tableName, "column", columnName)
				addSpec(columnName, -1)
			}
		}
	} else {
		// Other tables emit their natural column order. (used is empty on
		// this path, so the skip below never fires.)
		for i, columnName := range columns {
			if _, ok := used[strings.ToLower(columnName)]; ok {
				continue
			}
			addSpec(columnName, i)
		}
	}
	query := fmt.Sprintf(`SELECT * FROM %s`, tableName)
	if applyTemplateFilter && hasColumn(columns, "IsTemplate") {
		query = fmt.Sprintf(`%s WHERE %s`, query, templateExclusionFilter())
	}
	orderBy := snapshotOrderBy(columns)
	orderDir := "DESC"
	// NOTE(review): these three flags duplicate isHourlySnapshot /
	// isDailySummary / isMonthlySummary computed above.
	isHourly := strings.HasPrefix(tableName, "inventory_hourly_")
	isDailySummaryTable := strings.HasPrefix(tableName, "inventory_daily_summary_")
	isMonthlySummaryTable := strings.HasPrefix(tableName, "inventory_monthly_summary_")
	if hasColumn(columns, "CreationTime") && (isHourly || isDailySummaryTable || isMonthlySummaryTable) {
		// Sort inventory rows by creation time ascending so newest VMs appear last.
		orderBy = "CreationTime"
		orderDir = "ASC"
	}
	if orderBy != "" {
		query = fmt.Sprintf(`%s ORDER BY "%s" %s`, query, orderBy, orderDir)
	}
	logger.Debug("Report query prepared", "table", tableName, "order_by", orderBy, "order_dir", orderDir, "template_filter", applyTemplateFilter)
	rows, err := dbConn.QueryxContext(ctx, query)
	if err != nil {
		logger.Warn("Report query failed", "table", tableName, "error", err)
		return nil, err
	}
	defer rows.Close()
	sheetName := "Snapshot Report"
	var buffer bytes.Buffer
	xlsx := excelize.NewFile()
	// Rename the default sheet rather than creating a second one.
	if err := xlsx.SetSheetName("Sheet1", sheetName); err != nil {
		return nil, err
	}
	if err := xlsx.SetDocProps(&excelize.DocProperties{
		Creator: "vctp",
		Created: time.Now().Format(time.RFC3339),
	}); err != nil {
		logger.Error("Error setting document properties", "error", err)
	}
	// Header row (row 1): one cell per output column.
	for i, spec := range specs {
		cell, err := excelize.CoordinatesToCellName(i+1, 1)
		if err != nil {
			logger.Error("Error determining header cell", "error", err, "column", i+1)
			continue
		}
		xlsx.SetCellValue(sheetName, cell, spec.Name)
	}
	// Auto-filter across the full header width.
	if endCell, err := excelize.CoordinatesToCellName(len(specs), 1); err == nil {
		filterRange := "A1:" + endCell
		if err := xlsx.AutoFilter(sheetName, filterRange, nil); err != nil {
			logger.Error("Error setting autofilter", "error", err)
		}
	}
	headerStyle, err := xlsx.NewStyle(&excelize.Style{
		Font: &excelize.Font{
			Bold: true,
		},
	})
	if err == nil {
		if err := xlsx.SetRowStyle(sheetName, 1, 1, headerStyle); err != nil {
			logger.Error("Error setting header style", "error", err)
		}
	}
	// Data rows start at row 2, one workbook row per source row.
	rowIndex := 2
	for rows.Next() {
		values, err := scanRowValues(rows, len(columns))
		if err != nil {
			logger.Warn("Report row scan failed", "table", tableName, "error", err)
			return nil, err
		}
		for colIndex, spec := range specs {
			cell, err := excelize.CoordinatesToCellName(colIndex+1, rowIndex)
			if err != nil {
				logger.Error("Error determining data cell", "error", err, "column", colIndex+1, "row", rowIndex)
				continue
			}
			// Columns with no source (missing from the table) emit blanks.
			if spec.SourceIndex < 0 || spec.SourceIndex >= len(values) {
				xlsx.SetCellValue(sheetName, cell, "")
				continue
			}
			value := values[spec.SourceIndex]
			if spec.Humanize {
				xlsx.SetCellValue(sheetName, cell, formatEpochHuman(value))
			} else {
				xlsx.SetCellValue(sheetName, cell, normalizeCellValue(value))
			}
		}
		rowIndex++
	}
	if err := rows.Err(); err != nil {
		logger.Warn("Report row iteration failed", "table", tableName, "error", err)
		return nil, err
	}
	rowCount := rowIndex - 2
	logger.Debug("Report rows populated", "table", tableName, "rows", rowCount)
	// Freeze the header row so it stays visible while scrolling.
	if err := xlsx.SetPanes(sheetName, &excelize.Panes{
		Freeze: true,
		Split: false,
		XSplit: 0,
		YSplit: 1,
		TopLeftCell: "A2",
		ActivePane: "bottomLeft",
		Selection: []excelize.Selection{
			{SQRef: "A2", ActiveCell: "A2", Pane: "bottomLeft"},
		},
	}); err != nil {
		logger.Error("Error freezing top row", "error", err)
	}
	if err := SetColAutoWidth(xlsx, sheetName); err != nil {
		logger.Error("Error setting auto width", "error", err)
	}
	if isDailySummary || isMonthlySummary {
		addReportMetadataSheet(logger, xlsx)
	}
	// Adds an hourly/daily totals chart for summary tables (no-op otherwise).
	addTotalsChartSheet(logger, Database, ctx, xlsx, tableName)
	logger.Debug("Report charts complete", "table", tableName)
	if index, err := xlsx.GetSheetIndex(sheetName); err == nil {
		xlsx.SetActiveSheet(index)
	}
	if err := xlsx.Write(&buffer); err != nil {
		logger.Warn("Report write failed", "table", tableName, "error", err)
		return nil, err
	}
	logger.Debug("Create table report complete", "table", tableName, "rows", rowCount, "bytes", buffer.Len(), "duration", time.Since(start))
	return buffer.Bytes(), nil
}
// SaveTableReport renders the report for tableName and writes it into
// destDir as <tableName>.xlsx, creating the directory when needed. It
// returns the full path of the written file.
func SaveTableReport(logger *slog.Logger, Database db.Database, ctx context.Context, tableName, destDir string) (string, error) {
	if err := db.ValidateTableName(tableName); err != nil {
		return "", err
	}
	if strings.TrimSpace(destDir) == "" {
		return "", fmt.Errorf("destination directory is empty")
	}
	began := time.Now()
	logger.Debug("Save table report start", "table", tableName, "dest", destDir)

	if err := os.MkdirAll(destDir, 0o755); err != nil {
		logger.Warn("Report directory create failed", "table", tableName, "dest", destDir, "error", err)
		return "", fmt.Errorf("failed to create reports directory: %w", err)
	}
	logger.Debug("Report directory ready", "dest", destDir)

	payload, err := CreateTableReport(logger, Database, ctx, tableName)
	if err != nil {
		logger.Warn("Report render failed", "table", tableName, "error", err)
		return "", err
	}
	logger.Debug("Report rendered", "table", tableName, "bytes", len(payload))

	outPath := filepath.Join(destDir, tableName+".xlsx")
	if err := os.WriteFile(outPath, payload, 0o644); err != nil {
		logger.Warn("Report write failed", "table", tableName, "file", outPath, "error", err)
		return "", err
	}
	logger.Debug("Save table report complete", "table", tableName, "file", outPath, "duration", time.Since(began))
	return outPath, nil
}
// addTotalsChartSheet appends an hourly-totals chart (for daily summary
// tables) or a daily-totals chart (for monthly summary tables) to the
// workbook. Any failure — unparsable suffix, missing registry, no
// snapshot data — silently leaves the workbook without a chart.
func addTotalsChartSheet(logger *slog.Logger, database db.Database, ctx context.Context, xlsx *excelize.File, tableName string) {
	const dailyPrefix = "inventory_daily_summary_"
	const monthlyPrefix = "inventory_monthly_summary_"

	switch {
	case strings.HasPrefix(tableName, dailyPrefix):
		dayStart, err := time.ParseInLocation("20060102", strings.TrimPrefix(tableName, dailyPrefix), time.Local)
		if err != nil {
			return
		}
		dayEnd := dayStart.AddDate(0, 0, 1)
		if EnsureSnapshotRegistry(ctx, database) != nil {
			return
		}
		// Include snapshots up to two hours past the end of the day so a
		// late final snapshot still contributes.
		records, err := SnapshotRecordsWithFallback(ctx, database, "hourly", "inventory_hourly_", "epoch", dayStart, dayEnd.Add(2*time.Hour))
		if err != nil || len(records) == 0 {
			return
		}
		points, err := buildHourlyTotals(ctx, logger, database.DB(), records, dayStart, dayEnd)
		if err != nil || len(points) == 0 {
			return
		}
		writeTotalsChart(logger, xlsx, "Hourly Totals", points)
	case strings.HasPrefix(tableName, monthlyPrefix):
		monthStart, err := time.ParseInLocation("200601", strings.TrimPrefix(tableName, monthlyPrefix), time.Local)
		if err != nil {
			return
		}
		monthEnd := monthStart.AddDate(0, 1, 0)
		if EnsureSnapshotRegistry(ctx, database) != nil {
			return
		}
		records, err := SnapshotRecordsWithFallback(ctx, database, "daily", "inventory_daily_summary_", "20060102", monthStart, monthEnd)
		if err != nil || len(records) == 0 {
			return
		}
		points, err := buildDailyTotals(ctx, database.DB(), records, true)
		if err != nil || len(points) == 0 {
			return
		}
		writeTotalsChart(logger, xlsx, "Daily Totals", points)
	}
}
// tableColumns returns the column names of tableName in declaration
// order, via PRAGMA table_info on sqlite and information_schema on
// postgres. The caller is expected to have validated tableName before
// it is interpolated into the sqlite PRAGMA.
func tableColumns(ctx context.Context, dbConn *sqlx.DB, tableName string) ([]string, error) {
	switch driverName := strings.ToLower(dbConn.DriverName()); driverName {
	case "sqlite":
		rows, err := dbConn.QueryxContext(ctx, fmt.Sprintf(`PRAGMA table_info("%s")`, tableName))
		if err != nil {
			return nil, err
		}
		defer rows.Close()
		names := make([]string, 0)
		for rows.Next() {
			// PRAGMA table_info yields: cid, name, type, notnull, dflt_value, pk.
			var (
				cid        int
				name       string
				columnType string
				notNull    int
				defaultVal sql.NullString
				primaryKey int
			)
			if err := rows.Scan(&cid, &name, &columnType, &notNull, &defaultVal, &primaryKey); err != nil {
				return nil, err
			}
			names = append(names, name)
		}
		return names, rows.Err()
	case "pgx", "postgres":
		rows, err := dbConn.QueryxContext(ctx, `
SELECT column_name
FROM information_schema.columns
WHERE table_schema = 'public'
AND table_name = $1
ORDER BY ordinal_position
`, tableName)
		if err != nil {
			return nil, err
		}
		defer rows.Close()
		names := make([]string, 0)
		for rows.Next() {
			var name string
			if err := rows.Scan(&name); err != nil {
				return nil, err
			}
			names = append(names, name)
		}
		return names, rows.Err()
	default:
		return nil, fmt.Errorf("unsupported driver for table columns: %s", driverName)
	}
}
// snapshotOrderBy picks the preferred ORDER BY column for a snapshot
// table, case-insensitively, in priority order: SnapshotTime,
// SamplesPresent, AvgIsPresent, then Name. It returns the canonical
// column spelling, or "" when none of the candidates are present.
func snapshotOrderBy(columns []string) string {
	present := make(map[string]struct{}, len(columns))
	for _, name := range columns {
		present[strings.ToLower(name)] = struct{}{}
	}
	for _, candidate := range []string{"SnapshotTime", "SamplesPresent", "AvgIsPresent", "Name"} {
		if _, ok := present[strings.ToLower(candidate)]; ok {
			return candidate
		}
	}
	return ""
}
// hasColumn reports whether name appears in columns, compared
// case-insensitively.
func hasColumn(columns []string, name string) bool {
	for _, candidate := range columns {
		if !strings.EqualFold(candidate, name) {
			continue
		}
		return true
	}
	return false
}
// templateExclusionFilter returns a SQL predicate that keeps only rows
// whose "IsTemplate" column is not one of the truthy markers 'TRUE',
// 'true', or '1'; NULL is treated as not-a-template via COALESCE.
func templateExclusionFilter() string {
	return `COALESCE(CAST("IsTemplate" AS TEXT), '') NOT IN ('TRUE', 'true', '1')`
}
// quoteSheetName wraps a worksheet name in single quotes for use in
// formula references, doubling any embedded single quotes.
func quoteSheetName(name string) string {
	var quoted strings.Builder
	quoted.WriteByte('\'')
	quoted.WriteString(strings.ReplaceAll(name, "'", "''"))
	quoted.WriteByte('\'')
	return quoted.String()
}
// columnDef pairs a column name with its SQL type, used when adding
// missing summary-report columns via ALTER TABLE.
type columnDef struct {
	Name string // column name as it appears in the table
	Type string // SQL type used in ALTER TABLE ... ADD COLUMN
}
// ensureSummaryReportColumns adds any column from summaryReportColumns
// that tableName is missing, so report generation can rely on the full
// expected schema. Name comparison is case-insensitive, and tableName
// is validated before being spliced into DDL.
func ensureSummaryReportColumns(ctx context.Context, dbConn *sqlx.DB, tableName string) error {
	if err := db.ValidateTableName(tableName); err != nil {
		return err
	}
	current, err := tableColumns(ctx, dbConn, tableName)
	if err != nil {
		return err
	}
	have := make(map[string]struct{}, len(current))
	for _, name := range current {
		have[strings.ToLower(name)] = struct{}{}
	}
	for _, want := range summaryReportColumns() {
		if _, present := have[strings.ToLower(want.Name)]; present {
			continue
		}
		ddl := fmt.Sprintf(`ALTER TABLE %s ADD COLUMN "%s" %s`, tableName, want.Name, want.Type)
		if _, err := dbConn.ExecContext(ctx, ddl); err != nil {
			return err
		}
	}
	return nil
}
// summaryReportColumns is the canonical schema for daily/monthly summary
// tables: every column a summary report may select, with the SQL type
// used when back-filling a missing column via ALTER TABLE.
func summaryReportColumns() []columnDef {
	return []columnDef{
		// Identity and placement
		{Name: "InventoryId", Type: "BIGINT"},
		{Name: "Name", Type: "TEXT"},
		{Name: "Vcenter", Type: "TEXT"},
		{Name: "VmId", Type: "TEXT"},
		{Name: "EventKey", Type: "TEXT"},
		{Name: "CloudId", Type: "TEXT"},
		{Name: "CreationTime", Type: "BIGINT"},
		{Name: "DeletionTime", Type: "BIGINT"},
		{Name: "ResourcePool", Type: "TEXT"},
		{Name: "Datacenter", Type: "TEXT"},
		{Name: "Cluster", Type: "TEXT"},
		{Name: "Folder", Type: "TEXT"},
		// Sizing and state
		{Name: "ProvisionedDisk", Type: "REAL"},
		{Name: "VcpuCount", Type: "BIGINT"},
		{Name: "RamGB", Type: "BIGINT"},
		{Name: "IsTemplate", Type: "TEXT"},
		{Name: "PoweredOn", Type: "TEXT"},
		{Name: "SrmPlaceholder", Type: "TEXT"},
		{Name: "VmUuid", Type: "TEXT"},
		// Aggregates over the summary period
		{Name: "SamplesPresent", Type: "BIGINT"},
		{Name: "AvgVcpuCount", Type: "REAL"},
		{Name: "AvgRamGB", Type: "REAL"},
		{Name: "AvgProvisionedDisk", Type: "REAL"},
		{Name: "AvgIsPresent", Type: "REAL"},
		// Storage-pool tier percentages and totals
		{Name: "PoolTinPct", Type: "REAL"},
		{Name: "PoolBronzePct", Type: "REAL"},
		{Name: "PoolSilverPct", Type: "REAL"},
		{Name: "PoolGoldPct", Type: "REAL"},
		{Name: "Tin", Type: "REAL"},
		{Name: "Bronze", Type: "REAL"},
		{Name: "Silver", Type: "REAL"},
		{Name: "Gold", Type: "REAL"},
	}
}
// summaryReportOrder is the left-to-right column layout for daily and
// monthly summary reports. It deliberately omits internal columns
// (InventoryId, EventKey, CloudId, Gold, Tin/Bronze/Silver) that
// CreateTableReport's addSpec also filters out.
func summaryReportOrder() []string {
	return []string{
		"Name",
		"Vcenter",
		"VmId",
		"CreationTime",
		"DeletionTime",
		"ResourcePool",
		"Datacenter",
		"Cluster",
		"Folder",
		"ProvisionedDisk",
		"VcpuCount",
		"RamGB",
		"IsTemplate",
		"PoweredOn",
		"SrmPlaceholder",
		"VmUuid",
		"SamplesPresent",
		"AvgVcpuCount",
		"AvgRamGB",
		"AvgProvisionedDisk",
		"AvgIsPresent",
		"PoolTinPct",
		"PoolBronzePct",
		"PoolSilverPct",
		"PoolGoldPct",
	}
}
// addReportMetadataSheet appends a "Metadata" sheet recording when the
// report was generated (RFC 3339). Failures are logged and the report
// is produced without the sheet.
func addReportMetadataSheet(logger *slog.Logger, xlsx *excelize.File) {
	const sheetName = "Metadata"
	if _, err := xlsx.NewSheet(sheetName); err != nil {
		logger.Error("Error creating metadata sheet", "error", err)
		return
	}
	xlsx.SetCellValue(sheetName, "A1", "ReportGeneratedAt")
	xlsx.SetCellValue(sheetName, "B1", time.Now().Format(time.RFC3339))
	if err := SetColAutoWidth(xlsx, sheetName); err != nil {
		logger.Error("Error setting metadata auto width", "error", err)
	}
}
// scanRowValues scans the current row into a slice of columnCount
// untyped values, by pointing one scan target at each slot.
func scanRowValues(rows *sqlx.Rows, columnCount int) ([]interface{}, error) {
	values := make([]interface{}, columnCount)
	targets := make([]interface{}, columnCount)
	for i := 0; i < columnCount; i++ {
		targets[i] = &values[i]
	}
	if err := rows.Scan(targets...); err != nil {
		return nil, err
	}
	return values, nil
}
func normalizeCellValue(value interface{}) interface{} {
switch v := value.(type) {
case nil:
return ""
case []byte:
return string(v)
case time.Time:
return v.Format(time.RFC3339)
default:
return v
}
}
// totalsPoint is one aggregated data point for a totals chart: a bucket
// label plus summed VM, vCPU, RAM, presence, and tier figures.
// NOTE(review): tier totals presumably sum the Tin/Bronze/Silver/Gold
// columns per bucket — confirm in buildHourlyTotals/buildDailyTotals.
type totalsPoint struct {
	Label         string  // bucket label shown on the chart axis
	VmCount       float64 // VMs counted in the bucket
	VcpuTotal     float64 // summed vCPUs
	RamTotal      float64 // summed RAM
	PresenceRatio float64 // presence fraction for the bucket — TODO confirm semantics
	TinTotal      float64 // tin-tier total
	BronzeTotal   float64 // bronze-tier total
	SilverTotal   float64 // silver-tier total
	GoldTotal     float64 // gold-tier total
}
// buildHourlyTotals computes one totalsPoint per whole hour inside
// [windowStart, windowEnd] from per-snapshot VM tables.
//
// For each hour it picks the first usable snapshot taken at or shortly
// after the hour's end (within maxLag), plus the most recent usable
// snapshot at or before the hour's end; the earlier snapshot supplies
// VMs deleted during the hour that are absent from the later one. A
// VM's "presence" is the fraction of the hour its
// [CreationTime, DeletionTime] span overlaps the bucket. Extensive
// diagnostics are emitted at debug level.
//
// Returns an error if the window is inverted, a table name fails
// validation, or the aggregation query fails.
func buildHourlyTotals(ctx context.Context, logger *slog.Logger, dbConn *sqlx.DB, records []SnapshotRecord, windowStart, windowEnd time.Time) ([]totalsPoint, error) {
	if logger == nil {
		logger = slog.Default()
	}
	if windowEnd.Before(windowStart) {
		return nil, fmt.Errorf("hourly totals window end is before start")
	}
	// Sort chronologically so the recordIndex cursor below can advance
	// monotonically across hour buckets.
	sort.Slice(records, func(i, j int) bool {
		return records[i].SnapshotTime.Before(records[j].SnapshotTime)
	})
	// maxLag bounds how far past an hour's end a snapshot may land and
	// still represent that hour; defaults to the typical snapshot spacing.
	expectedInterval := estimateSnapshotInterval(records)
	maxLag := expectedInterval
	if maxLag <= 0 {
		maxLag = time.Hour
	}
	points := make([]totalsPoint, 0, 24)
	// Align the first bucket to a whole-hour boundary at or after windowStart.
	hourStart := windowStart.Truncate(time.Hour)
	if hourStart.Before(windowStart) {
		hourStart = hourStart.Add(time.Hour)
	}
	recordIndex := 0
	for hourEnd := hourStart.Add(time.Hour); !hourEnd.After(windowEnd); hourEnd = hourEnd.Add(time.Hour) {
		hourWindowStart := hourEnd.Add(-time.Hour)
		// Select the first usable snapshot at or after hourEnd, but not
		// later than hourEnd+maxLag.
		var selected *SnapshotRecord
		selectedIndex := recordIndex
		for selectedIndex < len(records) {
			record := records[selectedIndex]
			if record.SnapshotTime.Before(hourEnd) {
				selectedIndex++
				continue
			}
			if record.SnapshotTime.After(hourEnd.Add(maxLag)) {
				break
			}
			if err := db.ValidateTableName(record.TableName); err != nil {
				return nil, err
			}
			if record.SnapshotCount == 0 {
				logger.Debug("hourly totals skipping empty snapshot", "table", record.TableName, "snapshot_time", record.SnapshotTime)
				selectedIndex++
				continue
			}
			// A negative count means "unknown"; probe the table directly.
			if record.SnapshotCount < 0 {
				rowsExist, err := db.TableHasRows(ctx, dbConn, record.TableName)
				if err != nil {
					logger.Debug("hourly totals snapshot probe failed", "table", record.TableName, "snapshot_time", record.SnapshotTime, "error", err)
				}
				if err != nil || !rowsExist {
					selectedIndex++
					continue
				}
			}
			selected = &record
			break
		}
		if selected == nil {
			// No snapshot close enough to this hour; skip the bucket.
			logger.Debug(
				"hourly totals missing snapshot for interval",
				"interval_start", hourWindowStart.Format("2006-01-02 15:04"),
				"interval_end", hourEnd.Format("2006-01-02 15:04"),
				"max_lag_seconds", int64(maxLag.Seconds()),
			)
			recordIndex = selectedIndex
			continue
		}
		// Find the most recent usable snapshot at or before hourEnd; it
		// feeds the missing_deleted CTE below with VMs deleted mid-hour.
		var prev *SnapshotRecord
		for prevIndex := selectedIndex - 1; prevIndex >= 0; prevIndex-- {
			record := records[prevIndex]
			if record.SnapshotTime.After(hourEnd) {
				continue
			}
			if err := db.ValidateTableName(record.TableName); err != nil {
				return nil, err
			}
			if record.SnapshotCount == 0 {
				continue
			}
			if record.SnapshotCount < 0 {
				rowsExist, err := db.TableHasRows(ctx, dbConn, record.TableName)
				if err != nil || !rowsExist {
					continue
				}
			}
			prev = &record
			break
		}
		recordIndex = selectedIndex
		hourStartUnix := hourWindowStart.Unix()
		hourEndUnix := hourEnd.Unix()
		durationSeconds := float64(hourEndUnix - hourStartUnix)
		// Without an earlier snapshot, fall back to the selected table so
		// the prev_base CTE is still well-formed (it then adds nothing).
		prevTableName := selected.TableName
		if prev != nil {
			prevTableName = prev.TableName
		}
		// presence = overlap of [CreationTime, DeletionTime] with the hour,
		// clamped to the bucket and divided by the bucket duration.
		// The agg* variants operate on the lower-cased aggregated columns.
		startExpr := `CASE WHEN "CreationTime" IS NOT NULL AND "CreationTime" > 0 AND "CreationTime" > ? THEN "CreationTime" ELSE ? END`
		endExpr := `CASE WHEN "DeletionTime" IS NOT NULL AND "DeletionTime" > 0 AND "DeletionTime" < ? THEN "DeletionTime" ELSE ? END`
		overlapExpr := fmt.Sprintf(`CASE WHEN %s > %s THEN (CAST((%s - %s) AS REAL) / ?) ELSE 0 END`, endExpr, startExpr, endExpr, startExpr)
		aggStartExpr := `CASE WHEN creation_time IS NOT NULL AND creation_time > 0 AND creation_time > ? THEN creation_time ELSE ? END`
		aggEndExpr := `CASE WHEN deletion_time IS NOT NULL AND deletion_time > 0 AND deletion_time < ? THEN deletion_time ELSE ? END`
		aggOverlapExpr := fmt.Sprintf(`CASE WHEN %s > %s THEN (CAST((%s - %s) AS REAL) / ?) ELSE 0 END`, aggEndExpr, aggStartExpr, aggEndExpr, aggStartExpr)
		// VM identity: first non-empty of VmId/VmUuid/Name, scoped by vCenter.
		idExpr := `COALESCE(NULLIF("VmId", ''), NULLIF("VmUuid", ''), NULLIF("Name", ''), 'unknown')`
		vmKeyExpr := fmt.Sprintf(`(%s || '|' || COALESCE("Vcenter", ''))`, idExpr)
		// CTE pipeline:
		//   base/agg           - selected snapshot rows, deduped per vm_key
		//   lifecycle          - first-seen / deleted-at hints from vm_lifecycle_cache
		//   prev_base/prev_agg - same for the earlier snapshot
		//   missing_deleted    - VMs only in prev whose deletion falls in this hour
		//   agg_presence       - union of current + deleted-mid-hour VMs
		//   diag/agg_diag      - debug counters logged below
		query := fmt.Sprintf(`
			WITH base AS (
				SELECT
					%s AS vm_key,
					"VmId",
					"VmUuid",
					"Name",
					"Vcenter",
					"VcpuCount",
					"RamGB",
					LOWER(COALESCE("ResourcePool", '')) AS pool,
					NULLIF("CreationTime", 0) AS creation_time,
					NULLIF("DeletionTime", 0) AS deletion_time,
					%s AS presence
				FROM %s
				WHERE %s
			),
			agg AS (
				SELECT
					vm_key,
					MAX("VcpuCount") AS "VcpuCount",
					MAX("RamGB") AS "RamGB",
					MAX(pool) AS pool,
					MIN(creation_time) AS creation_time,
					MIN(deletion_time) AS deletion_time
				FROM base
				GROUP BY vm_key
			),
			lifecycle AS (
				SELECT
					(COALESCE(NULLIF("VmId", ''), NULLIF("VmUuid", ''), NULLIF("Name", ''), 'unknown') || '|' || COALESCE("Vcenter", '')) AS vm_key,
					MIN(NULLIF("FirstSeen", 0)) AS first_seen,
					MIN(NULLIF("DeletedAt", 0)) AS deleted_at
				FROM vm_lifecycle_cache
				GROUP BY vm_key
			),
			prev_base AS (
				SELECT
					%s AS vm_key,
					"VcpuCount",
					"RamGB",
					LOWER(COALESCE("ResourcePool", '')) AS pool,
					NULLIF("CreationTime", 0) AS creation_time,
					NULLIF("DeletionTime", 0) AS deletion_time
				FROM %s
				WHERE %s
			),
			prev_agg AS (
				SELECT
					vm_key,
					MAX("VcpuCount") AS "VcpuCount",
					MAX("RamGB") AS "RamGB",
					MAX(pool) AS pool,
					MIN(creation_time) AS creation_time,
					MIN(deletion_time) AS deletion_time
				FROM prev_base
				GROUP BY vm_key
			),
			missing_deleted AS (
				SELECT
					prev_agg.vm_key,
					prev_agg."VcpuCount",
					prev_agg."RamGB",
					prev_agg.pool,
					COALESCE(prev_agg.creation_time, lifecycle.first_seen) AS creation_time,
					COALESCE(lifecycle.deleted_at, prev_agg.deletion_time) AS deletion_time,
					%s AS presence
				FROM prev_agg
				LEFT JOIN lifecycle ON lifecycle.vm_key = prev_agg.vm_key
				LEFT JOIN agg ON agg.vm_key = prev_agg.vm_key
				WHERE agg.vm_key IS NULL
					AND COALESCE(lifecycle.deleted_at, prev_agg.deletion_time, 0) > 0
					AND COALESCE(lifecycle.deleted_at, prev_agg.deletion_time) > ? AND COALESCE(lifecycle.deleted_at, prev_agg.deletion_time) < ?
			),
			agg_presence AS (
				SELECT
					agg.vm_key,
					agg."VcpuCount",
					agg."RamGB",
					agg.pool,
					COALESCE(agg.creation_time, lifecycle.first_seen) AS creation_time,
					COALESCE(lifecycle.deleted_at, agg.deletion_time) AS deletion_time,
					%s AS presence
				FROM agg
				LEFT JOIN lifecycle ON lifecycle.vm_key = agg.vm_key
				UNION ALL
				SELECT
					missing_deleted.vm_key,
					missing_deleted."VcpuCount",
					missing_deleted."RamGB",
					missing_deleted.pool,
					missing_deleted.creation_time,
					missing_deleted.deletion_time,
					missing_deleted.presence
				FROM missing_deleted
			),
			diag AS (
				SELECT
					COUNT(*) AS row_count,
					COUNT(DISTINCT vm_key) AS distinct_keys,
					COALESCE(SUM(CASE WHEN vm_key LIKE 'unknown|%%' THEN 1 ELSE 0 END), 0) AS unknown_keys,
					COALESCE(SUM(CASE WHEN "VmId" IS NULL OR "VmId" = '' THEN 1 ELSE 0 END), 0) AS missing_vm_id,
					COALESCE(SUM(CASE WHEN "VmUuid" IS NULL OR "VmUuid" = '' THEN 1 ELSE 0 END), 0) AS missing_vm_uuid,
					COALESCE(SUM(CASE WHEN "Name" IS NULL OR "Name" = '' THEN 1 ELSE 0 END), 0) AS missing_name,
					COALESCE(SUM(CASE WHEN presence > 1 THEN 1 ELSE 0 END), 0) AS presence_over_one,
					COALESCE(SUM(CASE WHEN presence < 0 THEN 1 ELSE 0 END), 0) AS presence_under_zero,
					COALESCE(SUM(presence), 0) AS base_presence_sum
				FROM base
			),
			agg_diag AS (
				SELECT
					COUNT(*) AS agg_count,
					COALESCE(SUM(CASE WHEN creation_time IS NULL OR creation_time = 0 THEN 1 ELSE 0 END), 0) AS missing_creation,
					COALESCE(SUM(CASE WHEN deletion_time IS NULL OR deletion_time = 0 THEN 1 ELSE 0 END), 0) AS missing_deletion,
					COALESCE(SUM(CASE WHEN creation_time > ? AND creation_time < ? THEN 1 ELSE 0 END), 0) AS created_in_interval,
					COALESCE(SUM(CASE WHEN deletion_time > ? AND deletion_time < ? THEN 1 ELSE 0 END), 0) AS deleted_in_interval,
					COALESCE(SUM(CASE WHEN presence > 0 AND presence < 1 THEN 1 ELSE 0 END), 0) AS partial_presence
				FROM agg_presence
			)
			SELECT
				(SELECT COUNT(*) FROM agg_presence) AS vm_count,
				(SELECT COALESCE(SUM(CASE WHEN "VcpuCount" IS NOT NULL THEN "VcpuCount" ELSE 0 END), 0) FROM agg_presence) AS vcpu_total,
				(SELECT COALESCE(SUM(CASE WHEN "RamGB" IS NOT NULL THEN "RamGB" ELSE 0 END), 0) FROM agg_presence) AS ram_total,
				(SELECT COALESCE(SUM(presence), 0) FROM agg_presence) AS presence_ratio,
				(SELECT COALESCE(SUM(CASE WHEN pool = 'tin' THEN presence ELSE 0 END), 0) FROM agg_presence) AS tin_total,
				(SELECT COALESCE(SUM(CASE WHEN pool = 'bronze' THEN presence ELSE 0 END), 0) FROM agg_presence) AS bronze_total,
				(SELECT COALESCE(SUM(CASE WHEN pool = 'silver' THEN presence ELSE 0 END), 0) FROM agg_presence) AS silver_total,
				(SELECT COALESCE(SUM(CASE WHEN pool = 'gold' THEN presence ELSE 0 END), 0) FROM agg_presence) AS gold_total,
				diag.row_count,
				diag.distinct_keys,
				diag.unknown_keys,
				diag.missing_vm_id,
				diag.missing_vm_uuid,
				diag.missing_name,
				diag.presence_over_one,
				diag.presence_under_zero,
				diag.base_presence_sum,
				agg_diag.agg_count,
				agg_diag.missing_creation,
				agg_diag.missing_deletion,
				agg_diag.created_in_interval,
				agg_diag.deleted_in_interval,
				agg_diag.partial_presence
			FROM diag, agg_diag
		`, vmKeyExpr, overlapExpr, selected.TableName, templateExclusionFilter(), vmKeyExpr, prevTableName, templateExclusionFilter(), aggOverlapExpr, aggOverlapExpr)
		query = dbConn.Rebind(query)
		var row struct {
			VmCount           int64   `db:"vm_count"`
			VcpuTotal         int64   `db:"vcpu_total"`
			RamTotal          int64   `db:"ram_total"`
			PresenceRatio     float64 `db:"presence_ratio"`
			TinTotal          float64 `db:"tin_total"`
			BronzeTotal       float64 `db:"bronze_total"`
			SilverTotal       float64 `db:"silver_total"`
			GoldTotal         float64 `db:"gold_total"`
			RowCount          int64   `db:"row_count"`
			DistinctKeys      int64   `db:"distinct_keys"`
			UnknownKeys       int64   `db:"unknown_keys"`
			MissingVmID       int64   `db:"missing_vm_id"`
			MissingVmUUID     int64   `db:"missing_vm_uuid"`
			MissingName       int64   `db:"missing_name"`
			PresenceOverOne   int64   `db:"presence_over_one"`
			PresenceUnderZero int64   `db:"presence_under_zero"`
			BasePresenceSum   float64 `db:"base_presence_sum"`
			AggCount          int64   `db:"agg_count"`
			MissingCreation   int64   `db:"missing_creation"`
			MissingDeletion   int64   `db:"missing_deletion"`
			CreatedInInterval int64   `db:"created_in_interval"`
			DeletedInInterval int64   `db:"deleted_in_interval"`
			PartialPresence   int64   `db:"partial_presence"`
		}
		// Bind args for one overlap expression, in placeholder order:
		// end(2), start(2), end(2), start(2), duration(1) = 9 values.
		overlapArgs := []interface{}{
			hourEndUnix, hourEndUnix,
			hourStartUnix, hourStartUnix,
			hourEndUnix, hourEndUnix,
			hourStartUnix, hourStartUnix,
			durationSeconds,
		}
		// Positional args must mirror placeholder order in the query:
		// base presence (9), missing_deleted presence (9) + its deletion
		// window (2), agg_presence presence (9), agg_diag windows (2+2).
		args := make([]interface{}, 0, len(overlapArgs)*3+6)
		args = append(args, overlapArgs...)
		args = append(args, overlapArgs...)
		args = append(args, hourStartUnix, hourEndUnix)
		args = append(args, overlapArgs...)
		args = append(args, hourStartUnix, hourEndUnix)
		args = append(args, hourStartUnix, hourEndUnix)
		if err := dbConn.GetContext(ctx, &row, query, args...); err != nil {
			return nil, err
		}
		snapshotLag := selected.SnapshotTime.Sub(hourEnd)
		duplicateRows := row.RowCount - row.DistinctKeys
		logger.Debug(
			"hourly totals snapshot diagnostics",
			"table", selected.TableName,
			"snapshot_time", selected.SnapshotTime.Format(time.RFC3339),
			"snapshot_lag_seconds", int64(snapshotLag.Seconds()),
			"interval_start", hourWindowStart.Format("2006-01-02 15:04"),
			"interval_end", hourEnd.Format("2006-01-02 15:04"),
			"row_count", row.RowCount,
			"distinct_keys", row.DistinctKeys,
			"duplicate_rows", duplicateRows,
			"unknown_keys", row.UnknownKeys,
			"missing_vm_id", row.MissingVmID,
			"missing_vm_uuid", row.MissingVmUUID,
			"missing_name", row.MissingName,
			"presence_over_one", row.PresenceOverOne,
			"presence_under_zero", row.PresenceUnderZero,
			"base_presence_sum", row.BasePresenceSum,
			"agg_count", row.AggCount,
			"missing_creation", row.MissingCreation,
			"missing_deletion", row.MissingDeletion,
			"created_in_interval", row.CreatedInInterval,
			"deleted_in_interval", row.DeletedInInterval,
			"partial_presence", row.PartialPresence,
			"presence_ratio", row.PresenceRatio,
			"vm_count", row.VmCount,
		)
		label := formatHourIntervalLabel(hourWindowStart, hourEnd)
		logger.Debug(
			"hourly totals bucket",
			"interval_start", hourWindowStart.Format("2006-01-02 15:04"),
			"interval_end", hourEnd.Format("2006-01-02 15:04"),
			"presence_ratio", row.PresenceRatio,
			"tin_total", row.TinTotal,
			"bronze_total", row.BronzeTotal,
			"silver_total", row.SilverTotal,
			"gold_total", row.GoldTotal,
		)
		points = append(points, totalsPoint{
			Label:         label,
			VmCount:       float64(row.VmCount),
			VcpuTotal:     float64(row.VcpuTotal),
			RamTotal:      float64(row.RamTotal),
			PresenceRatio: row.PresenceRatio,
			TinTotal:      row.TinTotal,
			BronzeTotal:   row.BronzeTotal,
			SilverTotal:   row.SilverTotal,
			GoldTotal:     row.GoldTotal,
		})
	}
	return points, nil
}
// estimateSnapshotInterval returns the typical spacing between
// consecutive snapshots, computed as the median of the positive gaps.
// It falls back to one hour when fewer than two records exist or no
// positive gap can be measured.
func estimateSnapshotInterval(records []SnapshotRecord) time.Duration {
	const fallback = time.Hour
	if len(records) < 2 {
		return fallback
	}
	gaps := make([]int64, 0, len(records)-1)
	for i := 1; i < len(records); i++ {
		if gap := records[i].SnapshotTime.Sub(records[i-1].SnapshotTime); gap > 0 {
			gaps = append(gaps, int64(gap.Seconds()))
		}
	}
	if len(gaps) == 0 {
		return fallback
	}
	sort.Slice(gaps, func(a, b int) bool { return gaps[a] < gaps[b] })
	if median := gaps[len(gaps)/2]; median > 0 {
		return time.Duration(median) * time.Second
	}
	return fallback
}
func formatHourIntervalLabel(start, end time.Time) string {
startLabel := start.Format("2006-01-02 15:04")
if start.Year() == end.Year() && start.YearDay() == end.YearDay() {
return fmt.Sprintf("%s to %s", startLabel, end.Format("15:04"))
}
return fmt.Sprintf("%s to %s", startLabel, end.Format("2006-01-02 15:04"))
}
func formatDayIntervalLabel(start, end time.Time) string {
return fmt.Sprintf("%s to %s", start.Format("2006-01-02"), end.Format("2006-01-02"))
}
// buildDailyTotals computes one totalsPoint per daily snapshot table.
// Each record's table is expected to hold pre-aggregated per-VM rows
// (AvgVcpuCount, AvgRamGB, AvgIsPresent, and Tin..Gold tier columns).
// When prorateByAvg is true the tier totals are weighted by each VM's
// AvgIsPresent and the charted VM count is prorated by the average
// presence ratio. Empty snapshots are skipped; unknown counts
// (SnapshotCount < 0) are resolved by probing the table.
func buildDailyTotals(ctx context.Context, dbConn *sqlx.DB, records []SnapshotRecord, prorateByAvg bool) ([]totalsPoint, error) {
	points := make([]totalsPoint, 0, len(records))
	// Tier columns are divided by 100 — presumably stored as
	// percentages; confirm against the snapshot writer.
	tinExpr := `COALESCE(SUM(CASE WHEN "Tin" IS NOT NULL THEN "Tin" ELSE 0 END) / 100.0, 0)`
	bronzeExpr := `COALESCE(SUM(CASE WHEN "Bronze" IS NOT NULL THEN "Bronze" ELSE 0 END) / 100.0, 0)`
	silverExpr := `COALESCE(SUM(CASE WHEN "Silver" IS NOT NULL THEN "Silver" ELSE 0 END) / 100.0, 0)`
	goldExpr := `COALESCE(SUM(CASE WHEN "Gold" IS NOT NULL THEN "Gold" ELSE 0 END) / 100.0, 0)`
	if prorateByAvg {
		// Weight each tier value by the VM's average presence.
		tinExpr = `COALESCE(SUM(CASE WHEN "Tin" IS NOT NULL THEN "Tin" * COALESCE("AvgIsPresent", 0) ELSE 0 END) / 100.0, 0)`
		bronzeExpr = `COALESCE(SUM(CASE WHEN "Bronze" IS NOT NULL THEN "Bronze" * COALESCE("AvgIsPresent", 0) ELSE 0 END) / 100.0, 0)`
		silverExpr = `COALESCE(SUM(CASE WHEN "Silver" IS NOT NULL THEN "Silver" * COALESCE("AvgIsPresent", 0) ELSE 0 END) / 100.0, 0)`
		goldExpr = `COALESCE(SUM(CASE WHEN "Gold" IS NOT NULL THEN "Gold" * COALESCE("AvgIsPresent", 0) ELSE 0 END) / 100.0, 0)`
	}
	for _, record := range records {
		// Table names are interpolated into SQL below; validate first.
		if err := db.ValidateTableName(record.TableName); err != nil {
			return nil, err
		}
		if record.SnapshotCount == 0 {
			slog.Debug("daily totals skipping empty snapshot", "table", record.TableName, "snapshot_time", record.SnapshotTime)
			continue
		}
		// A negative count means "unknown"; probe the table directly.
		if record.SnapshotCount < 0 {
			rowsExist, err := db.TableHasRows(ctx, dbConn, record.TableName)
			if err != nil {
				slog.Debug("daily totals snapshot probe failed", "table", record.TableName, "snapshot_time", record.SnapshotTime, "error", err)
			}
			if err != nil || !rowsExist {
				continue
			}
		}
		query := fmt.Sprintf(`
			SELECT
				COUNT(DISTINCT "VmId") AS vm_count,
				COALESCE(SUM(CASE WHEN "AvgVcpuCount" IS NOT NULL THEN "AvgVcpuCount" ELSE 0 END), 0) AS vcpu_total,
				COALESCE(SUM(CASE WHEN "AvgRamGB" IS NOT NULL THEN "AvgRamGB" ELSE 0 END), 0) AS ram_total,
				COALESCE(AVG(CASE WHEN "AvgIsPresent" IS NOT NULL THEN "AvgIsPresent" ELSE 0 END), 0) AS presence_ratio,
				%s AS tin_total,
				%s AS bronze_total,
				%s AS silver_total,
				%s AS gold_total
			FROM %s
			WHERE %s
		`, tinExpr, bronzeExpr, silverExpr, goldExpr, record.TableName, templateExclusionFilter())
		var row struct {
			VmCount       int64   `db:"vm_count"`
			VcpuTotal     float64 `db:"vcpu_total"`
			RamTotal      float64 `db:"ram_total"`
			PresenceRatio float64 `db:"presence_ratio"`
			TinTotal      float64 `db:"tin_total"`
			BronzeTotal   float64 `db:"bronze_total"`
			SilverTotal   float64 `db:"silver_total"`
			GoldTotal     float64 `db:"gold_total"`
		}
		if err := dbConn.GetContext(ctx, &row, query); err != nil {
			return nil, err
		}
		// Label the point with the local calendar day of the snapshot.
		dayTime := record.SnapshotTime.Local()
		dayStart := time.Date(dayTime.Year(), dayTime.Month(), dayTime.Day(), 0, 0, 0, 0, dayTime.Location())
		dayEnd := dayStart.AddDate(0, 0, 1)
		points = append(points, totalsPoint{
			Label:         formatDayIntervalLabel(dayStart, dayEnd),
			VmCount:       float64(row.VmCount),
			VcpuTotal:     row.VcpuTotal,
			RamTotal:      row.RamTotal,
			PresenceRatio: computeProratedVmCount(row.PresenceRatio, float64(row.VmCount), prorateByAvg),
			TinTotal:      row.TinTotal,
			BronzeTotal:   row.BronzeTotal,
			SilverTotal:   row.SilverTotal,
			GoldTotal:     row.GoldTotal,
		})
	}
	return points, nil
}
// computeProratedVmCount scales vmCount by the presence ratio when
// prorating is enabled; otherwise it returns the raw count unchanged.
func computeProratedVmCount(presenceRatio float64, vmCount float64, prorate bool) float64 {
	if prorate {
		return presenceRatio * vmCount
	}
	return vmCount
}
// writeTotalsChart writes the totals time series to its own sheet and
// renders three line charts over the data: VM counts (raw and prorated),
// vCPU/RAM totals, and per-tier (Tin/Bronze/Silver/Gold) totals.
// Errors from excelize are logged and rendering continues best-effort.
// No-op when points is empty.
func writeTotalsChart(logger *slog.Logger, xlsx *excelize.File, sheetName string, points []totalsPoint) {
	if len(points) == 0 {
		return
	}
	index, err := xlsx.NewSheet(sheetName)
	if err != nil {
		logger.Error("Error creating totals sheet", "error", err)
		return
	}
	xlsx.SetActiveSheet(index)
	headers := []string{"Label", "VmCount", "VcpuCount", "RamGB", "ProratedVmCount", "Tin", "Bronze", "Silver", "Gold"}
	for i, header := range headers {
		cell, _ := excelize.CoordinatesToCellName(i+1, 1)
		xlsx.SetCellValue(sheetName, cell, header)
	}
	for i, point := range points {
		row := i + 2
		xlsx.SetCellValue(sheetName, fmt.Sprintf("A%d", row), point.Label)
		xlsx.SetCellValue(sheetName, fmt.Sprintf("B%d", row), point.VmCount)
		xlsx.SetCellValue(sheetName, fmt.Sprintf("C%d", row), point.VcpuTotal)
		xlsx.SetCellValue(sheetName, fmt.Sprintf("D%d", row), point.RamTotal)
		xlsx.SetCellValue(sheetName, fmt.Sprintf("E%d", row), point.PresenceRatio)
		xlsx.SetCellValue(sheetName, fmt.Sprintf("F%d", row), point.TinTotal)
		xlsx.SetCellValue(sheetName, fmt.Sprintf("G%d", row), point.BronzeTotal)
		xlsx.SetCellValue(sheetName, fmt.Sprintf("H%d", row), point.SilverTotal)
		xlsx.SetCellValue(sheetName, fmt.Sprintf("I%d", row), point.GoldTotal)
	}
	// Data occupies rows 2..lastRow; points is non-empty so lastRow >= 2
	// always holds (the former guard around the style block was dead).
	lastRow := len(points) + 1
	// Fractional columns (E..I) get a high-precision number format.
	numFmt := "0.00000000"
	if styleID, err := xlsx.NewStyle(&excelize.Style{CustomNumFmt: &numFmt}); err != nil {
		logger.Error("Error creating totals number format", "error", err)
	} else if err := xlsx.SetCellStyle(sheetName, "E2", fmt.Sprintf("I%d", lastRow), styleID); err != nil {
		logger.Error("Error setting totals number format", "error", err)
	}
	if endCell, err := excelize.CoordinatesToCellName(len(headers), 1); err == nil {
		filterRange := "A1:" + endCell
		if err := xlsx.AutoFilter(sheetName, filterRange, nil); err != nil {
			logger.Error("Error setting totals autofilter", "error", err)
		}
	}
	if err := SetColAutoWidth(xlsx, sheetName); err != nil {
		logger.Error("Error setting totals auto width", "error", err)
	}
	sheetRef := quoteSheetName(sheetName)
	categories := fmt.Sprintf("%s!$A$2:$A$%d", sheetRef, lastRow)
	// buildSeries maps a spreadsheet column letter to a chart series
	// whose name is the header cell and whose values span the data rows.
	buildSeries := func(col string) excelize.ChartSeries {
		return excelize.ChartSeries{
			Name:       fmt.Sprintf("%s!$%s$1", sheetRef, col),
			Categories: categories,
			Values:     fmt.Sprintf("%s!$%s$2:$%s$%d", sheetRef, col, col, lastRow),
		}
	}
	// makeChart renders a line chart for the given columns at anchor.
	makeChart := func(anchor string, cols ...string) {
		series := make([]excelize.ChartSeries, 0, len(cols))
		for _, col := range cols {
			series = append(series, buildSeries(col))
		}
		chart := excelize.Chart{
			Type:      excelize.Line,
			Series:    series,
			Legend:    excelize.ChartLegend{Position: "bottom"},
			XAxis:     excelize.ChartAxis{MajorGridLines: true},
			YAxis:     excelize.ChartAxis{MajorGridLines: true},
			Dimension: excelize.ChartDimension{Width: 960, Height: 480},
		}
		if err := xlsx.AddChart(sheetName, anchor, &chart); err != nil {
			logger.Error("Error adding totals chart", "error", err, "anchor", anchor)
		}
	}
	makeChart("K2", "B", "E")
	makeChart("K27", "C", "D")
	makeChart("K52", "F", "G", "H", "I")
}
// formatEpochHuman renders an epoch-seconds value of assorted dynamic
// types as a human-readable local timestamp. Nil, unparseable, or
// non-positive inputs yield an empty string.
func formatEpochHuman(value interface{}) string {
	var epoch int64
	switch v := value.(type) {
	case int64:
		epoch = v
	case int32:
		epoch = int64(v)
	case int:
		epoch = int64(v)
	case float64:
		epoch = int64(v)
	case string:
		n, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return ""
		}
		epoch = n
	case []byte:
		n, err := strconv.ParseInt(string(v), 10, 64)
		if err != nil {
			return ""
		}
		epoch = n
	default:
		// nil and any unexpected type render as empty.
		return ""
	}
	if epoch <= 0 {
		return ""
	}
	return time.Unix(epoch, 0).Local().Format("Mon 02 Jan 2006 15:04:05 MST")
}
// renameTable renames table oldName to newName. Both identifiers are
// validated before being interpolated into the DDL statement, guarding
// against SQL injection via table names.
func renameTable(ctx context.Context, dbConn *sqlx.DB, oldName string, newName string) error {
	for _, name := range []string{oldName, newName} {
		if err := db.ValidateTableName(name); err != nil {
			return err
		}
	}
	statement := fmt.Sprintf(`ALTER TABLE %s RENAME TO %s`, oldName, newName)
	if _, err := dbConn.ExecContext(ctx, statement); err != nil {
		return fmt.Errorf("failed to rename table %s to %s: %w", oldName, newName, err)
	}
	return nil
}
// latestSnapshotTime returns the greatest "SnapshotTime" recorded in the
// given table, or the zero time when the table is empty or holds no
// positive timestamps. The table name is validated before being
// interpolated into the query.
func latestSnapshotTime(ctx context.Context, dbConn *sqlx.DB, tableName string) (time.Time, error) {
	if err := db.ValidateTableName(tableName); err != nil {
		return time.Time{}, err
	}
	var latest sql.NullInt64
	query := fmt.Sprintf(`SELECT MAX("SnapshotTime") FROM %s`, tableName)
	if err := dbConn.GetContext(ctx, &latest, query); err != nil {
		return time.Time{}, err
	}
	if latest.Valid && latest.Int64 > 0 {
		return time.Unix(latest.Int64, 0), nil
	}
	return time.Time{}, nil
}