Enhance snapshot handling by backfilling provisioned disk data and updating backfill logic
All checks were successful
continuous-integration/drone/push Build is passing

This commit is contained in:
2026-02-13 16:17:56 +11:00
parent c446638eac
commit 6fbd6bc9d2
4 changed files with 38 additions and 5 deletions

View File

@@ -762,6 +762,21 @@ func snapshotFromVM(ctx context.Context, dbConn *sqlx.DB, vmObject *mo.VirtualMa
}
}
// Some VMs can be returned without complete virtual disk device metadata.
// Fall back to summary storage (committed + uncommitted) when available.
if !row.ProvisionedDisk.Valid && vmObject.Summary.Storage != nil {
provisionedBytes := vmObject.Summary.Storage.Committed
if vmObject.Summary.Storage.Uncommitted > 0 {
provisionedBytes += vmObject.Summary.Storage.Uncommitted
}
if provisionedBytes > 0 {
row.ProvisionedDisk = sql.NullFloat64{
Float64: float64(provisionedBytes) / 1024.0 / 1024.0 / 1024.0,
Valid: true,
}
}
}
if vmObject.Runtime.PowerState == "poweredOff" {
row.PoweredOn = "FALSE"
} else {
@@ -930,7 +945,7 @@ func snapshotFromVM(ctx context.Context, dbConn *sqlx.DB, vmObject *mo.VirtualMa
}
}
if dbConn != nil && needsSnapshotBackfill(row) {
if dbConn != nil && (needsSnapshotBackfill(row) || !row.ProvisionedDisk.Valid) {
if backfillSnapshotRowFromHourlyCache(ctx, dbConn, &row) && vc.Logger != nil {
vc.Logger.Debug("backfilled sparse VM snapshot row from hourly cache", "vm_id", row.VmId.String, "name", row.Name, "vcenter", row.Vcenter)
}
@@ -957,7 +972,6 @@ func snapshotFromVM(ctx context.Context, dbConn *sqlx.DB, vmObject *mo.VirtualMa
func needsSnapshotBackfill(row InventorySnapshotRow) bool {
return !row.CreationTime.Valid ||
!row.ProvisionedDisk.Valid ||
!row.VcpuCount.Valid ||
!row.RamGB.Valid ||
!row.Cluster.Valid ||

View File

@@ -104,3 +104,19 @@ func TestBackfillSnapshotRowFromHourlyCacheNoMatch(t *testing.T) {
t.Fatal("expected no backfill change for missing VM")
}
}
func TestNeedsSnapshotBackfillIgnoresDiskOnlyGap(t *testing.T) {
	// A row populated everywhere except ProvisionedDisk must not be flagged
	// as sparse: a missing disk figure on its own can be recovered from the
	// VM summary-storage fallback, so it should not force a cache backfill.
	sparse := InventorySnapshotRow{
		VmUuid:         sql.NullString{String: "uuid-1", Valid: true},
		Cluster:        sql.NullString{String: "cluster-a", Valid: true},
		Datacenter:     sql.NullString{String: "dc-a", Valid: true},
		CreationTime:   sql.NullInt64{Int64: 100, Valid: true},
		VcpuCount:      sql.NullInt64{Int64: 2, Valid: true},
		RamGB:          sql.NullInt64{Int64: 8, Valid: true},
		SrmPlaceholder: "FALSE",
		// ProvisionedDisk deliberately left at its zero value (Valid == false).
	}
	if got := needsSnapshotBackfill(sparse); got {
		t.Fatal("expected disk-only gap to be non-critical for sparse-row detection")
	}
}

View File

@@ -107,7 +107,8 @@ func (c *CronTask) aggregateMonthlySummary(ctx context.Context, targetMonth time
// Optional Go-based aggregation path.
if useGoAgg {
if granularity == "daily" {
switch granularity {
case "daily":
c.Logger.Debug("Using go implementation of monthly aggregation (daily)")
if err := c.aggregateMonthlySummaryGo(ctx, monthStart, monthEnd, monthlyTable, snapshots); err != nil {
c.Logger.Warn("go-based monthly aggregation failed, falling back to SQL path", "error", err)
@@ -116,7 +117,7 @@ func (c *CronTask) aggregateMonthlySummary(ctx context.Context, targetMonth time
c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", monthlyTable)
return nil
}
} else if granularity == "hourly" {
case "hourly":
c.Logger.Debug("Using go implementation of monthly aggregation (hourly)")
if err := c.aggregateMonthlySummaryGoHourly(ctx, monthStart, monthEnd, monthlyTable, snapshots); err != nil {
c.Logger.Warn("go-based monthly aggregation failed, falling back to SQL path", "error", err)
@@ -125,7 +126,7 @@ func (c *CronTask) aggregateMonthlySummary(ctx context.Context, targetMonth time
c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", monthlyTable)
return nil
}
} else {
default:
c.Logger.Warn("MONTHLY_AGG_GO is set but granularity is unsupported; using SQL path", "granularity", granularity)
}
}