Enhance snapshot handling by backfilling provisioned disk data and updating backfill logic
All checks were successful
continuous-integration/drone/push Build is passing

This commit is contained in:
2026-02-13 16:17:56 +11:00
parent c446638eac
commit 6fbd6bc9d2
4 changed files with 38 additions and 5 deletions

View File

@@ -762,6 +762,21 @@ func snapshotFromVM(ctx context.Context, dbConn *sqlx.DB, vmObject *mo.VirtualMa
} }
} }
// Some VMs can be returned without complete virtual disk device metadata.
// Fall back to summary storage (committed + uncommitted) when available.
if !row.ProvisionedDisk.Valid && vmObject.Summary.Storage != nil {
provisionedBytes := vmObject.Summary.Storage.Committed
if vmObject.Summary.Storage.Uncommitted > 0 {
provisionedBytes += vmObject.Summary.Storage.Uncommitted
}
if provisionedBytes > 0 {
row.ProvisionedDisk = sql.NullFloat64{
Float64: float64(provisionedBytes) / 1024.0 / 1024.0 / 1024.0,
Valid: true,
}
}
}
if vmObject.Runtime.PowerState == "poweredOff" { if vmObject.Runtime.PowerState == "poweredOff" {
row.PoweredOn = "FALSE" row.PoweredOn = "FALSE"
} else { } else {
@@ -930,7 +945,7 @@ func snapshotFromVM(ctx context.Context, dbConn *sqlx.DB, vmObject *mo.VirtualMa
} }
} }
if dbConn != nil && needsSnapshotBackfill(row) { if dbConn != nil && (needsSnapshotBackfill(row) || !row.ProvisionedDisk.Valid) {
if backfillSnapshotRowFromHourlyCache(ctx, dbConn, &row) && vc.Logger != nil { if backfillSnapshotRowFromHourlyCache(ctx, dbConn, &row) && vc.Logger != nil {
vc.Logger.Debug("backfilled sparse VM snapshot row from hourly cache", "vm_id", row.VmId.String, "name", row.Name, "vcenter", row.Vcenter) vc.Logger.Debug("backfilled sparse VM snapshot row from hourly cache", "vm_id", row.VmId.String, "name", row.Name, "vcenter", row.Vcenter)
} }
@@ -957,7 +972,6 @@ func snapshotFromVM(ctx context.Context, dbConn *sqlx.DB, vmObject *mo.VirtualMa
func needsSnapshotBackfill(row InventorySnapshotRow) bool { func needsSnapshotBackfill(row InventorySnapshotRow) bool {
return !row.CreationTime.Valid || return !row.CreationTime.Valid ||
!row.ProvisionedDisk.Valid ||
!row.VcpuCount.Valid || !row.VcpuCount.Valid ||
!row.RamGB.Valid || !row.RamGB.Valid ||
!row.Cluster.Valid || !row.Cluster.Valid ||

View File

@@ -104,3 +104,19 @@ func TestBackfillSnapshotRowFromHourlyCacheNoMatch(t *testing.T) {
t.Fatal("expected no backfill change for missing VM") t.Fatal("expected no backfill change for missing VM")
} }
} }
// TestNeedsSnapshotBackfillIgnoresDiskOnlyGap verifies that a row which is
// complete except for ProvisionedDisk is NOT classified as sparse: the
// disk gap alone must not trigger the hourly-cache backfill path.
func TestNeedsSnapshotBackfillIgnoresDiskOnlyGap(t *testing.T) {
	// Build a row with every backfill-relevant field populated, leaving
	// only the provisioned-disk figure unset.
	sparse := InventorySnapshotRow{
		VmUuid:         sql.NullString{String: "uuid-1", Valid: true},
		Datacenter:     sql.NullString{String: "dc-a", Valid: true},
		Cluster:        sql.NullString{String: "cluster-a", Valid: true},
		SrmPlaceholder: "FALSE",
		CreationTime:   sql.NullInt64{Int64: 100, Valid: true},
		VcpuCount:      sql.NullInt64{Int64: 2, Valid: true},
		RamGB:          sql.NullInt64{Int64: 8, Valid: true},
		// ProvisionedDisk intentionally missing.
	}
	if needsSnapshotBackfill(sparse) {
		t.Fatal("expected disk-only gap to be non-critical for sparse-row detection")
	}
}

View File

@@ -107,7 +107,8 @@ func (c *CronTask) aggregateMonthlySummary(ctx context.Context, targetMonth time
// Optional Go-based aggregation path. // Optional Go-based aggregation path.
if useGoAgg { if useGoAgg {
if granularity == "daily" { switch granularity {
case "daily":
c.Logger.Debug("Using go implementation of monthly aggregation (daily)") c.Logger.Debug("Using go implementation of monthly aggregation (daily)")
if err := c.aggregateMonthlySummaryGo(ctx, monthStart, monthEnd, monthlyTable, snapshots); err != nil { if err := c.aggregateMonthlySummaryGo(ctx, monthStart, monthEnd, monthlyTable, snapshots); err != nil {
c.Logger.Warn("go-based monthly aggregation failed, falling back to SQL path", "error", err) c.Logger.Warn("go-based monthly aggregation failed, falling back to SQL path", "error", err)
@@ -116,7 +117,7 @@ func (c *CronTask) aggregateMonthlySummary(ctx context.Context, targetMonth time
c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", monthlyTable) c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", monthlyTable)
return nil return nil
} }
} else if granularity == "hourly" { case "hourly":
c.Logger.Debug("Using go implementation of monthly aggregation (hourly)") c.Logger.Debug("Using go implementation of monthly aggregation (hourly)")
if err := c.aggregateMonthlySummaryGoHourly(ctx, monthStart, monthEnd, monthlyTable, snapshots); err != nil { if err := c.aggregateMonthlySummaryGoHourly(ctx, monthStart, monthEnd, monthlyTable, snapshots); err != nil {
c.Logger.Warn("go-based monthly aggregation failed, falling back to SQL path", "error", err) c.Logger.Warn("go-based monthly aggregation failed, falling back to SQL path", "error", err)
@@ -125,7 +126,7 @@ func (c *CronTask) aggregateMonthlySummary(ctx context.Context, targetMonth time
c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", monthlyTable) c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", monthlyTable)
return nil return nil
} }
} else { default:
c.Logger.Warn("MONTHLY_AGG_GO is set but granularity is unsupported; using SQL path", "granularity", granularity) c.Logger.Warn("MONTHLY_AGG_GO is set but granularity is unsupported; using SQL path", "granularity", granularity)
} }
} }

View File

@@ -286,6 +286,7 @@ func (v *Vcenter) GetAllVMsWithProps() ([]mo.VirtualMachine, error) {
"config.hardware", "config.hardware",
"config.managedBy", "config.managedBy",
"config.template", "config.template",
"summary.storage",
"runtime.powerState", "runtime.powerState",
"runtime.host", "runtime.host",
"resourcePool", "resourcePool",
@@ -304,6 +305,7 @@ func (v *Vcenter) GetVMWithSnapshotPropsByRef(ref types.ManagedObjectReference)
"name", "name",
"parent", "parent",
"config", "config",
"summary.storage",
"runtime.powerState", "runtime.powerState",
"runtime.host", "runtime.host",
"resourcePool", "resourcePool",