Compare commits

...

109 Commits

Author SHA1 Message Date
nathan 27cab61e89 improve title overflow
continuous-integration/drone/push Build is passing
2026-04-20 17:10:58 +10:00
nathan 11df6e0560 golden parity + lifecycle edge-case coverage in internal/tasks 2026-04-20 17:09:38 +10:00
nathan 2e0788caf2 more logging
continuous-integration/drone/push Build is passing
2026-04-20 16:59:45 +10:00
nathan 83bd4b2026 more postgresql type fixes
continuous-integration/drone/push Build is passing
2026-04-20 16:35:23 +10:00
nathan aa0d8099c7 fixed benchmark
continuous-integration/drone/push Build is passing
2026-04-20 16:24:48 +10:00
nathan 8ccf5a7009 enhance utilisation of postgres features
continuous-integration/drone/push Build is passing
2026-04-20 10:19:27 +10:00
nathan 98e92a8264 updated UI
continuous-integration/drone/push Build is passing
2026-04-17 15:21:27 +10:00
nathan 7848557002 update docs
continuous-integration/drone/push Build is passing
2026-04-17 14:00:48 +10:00
nathan ae3e2be89a add auth support
continuous-integration/drone/push Build is passing
2026-04-17 13:19:08 +10:00
nathan 9a561f3b07 cleanups and code fixes incl templ
continuous-integration/drone/push Build is passing
2026-03-20 13:21:15 +11:00
nathan 4fbb2582e3 update and bugfix
continuous-integration/drone/push Build is failing
2026-03-20 12:49:06 +11:00
nathan 3b0dfda992 feat: enhance summary pivot specifications to include column fields
continuous-integration/drone/push Build is passing
2026-02-27 14:11:02 +11:00
nathan 504621f80d Refactor settings handling to support context-based reloading and add utility functions for context management
continuous-integration/drone/push Build is passing
2026-02-19 11:06:05 +11:00
nathan f2d6b3158b Refactor code to use 'any' type and improve context handling
continuous-integration/drone/push Build is passing
2026-02-18 16:16:27 +11:00
nathan 6517a30fa2 Enhance table report creation by reloading settings and ensuring logger initialization
continuous-integration/drone/push Build is passing
2026-02-18 12:14:20 +11:00
nathan dcebd3cf60 Update pivot ranges and add title cells for summary report metrics
continuous-integration/drone/push Build is passing
2026-02-18 12:02:49 +11:00
nathan 29c277f863 Add support for customizable pivot titles and ranges in summary reports
continuous-integration/drone/push Build was killed
2026-02-18 11:59:22 +11:00
nathan 92c6797f05 update pivot table title cell
continuous-integration/drone/push Build is passing
2026-02-18 11:34:16 +11:00
nathan 9419103709 Fix SQL insert statement to properly quote column names in monthly aggregates
continuous-integration/drone/push Build is passing
2026-02-18 11:19:44 +11:00
nathan e4d73ee294 Update pivot table ranges in summary report for accurate data representation
continuous-integration/drone/push Build is passing
2026-02-16 09:48:52 +11:00
nathan 6da2da3e82 Add PostgreSQL checkpoint functionality and update related database operations
continuous-integration/drone/push Build is passing
2026-02-16 09:21:00 +11:00
nathan ff1ec3f4aa postgres fix
continuous-integration/drone/push Build is passing
2026-02-16 08:56:24 +11:00
nathan bc84931c37 Add vCenter cache rebuild functionality and related API endpoint
continuous-integration/drone/push Build is passing
2026-02-16 08:46:38 +11:00
nathan 6fbd6bc9d2 Enhance snapshot handling by backfilling provisioned disk data and updating backfill logic
continuous-integration/drone/push Build is passing
2026-02-13 16:17:56 +11:00
nathan c446638eac Implement targeted VM property refresh and backfill logic for snapshot rows
continuous-integration/drone/push Build is passing
2026-02-13 16:09:29 +11:00
nathan e2779f80c0 Adjust snapshot interval calculations to use one-third of the configured cadence
continuous-integration/drone/push Build is passing
2026-02-13 15:17:05 +11:00
nathan 1f39b46613 Deprecate legacy VM inventory endpoints and add gating logic
continuous-integration/drone/push Build is passing
2026-02-13 14:59:19 +11:00
nathan 18be1fbe06 Add vCenter reference cache tables and update related functions
continuous-integration/drone/push Build is passing
2026-02-13 14:45:13 +11:00
nathan 5cd8f9c2a1 pagination of vcenter queries
continuous-integration/drone/push Build is passing
2026-02-13 14:04:28 +11:00
nathan 1b052b93b6 backfill CreationTime in daily aggregation
continuous-integration/drone/push Build is passing
2026-02-11 11:50:29 +11:00
nathan f1be31781c improve logging
continuous-integration/drone/push Build is passing
2026-02-11 11:24:27 +11:00
nathan 34ac9287b4 fix sql timeout
continuous-integration/drone/push Build is passing
2026-02-11 11:04:19 +11:00
nathan b5bcea9da5 work on daily aggregation with postgresql
continuous-integration/drone/push Build is passing
2026-02-11 10:48:32 +11:00
nathan a0556e3ac8 more db fixes
continuous-integration/drone/push Build is passing
2026-02-11 10:20:20 +11:00
nathan f4d5577de1 add postgres migration
continuous-integration/drone/push Build is passing
2026-02-11 10:10:54 +11:00
nathan ee3b2d7f21 fix postgres code path
continuous-integration/drone/push Build is passing
2026-02-11 09:54:55 +11:00
nathan a09d94a266 improve sqlite import
continuous-integration/drone/push Build is passing
2026-02-11 08:41:32 +11:00
nathan 3e3d8c2eb0 improve postgres support
continuous-integration/drone/push Build is passing
2026-02-10 19:40:20 +11:00
nathan e0cbc20140 potential performance improvements for hourly inventory
continuous-integration/drone/push Build is passing
2026-02-09 17:13:20 +11:00
nathan 24bf74ad34 fix crash again
continuous-integration/drone/push Build is passing
2026-02-09 16:06:55 +11:00
nathan b70dfcf5be fix crash
continuous-integration/drone/push Build is passing
2026-02-09 15:55:19 +11:00
nathan 6dcbb9caef lifecycle diagnostics
continuous-integration/drone/push Build is passing
2026-02-09 14:27:41 +11:00
nathan 59b16db04f speed up vm trace pages
continuous-integration/drone/push Build is passing
2026-02-09 14:19:24 +11:00
nathan c4097ca608 try again
continuous-integration/drone/push Build is passing
2026-02-09 13:58:06 +11:00
nathan 4f4163f77a revert service yaml filename
continuous-integration/drone/push Build is failing
2026-02-09 13:54:46 +11:00
nathan 4d53927f9d debug pipeline
continuous-integration/drone/push Build is failing
2026-02-09 13:51:16 +11:00
nathan 68fcdb8625 fix yml reference
continuous-integration/drone/push Build is failing
2026-02-09 13:47:04 +11:00
nathan 5736dc6929 avoid vcenter totals pages scanning whole database
continuous-integration/drone/push Build is failing
2026-02-09 13:44:43 +11:00
nathan c66679a71f more index cleanups to optimise space
continuous-integration/drone/push Build is passing
2026-02-08 15:40:42 +11:00
nathan a993aedf79 use javascript chart instead of svg
continuous-integration/drone/push Build is passing
2026-02-06 16:42:48 +11:00
nathan 9677d083a8 updated docs
continuous-integration/drone/push Build is passing
2026-02-06 16:01:01 +11:00
nathan 0e3cf5aae9 [ci skip] more suggested improvements 2026-02-06 15:35:18 +11:00
nathan dfbaacb6f3 [ci skip] more codex 5.3 improvements 2026-02-06 15:18:30 +11:00
nathan dc96431f06 [ci skip] codex 5.3 review 2026-02-06 15:07:44 +11:00
nathan 5dcc11e5e0 reduce unnecessary sqlite indexes
continuous-integration/drone/push Build is passing
2026-02-06 08:53:36 +11:00
nathan 32ced35130 more metadata in reports
continuous-integration/drone/push Build is passing
2026-01-29 12:27:08 +11:00
nathan ff783fb45a still working on creation/deletion times
continuous-integration/drone/push Build is passing
2026-01-28 15:19:10 +11:00
nathan 49484900ac sql fix
continuous-integration/drone/push Build is passing
2026-01-28 13:49:41 +11:00
nathan aa6abb8cb2 bugfix hourly totals
continuous-integration/drone/push Build is passing
2026-01-28 13:27:05 +11:00
nathan 1f2783fc86 fix
continuous-integration/drone/push Build is passing
2026-01-28 13:14:05 +11:00
nathan b9eae50f69 updated snapshots logic
continuous-integration/drone/push Build is passing
2026-01-28 09:47:51 +11:00
nathan c566456ebd add configuration for monthly aggregation job timing
continuous-integration/drone/push Build is passing
2026-01-28 09:04:16 +11:00
nathan ee01d8deac improve lifecycle data
continuous-integration/drone/push Build is passing
2026-01-28 08:49:04 +11:00
nathan 93b5769145 improve logging for pro-rata
continuous-integration/drone/push Build is passing
2026-01-27 21:40:41 +11:00
Nathan Coad 38480e52c0 improve vm deletion detection
continuous-integration/drone/push Build is passing
2026-01-27 14:20:30 +11:00
Nathan Coad 6981bd9994 even more diagnostics
continuous-integration/drone/push Build is passing
2026-01-27 11:21:47 +11:00
Nathan Coad fe96172253 add diagnostic endpoint
continuous-integration/drone/push Build is passing
2026-01-27 11:02:39 +11:00
Nathan Coad 35b4a50cf6 try to fix pro-rata yet again
continuous-integration/drone/push Build is passing
2026-01-27 09:09:24 +11:00
nathan 73ec80bb6f update monthly aggregation and docs
continuous-integration/drone/push Build is passing
2026-01-23 15:35:10 +11:00
nathan 0d509179aa update daily aggregation to use hourly intervals
continuous-integration/drone/push Build is passing
2026-01-23 14:33:22 +11:00
nathan e6c7596239 extreme logging
continuous-integration/drone/push Build is passing
2026-01-23 13:51:03 +11:00
nathan b39865325a more logging
continuous-integration/drone/push Build is passing
2026-01-23 13:44:50 +11:00
nathan b4a3c0fb3a in depth fix of deletion/creation data
continuous-integration/drone/push Build is passing
2026-01-23 13:02:58 +11:00
nathan 2caf2763f6 improve aggregation
continuous-integration/drone/push Build is passing
2026-01-23 12:19:28 +11:00
nathan 25564efa54 more accurate resource pool data in aggregation reports
continuous-integration/drone/push Build is passing
2026-01-23 11:59:52 +11:00
nathan 871d7c2024 more logging
continuous-integration/drone/push Build is passing
2026-01-23 11:02:30 +11:00
nathan 3671860b7d another fix to aggregation reports
continuous-integration/drone/push Build is passing
2026-01-23 10:11:14 +11:00
nathan 3e2d95d3b9 fix aggregation logic
continuous-integration/drone/push Build is passing
2026-01-23 09:38:08 +11:00
nathan 8a3481b966 fix creationtime in aggregations
continuous-integration/drone/push Build is passing
2026-01-23 07:29:59 +11:00
nathan 13adc159a2 more accurate deletion times in aggregations
continuous-integration/drone/push Build is passing
2026-01-22 20:50:29 +11:00
nathan c8f04efd51 add more documentation
continuous-integration/drone/push Build is passing
2026-01-22 20:30:02 +11:00
Nathan Coad 68ee2838e4 fix deletiontime from event
continuous-integration/drone/push Build is passing
2026-01-22 15:13:40 +11:00
Nathan Coad b0592a2539 fix daily aggregation sample count
continuous-integration/drone/push Build is passing
2026-01-22 14:27:27 +11:00
Nathan Coad baea0cc85c update aggregation calculations
continuous-integration/drone/push Build is passing
2026-01-22 13:30:53 +11:00
Nathan Coad ceadf42048 update godoc
continuous-integration/drone/push Build is passing
2026-01-22 12:52:28 +11:00
Nathan Coad 374d4921e1 update aggregation jobs
continuous-integration/drone/push Build is passing
2026-01-22 12:04:41 +11:00
Nathan Coad 7dc8f598c3 more logging in daily aggregation
continuous-integration/drone/push Build is passing
2026-01-22 10:50:03 +11:00
Nathan Coad 148df38219 fix daily aggregation
continuous-integration/drone/push Build is passing
2026-01-22 10:20:18 +11:00
nathan 0a2c529111 code refactor
continuous-integration/drone/push Build is passing
2026-01-21 14:40:37 +11:00
nathan 3cdf368bc4 re-apply minimum snapshot interval
continuous-integration/drone/push Build is passing
2026-01-21 14:17:40 +11:00
nathan 32d4a352dc reduced the places where we probe hourly tables
continuous-integration/drone/push Build is passing
2026-01-21 11:44:13 +11:00
nathan b77f8671da improve concurrency handling for inventory job
continuous-integration/drone/push Build encountered an error
2026-01-21 11:21:51 +11:00
nathan 715b293894 [CI SKIP] add cache for docker hub images 2026-01-21 10:55:13 +11:00
nathan 2483091861 improve logging and concurrent vcenter inventory
continuous-integration/drone/push Build is passing
2026-01-21 10:25:04 +11:00
nathan 00805513c9 fix new-vm detection interval
continuous-integration/drone/push Build is passing
2026-01-21 09:36:19 +11:00
nathan fd9cc185ce code re-org and bugfix hanging hourly snapshot
continuous-integration/drone/push Build is passing
2026-01-21 09:12:25 +11:00
nathan c7c7fd3dc9 code cleanup
continuous-integration/drone/push Build is passing
2026-01-21 08:45:46 +11:00
nathan d683d23bfc use 0 instead of start of aggregation window for creationtime in xlsx
continuous-integration/drone/push Build is passing
2026-01-20 20:02:33 +11:00
nathan c8bb30c788 better handle skipped inventories
continuous-integration/drone/push Build is passing
2026-01-20 17:18:43 +11:00
nathan 7ea02be91a refactor code and improve daily cache handling of deleted VMs
continuous-integration/drone/push Build is passing
2026-01-20 16:46:07 +11:00
nathan 0517ef88c3 [CI SKIP] bugfixes for vm deletion tracking 2026-01-20 16:33:31 +11:00
nathan a9e522cc84 improve scheduler
continuous-integration/drone/push Build is passing
2026-01-19 14:04:01 +11:00
nathan e186644db7 add repair functionality
continuous-integration/drone/push Build is passing
2026-01-17 12:51:11 +11:00
nathan 22fa250a43 bugfixes for monthly aggregation
continuous-integration/drone/push Build is passing
2026-01-17 08:48:18 +11:00
nathan 1874b2c621 ensure we logout, fix aggregations
continuous-integration/drone/push Build is passing
2026-01-16 20:29:40 +11:00
nathan a12fe5cad0 bugfixes
continuous-integration/drone/push Build is passing
2026-01-16 17:53:24 +11:00
nathan 1cd1046433 progress on go based aggregation
continuous-integration/drone/push Build is passing
2026-01-16 17:37:55 +11:00
nathan 6af49471b2 Merge branch 'main' of https://git.coadcorp.com/nathan/vctp2 into dev
continuous-integration/drone/push Build is passing
2026-01-16 16:56:11 +11:00
nathan 7b7afbf1d5 start work on dev branch [CI SKIP] 2026-01-16 16:28:19 +11:00
126 changed files with 23557 additions and 4346 deletions
+10 -10
View File
@@ -4,7 +4,7 @@ name: default
steps:
- name: restore-cache-with-filesystem
image: meltwater/drone-cache
image: cache.coadcorp.com/meltwater/drone-cache
pull: true
settings:
backend: "filesystem"
@@ -23,7 +23,7 @@ steps:
path: /go
- name: build
image: golang
image: cache.coadcorp.com/library/golang
environment:
CGO_ENABLED: 0
GOMODCACHE: '/drone/src/pkg.mod'
@@ -34,15 +34,15 @@ steps:
path: /shared
commands:
- export PATH=/drone/src/pkg.tools:$PATH
- go install github.com/a-h/templ/cmd/templ@latest
- go install github.com/sqlc-dev/sqlc/cmd/sqlc@latest
- go install github.com/swaggo/swag/cmd/swag@latest
- go install github.com/a-h/templ/cmd/templ@v0.3.1001
- go install github.com/sqlc-dev/sqlc/cmd/sqlc@v1.30.0
- go install github.com/swaggo/swag/cmd/swag@v1.16.6
# - go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
- sqlc generate
- templ generate -path ./components
- swag init --exclude "pkg.mod,pkg.build,pkg.tools" -o server/router/docs
- chmod +x ./scripts/*.sh
- ./scripts/update-swagger-ui.sh
- ./scripts/update-swagger-ui.sh v5.32.4
- ./scripts/drone.sh
- cp ./build/vctp-linux-amd64 /shared/
@@ -55,12 +55,12 @@ steps:
path: /shared
commands:
- cp /shared/vctp-linux-amd64 ./build/vctp-linux-amd64
#- find .
- nfpm package --config vctp.yml --packager rpm --target ./build/
- ls -lah .
- nfpm package --config vctp-service.yml --packager rpm --target ./build/
- ls -lah ./build/
- name: dell-sftp-deploy
image: hypervtechnics/drone-sftp
image: cache.coadcorp.com/hypervtechnics/drone-sftp
settings:
host: deft.dell.com
username:
@@ -76,7 +76,7 @@ steps:
verbose: true
- name: rebuild-cache-with-filesystem
image: meltwater/drone-cache
image: cache.coadcorp.com/meltwater/drone-cache
pull: true
#when:
# event:
+2
View File
@@ -72,3 +72,5 @@ Network Trash Folder
Temporary Items
.apdisk
#/db/queries/*.go
.gocache/
+7 -4
View File
@@ -1,16 +1,19 @@
## Build
FROM golang:1.22-alpine AS build
FROM golang:1.26.0-alpine AS build
ARG VERSION='dev'
ARG TAILWIND_VERSION='v3.4.17'
ARG TEMPL_VERSION='v0.3.1001'
ARG SQLC_VERSION='v1.29.0'
RUN apk update && apk add --no-cache curl
RUN curl -sLO https://github.com/tailwindlabs/tailwindcss/releases/latest/download/tailwindcss-linux-x64 \
RUN curl -fsSLo tailwindcss-linux-x64 https://github.com/tailwindlabs/tailwindcss/releases/download/${TAILWIND_VERSION}/tailwindcss-linux-x64 \
&& chmod +x tailwindcss-linux-x64 \
&& mv tailwindcss-linux-x64 /usr/local/bin/tailwindcss
RUN go install github.com/a-h/templ/cmd/templ@v0.2.663 \
&& go install github.com/sqlc-dev/sqlc/cmd/sqlc@latest
RUN go install github.com/a-h/templ/cmd/templ@${TEMPL_VERSION} \
&& go install github.com/sqlc-dev/sqlc/cmd/sqlc@${SQLC_VERSION}
WORKDIR /app
+366 -16
View File
@@ -3,23 +3,71 @@ vCTP is a vSphere Chargeback Tracking Platform, designed for a specific customer
## Snapshots and Reports
- Hourly snapshots capture inventory per vCenter (concurrency via `hourly_snapshot_concurrency`).
- Daily summaries aggregate the hourly snapshots for the day; monthly summaries aggregate daily summaries for the month.
- Daily summaries aggregate the hourly snapshots for the day; monthly summaries aggregate daily summaries for the month (or hourly snapshots if configured).
- Snapshots are registered in `snapshot_registry` so regeneration via `/api/snapshots/aggregate` can locate the correct tables (fallback scanning is also supported).
- vCenter totals pages now provide two views:
- Daily Aggregated (`/vcenters/totals/daily`) for fast long-range trends.
- Hourly Detail 45d (`/vcenters/totals/hourly`) for recent granular change tracking.
- vCenter totals performance is accelerated with compact cache tables:
- `vcenter_latest_totals` (one latest row per vCenter)
- `vcenter_aggregate_totals` (hourly/daily/monthly per-vCenter totals by snapshot time)
- VM Trace now supports two modes on `/vm/trace`:
- `view=hourly` (default) for full snapshot detail
- `view=daily` for daily aggregated trend lines (using `vm_daily_rollup` when available)
- Reports (XLSX with totals/charts) are generated automatically after hourly, daily, and monthly jobs and written to a reports directory.
- Hourly totals in reports are interval-based: each row represents `[HH:00, HH+1:00)` and uses the first snapshot at or after the hour end (including cross-day snapshots) to prorate VM presence by creation/deletion overlap.
- Monthly aggregation reports include a Daily Totals sheet with full-day interval labels (`YYYY-MM-DD to YYYY-MM-DD`) and prorated totals derived from daily summaries.
- Prometheus metrics are exposed at `/metrics`:
- Snapshots/aggregations: `vctp_hourly_snapshots_total`, `vctp_hourly_snapshots_failed_total`, `vctp_hourly_snapshot_last_unix`, `vctp_hourly_snapshot_last_rows`, `vctp_daily_aggregations_total`, `vctp_daily_aggregations_failed_total`, `vctp_daily_aggregation_duration_seconds`, `vctp_monthly_aggregations_total`, `vctp_monthly_aggregations_failed_total`, `vctp_monthly_aggregation_duration_seconds`, `vctp_reports_available`
- vCenter health/perf: `vctp_vcenter_connect_failures_total{vcenter}`, `vctp_vcenter_snapshot_duration_seconds{vcenter}`, `vctp_vcenter_inventory_size{vcenter}`
## Prorating and Aggregation Logic
Daily aggregation runs per VM using sample counts for the day:
- `SamplesPresent`: count of snapshot samples in which the VM appears.
- `TotalSamples`: count of unique snapshot timestamps for the vCenter in the day.
- `AvgIsPresent`: `SamplesPresent / TotalSamples` (0 when `TotalSamples` is 0).
- `AvgVcpuCount`, `AvgRamGB`, `AvgProvisionedDisk` (daily): `sum(values_per_sample) / TotalSamples` to time-weight config changes and prorate partial-day VMs.
- `PoolTinPct`, `PoolBronzePct`, `PoolSilverPct`, `PoolGoldPct` (daily): `(pool_hits / SamplesPresent) * 100`, so pool percentages reflect only the time the VM existed.
- `CreationTime`: only set when vCenter provides it; otherwise it remains `0`.
Monthly aggregation builds on daily summaries (or the daily rollup cache):
- For each VM, daily averages are converted to weighted sums: `daily_avg * daily_total_samples`.
- Monthly averages are `sum(weighted_sums) / monthly_total_samples` (per vCenter).
- Pool percentages are weighted the same way: `(daily_pool_pct / 100) * daily_total_samples`, summed, then divided by `monthly_total_samples` and multiplied by 100.
### Hourly Snapshot Fields
Each hourly snapshot row tracks:
- Identity: `InventoryId`, `Name`, `Vcenter`, `VmId`, `VmUuid`, `EventKey`, `CloudId`
- Lifecycle/timing: `CreationTime`, `DeletionTime`, `SnapshotTime`
- Placement: `ResourcePool`, `Datacenter`, `Cluster`, `Folder`
- Sizing/state: `ProvisionedDisk`, `VcpuCount`, `RamGB`, `IsTemplate`, `PoweredOn`, `SrmPlaceholder`
### Daily Aggregate Fields
Daily summary rows retain identity/placement/sizing fields and add:
- Sample coverage: `SamplesPresent`, `TotalSamples`, `AvgIsPresent`
- Time-weighted sizing: `AvgVcpuCount`, `AvgRamGB`, `AvgProvisionedDisk`
- Pool distribution percentages: `PoolTinPct`, `PoolBronzePct`, `PoolSilverPct`, `PoolGoldPct`
- Chargeback totals columns: `Tin`, `Bronze`, `Silver`, `Gold`
- Lifecycle carry-forward used by reports and trace: `CreationTime`, `DeletionTime`, `SnapshotTime`
### Monthly Aggregate Fields
Monthly summary rows keep the same aggregate fields as daily summaries and recompute them over the month:
- `SamplesPresent` is summed across days.
- Monthly averages (`AvgVcpuCount`, `AvgRamGB`, `AvgProvisionedDisk`) are weighted by each day's sample volume.
- Monthly presence (`AvgIsPresent`) is normalized by monthly total samples.
- Monthly pool percentages (`PoolTinPct`, `PoolBronzePct`, `PoolSilverPct`, `PoolGoldPct`) are weighted by each day's sample volume before normalization.
- `Tin`, `Bronze`, `Silver`, `Gold` totals remain available for reporting output.
## RPM Layout (summary)
The RPM installs the service and defaults under `/usr/bin`, config under `/etc/dtms`, and data under `/var/lib/vctp`:
- Binary: `/usr/bin/vctp-linux-amd64`
- Systemd unit: `/etc/systemd/system/vctp.service`
- Defaults/env: `/etc/dtms/vctp.yml` (override with `-settings`), `/etc/default/vctp` (environment)
- Defaults/config: `/etc/dtms/vctp.yml` (override with `-settings`), `/etc/default/vctp` (optional env flags)
- TLS cert/key: `/etc/dtms/vctp.crt` and `/etc/dtms/vctp.key` (generated if absent)
- Data: SQLite DB and reports default to `/var/lib/vctp` (reports under `/var/lib/vctp/reports`)
- Scripts: preinstall/postinstall handle directory creation and permissions.
## Settings File
# Settings File
Configuration now lives in the YAML settings file. By default the service reads
`/etc/dtms/vctp.yml`, or you can override it with the `-settings` flag.
@@ -27,34 +75,283 @@ Configuration now lives in the YAML settings file. By default the service reads
vctp -settings /path/to/vctp.yml
```
### Database Configuration
By default the app uses SQLite and creates/opens `db.sqlite3`. You can opt into PostgreSQL
by updating the settings file:
If you just want to run a single inventory snapshot across all configured vCenters and
exit (no scheduler/server), use:
- `settings.database_driver`: `sqlite` (default) or `postgres`
```shell
vctp -settings /path/to/vctp.yml -run-inventory
```
If you want a one-time SQLite cleanup to drop low-value hourly snapshot indexes and exit,
use:
```shell
vctp -settings /path/to/vctp.yml -db-cleanup
```
If you want a one-time cache backfill for the vCenter totals cache tables
(`vcenter_latest_totals` and `vcenter_aggregate_totals`) and exit, use:
```shell
vctp -settings /path/to/vctp.yml -backfill-vcenter-cache
```
The backfill command:
- Ensures/migrates `snapshot_registry` when needed.
- Rebuilds hourly/latest vCenter totals caches.
- Recomputes daily/monthly rows for `vcenter_aggregate_totals` from registered summary snapshots.
If you want a one-time SQLite-to-Postgres import and exit, use:
```shell
vctp -settings /path/to/vctp.yml -import-sqlite /path/to/legacy.sqlite3
```
The import command:
- Requires `settings.database_driver: postgres`.
- Copies data from the SQLite source into matching Postgres tables.
- Auto-creates runtime tables (hourly/daily/monthly snapshot tables and cache tables) when needed.
- Replaces existing data in imported Postgres tables during the run.
If you want a one-time canonical aggregation benchmark (Go vs SQL cores) and exit, use:
```shell
vctp -settings /path/to/vctp.yml -benchmark-aggregations -benchmark-runs 3
```
The benchmark command:
- Uses canonical cache sources (`vm_hourly_stats` for daily, `vm_daily_rollup` for monthly).
- Runs Go and SQL aggregation cores for the latest available daily/monthly windows.
- Writes results to startup logs and exits without changing scheduled defaults.
## Database Configuration
By default the app uses SQLite and creates/opens `db.sqlite3`.
PostgreSQL support is currently **experimental** and not a production target. To enable it,
set `settings.enable_experimental_postgres: true` in the settings file:
- `settings.database_driver`: `sqlite` (default) or `postgres` (experimental)
- `settings.database_url`: SQLite file path/DSN or PostgreSQL DSN
Examples:
```yaml
settings:
database_driver: sqlite
enable_experimental_postgres: false
database_url: ./db.sqlite3
settings:
database_driver: postgres
enable_experimental_postgres: true
database_url: postgres://user:pass@localhost:5432/vctp?sslmode=disable
```
### Initial PostgreSQL Setup
Create a dedicated PostgreSQL role and database (run as a PostgreSQL superuser):
```sql
CREATE ROLE vctp_user LOGIN PASSWORD 'change-this-password';
CREATE DATABASE vctp OWNER vctp_user;
```
Connect to the new database and grant privileges required for migrations and runtime table/index management:
```sql
\c vctp
ALTER DATABASE vctp OWNER TO vctp_user;
ALTER SCHEMA public OWNER TO vctp_user;
GRANT CONNECT, TEMP ON DATABASE vctp TO vctp_user;
GRANT USAGE, CREATE ON SCHEMA public TO vctp_user;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO vctp_user;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO vctp_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO vctp_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO vctp_user;
```
Verify effective schema privileges (useful if migrations fail creating `goose_db_version`):
```sql
SELECT has_schema_privilege('vctp_user', 'public', 'USAGE,CREATE');
```
Recommended auth/network configuration:
- Ensure PostgreSQL is listening on the expected interface/port in `postgresql.conf` (for example, `listen_addresses` and `port`).
- Allow vCTP connections in `pg_hba.conf`. Example entries:
```conf
# local socket
local vctp vctp_user scram-sha-256
# TCP from application subnet
host vctp vctp_user 10.0.0.0/24 scram-sha-256
```
- Reload/restart PostgreSQL after config changes (`SELECT pg_reload_conf();` or your service manager).
- Ensure host firewall/network ACLs allow traffic to PostgreSQL (default `5432`).
Example `vctp.yml` database settings:
```yaml
settings:
database_driver: postgres
enable_experimental_postgres: true
database_url: postgres://vctp_user:change-this-password@db-hostname:5432/vctp?sslmode=disable
```
Validate connectivity before starting vCTP:
```shell
psql "postgres://vctp_user:change-this-password@db-hostname:5432/vctp?sslmode=disable"
```
PostgreSQL migrations live in `db/migrations_postgres`, while SQLite migrations remain in
`db/migrations`.
### Snapshot Retention
## Snapshot Retention
Hourly and daily snapshot table retention can be configured in the settings file:
- `settings.hourly_snapshot_max_age_days` (default: 60)
- `settings.daily_snapshot_max_age_months` (default: 12)
### Settings Reference
## Runtime Environment Flags
These optional flags are read from the process environment (for example via `/etc/default/vctp`):
- `DAILY_AGG_GO`: set to `1` (default in `src/vctp.default`) to force Go for manual daily runs.
- `DAILY_AGG_SQL`: set to `1` to force legacy SQL fallback for manual daily runs.
- `MONTHLY_AGG_GO`: set to `1` (default in `src/vctp.default`) to force Go for manual monthly runs.
- `MONTHLY_AGG_SQL`: set to `1` to force legacy SQL fallback for manual monthly runs.
Scheduled aggregation engine selection is controlled by YAML (`settings.scheduled_aggregation_engine`), not these env vars.
## Authentication and Authorization
Authentication uses LDAP bind + JWT bearer tokens.
Login flow:
1. Call `POST /api/auth/login` with JSON body:
```json
{ "username": "your-user", "password": "your-password" }
```
2. On success, use returned `access_token` as:
```http
Authorization: Bearer <access_token>
```
3. Optional whoami/debug check: call `GET /api/auth/me` with the bearer token to view current JWT identity/role claims.
Auth audit logging:
- vCTP emits structured `auth_audit` log events for login decisions, token validation denials, and role authorization denials.
- Logs include request metadata and decision reason, but do not log credentials or raw bearer tokens.
Auth modes:
- `settings.auth_mode: disabled`: middleware bypassed.
- `settings.auth_mode: optional`: protected endpoints accept missing token, but validate any provided token.
- `settings.auth_mode: required`: protected endpoints require a valid bearer token.
Role policy:
- `viewer`: read/report APIs (for example `/api/report/*`, `/api/diagnostics/daily-creation`).
- `admin`: mutating/admin APIs (for example `/api/snapshots/*` mutating endpoints, `/api/event/*`, `/api/import/vm`, `/api/encrypt`, `/api/vcenters/cache/rebuild`).
- `admin` implies `viewer` access.
### LDAP group configuration (`auth_group_role_mappings` and `ldap_groups`)
Use full LDAP group DNs for both settings (for example `CN=vctp-admins,OU=Groups,DC=example,DC=com`).
- `settings.auth_group_role_mappings` is required when `settings.auth_enabled: true`.
- Mapping values must be `viewer` or `admin`.
- A user must resolve to at least one mapped role to log in.
- `settings.ldap_groups` is optional and acts as an additional allowlist gate.
- If `settings.ldap_groups` is empty/omitted, allowlist checking is skipped, but mapped-role resolution is still required.
- DN comparisons are normalized (trimmed + case-insensitive), but using exact directory DNs is still recommended.
Example (common setup where viewer/admin groups are both mapped and allowlisted):
```yaml
settings:
auth_enabled: true
auth_mode: required
ldap_bind_address: ldaps://ad01.example.com:636
ldap_base_dn: DC=example,DC=com
auth_group_role_mappings:
"CN=vctp-viewers,OU=Groups,DC=example,DC=com": viewer
"CN=vctp-admins,OU=Groups,DC=example,DC=com": admin
ldap_groups:
- "CN=vctp-viewers,OU=Groups,DC=example,DC=com"
- "CN=vctp-admins,OU=Groups,DC=example,DC=com"
```
Example (`ldap_groups` omitted, only role mapping enforced):
```yaml
settings:
auth_enabled: true
auth_mode: required
auth_group_role_mappings:
"CN=vctp-viewers,OU=Groups,DC=example,DC=com": viewer
"CN=vctp-admins,OU=Groups,DC=example,DC=com": admin
```
Example (`ldap_groups` can be broader, but users still need at least one mapped role):
```yaml
settings:
auth_enabled: true
auth_mode: required
auth_group_role_mappings:
"CN=vctp-viewers,OU=Groups,DC=example,DC=com": viewer
"CN=vctp-admins,OU=Groups,DC=example,DC=com": admin
ldap_groups:
- "CN=vctp-viewers,OU=Groups,DC=example,DC=com"
- "CN=vctp-admins,OU=Groups,DC=example,DC=com"
- "CN=platform-operators,OU=Groups,DC=example,DC=com"
```
Tip: after a successful login, call `GET /api/auth/me` and inspect the returned `groups` claim to copy exact group DN values from your directory.
Public endpoints:
- UI pages (`/`, `/vcenters`, `/snapshots/*`, `/vm/trace`)
- Swagger UI/docs (`/swagger`, `/swagger/`, `/swagger.json`)
- Metrics (`/metrics`)
- Login (`/api/auth/login`)
Debug endpoints:
- `/debug/pprof/*` handlers are only registered when `settings.enable_pprof: true`.
- When enabled, they require an authenticated `admin` token.
## Airgapped Static Assets
vCTP is safe for airgapped operation without internet/CDN dependencies for UI/docs assets:
- CSS, JS, and favicon assets are bundled into the binary via Go `embed` and served from local routes (`/assets/*`, `/favicon*`).
- Swagger UI is vendored under `server/router/swagger-ui-dist` and served locally from `/swagger/*`.
- Swagger spec is served locally from `/swagger.json` (`validatorUrl` is disabled in the initializer).
- Static responses include cache headers. In release builds, versioned assets are served with long-lived cache headers and immutable caching.
This means runtime access to external asset hosts is not required.
## Credential Encryption Lifecycle
At startup, vCTP resolves `settings.vcenter_password` using this order:
1. If value starts with `enc:v1:`, decrypt using the active key.
2. If no prefix, attempt legacy ciphertext decryption (active key, then legacy fallback keys).
3. If decrypt fails and value length is greater than 2, treat value as plaintext.
When steps 2 or 3 succeed, vCTP rewrites the setting in-place to `enc:v1:<ciphertext>`.
Behavior notes:
- Plaintext values with length `<= 2` are rejected.
- Malformed ciphertext is rejected safely (short payloads do not panic).
- Legacy encrypted values can still be migrated forward automatically.
## Deprecated API Endpoints
These endpoints are considered legacy and are disabled by default unless `settings.enable_legacy_api: true`:
- `/api/event/vm/create`
- `/api/event/vm/modify`
- `/api/event/vm/move`
- `/api/event/vm/delete`
- `/api/cleanup/updates`
- `/api/cleanup/vcenter`
When disabled, they return HTTP `410 Gone` with JSON error payload.
## Settings Reference
All configuration lives under the top-level `settings:` key in `vctp.yml`.
General:
@@ -62,34 +359,74 @@ General:
- `settings.log_output`: log format, `text` or `json`
Database:
- `settings.database_driver`: `sqlite` or `postgres` (experimental)
- `settings.enable_experimental_postgres`: set `true` to allow PostgreSQL startup
- `settings.database_url`: SQLite file path/DSN or PostgreSQL DSN
HTTP/TLS:
- `settings.bind_ip`: IP address to bind the HTTP server
- `settings.bind_port`: TCP port to bind the HTTP server
- `settings.bind_port` below `1024` (for example `443`) requires privileged bind permissions.
The packaged systemd unit grants `CAP_NET_BIND_SERVICE` to the `vctp` user; if you run
vCTP outside that unit, grant equivalent capability or use a non-privileged port.
- `settings.bind_disable_tls`: `true` to serve plain HTTP (no TLS)
- `settings.tls_cert_filename`: PEM certificate path (TLS mode)
- `settings.tls_key_filename`: PEM private key path (TLS mode)
Authentication:
- `settings.auth_enabled`: enables LDAP/JWT auth components.
- `settings.auth_mode`: `disabled`, `optional`, or `required`.
- `settings.auth_jwt_signing_key`: base64 signing key for JWTs.
- RPM postinstall auto-generates and writes this key to `/etc/dtms/vctp.yml` if it is missing/empty.
- `settings.auth_token_lifespan_minutes`: JWT access token lifetime.
- `settings.auth_jwt_issuer`: expected JWT issuer.
- `settings.auth_jwt_audience`: expected JWT audience.
- `settings.auth_clock_skew_seconds`: allowed clock skew for token validation.
- `settings.auth_group_role_mappings`: map of LDAP group DN -> role (`viewer` or `admin`).
- `settings.ldap_groups`: optional allowlist of LDAP group DNs required for login.
- `settings.auth_group_role_mappings` must be non-empty when `settings.auth_enabled: true`.
- A user must belong to at least one mapped group to receive any role and log in.
- `settings.ldap_groups` empty/omitted means no allowlist filter, but mapped-role requirement still applies.
- `settings.ldap_bind_address`: LDAP/LDAPS URL used for authentication.
- `settings.ldap_base_dn`: LDAP base DN for user/group lookups.
- `settings.ldap_trust_cert_file`: optional CA cert file for LDAP TLS.
- `settings.ldap_disable_validation`: disables LDAP TLS cert validation.
- `settings.ldap_insecure`: insecure LDAP TLS mode.
- `settings.enable_pprof`: enables `/debug/pprof/*` routes (still admin-gated).
vCenter:
- `settings.encryption_key`: optional explicit key source for credential encryption/decryption.
If unset, vCTP derives a host key from hardware/host identity.
- `settings.vcenter_username`: vCenter username
- `settings.vcenter_password`: vCenter password (auto-encrypted on startup if plaintext length > 2)
- `settings.vcenter_insecure`: `true` to skip TLS verification
- `settings.enable_legacy_api`: set `true` to temporarily re-enable deprecated legacy endpoints
- `settings.vcenter_event_polling_seconds`: deprecated and ignored
- `settings.vcenter_inventory_polling_seconds`: deprecated and ignored
- `settings.vcenter_inventory_snapshot_seconds`: hourly snapshot cadence (seconds)
- `settings.vcenter_inventory_aggregate_seconds`: daily aggregation cadence (seconds)
- `settings.vcenter_addresses`: list of vCenter SDK URLs to monitor
Credential encryption:
- New encrypted values are written with `enc:v1:` prefix.
Snapshots:
- `settings.hourly_snapshot_concurrency`: max concurrent vCenter snapshots (0 = unlimited)
- `settings.hourly_snapshot_max_age_days`: retention for hourly tables
- `settings.daily_snapshot_max_age_months`: retention for daily tables
- `settings.hourly_index_max_age_days`: age gate for keeping per-hourly-table indexes (`-1` disables cleanup, `0` trims all)
- `settings.snapshot_cleanup_cron`: cron expression for cleanup job
- `settings.reports_dir`: directory to store generated XLSX reports (default: `/var/lib/vctp/reports`)
- `settings.report_summary_pivots`: optional list to override Summary worksheet pivot titles/names/ranges in daily/monthly XLSX reports
- `metric`: one of `avg_vcpu`, `avg_ram`, `prorated_vm_count`, `vm_name_count`
- `title`: pivot title text shown on Summary sheet
- `pivot_name`: internal pivot table name in the XLSX workbook
- `pivot_range`: target range (for example `Summary!A3:H40` or `A3:H40`)
- `title_cell` (optional): explicit title cell; if omitted, derived from `pivot_range`
- `settings.hourly_snapshot_retry_seconds`: interval for retrying failed hourly snapshots (default: 300 seconds)
- `settings.hourly_snapshot_max_retries`: maximum retry attempts per vCenter snapshot (default: 3)
- `settings.postgres_vm_hourly_partitioning_enabled`: Postgres-only toggle to migrate/manage `vm_hourly_stats` as monthly range partitions (default: `false`)
- `settings.scheduled_aggregation_engine`: scheduled daily/monthly engine (`go` default, `sql` for canonical SQL rollout)
Filters/chargeback:
- `settings.tenants_to_filter`: list of tenant name patterns to exclude
@@ -101,9 +438,9 @@ Filters/chargeback:
## Pre-requisite tools
```shell
go install github.com/a-h/templ/cmd/templ@v0.3.977
go install github.com/sqlc-dev/sqlc/cmd/sqlc@v1.29.0
go install github.com/swaggo/swag/cmd/swag@v1.16.6
```
## Database
@@ -126,6 +463,19 @@ Run `templ generate -path ./components` to generate code based on template files
## Documentation
Run `swag init --exclude "pkg.mod,pkg.build,pkg.tools" -o server/router/docs`
## Tests
Run the test suite:
```shell
go test ./...
```
Recommended static analysis:
```shell
go vet ./...
```
## CI/CD (Drone)
- `.drone.yml` defines a Docker pipeline:
- Restore/build caches for Go modules/tools.
+1
View File
@@ -0,0 +1 @@
*.go
+6 -6
View File
@@ -1,9 +1,9 @@
package core
templ Footer() {
<footer class="fixed p-1 bottom-0 bg-gray-100 w-full border-t">
<div class="rounded-lg p-4 text-xs italic text-gray-700 text-center">
&copy; Nathan Coad (nathan.coad@dell.com)
</div>
</footer>
}
<footer class="web2-footer" role="contentinfo">
<div class="web2-footer-inner">
&copy; Nathan Coad (nathan.coad@dell.com)
</div>
</footer>
}
+2 -2
View File
@@ -1,6 +1,6 @@
// Code generated by templ - DO NOT EDIT.
// templ: version: v0.3.977
// templ: version: v0.3.1001
package core
//lint:file-ignore SA4006 This context is only used if a nested component is present.
@@ -29,7 +29,7 @@ func Footer() templ.Component {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<footer class=\"fixed p-1 bottom-0 bg-gray-100 w-full border-t\"><div class=\"rounded-lg p-4 text-xs italic text-gray-700 text-center\">&copy; Nathan Coad (nathan.coad@dell.com)</div></footer>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<footer class=\"web2-footer\" role=\"contentinfo\"><div class=\"web2-footer-inner\">&copy; Nathan Coad (nathan.coad@dell.com)</div></footer>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
+8 -5
View File
@@ -6,13 +6,16 @@ templ Header() {
<head>
<meta charset="UTF-8"/>
<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
<meta name="description" content="vCTP API endpoint"/>
<meta name="description" content="vCTP dashboard and API endpoint"/>
<meta name="color-scheme" content="light"/>
<meta name="theme-color" content="#195fc8"/>
<title>vCTP API</title>
<link rel="icon" href="/favicon.ico"/>
<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png"/>
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png"/>
<link rel="icon" href={ "/favicon.ico?v=" + version.Value }/>
<link rel="icon" type="image/png" sizes="16x16" href={ "/favicon-16x16.png?v=" + version.Value }/>
<link rel="icon" type="image/png" sizes="32x32" href={ "/favicon-32x32.png?v=" + version.Value }/>
<script src="/assets/js/htmx@v2.0.2.min.js"></script>
<script src={ "/assets/js/web3-charts.js?v=" + version.Value }></script>
<link href={ "/assets/css/output@" + version.Value + ".css" } rel="stylesheet"/>
<link href="/assets/css/web3.css" rel="stylesheet"/>
<link href={ "/assets/css/web3.css?v=" + version.Value } rel="stylesheet"/>
</head>
}
+70 -5
View File
@@ -1,6 +1,6 @@
// Code generated by templ - DO NOT EDIT.
// templ: version: v0.3.977
// templ: version: v0.3.1001
package core
//lint:file-ignore SA4006 This context is only used if a nested component is present.
@@ -31,20 +31,85 @@ func Header() templ.Component {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<head><meta charset=\"UTF-8\"><meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"><meta name=\"description\" content=\"vCTP API endpoint\"><title>vCTP API</title><link rel=\"icon\" href=\"/favicon.ico\"><link rel=\"icon\" type=\"image/png\" sizes=\"16x16\" href=\"/favicon-16x16.png\"><link rel=\"icon\" type=\"image/png\" sizes=\"32x32\" href=\"/favicon-32x32.png\"><script src=\"/assets/js/htmx@v2.0.2.min.js\"></script><link href=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<head><meta charset=\"UTF-8\"><meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"><meta name=\"description\" content=\"vCTP dashboard and API endpoint\"><meta name=\"color-scheme\" content=\"light\"><meta name=\"theme-color\" content=\"#195fc8\"><title>vCTP API</title><link rel=\"icon\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var2 templ.SafeURL
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinURLErrs("/assets/css/output@" + version.Value + ".css")
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinURLErrs("/favicon.ico?v=" + version.Value)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `core/header.templ`, Line: 15, Col: 61}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `core/header.templ`, Line: 13, Col: 59}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "\" rel=\"stylesheet\"><link href=\"/assets/css/web3.css\" rel=\"stylesheet\"></head>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "\"><link rel=\"icon\" type=\"image/png\" sizes=\"16x16\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var3 templ.SafeURL
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinURLErrs("/favicon-16x16.png?v=" + version.Value)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `core/header.templ`, Line: 14, Col: 96}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "\"><link rel=\"icon\" type=\"image/png\" sizes=\"32x32\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var4 templ.SafeURL
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinURLErrs("/favicon-32x32.png?v=" + version.Value)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `core/header.templ`, Line: 15, Col: 96}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "\"><script src=\"/assets/js/htmx@v2.0.2.min.js\"></script><script src=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs("/assets/js/web3-charts.js?v=" + version.Value)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `core/header.templ`, Line: 17, Col: 62}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "\"></script><link href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 templ.SafeURL
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinURLErrs("/assets/css/output@" + version.Value + ".css")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `core/header.templ`, Line: 18, Col: 61}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "\" rel=\"stylesheet\"><link href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 templ.SafeURL
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinURLErrs("/assets/css/web3.css?v=" + version.Value)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `core/header.templ`, Line: 19, Col: 56}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "\" rel=\"stylesheet\"></head>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
+67
View File
@@ -0,0 +1,67 @@
package core
// ActionLink describes one hyperlink rendered in a page-header action row.
type ActionLink struct {
	Label string // visible link text
	Href  string // destination URL
	Class string // optional CSS class; empty selects the default "web2-button" style
}
// SegmentedLink describes one hyperlink rendered in a segmented button group.
type SegmentedLink struct {
	Label string // visible link text
	Href  string // destination URL
	Class string // optional CSS class; empty selects the default "web3-button" style
}
// actionLinkClass resolves the CSS class for an action link, falling back
// to the default "web2-button" style when no class was supplied.
func actionLinkClass(class string) string {
	if class != "" {
		return class
	}
	return "web2-button"
}
// segmentedLinkClass resolves the CSS class for a segmented-group link,
// falling back to the default "web3-button" style when no class was supplied.
func segmentedLinkClass(class string) string {
	if class != "" {
		return class
	}
	return "web3-button"
}
// PageHeader renders the standard page heading row: an optional pill label,
// the page title, an optional subtitle, and an optional set of action links.
// Links with an empty Class fall back to the default style via actionLinkClass.
templ PageHeader(pill string, title string, subtitle string, actions []ActionLink) {
	<div class="web2-page-head-row">
		<div class="web2-head-copy">
			if pill != "" {
				<div class="web2-pill">{ pill }</div>
			}
			<h1 class="web2-page-title">{ title }</h1>
			if subtitle != "" {
				<p class="web2-page-subtitle">{ subtitle }</p>
			}
		</div>
		if len(actions) > 0 {
			<div class="web2-actions">
				for _, action := range actions {
					<a class={ actionLinkClass(action.Class) } href={ action.Href }>{ action.Label }</a>
				}
			</div>
		}
	</div>
}
// SegmentedActions renders a segmented button group of links, or nothing at
// all when the actions slice is empty. Links with an empty Class fall back
// to the default style via segmentedLinkClass.
templ SegmentedActions(actions []SegmentedLink) {
	if len(actions) > 0 {
		<div class="web3-button-group">
			for _, action := range actions {
				<a class={ segmentedLinkClass(action.Class) } href={ action.Href }>{ action.Label }</a>
			}
		</div>
	}
}
// SectionHead renders a section title with an optional badge (for example a
// record count); the badge span is omitted when badge is empty.
templ SectionHead(title string, badge string) {
	<div class="web2-section-head">
		<h2 class="web2-section-title">{ title }</h2>
		if badge != "" {
			<span class="web2-badge">{ badge }</span>
		}
	</div>
}
+1
View File
@@ -0,0 +1 @@
*.go
+70 -19
View File
@@ -1,13 +1,22 @@
package views
import (
"vctp/components/core"
"strings"
"vctp/components/core"
)
type BuildInfo struct {
BuildTime string
SHA1Ver string
GoVersion string
GoVersion string
}
// truncateSHA trims surrounding whitespace from sha and shortens it to at
// most 14 characters with a "..." suffix so long commit hashes do not
// overflow the KPI card. Short values are returned unchanged.
//
// Truncation is performed on rune boundaries: the original byte-index slice
// could cut a multi-byte UTF-8 sequence in half. For the expected ASCII hex
// SHA input the behavior is identical to byte slicing.
func truncateSHA(sha string) string {
	trimmed := strings.TrimSpace(sha)
	if len(trimmed) <= 14 {
		// Fewer than 15 bytes implies fewer than 15 runes: nothing to cut.
		return trimmed
	}
	runes := []rune(trimmed)
	if len(runes) <= 14 {
		// Multi-byte runes inflated the byte count; string is short enough.
		return trimmed
	}
	return string(runes[:14]) + "..."
}
templ Index(info BuildInfo) {
@@ -15,37 +24,79 @@ templ Index(info BuildInfo) {
<html lang="en">
@core.Header()
<body class="flex flex-col min-h-screen web2-bg">
<main class="flex-grow web2-shell space-y-8">
<section class="web2-header">
<div class="flex flex-col gap-4 md:flex-row md:items-center md:justify-between">
<div>
<main class="flex-grow web2-shell web2-card-grid">
<section class="web2-header web2-page-head">
<div class="web2-page-head-row">
<div class="web2-head-copy">
<div class="web2-pill">vCTP Console</div>
<h1 class="mt-3 text-4xl font-bold">Chargeback Intelligence Dashboard</h1>
<p class="mt-2 text-sm text-slate-600">Point in time snapshots of consumption.</p>
<h1 class="web2-page-title">Chargeback Intelligence Dashboard</h1>
<p class="web2-page-subtitle">Point-in-time snapshots of vSphere consumption with LDAP and JWT-protected API access.</p>
</div>
<div class="web2-button-group">
<div class="web2-actions">
<a class="web2-button" href="/snapshots/hourly">Hourly Snapshots</a>
<a class="web2-button" href="/snapshots/daily">Daily Snapshots</a>
<a class="web2-button" href="/snapshots/monthly">Monthly Snapshots</a>
<a class="web2-button" href="/vm/trace">VM Trace</a>
<a class="web2-button" href="/vcenters">vCenters</a>
<a class="web2-button" href="/swagger/">Swagger UI</a>
<a class="web2-button secondary" href="/vcenters">vCenters</a>
<a class="web2-button secondary" href="/swagger/">Swagger UI</a>
</div>
</div>
<div class="web2-note">
When authentication is enabled, obtain a token from <code class="web2-code">POST /api/auth/login</code> and send it as <code class="web2-code">Authorization: Bearer &lt;token&gt;</code>. Role policy: <code class="web2-code">viewer</code> covers read/report APIs, <code class="web2-code">admin</code> covers mutating/admin APIs (and includes viewer). UI pages and <code class="web2-code">/metrics</code> remain public.
</div>
</section>
<section class="grid gap-6 md:grid-cols-3">
<section class="web2-kpi-grid">
<div class="web2-card">
<p class="text-xs uppercase tracking-[0.2em] text-slate-400">Build Time</p>
<p class="mt-3 text-xl font-semibold">{info.BuildTime}</p>
<p class="web2-kpi-label">Build Time</p>
<p class="web2-kpi-value">{ info.BuildTime }</p>
</div>
<div class="web2-card">
<p class="text-xs uppercase tracking-[0.2em] text-slate-400">SHA1 Version</p>
<p class="mt-3 text-xl font-semibold">{info.SHA1Ver}</p>
<p class="web2-kpi-label">SHA1 Version</p>
<p class="web2-kpi-value web2-kpi-value-mono web2-kpi-truncate" title={ info.SHA1Ver }>{ truncateSHA(info.SHA1Ver) }</p>
</div>
<div class="web2-card">
<p class="text-xs uppercase tracking-[0.2em] text-slate-400">Go Runtime</p>
<p class="mt-3 text-xl font-semibold">{info.GoVersion}</p>
<p class="web2-kpi-label">Go Runtime</p>
<p class="web2-kpi-value">{ info.GoVersion }</p>
</div>
</section>
<section class="web2-index-sections">
<div class="web2-card web2-card-overview web2-index-overview">
<h2 class="mb-2">Overview</h2>
<p class="web2-page-subtitle">
vCTP is a vSphere Chargeback Tracking Platform.
</p>
<p class="web2-page-subtitle">
Use fast vCenter totals views (Daily Aggregated and Hourly Detail 45d) and VM Trace views (Hourly Detail and Daily Aggregated) to move between long-range trends and granular timelines.
</p>
<p class="web2-page-subtitle">
Use <code class="web2-code">/api/auth/me</code> to inspect active claims and roles during integration and diagnostics.
</p>
</div>
<div class="web2-card web2-card-featured web2-index-featured">
<h2 class="mb-2">Snapshots and Reports</h2>
<div class="web2-paragraphs web2-page-subtitle">
<p>Hourly snapshots capture inventory per vCenter (concurrency via <code class="web2-code">hourly_snapshot_concurrency</code>), then daily and monthly summaries are derived from those snapshots.</p>
<p><strong>Hourly tracks:</strong> VM identity (<code class="web2-code">InventoryId</code>, <code class="web2-code">Name</code>, <code class="web2-code">VmId</code>, <code class="web2-code">VmUuid</code>, <code class="web2-code">Vcenter</code>, <code class="web2-code">EventKey</code>, <code class="web2-code">CloudId</code>), lifecycle (<code class="web2-code">CreationTime</code>, <code class="web2-code">DeletionTime</code>, <code class="web2-code">SnapshotTime</code>), placement (<code class="web2-code">Datacenter</code>, <code class="web2-code">Cluster</code>, <code class="web2-code">Folder</code>, <code class="web2-code">ResourcePool</code>), and sizing/state (<code class="web2-code">VcpuCount</code>, <code class="web2-code">RamGB</code>, <code class="web2-code">ProvisionedDisk</code>, <code class="web2-code">PoweredOn</code>, <code class="web2-code">IsTemplate</code>, <code class="web2-code">SrmPlaceholder</code>).</p>
<p><strong>Daily tracks:</strong> <code class="web2-code">SamplesPresent</code>, <code class="web2-code">TotalSamples</code>, <code class="web2-code">AvgIsPresent</code>, <code class="web2-code">AvgVcpuCount</code>, <code class="web2-code">AvgRamGB</code>, <code class="web2-code">AvgProvisionedDisk</code>, <code class="web2-code">PoolTinPct</code>, <code class="web2-code">PoolBronzePct</code>, <code class="web2-code">PoolSilverPct</code>, <code class="web2-code">PoolGoldPct</code>, plus chargeback totals columns <code class="web2-code">Tin</code>, <code class="web2-code">Bronze</code>, <code class="web2-code">Silver</code>, <code class="web2-code">Gold</code>.</p>
<p><strong>Monthly tracks:</strong> the same daily aggregate fields, with monthly values weighted by per-day sample volume so partial-day VMs and config changes stay proportional.</p>
<p>Snapshots are registered in <code class="web2-code">snapshot_registry</code> so regeneration via <code class="web2-code">/api/snapshots/aggregate</code> can locate the correct tables (fallback scanning is also supported).</p>
<p>vCenter totals pages are accelerated by compact cache tables: <code class="web2-code">vcenter_latest_totals</code> and <code class="web2-code">vcenter_aggregate_totals</code>.</p>
<p>VM Trace daily mode uses the <code class="web2-code">vm_daily_rollup</code> cache when available, and falls back to daily summary tables if needed.</p>
<p>Reports (XLSX with totals/charts) are generated automatically after hourly, daily, and monthly jobs and written to a reports directory.</p>
<p>Hourly totals are interval-based: each row represents <code class="web2-code">[HH:00, HH+1:00)</code> and uses the first snapshot at or after the hour end (including cross-day snapshots) to prorate VM presence.</p>
<p>Monthly aggregation reports include a Daily Totals sheet with full-day interval labels (YYYY-MM-DD to YYYY-MM-DD) and prorated totals.</p>
</div>
</div>
<div class="web2-card web2-index-wide">
<h2 class="mb-2">Prorating and Aggregation</h2>
<div class="web2-paragraphs web2-page-subtitle">
<p><code class="web2-code">SamplesPresent</code> is the count of snapshots in which the VM appears; <code class="web2-code">TotalSamples</code> is the count of unique snapshot times for that vCenter/day.</p>
<p><code class="web2-code">AvgIsPresent = SamplesPresent / TotalSamples</code> (0 when <code class="web2-code">TotalSamples</code> is 0).</p>
<p>Daily <code class="web2-code">AvgVcpuCount</code>, <code class="web2-code">AvgRamGB</code>, and <code class="web2-code">AvgProvisionedDisk</code> are per-sample sums divided by <code class="web2-code">TotalSamples</code> (time-weighted).</p>
<p>Daily pool percentages (<code class="web2-code">PoolTinPct</code>/<code class="web2-code">PoolBronzePct</code>/<code class="web2-code">PoolSilverPct</code>/<code class="web2-code">PoolGoldPct</code>) use pool-hit counts divided by <code class="web2-code">SamplesPresent</code>.</p>
<p>Monthly aggregation converts each day into weighted sums using sample volume, then recomputes monthly averages and pool percentages from those weighted totals.</p>
<p>CreationTime is only set when vCenter provides it; otherwise it remains 0.</p>
</div>
</div>
</section>
</main>
File diff suppressed because one or more lines are too long
+105 -144
View File
@@ -18,39 +18,24 @@ type VcenterLink struct {
}
type VcenterTotalsEntry struct {
Snapshot string
RawTime int64
VmCount int64
VcpuTotal int64
Snapshot string
RawTime int64
VmCount int64
VcpuTotal int64
RamTotalGB int64
}
type VcenterTotalsMeta struct {
ViewType string
TypeLabel string
HourlyLink string
DailyLink string
MonthlyLink string
HourlyClass string
DailyClass string
MonthlyClass string
ViewType string
TypeLabel string
HourlyLink string
DailyLink string
HourlyClass string
DailyClass string
}
type VcenterChartData struct {
PointsVm string
PointsVcpu string
PointsRam string
Width int
Height int
GridX []float64
GridY []float64
YTicks []ChartTick
XTicks []ChartTick
}
type ChartTick struct {
Pos float64
Label string
ConfigJSON string
}
templ SnapshotHourlyList(entries []SnapshotEntry) {
@@ -70,24 +55,24 @@ templ SnapshotListPage(title string, subtitle string, entries []SnapshotEntry) {
<html lang="en">
@core.Header()
<body class="flex flex-col min-h-screen web2-bg">
<main class="flex-grow web2-shell space-y-8">
<section class="web2-header">
<div class="flex flex-col gap-4 md:flex-row md:items-center md:justify-between">
<div>
<div class="web2-pill">Snapshot Library</div>
<h1 class="mt-3 text-4xl font-bold">{title}</h1>
<p class="mt-2 text-sm text-slate-600">{subtitle}</p>
</div>
<a class="web2-button" href="/">Back to Dashboard</a>
</div>
<main class="flex-grow web2-shell web2-card-grid">
<section class="web2-header web2-page-head">
@core.PageHeader(
"Snapshot Library",
title,
subtitle,
[]core.ActionLink{
{
Label: "Back to Dashboard",
Href: "/",
Class: "web2-button secondary",
},
},
)
</section>
<section class="web2-card">
<div class="flex items-center justify-between gap-3 mb-4 flex-wrap">
<h2 class="text-lg font-semibold">Available Exports</h2>
<span class="web2-badge">{len(entries)} files</span>
</div>
<div class="overflow-hidden border border-slate-200 rounded">
@core.SectionHead("Available Exports", fmt.Sprintf("%d files", len(entries)))
<div class="web2-table-shell">
<table class="web2-table">
<thead>
<tr>
@@ -100,20 +85,20 @@ templ SnapshotListPage(title string, subtitle string, entries []SnapshotEntry) {
for i, entry := range entries {
if entry.Group != "" && (i == 0 || entries[i-1].Group != entry.Group) {
<tr class="web2-group-row">
<td colspan="3" class="font-semibold text-slate-700">{entry.Group}</td>
<td colspan="3" class="font-semibold">{ entry.Group }</td>
</tr>
}
<tr>
<td>
<div class="flex flex-col">
<span class="text-sm font-semibold text-slate-700">{entry.Label}</span>
<span class="text-sm font-semibold">{ entry.Label }</span>
</div>
</td>
<td>
<span class="web2-badge">{entry.Count} records</span>
<span class="web2-badge">{ entry.Count } records</span>
</td>
<td class="text-right">
<a class="web2-link" href={entry.Link}>Download XLSX</a>
<a class="web2-link" href={ entry.Link }>Download XLSX</a>
</td>
</tr>
}
@@ -132,24 +117,24 @@ templ VcenterList(links []VcenterLink) {
<html lang="en">
@core.Header()
<body class="flex flex-col min-h-screen web2-bg">
<main class="flex-grow web2-shell space-y-8">
<section class="web2-header">
<div class="flex flex-col gap-4 md:flex-row md:items-center md:justify-between">
<div>
<div class="web2-pill">vCenter Inventory</div>
<h1 class="mt-3 text-4xl font-bold">Monitored vCenters</h1>
<p class="mt-2 text-sm text-slate-600">Select a vCenter to view snapshot totals over time.</p>
</div>
<a class="web2-button" href="/">Back to Dashboard</a>
</div>
<main class="flex-grow web2-shell web2-card-grid">
<section class="web2-header web2-page-head">
@core.PageHeader(
"vCenter Inventory",
"Monitored vCenters",
"Select a vCenter to view snapshot totals over time.",
[]core.ActionLink{
{
Label: "Back to Dashboard",
Href: "/",
Class: "web2-button secondary",
},
},
)
</section>
<section class="web2-card">
<div class="flex items-center justify-between gap-3 mb-4 flex-wrap">
<h2 class="text-lg font-semibold">vCenters</h2>
<span class="web2-badge">{len(links)} total</span>
</div>
<div class="overflow-hidden border border-slate-200 rounded">
@core.SectionHead("vCenters", fmt.Sprintf("%d total", len(links)))
<div class="web2-table-shell">
<table class="web2-table">
<thead>
<tr>
@@ -160,9 +145,9 @@ templ VcenterList(links []VcenterLink) {
<tbody>
for _, link := range links {
<tr>
<td class="font-semibold text-slate-700">{link.Name}</td>
<td class="font-semibold">{ link.Name }</td>
<td class="text-right">
<a class="web2-link" href={link.Link}>View Totals</a>
<a class="web2-link" href={ link.Link }>View Totals</a>
</td>
</tr>
}
@@ -177,85 +162,61 @@ templ VcenterList(links []VcenterLink) {
}
templ VcenterTotalsPage(vcenter string, entries []VcenterTotalsEntry, chart VcenterChartData, meta VcenterTotalsMeta) {
<!DOCTYPE html>
<html lang="en">
@core.Header()
<body class="flex flex-col min-h-screen web2-bg">
<main class="flex-grow web2-shell space-y-8 max-w-screen-2xl mx-auto" style="max-width: 1400px;">
<section class="web2-header">
<div class="flex flex-col gap-4 md:flex-row md:items-center md:justify-between">
<div>
<div class="web2-pill">vCenter Totals</div>
<h1 class="mt-3 text-4xl font-bold">Totals for {vcenter}</h1>
<p class="mt-2 text-sm text-slate-600">{meta.TypeLabel} snapshots of VM count, vCPU, and RAM over time.</p>
</div>
<div class="flex gap-3">
<a class="web2-button secondary" href="/vcenters">All vCenters</a>
<a class="web2-button" href="/">Dashboard</a>
</div>
</div>
<div class="web3-button-group mt-8 mb-3">
<a class={meta.HourlyClass} href={meta.HourlyLink}>Hourly</a>
<a class={meta.DailyClass} href={meta.DailyLink}>Daily</a>
<a class={meta.MonthlyClass} href={meta.MonthlyLink}>Monthly</a>
</div>
</section>
<section class="web2-card">
<div class="flex items-center justify-between gap-3 mb-4 flex-wrap">
<h2 class="text-lg font-semibold">{meta.TypeLabel} Snapshots</h2>
<span class="web2-badge">{len(entries)} records</span>
</div>
if chart.PointsVm != "" {
<!DOCTYPE html>
<html lang="en">
@core.Header()
<body class="flex flex-col min-h-screen web2-bg">
<main class="flex-grow web2-shell web2-shell-wide web2-card-grid">
<section class="web2-header web2-page-head">
@core.PageHeader(
"vCenter Totals",
"Totals for "+vcenter,
meta.TypeLabel+" snapshots of VM count, vCPU, and RAM over time.",
[]core.ActionLink{
{
Label: "All vCenters",
Href: "/vcenters",
Class: "web2-button secondary",
},
{
Label: "Dashboard",
Href: "/",
Class: "web2-button",
},
},
)
@core.SegmentedActions(
[]core.SegmentedLink{
{
Label: "Hourly Detail (45d)",
Href: meta.HourlyLink,
Class: meta.HourlyClass,
},
{
Label: "Daily Aggregated",
Href: meta.DailyLink,
Class: meta.DailyClass,
},
},
)
</section>
<section class="web2-card">
@core.SectionHead(meta.TypeLabel+" Snapshots", fmt.Sprintf("%d records", len(entries)))
if chart.ConfigJSON != "" {
<div class="mb-6 overflow-auto">
<svg width="100%" height={fmt.Sprintf("%d", chart.Height+80)} viewBox={"0 0 " + fmt.Sprintf("%d", chart.Width) + " " + fmt.Sprintf("%d", chart.Height+70)} role="img" aria-label="Totals over time">
<defs>
<linearGradient id="grid" x1="0" y1="0" x2="0" y2="1">
<stop offset="0%" stop-color="#e2e8f0" stop-opacity="0.6"></stop>
</linearGradient>
</defs>
<rect x="40" y="10" width={fmt.Sprintf("%d", chart.Width-60)} height={fmt.Sprintf("%d", chart.Height)} fill="white" stroke="#e2e8f0"></rect>
<!-- grid lines -->
<g stroke="#e2e8f0" stroke-width="1" stroke-dasharray="2,4">
for _, y := range chart.GridY {
<line x1="40" y1={fmt.Sprintf("%.1f", y)} x2={fmt.Sprintf("%d", chart.Width-20)} y2={fmt.Sprintf("%.1f", y)} />
}
for _, x := range chart.GridX {
<line x1={fmt.Sprintf("%.1f", x)} y1="10" x2={fmt.Sprintf("%.1f", x)} y2={fmt.Sprintf("%d", chart.Height+10)} />
}
</g>
<!-- axes -->
<line x1="40" y1={fmt.Sprintf("%d", chart.Height+10)} x2={fmt.Sprintf("%d", chart.Width-20)} y2={fmt.Sprintf("%d", chart.Height+10)} stroke="#94a3b8" stroke-width="1.5"></line>
<line x1="40" y1="10" x2="40" y2={fmt.Sprintf("%d", chart.Height+10)} stroke="#94a3b8" stroke-width="1.5"></line>
<!-- data -->
<polyline points={chart.PointsVm} fill="none" stroke="#2563eb" stroke-width="2.5"></polyline>
<polyline points={chart.PointsVcpu} fill="none" stroke="#16a34a" stroke-width="2.5"></polyline>
<polyline points={chart.PointsRam} fill="none" stroke="#ea580c" stroke-width="2.5"></polyline>
<!-- tick labels -->
<g font-size="10" fill="#475569" text-anchor="end">
for _, tick := range chart.YTicks {
<text x="36" y={fmt.Sprintf("%.1f", tick.Pos+3)}>{tick.Label}</text>
}
</g>
<g font-size="10" fill="#475569" text-anchor="middle">
for _, tick := range chart.XTicks {
<text x={fmt.Sprintf("%.1f", tick.Pos)} y={fmt.Sprintf("%d", chart.Height+24)}>{tick.Label}</text>
}
</g>
<!-- legend -->
<g font-size="12" fill="#475569" transform={"translate(40 " + fmt.Sprintf("%d", chart.Height+54) + ")"}>
<rect x="0" y="0" width="14" height="8" fill="#2563eb"></rect><text x="22" y="12">VMs</text>
<rect x="90" y="0" width="14" height="8" fill="#16a34a"></rect><text x="112" y="12">vCPU</text>
<rect x="180" y="0" width="14" height="8" fill="#ea580c"></rect><text x="202" y="12">RAM (GB)</text>
</g>
<!-- axis labels -->
<text x="15" y="20" transform={"rotate(-90 15 20)"} font-size="12" fill="#475569">Totals</text>
<text x={fmt.Sprintf("%d", chart.Width/2)} y={fmt.Sprintf("%d", chart.Height+70)} font-size="12" fill="#475569">Snapshot sequence (newest right)</text>
</svg>
<div class="web3-chart-frame">
<canvas id="vcenter-totals-chart" class="web3-chart-canvas" role="img" aria-label="Totals over time" data-chart-config={ chart.ConfigJSON }></canvas>
<div id="vcenter-totals-tooltip" class="web3-chart-tooltip" aria-hidden="true"></div>
</div>
<script>
window.Web3Charts.renderFromDataset({
canvasId: "vcenter-totals-chart",
tooltipId: "vcenter-totals-tooltip",
})
</script>
</div>
}
<div class="overflow-hidden border border-slate-200 rounded">
<div class="web2-table-shell">
<table class="web2-table">
<thead>
<tr>
@@ -268,10 +229,10 @@ templ VcenterTotalsPage(vcenter string, entries []VcenterTotalsEntry, chart Vcen
<tbody>
for _, entry := range entries {
<tr>
<td>{entry.Snapshot}</td>
<td class="text-right">{entry.VmCount}</td>
<td class="text-right">{entry.VcpuTotal}</td>
<td class="text-right">{entry.RamTotalGB}</td>
<td>{ entry.Snapshot }</td>
<td class="text-right">{ entry.VmCount }</td>
<td class="text-right">{ entry.VcpuTotal }</td>
<td class="text-right">{ entry.RamTotalGB }</td>
</tr>
}
</tbody>
+151 -637
View File
@@ -1,6 +1,6 @@
// Code generated by templ - DO NOT EDIT.
// templ: version: v0.3.977
// templ: version: v0.3.1001
package views
//lint:file-ignore SA4006 This context is only used if a nested component is present.
@@ -34,31 +34,16 @@ type VcenterTotalsEntry struct {
}
type VcenterTotalsMeta struct {
ViewType string
TypeLabel string
HourlyLink string
DailyLink string
MonthlyLink string
HourlyClass string
DailyClass string
MonthlyClass string
ViewType string
TypeLabel string
HourlyLink string
DailyLink string
HourlyClass string
DailyClass string
}
type VcenterChartData struct {
PointsVm string
PointsVcpu string
PointsRam string
Width int
Height int
GridX []float64
GridY []float64
YTicks []ChartTick
XTicks []ChartTick
}
type ChartTick struct {
Pos float64
Label string
ConfigJSON string
}
func SnapshotHourlyList(entries []SnapshotEntry) templ.Component {
@@ -177,114 +162,102 @@ func SnapshotListPage(title string, subtitle string, entries []SnapshotEntry) te
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell space-y-8\"><section class=\"web2-header\"><div class=\"flex flex-col gap-4 md:flex-row md:items-center md:justify-between\"><div><div class=\"web2-pill\">Snapshot Library</div><h1 class=\"mt-3 text-4xl font-bold\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell web2-card-grid\"><section class=\"web2-header web2-page-head\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(title)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 78, Col: 49}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
templ_7745c5c3_Err = core.PageHeader(
"Snapshot Library",
title,
subtitle,
[]core.ActionLink{
{
Label: "Back to Dashboard",
Href: "/",
Class: "web2-button secondary",
},
},
).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</h1><p class=\"mt-2 text-sm text-slate-600\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</section><section class=\"web2-card\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(subtitle)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 79, Col: 55}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
templ_7745c5c3_Err = core.SectionHead("Available Exports", fmt.Sprintf("%d files", len(entries))).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "</p></div><a class=\"web2-button\" href=\"/\">Back to Dashboard</a></div></section><section class=\"web2-card\"><div class=\"flex items-center justify-between gap-3 mb-4 flex-wrap\"><h2 class=\"text-lg font-semibold\">Available Exports</h2><span class=\"web2-badge\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 string
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(len(entries))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 88, Col: 44}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, " files</span></div><div class=\"overflow-hidden border border-slate-200 rounded\"><table class=\"web2-table\"><thead><tr><th>Snapshot</th><th>Records</th><th class=\"text-right\">Download</th></tr></thead> <tbody>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "<div class=\"web2-table-shell\"><table class=\"web2-table\"><thead><tr><th>Snapshot</th><th>Records</th><th class=\"text-right\">Download</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for i, entry := range entries {
if entry.Group != "" && (i == 0 || entries[i-1].Group != entry.Group) {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "<tr class=\"web2-group-row\"><td colspan=\"3\" class=\"font-semibold text-slate-700\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "<tr class=\"web2-group-row\"><td colspan=\"3\" class=\"font-semibold\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Group)
var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Group)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 103, Col: 76}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 88, Col: 62}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "</td></tr>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, " <tr><td><div class=\"flex flex-col\"><span class=\"text-sm font-semibold text-slate-700\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, " <tr><td><div class=\"flex flex-col\"><span class=\"text-sm font-semibold\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var9 string
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Label)
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Label)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 109, Col: 75}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 94, Col: 61}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "</span></div></td><td><span class=\"web2-badge\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "</span></div></td><td><span class=\"web2-badge\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var10 string
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Count)
var templ_7745c5c3_Var7 string
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Count)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 113, Col: 48}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 98, Col: 49}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " records</span></td><td class=\"text-right\"><a class=\"web2-link\" href=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, " records</span></td><td class=\"text-right\"><a class=\"web2-link\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var11 templ.SafeURL
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinURLErrs(entry.Link)
var templ_7745c5c3_Var8 templ.SafeURL
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinURLErrs(entry.Link)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 116, Col: 48}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 101, Col: 49}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "\">Download XLSX</a></td></tr>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "\">Download XLSX</a></td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "</tbody></table></div></section></main></body>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "</tbody></table></div></section></main></body>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -292,7 +265,7 @@ func SnapshotListPage(title string, subtitle string, entries []SnapshotEntry) te
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "</html>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "</html>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -316,12 +289,12 @@ func VcenterList(links []VcenterLink) templ.Component {
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var12 := templ.GetChildren(ctx)
if templ_7745c5c3_Var12 == nil {
templ_7745c5c3_Var12 = templ.NopComponent
templ_7745c5c3_Var9 := templ.GetChildren(ctx)
if templ_7745c5c3_Var9 == nil {
templ_7745c5c3_Var9 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "<!doctype html><html lang=\"en\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "<!doctype html><html lang=\"en\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -329,34 +302,48 @@ func VcenterList(links []VcenterLink) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell space-y-8\"><section class=\"web2-header\"><div class=\"flex flex-col gap-4 md:flex-row md:items-center md:justify-between\"><div><div class=\"web2-pill\">vCenter Inventory</div><h1 class=\"mt-3 text-4xl font-bold\">Monitored vCenters</h1><p class=\"mt-2 text-sm text-slate-600\">Select a vCenter to view snapshot totals over time.</p></div><a class=\"web2-button\" href=\"/\">Back to Dashboard</a></div></section><section class=\"web2-card\"><div class=\"flex items-center justify-between gap-3 mb-4 flex-wrap\"><h2 class=\"text-lg font-semibold\">vCenters</h2><span class=\"web2-badge\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell web2-card-grid\"><section class=\"web2-header web2-page-head\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(len(links))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 150, Col: 42}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
templ_7745c5c3_Err = core.PageHeader(
"vCenter Inventory",
"Monitored vCenters",
"Select a vCenter to view snapshot totals over time.",
[]core.ActionLink{
{
Label: "Back to Dashboard",
Href: "/",
Class: "web2-button secondary",
},
},
).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, " total</span></div><div class=\"overflow-hidden border border-slate-200 rounded\"><table class=\"web2-table\"><thead><tr><th>vCenter</th><th class=\"text-right\">Totals</th></tr></thead> <tbody>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "</section><section class=\"web2-card\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = core.SectionHead("vCenters", fmt.Sprintf("%d total", len(links))).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "<div class=\"web2-table-shell\"><table class=\"web2-table\"><thead><tr><th>vCenter</th><th class=\"text-right\">Totals</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, link := range links {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "<tr><td class=\"font-semibold text-slate-700\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "<tr><td class=\"font-semibold\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(link.Name)
var templ_7745c5c3_Var10 string
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(link.Name)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 163, Col: 61}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 148, Col: 47}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -364,12 +351,12 @@ func VcenterList(links []VcenterLink) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 templ.SafeURL
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinURLErrs(link.Link)
var templ_7745c5c3_Var11 templ.SafeURL
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinURLErrs(link.Link)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 165, Col: 47}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 150, Col: 48}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -410,9 +397,9 @@ func VcenterTotalsPage(vcenter string, entries []VcenterTotalsEntry, chart Vcent
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var16 := templ.GetChildren(ctx)
if templ_7745c5c3_Var16 == nil {
templ_7745c5c3_Var16 = templ.NopComponent
templ_7745c5c3_Var12 := templ.GetChildren(ctx)
if templ_7745c5c3_Var12 == nil {
templ_7745c5c3_Var12 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "<!doctype html><html lang=\"en\">")
@@ -423,610 +410,137 @@ func VcenterTotalsPage(vcenter string, entries []VcenterTotalsEntry, chart Vcent
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell space-y-8 max-w-screen-2xl mx-auto\" style=\"max-width: 1400px;\"><section class=\"web2-header\"><div class=\"flex flex-col gap-4 md:flex-row md:items-center md:justify-between\"><div><div class=\"web2-pill\">vCenter Totals</div><h1 class=\"mt-3 text-4xl font-bold\">Totals for ")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell web2-shell-wide web2-card-grid\"><section class=\"web2-header web2-page-head\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(vcenter)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 189, Col: 63}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
templ_7745c5c3_Err = core.PageHeader(
"vCenter Totals",
"Totals for "+vcenter,
meta.TypeLabel+" snapshots of VM count, vCPU, and RAM over time.",
[]core.ActionLink{
{
Label: "All vCenters",
Href: "/vcenters",
Class: "web2-button secondary",
},
{
Label: "Dashboard",
Href: "/",
Class: "web2-button",
},
},
).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</h1><p class=\"mt-2 text-sm text-slate-600\">")
templ_7745c5c3_Err = core.SegmentedActions(
[]core.SegmentedLink{
{
Label: "Hourly Detail (45d)",
Href: meta.HourlyLink,
Class: meta.HourlyClass,
},
{
Label: "Daily Aggregated",
Href: meta.DailyLink,
Class: meta.DailyClass,
},
},
).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var18 string
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(meta.TypeLabel)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 190, Col: 62}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</section><section class=\"web2-card\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, " snapshots of VM count, vCPU, and RAM over time.</p></div><div class=\"flex gap-3\"><a class=\"web2-button secondary\" href=\"/vcenters\">All vCenters</a> <a class=\"web2-button\" href=\"/\">Dashboard</a></div></div><div class=\"web3-button-group mt-8 mb-3\">")
templ_7745c5c3_Err = core.SectionHead(meta.TypeLabel+" Snapshots", fmt.Sprintf("%d records", len(entries))).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var19 = []any{meta.HourlyClass}
templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var19...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "<a class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var20 string
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var19).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 1, Col: 0}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var21 templ.SafeURL
templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinURLErrs(meta.HourlyLink)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 198, Col: 56}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "\">Hourly</a> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var22 = []any{meta.DailyClass}
templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var22...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "<a class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var23 string
templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var22).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 1, Col: 0}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var24 templ.SafeURL
templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinURLErrs(meta.DailyLink)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 199, Col: 54}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "\">Daily</a> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var25 = []any{meta.MonthlyClass}
templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var25...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "<a class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var26 string
templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var25).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 1, Col: 0}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var27 templ.SafeURL
templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinURLErrs(meta.MonthlyLink)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 200, Col: 58}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "\">Monthly</a></div></section><section class=\"web2-card\"><div class=\"flex items-center justify-between gap-3 mb-4 flex-wrap\"><h2 class=\"text-lg font-semibold\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var28 string
templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(meta.TypeLabel)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 206, Col: 56}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, " Snapshots</h2><span class=\"web2-badge\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var29 string
templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(len(entries))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 207, Col: 45}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, " records</span></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if chart.PointsVm != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "<div class=\"mb-6 overflow-auto\"><svg width=\"100%\" height=\"")
if chart.ConfigJSON != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "<div class=\"mb-6 overflow-auto\"><div class=\"web3-chart-frame\"><canvas id=\"vcenter-totals-chart\" class=\"web3-chart-canvas\" role=\"img\" aria-label=\"Totals over time\" data-chart-config=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var30 string
templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+80))
var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(chart.ConfigJSON)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 211, Col: 67}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 208, Col: 145}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "\" viewBox=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var31 string
templ_7745c5c3_Var31, templ_7745c5c3_Err = templ.JoinStringErrs("0 0 " + fmt.Sprintf("%d", chart.Width) + " " + fmt.Sprintf("%d", chart.Height+70))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 211, Col: 160}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var31))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "\" role=\"img\" aria-label=\"Totals over time\"><defs><linearGradient id=\"grid\" x1=\"0\" y1=\"0\" x2=\"0\" y2=\"1\"><stop offset=\"0%\" stop-color=\"#e2e8f0\" stop-opacity=\"0.6\"></stop></linearGradient></defs> <rect x=\"40\" y=\"10\" width=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var32 string
templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-60))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 217, Col: 68}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "\" height=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var33 string
templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 217, Col: 109}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "\" fill=\"white\" stroke=\"#e2e8f0\"></rect><!-- grid lines --><g stroke=\"#e2e8f0\" stroke-width=\"1\" stroke-dasharray=\"2,4\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, y := range chart.GridY {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "<line x1=\"40\" y1=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var34 string
templ_7745c5c3_Var34, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", y))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 221, Col: 51}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var34))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "\" x2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var35 string
templ_7745c5c3_Var35, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-20))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 221, Col: 90}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var35))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "\" y2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var36 string
templ_7745c5c3_Var36, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", y))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 221, Col: 118}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var36))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "\"></line> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
for _, x := range chart.GridX {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "<line x1=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var37 string
templ_7745c5c3_Var37, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", x))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 224, Col: 43}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var37))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "\" y1=\"10\" x2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var38 string
templ_7745c5c3_Var38, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", x))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 224, Col: 79}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var38))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "\" y2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var39 string
templ_7745c5c3_Var39, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 224, Col: 119}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var39))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "\"></line>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "</g><!-- axes --><line x1=\"40\" y1=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var40 string
templ_7745c5c3_Var40, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 228, Col: 61}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var40))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "\" x2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var41 string
templ_7745c5c3_Var41, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-20))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 228, Col: 100}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var41))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "\" y2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var42 string
templ_7745c5c3_Var42, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 228, Col: 140}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var42))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "\" stroke=\"#94a3b8\" stroke-width=\"1.5\"></line> <line x1=\"40\" y1=\"10\" x2=\"40\" y2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var43 string
templ_7745c5c3_Var43, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 229, Col: 77}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var43))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "\" stroke=\"#94a3b8\" stroke-width=\"1.5\"></line><!-- data --><polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var44 string
templ_7745c5c3_Var44, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsVm)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 231, Col: 40}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var44))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "\" fill=\"none\" stroke=\"#2563eb\" stroke-width=\"2.5\"></polyline> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var45 string
templ_7745c5c3_Var45, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsVcpu)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 232, Col: 42}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var45))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "\" fill=\"none\" stroke=\"#16a34a\" stroke-width=\"2.5\"></polyline> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var46 string
templ_7745c5c3_Var46, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsRam)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 233, Col: 41}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var46))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "\" fill=\"none\" stroke=\"#ea580c\" stroke-width=\"2.5\"></polyline><!-- tick labels --><g font-size=\"10\" fill=\"#475569\" text-anchor=\"end\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, tick := range chart.YTicks {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "<text x=\"36\" y=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var47 string
templ_7745c5c3_Var47, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", tick.Pos+3))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 237, Col: 58}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var47))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var48 string
templ_7745c5c3_Var48, templ_7745c5c3_Err = templ.JoinStringErrs(tick.Label)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 237, Col: 71}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var48))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "</text>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "</g> <g font-size=\"10\" fill=\"#475569\" text-anchor=\"middle\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, tick := range chart.XTicks {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "<text x=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var49 string
templ_7745c5c3_Var49, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", tick.Pos))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 242, Col: 49}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var49))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "\" y=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var50 string
templ_7745c5c3_Var50, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+24))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 242, Col: 88}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var50))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var51 string
templ_7745c5c3_Var51, templ_7745c5c3_Err = templ.JoinStringErrs(tick.Label)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 242, Col: 101}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var51))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "</text>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "</g><!-- legend --><g font-size=\"12\" fill=\"#475569\" transform=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var52 string
templ_7745c5c3_Var52, templ_7745c5c3_Err = templ.JoinStringErrs("translate(40 " + fmt.Sprintf("%d", chart.Height+54) + ")")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 246, Col: 111}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var52))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "\"><rect x=\"0\" y=\"0\" width=\"14\" height=\"8\" fill=\"#2563eb\"></rect><text x=\"22\" y=\"12\">VMs</text> <rect x=\"90\" y=\"0\" width=\"14\" height=\"8\" fill=\"#16a34a\"></rect><text x=\"112\" y=\"12\">vCPU</text> <rect x=\"180\" y=\"0\" width=\"14\" height=\"8\" fill=\"#ea580c\"></rect><text x=\"202\" y=\"12\">RAM (GB)</text></g><!-- axis labels --><text x=\"15\" y=\"20\" transform=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var53 string
templ_7745c5c3_Var53, templ_7745c5c3_Err = templ.JoinStringErrs("rotate(-90 15 20)")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 252, Col: 59}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var53))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "\" font-size=\"12\" fill=\"#475569\">Totals</text> <text x=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var54 string
templ_7745c5c3_Var54, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width/2))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 253, Col: 50}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var54))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "\" y=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var55 string
templ_7745c5c3_Var55, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+70))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 253, Col: 89}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var55))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "\" font-size=\"12\" fill=\"#475569\">Snapshot sequence (newest right)</text></svg></div>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "\"></canvas><div id=\"vcenter-totals-tooltip\" class=\"web3-chart-tooltip\" aria-hidden=\"true\"></div></div><script>\n\t\t\t\t\t\t\t\twindow.Web3Charts.renderFromDataset({\n\t\t\t\t\t\t\t\t\tcanvasId: \"vcenter-totals-chart\",\n\t\t\t\t\t\t\t\t\ttooltipId: \"vcenter-totals-tooltip\",\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t</script></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "<div class=\"overflow-hidden border border-slate-200 rounded\"><table class=\"web2-table\"><thead><tr><th>Snapshot Time</th><th class=\"text-right\">VMs</th><th class=\"text-right\">vCPUs</th><th class=\"text-right\">RAM (GB)</th></tr></thead> <tbody>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "<div class=\"web2-table-shell\"><table class=\"web2-table\"><thead><tr><th>Snapshot Time</th><th class=\"text-right\">VMs</th><th class=\"text-right\">vCPUs</th><th class=\"text-right\">RAM (GB)</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, entry := range entries {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "<tr><td>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "<tr><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var56 string
templ_7745c5c3_Var56, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Snapshot)
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Snapshot)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 271, Col: 29}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 232, Col: 30}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var56))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "</td><td class=\"text-right\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var57 string
templ_7745c5c3_Var57, templ_7745c5c3_Err = templ.JoinStringErrs(entry.VmCount)
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(entry.VmCount)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 272, Col: 47}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 233, Col: 48}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var57))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 74, "</td><td class=\"text-right\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var58 string
templ_7745c5c3_Var58, templ_7745c5c3_Err = templ.JoinStringErrs(entry.VcpuTotal)
var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(entry.VcpuTotal)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 273, Col: 49}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 234, Col: 50}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var58))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 75, "</td><td class=\"text-right\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var59 string
templ_7745c5c3_Var59, templ_7745c5c3_Err = templ.JoinStringErrs(entry.RamTotalGB)
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(entry.RamTotalGB)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 274, Col: 50}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 235, Col: 51}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var59))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 76, "</td></tr>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "</tbody></table></div></section></main></body>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</tbody></table></div></section></main></body>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -1034,7 +548,7 @@ func VcenterTotalsPage(vcenter string, entries []VcenterTotalsEntry, chart Vcent
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "</html>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "</html>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
+127 -115
View File
@@ -6,131 +6,143 @@ import (
)
type VmTraceEntry struct {
Snapshot string
RawTime int64
Name string
VmId string
VmUuid string
Vcenter string
ResourcePool string
VcpuCount int64
RamGB int64
Snapshot string
RawTime int64
Name string
VmId string
VmUuid string
Vcenter string
ResourcePool string
VcpuCount int64
RamGB int64
ProvisionedDisk float64
CreationTime string
DeletionTime string
CreationTime string
DeletionTime string
}
type VmTraceChart struct {
PointsVcpu string
PointsRam string
PointsTin string
PointsBronze string
PointsSilver string
PointsGold string
Width int
Height int
GridX []float64
GridY []float64
XTicks []ChartTick
YTicks []ChartTick
ConfigJSON string
}
templ VmTracePage(query string, display_query string, vm_id string, vm_uuid string, vm_name string, creationLabel string, deletionLabel string, entries []VmTraceEntry, chart VmTraceChart) {
type VmTraceMeta struct {
ViewType string
TypeLabel string
HourlyLink string
DailyLink string
HourlyClass string
DailyClass string
}
type VmTraceDiagnosticLine struct {
Label string
Value string
}
type VmTraceDiagnostics struct {
Visible bool
Lines []VmTraceDiagnosticLine
}
templ VmTracePage(query string, display_query string, vm_id string, vm_uuid string, vm_name string, creationLabel string, deletionLabel string, creationApprox bool, entries []VmTraceEntry, chart VmTraceChart, meta VmTraceMeta, diagnostics VmTraceDiagnostics) {
<!DOCTYPE html>
<html lang="en">
@core.Header()
<body class="flex flex-col min-h-screen web2-bg">
<main class="flex-grow web2-shell space-y-8 max-w-screen-2xl mx-auto" style="max-width: 1400px;">
<section class="web2-header">
<div class="flex flex-col gap-4 md:flex-row md:items-center md:justify-between">
<div>
<div class="web2-pill">VM Trace</div>
<h1 class="mt-3 text-4xl font-bold">Snapshot history{display_query}</h1>
<p class="mt-2 text-sm text-slate-600">Timeline of vCPU, RAM, and resource pool changes across snapshots.</p>
</div>
<div class="flex gap-3 flex-wrap">
<a class="web2-button" href="/">Dashboard</a>
</div>
<main class="flex-grow web2-shell web2-shell-wide web2-card-grid">
<section class="web2-header web2-page-head">
@core.PageHeader(
"VM Trace",
"Snapshot history"+display_query,
"Timeline of vCPU, RAM, and resource pool changes across "+meta.TypeLabel+" snapshots.",
[]core.ActionLink{
{
Label: "Dashboard",
Href: "/",
Class: "web2-button secondary",
},
},
)
<form method="get" action="/vm/trace" class="web2-form-grid">
<input type="hidden" name="view" value={ meta.ViewType }/>
<div class="web2-field">
<label class="web2-label" for="vm_id">VM ID</label>
<input class="web2-input" type="text" id="vm_id" name="vm_id" value={ vm_id } placeholder="vm-12345"/>
</div>
<form method="get" action="/vm/trace" class="mt-4 grid gap-3 md:grid-cols-3">
<div class="flex flex-col gap-1">
<label class="text-sm text-slate-600" for="vm_id">VM ID</label>
<input class="web2-card border border-slate-200 px-3 py-2 rounded" type="text" id="vm_id" name="vm_id" value={vm_id} placeholder="vm-12345"/>
</div>
<div class="flex flex-col gap-1">
<label class="text-sm text-slate-600" for="vm_uuid">VM UUID</label>
<input class="web2-card border border-slate-200 px-3 py-2 rounded" type="text" id="vm_uuid" name="vm_uuid" value={vm_uuid} placeholder="uuid..."/>
</div>
<div class="flex flex-col gap-1">
<label class="text-sm text-slate-600" for="name">Name</label>
<input class="web2-card border border-slate-200 px-3 py-2 rounded" type="text" id="name" name="name" value={vm_name} placeholder="VM name"/>
</div>
<div class="md:col-span-3 flex gap-2">
<button class="web3-button active" type="submit">Load VM Trace</button>
<a class="web3-button" href="/vm/trace">Clear</a>
</div>
</form>
</section>
<div class="web2-field">
<label class="web2-label" for="vm_uuid">VM UUID</label>
<input class="web2-input" type="text" id="vm_uuid" name="vm_uuid" value={ vm_uuid } placeholder="uuid..."/>
</div>
<div class="web2-field">
<label class="web2-label" for="name">Name</label>
<input class="web2-input" type="text" id="name" name="name" value={ vm_name } placeholder="VM name"/>
</div>
<div class="web2-form-actions web2-form-actions-full">
<button class="web3-button active" type="submit">Load VM Trace</button>
<a class="web3-button" href="/vm/trace">Clear</a>
</div>
</form>
@core.SegmentedActions(
[]core.SegmentedLink{
{
Label: "Hourly Detail",
Href: meta.HourlyLink,
Class: meta.HourlyClass,
},
{
Label: "Daily Aggregated",
Href: meta.DailyLink,
Class: meta.DailyClass,
},
},
)
</section>
<section class="web2-card">
<div class="flex items-center justify-between gap-3 mb-4 flex-wrap">
<h2 class="text-lg font-semibold">Snapshot Timeline</h2>
<span class="web2-badge">{len(entries)} samples</span>
</div>
if chart.PointsVcpu != "" {
@core.SectionHead(meta.TypeLabel+" Timeline", fmt.Sprintf("%d samples", len(entries)))
if chart.ConfigJSON != "" {
<div class="mb-6 overflow-auto">
<svg width="100%" height="360" viewBox={"0 0 " + fmt.Sprintf("%d", chart.Width) + " 320"} role="img" aria-label="VM timeline">
<rect x="40" y="10" width={fmt.Sprintf("%d", chart.Width-60)} height={fmt.Sprintf("%d", chart.Height)} fill="white" stroke="#e2e8f0"></rect>
<g stroke="#e2e8f0" stroke-width="1" stroke-dasharray="2,4">
for _, y := range chart.GridY {
<line x1="40" y1={fmt.Sprintf("%.1f", y)} x2={fmt.Sprintf("%d", chart.Width-20)} y2={fmt.Sprintf("%.1f", y)} />
}
for _, x := range chart.GridX {
<line x1={fmt.Sprintf("%.1f", x)} y1="10" x2={fmt.Sprintf("%.1f", x)} y2={fmt.Sprintf("%d", chart.Height+10)} />
}
</g>
<line x1="40" y1={fmt.Sprintf("%d", chart.Height+10)} x2={fmt.Sprintf("%d", chart.Width-20)} y2={fmt.Sprintf("%d", chart.Height+10)} stroke="#94a3b8" stroke-width="1.5"></line>
<line x1="40" y1="10" x2="40" y2={fmt.Sprintf("%d", chart.Height+10)} stroke="#94a3b8" stroke-width="1.5"></line>
<polyline points={chart.PointsVcpu} fill="none" stroke="#2563eb" stroke-width="2.5"></polyline>
<polyline points={chart.PointsRam} fill="none" stroke="#16a34a" stroke-width="2.5"></polyline>
<polyline points={chart.PointsTin} fill="none" stroke="#0ea5e9" stroke-width="1.5" stroke-dasharray="4,4"></polyline>
<polyline points={chart.PointsBronze} fill="none" stroke="#a855f7" stroke-width="1.5" stroke-dasharray="4,4"></polyline>
<polyline points={chart.PointsSilver} fill="none" stroke="#94a3b8" stroke-width="1.5" stroke-dasharray="4,4"></polyline>
<polyline points={chart.PointsGold} fill="none" stroke="#f59e0b" stroke-width="1.5" stroke-dasharray="4,4"></polyline>
<g font-size="10" fill="#475569" text-anchor="end">
for _, tick := range chart.YTicks {
<text x="36" y={fmt.Sprintf("%.1f", tick.Pos+3)}>{tick.Label}</text>
}
</g>
<g font-size="10" fill="#475569" text-anchor="middle">
for _, tick := range chart.XTicks {
<text x={fmt.Sprintf("%.1f", tick.Pos)} y={fmt.Sprintf("%d", chart.Height+24)}>{tick.Label}</text>
}
</g>
<g font-size="12" fill="#475569" transform={"translate(40 " + fmt.Sprintf("%d", chart.Height+50) + ")"}>
<rect x="0" y="0" width="14" height="8" fill="#2563eb"></rect><text x="22" y="12">vCPU</text>
<rect x="90" y="0" width="14" height="8" fill="#16a34a"></rect><text x="112" y="12">RAM (GB)</text>
<rect x="200" y="0" width="14" height="8" fill="#0ea5e9"></rect><text x="222" y="12">Tin</text>
<rect x="260" y="0" width="14" height="8" fill="#a855f7"></rect><text x="282" y="12">Bronze</text>
<rect x="340" y="0" width="14" height="8" fill="#94a3b8"></rect><text x="362" y="12">Silver</text>
<rect x="420" y="0" width="14" height="8" fill="#f59e0b"></rect><text x="442" y="12">Gold</text>
</g>
<text x="15" y="20" transform={"rotate(-90 15 20)"} font-size="12" fill="#475569">Resources / Pool</text>
<text x={fmt.Sprintf("%d", chart.Width/2)} y={fmt.Sprintf("%d", chart.Height+70)} font-size="12" fill="#475569">Snapshots (oldest left, newest right)</text>
</svg>
<div class="web3-chart-frame">
<canvas id="vm-trace-chart" class="web3-chart-canvas" role="img" aria-label="VM timeline" data-chart-config={ chart.ConfigJSON }></canvas>
<div id="vm-trace-tooltip" class="web3-chart-tooltip" aria-hidden="true"></div>
</div>
<script>
window.Web3Charts.renderFromDataset({
canvasId: "vm-trace-chart",
tooltipId: "vm-trace-tooltip",
})
</script>
</div>
}
<div class="grid gap-3 md:grid-cols-2 mb-4">
<div class="web2-card">
<p class="text-xs uppercase tracking-[0.15em] text-slate-500">Creation time</p>
<p class="mt-2 text-base font-semibold text-slate-800">{creationLabel}</p>
<div class="web2-subcard">
<p class="web2-subcard-label">Creation Time</p>
<p class="web2-subcard-value">{ creationLabel }</p>
if creationApprox {
<p class="web2-muted web2-caption mt-1">Approximate (earliest snapshot)</p>
}
</div>
<div class="web2-card">
<p class="text-xs uppercase tracking-[0.15em] text-slate-500">Deletion time</p>
<p class="mt-2 text-base font-semibold text-slate-800">{deletionLabel}</p>
<div class="web2-subcard">
<p class="web2-subcard-label">Deletion Time</p>
<p class="web2-subcard-value">{ deletionLabel }</p>
</div>
</div>
<div class="overflow-hidden border border-slate-200 rounded">
if diagnostics.Visible && len(diagnostics.Lines) > 0 {
<details class="web2-subcard mb-4">
<summary class="web2-details-summary">Lifecycle diagnostics</summary>
<div class="mt-3 web2-table-shell">
<table class="web2-table">
<tbody>
for _, line := range diagnostics.Lines {
<tr>
<td class="font-semibold w-72">{ line.Label }</td>
<td class="web2-muted">{ line.Value }</td>
</tr>
}
</tbody>
</table>
</div>
</details>
}
<div class="web2-table-shell">
<table class="web2-table">
<thead>
<tr>
@@ -148,15 +160,15 @@ templ VmTracePage(query string, display_query string, vm_id string, vm_uuid stri
<tbody>
for _, e := range entries {
<tr>
<td>{e.Snapshot}</td>
<td>{e.Name}</td>
<td>{e.VmId}</td>
<td>{e.VmUuid}</td>
<td>{e.Vcenter}</td>
<td>{e.ResourcePool}</td>
<td class="text-right">{e.VcpuCount}</td>
<td class="text-right">{e.RamGB}</td>
<td class="text-right">{fmt.Sprintf("%.1f", e.ProvisionedDisk)}</td>
<td>{ e.Snapshot }</td>
<td>{ e.Name }</td>
<td>{ e.VmId }</td>
<td>{ e.VmUuid }</td>
<td>{ e.Vcenter }</td>
<td>{ e.ResourcePool }</td>
<td class="text-right">{ e.VcpuCount }</td>
<td class="text-right">{ e.RamGB }</td>
<td class="text-right">{ fmt.Sprintf("%.1f", e.ProvisionedDisk) }</td>
</tr>
}
</tbody>
+224 -516
View File
@@ -1,6 +1,6 @@
// Code generated by templ - DO NOT EDIT.
// templ: version: v0.3.977
// templ: version: v0.3.1001
package views
//lint:file-ignore SA4006 This context is only used if a nested component is present.
@@ -29,21 +29,29 @@ type VmTraceEntry struct {
}
type VmTraceChart struct {
PointsVcpu string
PointsRam string
PointsTin string
PointsBronze string
PointsSilver string
PointsGold string
Width int
Height int
GridX []float64
GridY []float64
XTicks []ChartTick
YTicks []ChartTick
ConfigJSON string
}
func VmTracePage(query string, display_query string, vm_id string, vm_uuid string, vm_name string, creationLabel string, deletionLabel string, entries []VmTraceEntry, chart VmTraceChart) templ.Component {
type VmTraceMeta struct {
ViewType string
TypeLabel string
HourlyLink string
DailyLink string
HourlyClass string
DailyClass string
}
type VmTraceDiagnosticLine struct {
Label string
Value string
}
type VmTraceDiagnostics struct {
Visible bool
Lines []VmTraceDiagnosticLine
}
func VmTracePage(query string, display_query string, vm_id string, vm_uuid string, vm_name string, creationLabel string, deletionLabel string, creationApprox bool, entries []VmTraceEntry, chart VmTraceChart, meta VmTraceMeta, diagnostics VmTraceDiagnostics) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
@@ -72,635 +80,335 @@ func VmTracePage(query string, display_query string, vm_id string, vm_uuid strin
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell space-y-8 max-w-screen-2xl mx-auto\" style=\"max-width: 1400px;\"><section class=\"web2-header\"><div class=\"flex flex-col gap-4 md:flex-row md:items-center md:justify-between\"><div><div class=\"web2-pill\">VM Trace</div><h1 class=\"mt-3 text-4xl font-bold\">Snapshot history")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell web2-shell-wide web2-card-grid\"><section class=\"web2-header web2-page-head\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = core.PageHeader(
"VM Trace",
"Snapshot history"+display_query,
"Timeline of vCPU, RAM, and resource pool changes across "+meta.TypeLabel+" snapshots.",
[]core.ActionLink{
{
Label: "Dashboard",
Href: "/",
Class: "web2-button secondary",
},
},
).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "<form method=\"get\" action=\"/vm/trace\" class=\"web2-form-grid\"><input type=\"hidden\" name=\"view\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var2 string
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(display_query)
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(meta.ViewType)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 48, Col: 74}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 66, Col: 60}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</h1><p class=\"mt-2 text-sm text-slate-600\">Timeline of vCPU, RAM, and resource pool changes across snapshots.</p></div><div class=\"flex gap-3 flex-wrap\"><a class=\"web2-button\" href=\"/\">Dashboard</a></div></div><form method=\"get\" action=\"/vm/trace\" class=\"mt-4 grid gap-3 md:grid-cols-3\"><div class=\"flex flex-col gap-1\"><label class=\"text-sm text-slate-600\" for=\"vm_id\">VM ID</label> <input class=\"web2-card border border-slate-200 px-3 py-2 rounded\" type=\"text\" id=\"vm_id\" name=\"vm_id\" value=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "\"><div class=\"web2-field\"><label class=\"web2-label\" for=\"vm_id\">VM ID</label> <input class=\"web2-input\" type=\"text\" id=\"vm_id\" name=\"vm_id\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var3 string
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(vm_id)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 58, Col: 123}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 69, Col: 82}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "\" placeholder=\"vm-12345\"></div><div class=\"flex flex-col gap-1\"><label class=\"text-sm text-slate-600\" for=\"vm_uuid\">VM UUID</label> <input class=\"web2-card border border-slate-200 px-3 py-2 rounded\" type=\"text\" id=\"vm_uuid\" name=\"vm_uuid\" value=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "\" placeholder=\"vm-12345\"></div><div class=\"web2-field\"><label class=\"web2-label\" for=\"vm_uuid\">VM UUID</label> <input class=\"web2-input\" type=\"text\" id=\"vm_uuid\" name=\"vm_uuid\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var4 string
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(vm_uuid)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 62, Col: 129}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 73, Col: 88}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "\" placeholder=\"uuid...\"></div><div class=\"flex flex-col gap-1\"><label class=\"text-sm text-slate-600\" for=\"name\">Name</label> <input class=\"web2-card border border-slate-200 px-3 py-2 rounded\" type=\"text\" id=\"name\" name=\"name\" value=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "\" placeholder=\"uuid...\"></div><div class=\"web2-field\"><label class=\"web2-label\" for=\"name\">Name</label> <input class=\"web2-input\" type=\"text\" id=\"name\" name=\"name\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(vm_name)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 66, Col: 123}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 77, Col: 82}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "\" placeholder=\"VM name\"></div><div class=\"md:col-span-3 flex gap-2\"><button class=\"web3-button active\" type=\"submit\">Load VM Trace</button> <a class=\"web3-button\" href=\"/vm/trace\">Clear</a></div></form></section><section class=\"web2-card\"><div class=\"flex items-center justify-between gap-3 mb-4 flex-wrap\"><h2 class=\"text-lg font-semibold\">Snapshot Timeline</h2><span class=\"web2-badge\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "\" placeholder=\"VM name\"></div><div class=\"web2-form-actions web2-form-actions-full\"><button class=\"web3-button active\" type=\"submit\">Load VM Trace</button> <a class=\"web3-button\" href=\"/vm/trace\">Clear</a></div></form>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(len(entries))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 78, Col: 44}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
templ_7745c5c3_Err = core.SegmentedActions(
[]core.SegmentedLink{
{
Label: "Hourly Detail",
Href: meta.HourlyLink,
Class: meta.HourlyClass,
},
{
Label: "Daily Aggregated",
Href: meta.DailyLink,
Class: meta.DailyClass,
},
},
).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, " samples</span></div>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "</section><section class=\"web2-card\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if chart.PointsVcpu != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "<div class=\"mb-6 overflow-auto\"><svg width=\"100%\" height=\"360\" viewBox=\"")
templ_7745c5c3_Err = core.SectionHead(meta.TypeLabel+" Timeline", fmt.Sprintf("%d samples", len(entries))).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if chart.ConfigJSON != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "<div class=\"mb-6 overflow-auto\"><div class=\"web3-chart-frame\"><canvas id=\"vm-trace-chart\" class=\"web3-chart-canvas\" role=\"img\" aria-label=\"VM timeline\" data-chart-config=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 string
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs("0 0 " + fmt.Sprintf("%d", chart.Width) + " 320")
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(chart.ConfigJSON)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 82, Col: 95}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 104, Col: 134}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "\" role=\"img\" aria-label=\"VM timeline\"><rect x=\"40\" y=\"10\" width=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "\"></canvas><div id=\"vm-trace-tooltip\" class=\"web3-chart-tooltip\" aria-hidden=\"true\"></div></div><script>\n\t\t\t\t\t\t\t\twindow.Web3Charts.renderFromDataset({\n\t\t\t\t\t\t\t\t\tcanvasId: \"vm-trace-chart\",\n\t\t\t\t\t\t\t\t\ttooltipId: \"vm-trace-tooltip\",\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t</script></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-60))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 83, Col: 68}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "<div class=\"grid gap-3 md:grid-cols-2 mb-4\"><div class=\"web2-subcard\"><p class=\"web2-subcard-label\">Creation Time</p><p class=\"web2-subcard-value\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 string
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(creationLabel)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 118, Col: 52}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "</p>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if creationApprox {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "<p class=\"web2-muted web2-caption mt-1\">Approximate (earliest snapshot)</p>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "\" height=\"")
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "</div><div class=\"web2-subcard\"><p class=\"web2-subcard-label\">Deletion Time</p><p class=\"web2-subcard-value\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(deletionLabel)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 125, Col: 52}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "</p></div></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if diagnostics.Visible && len(diagnostics.Lines) > 0 {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "<details class=\"web2-subcard mb-4\"><summary class=\"web2-details-summary\">Lifecycle diagnostics</summary><div class=\"mt-3 web2-table-shell\"><table class=\"web2-table\"><tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var9 string
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 83, Col: 109}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "\" fill=\"white\" stroke=\"#e2e8f0\"></rect> <g stroke=\"#e2e8f0\" stroke-width=\"1\" stroke-dasharray=\"2,4\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, y := range chart.GridY {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "<line x1=\"40\" y1=\"")
for _, line := range diagnostics.Lines {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "<tr><td class=\"font-semibold w-72\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var9 string
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(line.Label)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 136, Col: 55}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "</td><td class=\"web2-muted\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var10 string
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", y))
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(line.Value)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 86, Col: 50}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 137, Col: 47}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "\" x2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var11 string
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-20))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 86, Col: 89}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "\" y2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var12 string
templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", y))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 86, Col: 117}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "\"></line> ")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
for _, x := range chart.GridX {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "<line x1=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", x))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 89, Col: 42}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "\" y1=\"10\" x2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", x))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 89, Col: 78}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "\" y2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 89, Col: 118}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "\"></line>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "</tbody></table></div></details>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "</g> <line x1=\"40\" y1=\"")
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "<div class=\"web2-table-shell\"><table class=\"web2-table\"><thead><tr><th>Snapshot</th><th>VM Name</th><th>VmId</th><th>VmUuid</th><th>Vcenter</th><th>Resource Pool</th><th class=\"text-right\">vCPUs</th><th class=\"text-right\">RAM (GB)</th><th class=\"text-right\">Disk</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, e := range entries {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "<tr><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var11 string
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(e.Snapshot)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 163, Col: 26}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var12 string
templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(e.Name)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 164, Col: 22}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(e.VmId)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 165, Col: 22}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(e.VmUuid)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 166, Col: 24}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(e.Vcenter)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 167, Col: 25}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(e.ResourcePool)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 92, Col: 60}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 168, Col: 30}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "\" x2=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-20))
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(e.VcpuCount)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 92, Col: 99}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 169, Col: 46}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "\" y2=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var18 string
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(e.RamGB)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 92, Col: 139}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 170, Col: 42}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "\" stroke=\"#94a3b8\" stroke-width=\"1.5\"></line> <line x1=\"40\" y1=\"10\" x2=\"40\" y2=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var19 string
templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", e.ProvisionedDisk))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 93, Col: 76}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 171, Col: 73}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "\" stroke=\"#94a3b8\" stroke-width=\"1.5\"></line> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var20 string
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsVcpu)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 94, Col: 42}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "\" fill=\"none\" stroke=\"#2563eb\" stroke-width=\"2.5\"></polyline> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var21 string
templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsRam)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 95, Col: 41}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "\" fill=\"none\" stroke=\"#16a34a\" stroke-width=\"2.5\"></polyline> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var22 string
templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsTin)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 96, Col: 41}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "\" fill=\"none\" stroke=\"#0ea5e9\" stroke-width=\"1.5\" stroke-dasharray=\"4,4\"></polyline> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var23 string
templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsBronze)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 97, Col: 44}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "\" fill=\"none\" stroke=\"#a855f7\" stroke-width=\"1.5\" stroke-dasharray=\"4,4\"></polyline> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var24 string
templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsSilver)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 98, Col: 44}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "\" fill=\"none\" stroke=\"#94a3b8\" stroke-width=\"1.5\" stroke-dasharray=\"4,4\"></polyline> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var25 string
templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsGold)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 99, Col: 42}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "\" fill=\"none\" stroke=\"#f59e0b\" stroke-width=\"1.5\" stroke-dasharray=\"4,4\"></polyline> <g font-size=\"10\" fill=\"#475569\" text-anchor=\"end\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, tick := range chart.YTicks {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "<text x=\"36\" y=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var26 string
templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", tick.Pos+3))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 102, Col: 57}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var27 string
templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(tick.Label)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 102, Col: 70}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</text>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "</g> <g font-size=\"10\" fill=\"#475569\" text-anchor=\"middle\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, tick := range chart.XTicks {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "<text x=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var28 string
templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", tick.Pos))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 107, Col: 48}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "\" y=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var29 string
templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+24))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 107, Col: 87}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var30 string
templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(tick.Label)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 107, Col: 100}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "</text>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "</g> <g font-size=\"12\" fill=\"#475569\" transform=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var31 string
templ_7745c5c3_Var31, templ_7745c5c3_Err = templ.JoinStringErrs("translate(40 " + fmt.Sprintf("%d", chart.Height+50) + ")")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 110, Col: 110}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var31))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "\"><rect x=\"0\" y=\"0\" width=\"14\" height=\"8\" fill=\"#2563eb\"></rect><text x=\"22\" y=\"12\">vCPU</text> <rect x=\"90\" y=\"0\" width=\"14\" height=\"8\" fill=\"#16a34a\"></rect><text x=\"112\" y=\"12\">RAM (GB)</text> <rect x=\"200\" y=\"0\" width=\"14\" height=\"8\" fill=\"#0ea5e9\"></rect><text x=\"222\" y=\"12\">Tin</text> <rect x=\"260\" y=\"0\" width=\"14\" height=\"8\" fill=\"#a855f7\"></rect><text x=\"282\" y=\"12\">Bronze</text> <rect x=\"340\" y=\"0\" width=\"14\" height=\"8\" fill=\"#94a3b8\"></rect><text x=\"362\" y=\"12\">Silver</text> <rect x=\"420\" y=\"0\" width=\"14\" height=\"8\" fill=\"#f59e0b\"></rect><text x=\"442\" y=\"12\">Gold</text></g> <text x=\"15\" y=\"20\" transform=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var32 string
templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs("rotate(-90 15 20)")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 118, Col: 58}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "\" font-size=\"12\" fill=\"#475569\">Resources / Pool</text> <text x=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var33 string
templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width/2))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 119, Col: 49}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "\" y=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var34 string
templ_7745c5c3_Var34, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+70))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 119, Col: 88}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var34))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "\" font-size=\"12\" fill=\"#475569\">Snapshots (oldest left, newest right)</text></svg></div>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "<div class=\"grid gap-3 md:grid-cols-2 mb-4\"><div class=\"web2-card\"><p class=\"text-xs uppercase tracking-[0.15em] text-slate-500\">Creation time</p><p class=\"mt-2 text-base font-semibold text-slate-800\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var35 string
templ_7745c5c3_Var35, templ_7745c5c3_Err = templ.JoinStringErrs(creationLabel)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 126, Col: 76}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var35))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "</p></div><div class=\"web2-card\"><p class=\"text-xs uppercase tracking-[0.15em] text-slate-500\">Deletion time</p><p class=\"mt-2 text-base font-semibold text-slate-800\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var36 string
templ_7745c5c3_Var36, templ_7745c5c3_Err = templ.JoinStringErrs(deletionLabel)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 130, Col: 76}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var36))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "</p></div></div><div class=\"overflow-hidden border border-slate-200 rounded\"><table class=\"web2-table\"><thead><tr><th>Snapshot</th><th>VM Name</th><th>VmId</th><th>VmUuid</th><th>Vcenter</th><th>Resource Pool</th><th class=\"text-right\">vCPUs</th><th class=\"text-right\">RAM (GB)</th><th class=\"text-right\">Disk</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, e := range entries {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "<tr><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var37 string
templ_7745c5c3_Var37, templ_7745c5c3_Err = templ.JoinStringErrs(e.Snapshot)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 151, Col: 25}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var37))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var38 string
templ_7745c5c3_Var38, templ_7745c5c3_Err = templ.JoinStringErrs(e.Name)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 152, Col: 21}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var38))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var39 string
templ_7745c5c3_Var39, templ_7745c5c3_Err = templ.JoinStringErrs(e.VmId)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 153, Col: 21}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var39))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var40 string
templ_7745c5c3_Var40, templ_7745c5c3_Err = templ.JoinStringErrs(e.VmUuid)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 154, Col: 23}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var40))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var41 string
templ_7745c5c3_Var41, templ_7745c5c3_Err = templ.JoinStringErrs(e.Vcenter)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 155, Col: 24}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var41))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var42 string
templ_7745c5c3_Var42, templ_7745c5c3_Err = templ.JoinStringErrs(e.ResourcePool)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 156, Col: 29}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var42))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var43 string
templ_7745c5c3_Var43, templ_7745c5c3_Err = templ.JoinStringErrs(e.VcpuCount)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 157, Col: 45}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var43))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var44 string
templ_7745c5c3_Var44, templ_7745c5c3_Err = templ.JoinStringErrs(e.RamGB)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 158, Col: 41}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var44))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var45 string
templ_7745c5c3_Var45, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", e.ProvisionedDisk))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 159, Col: 72}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var45))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "</tbody></table></div></section></main></body>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "</tbody></table></div></section></main></body>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -708,7 +416,7 @@ func VmTracePage(query string, display_query string, vm_id string, vm_uuid strin
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "</html>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</html>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
+64 -11
View File
@@ -23,8 +23,9 @@ type Database interface {
}
type Config struct {
Driver string
DSN string
Driver string
DSN string
EnableExperimentalPostgres bool
}
func New(logger *slog.Logger, cfg Config) (Database, error) {
@@ -40,6 +41,10 @@ func New(logger *slog.Logger, cfg Config) (Database, error) {
}
return db, nil
case "postgres":
// The sqlc query set is SQLite-first. Keep Postgres opt-in until full parity is validated.
if !cfg.EnableExperimentalPostgres {
return nil, fmt.Errorf("postgres driver is disabled by default; set settings.enable_experimental_postgres=true to enable experimental mode")
}
db, err := newPostgresDB(logger, cfg.DSN)
if err != nil {
return nil, err
@@ -114,8 +119,56 @@ func normalizeDriver(driver string) string {
}
}
// ResolveDriver determines the effective database driver.
// If driver is unset and DSN looks like PostgreSQL, it infers postgres.
func ResolveDriver(configuredDriver, dsn string) (driver string, inferredFromDSN bool, err error) {
	// Canonicalize the configured driver name, accepting common aliases.
	name := strings.ToLower(strings.TrimSpace(configuredDriver))
	if name == "sqlite3" {
		name = "sqlite"
	} else if name == "postgresql" {
		name = "postgres"
	}

	switch {
	case name == "" && looksLikePostgresDSN(dsn):
		// Nothing configured, but the DSN is unmistakably postgres.
		return "postgres", true, nil
	case name == "":
		// Default to sqlite when neither driver nor DSN says otherwise.
		return "sqlite", false, nil
	case name == "sqlite" && looksLikePostgresDSN(dsn):
		// Reject the mismatch rather than silently opening sqlite with a postgres URL.
		return "", false, fmt.Errorf("database_driver is sqlite but database_url looks like a postgres DSN; set settings.database_driver=postgres")
	}
	return name, false, nil
}
// looksLikePostgresDSN reports whether dsn resembles a PostgreSQL connection
// string, either in URI form (postgres:// or postgresql://) or in keyword
// key=value form (host=… user=… dbname=… sslmode=…).
func looksLikePostgresDSN(dsn string) bool {
	s := strings.ToLower(strings.TrimSpace(dsn))
	if s == "" {
		return false
	}
	for _, scheme := range []string{"postgres://", "postgresql://"} {
		if strings.HasPrefix(s, scheme) {
			return true
		}
	}
	// Key=value style PostgreSQL DSNs: require at least two of the typical
	// keywords before treating the string as postgres, so plain file paths
	// and sqlite URIs with query parameters are not misclassified.
	if !strings.Contains(s, "=") {
		return false
	}
	hasHost := strings.Contains(s, "host=")
	hasUser := strings.Contains(s, "user=")
	hasDB := strings.Contains(s, "dbname=")
	hasSSL := strings.Contains(s, "sslmode=")
	return (hasHost && hasUser) || (hasHost && hasDB) || (hasUser && hasDB) || (hasHost && hasSSL)
}
// ConvertToSQLParams is a utility function that generically converts a struct to a corresponding sqlc-generated struct
func ConvertToSQLParams(input interface{}, output interface{}) {
func ConvertToSQLParams(input any, output any) {
inputVal := reflect.ValueOf(input).Elem()
outputVal := reflect.ValueOf(output).Elem()
@@ -129,15 +182,15 @@ func ConvertToSQLParams(input interface{}, output interface{}) {
// Handle fields of type sql.NullString, sql.NullInt64, and normal string/int64 fields
switch outputField.Type() {
case reflect.TypeOf(sql.NullString{}):
case reflect.TypeFor[sql.NullString]():
// Handle sql.NullString
if inputField.Kind() == reflect.Ptr && inputField.IsNil() {
if inputField.Kind() == reflect.Pointer && inputField.IsNil() {
outputField.Set(reflect.ValueOf(sql.NullString{Valid: false}))
} else {
outputField.Set(reflect.ValueOf(sql.NullString{String: inputField.String(), Valid: true}))
}
case reflect.TypeOf(sql.NullInt64{}):
case reflect.TypeFor[sql.NullInt64]():
// Handle sql.NullInt64
if inputField.Int() == 0 {
outputField.Set(reflect.ValueOf(sql.NullInt64{Valid: false}))
@@ -145,7 +198,7 @@ func ConvertToSQLParams(input interface{}, output interface{}) {
outputField.Set(reflect.ValueOf(sql.NullInt64{Int64: inputField.Int(), Valid: true}))
}
case reflect.TypeOf(sql.NullFloat64{}):
case reflect.TypeFor[sql.NullFloat64]():
// Handle sql.NullFloat64
if inputField.Float() == 0 {
outputField.Set(reflect.ValueOf(sql.NullFloat64{Valid: false}))
@@ -153,19 +206,19 @@ func ConvertToSQLParams(input interface{}, output interface{}) {
outputField.Set(reflect.ValueOf(sql.NullFloat64{Float64: inputField.Float(), Valid: true}))
}
case reflect.TypeOf(""):
case reflect.TypeFor[string]():
// Handle normal string fields
if inputField.Kind() == reflect.Ptr && inputField.IsNil() {
if inputField.Kind() == reflect.Pointer && inputField.IsNil() {
outputField.SetString("") // Set to empty string if input is nil
} else {
outputField.SetString(inputField.String())
}
case reflect.TypeOf(int64(0)):
case reflect.TypeFor[int64]():
// Handle normal int64 fields
outputField.SetInt(inputField.Int())
case reflect.TypeOf(float64(0)):
case reflect.TypeFor[float64]():
// Handle normal float64 fields
outputField.SetFloat(inputField.Float())
+78
View File
@@ -0,0 +1,78 @@
package db
import "testing"
// TestResolveDriver covers explicit drivers, aliases, DSN-based inference,
// the sqlite default, and the sqlite/postgres mismatch rejection.
func TestResolveDriver(t *testing.T) {
	cases := []struct {
		name             string
		configuredDriver string
		dsn              string
		wantDriver       string
		wantInferred     bool
		wantErr          bool
	}{
		{
			name:             "explicit postgres uri",
			configuredDriver: "postgres",
			dsn:              "postgres://user:pass@localhost:5432/vctp?sslmode=disable",
			wantDriver:       "postgres",
		},
		{
			name:             "postgresql alias",
			configuredDriver: "postgresql",
			dsn:              "postgres://user:pass@localhost:5432/vctp?sslmode=disable",
			wantDriver:       "postgres",
		},
		{
			name:         "infer postgres uri",
			dsn:          "postgres://user:pass@localhost:5432/vctp?sslmode=disable",
			wantDriver:   "postgres",
			wantInferred: true,
		},
		{
			name:         "infer postgres key value dsn",
			dsn:          "host=localhost port=5432 user=postgres password=secret dbname=vctp sslmode=disable",
			wantDriver:   "postgres",
			wantInferred: true,
		},
		{
			name:       "default sqlite",
			dsn:        "/var/lib/vctp/db.sqlite3",
			wantDriver: "sqlite",
		},
		{
			name:             "sqlite alias",
			configuredDriver: "sqlite3",
			dsn:              "/var/lib/vctp/db.sqlite3",
			wantDriver:       "sqlite",
		},
		{
			name:             "reject sqlite postgres mismatch",
			configuredDriver: "sqlite",
			dsn:              "postgres://user:pass@localhost:5432/vctp?sslmode=disable",
			wantErr:          true,
		},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			driver, inferred, err := ResolveDriver(tt.configuredDriver, tt.dsn)
			// Error cases only assert that an error is present.
			switch {
			case tt.wantErr && err == nil:
				t.Fatalf("expected error, got nil")
			case tt.wantErr:
				return
			case err != nil:
				t.Fatalf("unexpected error: %v", err)
			}
			if driver != tt.wantDriver {
				t.Fatalf("driver mismatch: got %q want %q", driver, tt.wantDriver)
			}
			if inferred != tt.wantInferred {
				t.Fatalf("inferred mismatch: got %t want %t", inferred, tt.wantInferred)
			}
		})
	}
}
+2317 -101
View File
File diff suppressed because it is too large Load Diff
+742
View File
@@ -0,0 +1,742 @@
package db
import (
"context"
"database/sql"
"errors"
"fmt"
"testing"
"time"
"github.com/jmoiron/sqlx"
_ "modernc.org/sqlite"
)
// newTestSQLiteDB opens an in-memory SQLite database for a single test and
// registers a cleanup that closes it when the test finishes.
func newTestSQLiteDB(t *testing.T) *sqlx.DB {
	t.Helper()
	conn, err := sqlx.Open("sqlite", ":memory:")
	if err != nil {
		t.Fatalf("failed to open sqlite test db: %v", err)
	}
	t.Cleanup(func() { _ = conn.Close() })
	return conn
}
// indexExists reports whether an index with the given name exists in the
// test database; query failures abort the test.
func indexExists(t *testing.T, dbConn *sqlx.DB, name string) bool {
	t.Helper()
	var n int
	err := dbConn.Get(&n, `SELECT COUNT(1) FROM sqlite_master WHERE type='index' AND name=?`, name)
	if err != nil {
		t.Fatalf("failed to query index %s: %v", name, err)
	}
	return n > 0
}
// TestEnsureOncePerDBRetriesUntilSuccess verifies ensureOncePerDB semantics:
// a failure is not cached (the callback runs again on the next call), while a
// success is cached (subsequent calls skip the callback).
func TestEnsureOncePerDBRetriesUntilSuccess(t *testing.T) {
	dbConn := newTestSQLiteDB(t)

	calls := 0
	run := func() error {
		calls++
		// Fail only on the very first invocation.
		if calls == 1 {
			return errors.New("transient failure")
		}
		return nil
	}

	// First call: the callback fails, so ensureOncePerDB must surface an error.
	if err := ensureOncePerDB(dbConn, "test_once", run); err == nil {
		t.Fatal("expected first ensureOncePerDB call to fail")
	}
	if calls != 1 {
		t.Fatalf("expected 1 attempt after first call, got %d", calls)
	}
	// Second call: failure was not cached; the callback retries and succeeds.
	if err := ensureOncePerDB(dbConn, "test_once", run); err != nil {
		t.Fatalf("expected second ensureOncePerDB call to succeed, got %v", err)
	}
	if calls != 2 {
		t.Fatalf("expected 2 attempts after retry, got %d", calls)
	}
	// Third call: success is cached; the callback must not run again.
	if err := ensureOncePerDB(dbConn, "test_once", run); err != nil {
		t.Fatalf("expected third ensureOncePerDB call to reuse success, got %v", err)
	}
	if calls != 2 {
		t.Fatalf("expected no additional attempts after success, got %d", calls)
	}
}
// TestCleanupHourlySnapshotIndexesOlderThan verifies that the cleanup routine
// drops only the indexes belonging to hourly snapshot tables whose embedded
// unix timestamp is older than the cutoff, leaving newer tables' indexes intact.
func TestCleanupHourlySnapshotIndexesOlderThan(t *testing.T) {
	ctx := context.Background()
	dbConn := newTestSQLiteDB(t)
	// Table names embed the snapshot unix time: one before, one after the cutoff.
	oldTable := "inventory_hourly_1700000000"
	newTable := "inventory_hourly_1800000000"
	for _, table := range []string{oldTable, newTable} {
		if err := EnsureSnapshotTable(ctx, dbConn, table); err != nil {
			t.Fatalf("failed to create snapshot table %s: %v", table, err)
		}
		// Add the two extra per-table indexes the cleanup is expected to handle.
		// NOTE(review): the dropped==3 assertion below implies EnsureSnapshotTable
		// also creates the *_vm_vcenter_idx index — confirm against its implementation.
		if _, err := dbConn.ExecContext(ctx, fmt.Sprintf(`CREATE INDEX IF NOT EXISTS %s_snapshottime_idx ON %s ("SnapshotTime")`, table, table)); err != nil {
			t.Fatalf("failed to create snapshottime index for %s: %v", table, err)
		}
		if _, err := dbConn.ExecContext(ctx, fmt.Sprintf(`CREATE INDEX IF NOT EXISTS %s_resourcepool_idx ON %s ("ResourcePool")`, table, table)); err != nil {
			t.Fatalf("failed to create resourcepool index for %s: %v", table, err)
		}
	}
	// Cutoff sits between the two snapshot timestamps (1.7e9 < 1.75e9 < 1.8e9).
	cutoff := time.Unix(1750000000, 0)
	dropped, err := CleanupHourlySnapshotIndexesOlderThan(ctx, dbConn, cutoff)
	if err != nil {
		t.Fatalf("cleanup failed: %v", err)
	}
	// Exactly the three indexes of the old table should have been dropped.
	if dropped != 3 {
		t.Fatalf("expected 3 old indexes dropped, got %d", dropped)
	}
	oldIndexes := []string{
		oldTable + "_vm_vcenter_idx",
		oldTable + "_snapshottime_idx",
		oldTable + "_resourcepool_idx",
	}
	for _, idx := range oldIndexes {
		if indexExists(t, dbConn, idx) {
			t.Fatalf("expected old index %s to be removed", idx)
		}
	}
	newIndexes := []string{
		newTable + "_vm_vcenter_idx",
		newTable + "_snapshottime_idx",
		newTable + "_resourcepool_idx",
	}
	for _, idx := range newIndexes {
		if !indexExists(t, dbConn, idx) {
			t.Fatalf("expected recent index %s to remain", idx)
		}
	}
}
// TestFetchVmTraceAndLifecycleUseCacheTables seeds vm_hourly_stats and
// vm_lifecycle_cache directly, then checks that FetchVmTrace and
// FetchVmLifecycle answer from those cache tables — by VmId, by
// (case-insensitive) name, and for the empty-identifier edge case.
func TestFetchVmTraceAndLifecycleUseCacheTables(t *testing.T) {
	ctx := context.Background()
	dbConn := newTestSQLiteDB(t)
	if err := EnsureVmHourlyStats(ctx, dbConn); err != nil {
		t.Fatalf("failed to ensure vm_hourly_stats: %v", err)
	}
	if err := EnsureVmLifecycleCache(ctx, dbConn); err != nil {
		t.Fatalf("failed to ensure vm_lifecycle_cache: %v", err)
	}
	insertSQL := `
	INSERT INTO vm_hourly_stats (
		"SnapshotTime","Vcenter","VmId","VmUuid","Name","CreationTime","DeletionTime","ResourcePool",
		"Datacenter","Cluster","Folder","ProvisionedDisk","VcpuCount","RamGB","IsTemplate","PoweredOn","SrmPlaceholder"
	) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
	`
	// Two hourly samples for the same VM: the later one moves it from pool
	// "Tin" to "Gold" with larger resource values.
	rows := [][]any{
		{int64(1000), "vc-a", "vm-1", "uuid-1", "demo-vm", int64(900), int64(0), "Tin", "dc", "cluster", "folder", 100.0, int64(2), int64(4), "FALSE", "TRUE", "FALSE"},
		{int64(2000), "vc-a", "vm-1", "uuid-1", "demo-vm", int64(900), int64(0), "Gold", "dc", "cluster", "folder", 150.0, int64(4), int64(8), "FALSE", "TRUE", "FALSE"},
	}
	for _, args := range rows {
		if _, err := dbConn.ExecContext(ctx, insertSQL, args...); err != nil {
			t.Fatalf("failed to insert hourly cache row: %v", err)
		}
	}
	// Lifecycle cache: first-seen 1000 with an exact creation time of 900,
	// then the VM is marked deleted at 2500.
	if err := UpsertVmLifecycleCache(ctx, dbConn, "vc-a", "vm-1", "uuid-1", "demo-vm", "cluster", time.Unix(1000, 0), sql.NullInt64{Int64: 900, Valid: true}); err != nil {
		t.Fatalf("failed to upsert lifecycle cache: %v", err)
	}
	if err := MarkVmDeletedWithDetails(ctx, dbConn, "vc-a", "vm-1", "uuid-1", "demo-vm", "cluster", 2500); err != nil {
		t.Fatalf("failed to mark vm deleted: %v", err)
	}
	// Trace lookup by VmId returns both samples, sorted by snapshot time.
	traceRows, err := FetchVmTrace(ctx, dbConn, "vm-1", "", "")
	if err != nil {
		t.Fatalf("FetchVmTrace failed: %v", err)
	}
	if len(traceRows) != 2 {
		t.Fatalf("expected 2 trace rows, got %d", len(traceRows))
	}
	if traceRows[0].SnapshotTime != 1000 || traceRows[1].SnapshotTime != 2000 {
		t.Fatalf("trace rows are not sorted by snapshot time: %#v", traceRows)
	}
	// Name lookup is case-insensitive ("DEMO-VM" matches "demo-vm").
	traceRowsByName, err := FetchVmTrace(ctx, dbConn, "", "", "DEMO-VM")
	if err != nil {
		t.Fatalf("FetchVmTrace by name failed: %v", err)
	}
	if len(traceRowsByName) != 2 {
		t.Fatalf("expected 2 trace rows by name, got %d", len(traceRowsByName))
	}
	// All-empty identifiers must yield no rows rather than an error.
	emptyTraceRows, err := FetchVmTrace(ctx, dbConn, "", "", "")
	if err != nil {
		t.Fatalf("FetchVmTrace with empty identifier failed: %v", err)
	}
	if len(emptyTraceRows) != 0 {
		t.Fatalf("expected 0 trace rows for empty identifier, got %d", len(emptyTraceRows))
	}
	// Lifecycle merges the hourly cache with the lifecycle cache: earliest
	// creation from the lifecycle cache, latest sample as LastSeen, and the
	// explicit deletion marker from MarkVmDeletedWithDetails.
	lifecycle, err := FetchVmLifecycle(ctx, dbConn, "vm-1", "", "")
	if err != nil {
		t.Fatalf("FetchVmLifecycle failed: %v", err)
	}
	if lifecycle.FirstSeen != 900 {
		t.Fatalf("expected FirstSeen=900 (earliest known from lifecycle cache), got %d", lifecycle.FirstSeen)
	}
	if lifecycle.LastSeen != 2000 {
		t.Fatalf("expected LastSeen=2000, got %d", lifecycle.LastSeen)
	}
	if lifecycle.CreationTime != 900 || lifecycle.CreationApprox {
		t.Fatalf("expected exact CreationTime=900, got time=%d approx=%v", lifecycle.CreationTime, lifecycle.CreationApprox)
	}
	if lifecycle.DeletionTime != 2500 {
		t.Fatalf("expected DeletionTime=2500 from lifecycle cache, got %d", lifecycle.DeletionTime)
	}
	// Same lifecycle answer when looked up by (case-insensitive) name.
	lifecycleByName, err := FetchVmLifecycle(ctx, dbConn, "", "", "DEMO-VM")
	if err != nil {
		t.Fatalf("FetchVmLifecycle by name failed: %v", err)
	}
	if lifecycleByName.FirstSeen != 900 || lifecycleByName.LastSeen != 2000 {
		t.Fatalf("unexpected lifecycle for name lookup: %#v", lifecycleByName)
	}
	// Empty identifiers yield a zero-valued lifecycle.
	emptyLifecycle, err := FetchVmLifecycle(ctx, dbConn, "", "", "")
	if err != nil {
		t.Fatalf("FetchVmLifecycle with empty identifier failed: %v", err)
	}
	if emptyLifecycle.FirstSeen != 0 || emptyLifecycle.LastSeen != 0 || emptyLifecycle.CreationTime != 0 || emptyLifecycle.DeletionTime != 0 {
		t.Fatalf("expected empty lifecycle for empty identifier, got %#v", emptyLifecycle)
	}
}
// TestFetchVmTraceDailyFromRollup verifies that FetchVmTraceDaily serves daily
// trace rows from the vm_daily_rollup table, surfacing each day's Last* column
// values and keeping the rows in day order.
func TestFetchVmTraceDailyFromRollup(t *testing.T) {
	ctx := context.Background()
	dbConn := newTestSQLiteDB(t)
	if err := EnsureVmDailyRollup(ctx, dbConn); err != nil {
		t.Fatalf("failed to ensure vm_daily_rollup: %v", err)
	}
	// Day 1: last observed state is pool "Tin" with 4 vCPU / 8 GB RAM.
	if err := UpsertVmDailyRollup(ctx, dbConn, 1700000000, VmDailyRollupRow{
		Vcenter:          "vc-a",
		VmId:             "vm-1",
		VmUuid:           "uuid-1",
		Name:             "demo-vm",
		CreationTime:     1699999000,
		SamplesPresent:   8,
		SumVcpu:          32,
		SumRam:           64,
		LastVcpuCount:    4,
		LastRamGB:        8,
		LastResourcePool: "Tin",
	}); err != nil {
		t.Fatalf("failed to insert daily rollup row 1: %v", err)
	}
	// Day 2 (next 86400s bucket): pool changed to "Gold", resources grew,
	// and a provisioned-disk figure is present.
	if err := UpsertVmDailyRollup(ctx, dbConn, 1700086400, VmDailyRollupRow{
		Vcenter:             "vc-a",
		VmId:                "vm-1",
		VmUuid:              "uuid-1",
		Name:                "demo-vm",
		CreationTime:        1699999000,
		SamplesPresent:      4,
		SumVcpu:             20,
		SumRam:              36,
		LastVcpuCount:       5,
		LastRamGB:           9,
		LastResourcePool:    "Gold",
		LastProvisionedDisk: 150.5,
	}); err != nil {
		t.Fatalf("failed to insert daily rollup row 2: %v", err)
	}
	rows, err := FetchVmTraceDaily(ctx, dbConn, "vm-1", "", "")
	if err != nil {
		t.Fatalf("FetchVmTraceDaily failed: %v", err)
	}
	if len(rows) != 2 {
		t.Fatalf("expected 2 daily trace rows, got %d", len(rows))
	}
	// The trace exposes the Last* values of each day's rollup, not the sums.
	if rows[0].SnapshotTime != 1700000000 || rows[0].VcpuCount != 4 || rows[0].RamGB != 8 {
		t.Fatalf("unexpected first daily row: %#v", rows[0])
	}
	if rows[1].SnapshotTime != 1700086400 || rows[1].VcpuCount != 5 || rows[1].RamGB != 9 || rows[1].ProvisionedDisk != 150.5 {
		t.Fatalf("unexpected second daily row: %#v", rows[1])
	}
}
// TestFetchVmTraceDailyFallbackToSummaryTables exercises the fallback path of
// FetchVmTraceDaily: with no vm_daily_rollup data, rows are reconstructed from
// legacy inventory_daily_summary_* tables found via snapshot_registry.
func TestFetchVmTraceDailyFallbackToSummaryTables(t *testing.T) {
	ctx := context.Background()
	dbConn := newTestSQLiteDB(t)
	// Minimal snapshot_registry schema used by the fallback lookup.
	if _, err := dbConn.ExecContext(ctx, `
	CREATE TABLE snapshot_registry (
		snapshot_type TEXT,
		table_name TEXT,
		snapshot_time BIGINT
	)`); err != nil {
		t.Fatalf("failed to create snapshot_registry: %v", err)
	}
	// Legacy daily summary table with averaged per-VM values.
	summaryTable := "inventory_daily_summary_20260106"
	if _, err := dbConn.ExecContext(ctx, fmt.Sprintf(`
	CREATE TABLE %s (
		"Name" TEXT,
		"Vcenter" TEXT,
		"VmId" TEXT,
		"VmUuid" TEXT,
		"ResourcePool" TEXT,
		"AvgVcpuCount" REAL,
		"AvgRamGB" REAL,
		"AvgProvisionedDisk" REAL,
		"CreationTime" BIGINT,
		"DeletionTime" BIGINT
	)`, summaryTable)); err != nil {
		t.Fatalf("failed to create summary table: %v", err)
	}
	if _, err := dbConn.ExecContext(ctx, fmt.Sprintf(`
	INSERT INTO %s ("Name","Vcenter","VmId","VmUuid","ResourcePool","AvgVcpuCount","AvgRamGB","AvgProvisionedDisk","CreationTime","DeletionTime")
	VALUES (?,?,?,?,?,?,?,?,?,?)
	`, summaryTable), "demo-vm", "vc-a", "vm-1", "uuid-1", "Silver", 3.2, 6.7, 123.4, int64(1699999000), int64(0)); err != nil {
		t.Fatalf("failed to insert summary row: %v", err)
	}
	// Register the summary table as a "daily" snapshot so the fallback can find it.
	if _, err := dbConn.ExecContext(ctx, `INSERT INTO snapshot_registry (snapshot_type, table_name, snapshot_time) VALUES (?,?,?)`, "daily", summaryTable, int64(1700500000)); err != nil {
		t.Fatalf("failed to insert snapshot_registry row: %v", err)
	}
	// Lookup by VmUuid; expect one row stamped with the registry snapshot time.
	rows, err := FetchVmTraceDaily(ctx, dbConn, "", "uuid-1", "")
	if err != nil {
		t.Fatalf("FetchVmTraceDaily fallback failed: %v", err)
	}
	if len(rows) != 1 {
		t.Fatalf("expected 1 fallback daily row, got %d", len(rows))
	}
	// Averages are converted to integer counts (3.2 -> 3, 6.7 -> 6), i.e.
	// truncated rather than rounded, per the assertions below.
	if rows[0].SnapshotTime != 1700500000 || rows[0].VcpuCount != 3 || rows[0].RamGB != 6 {
		t.Fatalf("unexpected fallback daily row: %#v", rows[0])
	}
}
// TestClearVcenterReferenceCache seeds the folder, resource-pool, and host
// reference caches for one vCenter, then verifies ClearVcenterReferenceCache
// removes every cached row for that vCenter.
func TestClearVcenterReferenceCache(t *testing.T) {
	ctx := context.Background()
	dbConn := newTestSQLiteDB(t)
	if err := EnsureVcenterReferenceCacheTables(ctx, dbConn); err != nil {
		t.Fatalf("failed to ensure vcenter reference cache tables: %v", err)
	}

	// One row per cache table, all for vCenter "vc-a".
	if err := UpsertVcenterFolderCache(ctx, dbConn, "vc-a", "group-v123", "/Datacenters/DC1/vm/Prod", 1000); err != nil {
		t.Fatalf("failed to upsert folder cache: %v", err)
	}
	if err := UpsertVcenterResourcePoolCache(ctx, dbConn, "vc-a", "resgroup-1", "Gold", 1000); err != nil {
		t.Fatalf("failed to upsert resource pool cache: %v", err)
	}
	if err := UpsertVcenterHostCache(ctx, dbConn, "vc-a", "host-123", "Cluster-1", "DC1", 1000); err != nil {
		t.Fatalf("failed to upsert host cache: %v", err)
	}

	if err := ClearVcenterReferenceCache(ctx, dbConn, "vc-a"); err != nil {
		t.Fatalf("failed to clear vcenter reference cache: %v", err)
	}

	// After clearing, every cache table must be empty for the vCenter.
	checks := []struct {
		table string
		label string
	}{
		{table: "vcenter_folder_cache", label: "folder"},
		{table: "vcenter_resource_pool_cache", label: "resource pool"},
		{table: "vcenter_host_cache", label: "host"},
	}
	for _, c := range checks {
		var count int
		query := fmt.Sprintf(`SELECT COUNT(1) FROM %s WHERE "Vcenter" = ?`, c.table)
		if err := dbConn.Get(&count, query, "vc-a"); err != nil {
			t.Fatalf("failed to count %s cache rows: %v", c.label, err)
		}
		if count != 0 {
			t.Fatalf("expected 0 %s cache rows after clear, got %d", c.label, count)
		}
	}
}
// TestFetchVmLifecycleIgnoresStaleDeletionFromHourlyCache verifies that a
// DeletionTime recorded in an older hourly sample is discarded when a newer
// sample shows the VM still present, on both the plain and the
// diagnostics-reporting lifecycle paths.
func TestFetchVmLifecycleIgnoresStaleDeletionFromHourlyCache(t *testing.T) {
	ctx := context.Background()
	dbConn := newTestSQLiteDB(t)
	if err := EnsureVmHourlyStats(ctx, dbConn); err != nil {
		t.Fatalf("failed to ensure vm_hourly_stats: %v", err)
	}
	insertSQL := `
	INSERT INTO vm_hourly_stats (
		"SnapshotTime","Vcenter","VmId","VmUuid","Name","CreationTime","DeletionTime","ResourcePool",
		"Datacenter","Cluster","Folder","ProvisionedDisk","VcpuCount","RamGB","IsTemplate","PoweredOn","SrmPlaceholder"
	) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
	`
	// First row carries an old deletion marker, later row proves VM is still present.
	rows := [][]any{
		{int64(1700000000), "vc-a", "vm-1", "uuid-1", "demo-vm", int64(1699999000), int64(1700003600), "Tin", "dc", "cluster", "folder", 100.0, int64(2), int64(4), "FALSE", "TRUE", "FALSE"},
		{int64(1700100000), "vc-a", "vm-1", "uuid-1", "demo-vm", int64(1699999000), int64(0), "Gold", "dc", "cluster", "folder", 120.0, int64(4), int64(8), "FALSE", "TRUE", "FALSE"},
	}
	for _, args := range rows {
		if _, err := dbConn.ExecContext(ctx, insertSQL, args...); err != nil {
			t.Fatalf("failed to insert hourly cache row: %v", err)
		}
	}
	// Plain path: deletion marker (1700003600) predates the last sighting
	// (1700100000), so it must be treated as stale and dropped.
	lifecycle, err := FetchVmLifecycle(ctx, dbConn, "vm-1", "", "")
	if err != nil {
		t.Fatalf("FetchVmLifecycle failed: %v", err)
	}
	if lifecycle.LastSeen != 1700100000 {
		t.Fatalf("expected LastSeen=1700100000, got %d", lifecycle.LastSeen)
	}
	if lifecycle.DeletionTime != 0 {
		t.Fatalf("expected stale deletion to be ignored, got %d", lifecycle.DeletionTime)
	}
	// Diagnostics path must reach the same conclusion AND flag the discard.
	lifecycleDiag, diag, err := FetchVmLifecycleWithDiagnostics(ctx, dbConn, "vm-1", "", "")
	if err != nil {
		t.Fatalf("FetchVmLifecycleWithDiagnostics failed: %v", err)
	}
	if lifecycleDiag.DeletionTime != 0 {
		t.Fatalf("expected stale deletion to be ignored in diagnostics path, got %d", lifecycleDiag.DeletionTime)
	}
	if !diag.HourlyCache.StaleDeletionIgnored {
		t.Fatalf("expected hourly cache diagnostics to flag stale deletion ignore, got %#v", diag.HourlyCache)
	}
	// The raw (pre-discard) deletion maximum is still reported for debugging.
	if diag.HourlyCache.DeletionMax != 1700003600 {
		t.Fatalf("expected hourly cache deletion max 1700003600, got %d", diag.HourlyCache.DeletionMax)
	}
	if diag.FinalLifecycle.LastSeen != 1700100000 || diag.FinalLifecycle.DeletionTime != 0 {
		t.Fatalf("unexpected final diagnostics lifecycle: %#v", diag.FinalLifecycle)
	}
}
// TestParseHourlySnapshotUnix verifies that hourly snapshot table names are
// parsed into unix timestamps, and that non-hourly or malformed names are
// rejected with a zero value.
func TestParseHourlySnapshotUnix(t *testing.T) {
	testCases := []struct {
		name     string
		wantOK   bool
		wantUnix int64
	}{
		{name: "inventory_hourly_1700000000", wantOK: true, wantUnix: 1700000000},
		{name: "inventory_hourly_bad", wantOK: false, wantUnix: 0},
		{name: "inventory_daily_summary_20260101", wantOK: false, wantUnix: 0},
	}
	for _, tc := range testCases {
		gotUnix, gotOK := parseHourlySnapshotUnix(tc.name)
		if gotOK != tc.wantOK || gotUnix != tc.wantUnix {
			t.Fatalf("parseHourlySnapshotUnix(%q) = (%d,%v), expected (%d,%v)", tc.name, gotUnix, gotOK, tc.wantUnix, tc.wantOK)
		}
	}
}
// TestVcenterLatestTotalsAndListFallback verifies that vcenter_latest_totals
// keeps only the newest snapshot per vcenter, and that ListVcenters reports
// every vcenter with recorded totals.
func TestVcenterLatestTotalsAndListFallback(t *testing.T) {
	ctx := context.Background()
	conn := newTestSQLiteDB(t)

	if err := EnsureVcenterLatestTotalsTable(ctx, conn); err != nil {
		t.Fatalf("failed to ensure vcenter_latest_totals: %v", err)
	}
	if err := InsertVcenterTotals(ctx, conn, "vc-a", time.Unix(200, 0), 10, 20, 30); err != nil {
		t.Fatalf("failed to insert totals for vc-a: %v", err)
	}
	// An older snapshot for the same vcenter must not displace the latest row.
	if err := InsertVcenterTotals(ctx, conn, "vc-a", time.Unix(100, 0), 1, 2, 3); err != nil {
		t.Fatalf("failed to insert older totals for vc-a: %v", err)
	}
	if err := InsertVcenterTotals(ctx, conn, "vc-b", time.Unix(300, 0), 11, 21, 31); err != nil {
		t.Fatalf("failed to insert totals for vc-b: %v", err)
	}

	vcenters, err := ListVcenters(ctx, conn)
	if err != nil {
		t.Fatalf("ListVcenters failed: %v", err)
	}
	listOK := len(vcenters) == 2 && vcenters[0] == "vc-a" && vcenters[1] == "vc-b"
	if !listOK {
		t.Fatalf("unexpected vcenter list: %#v", vcenters)
	}

	// Read back the row retained for vc-a; it must be the newer snapshot.
	var latest struct {
		SnapshotTime int64 `db:"SnapshotTime"`
		VmCount      int64 `db:"VmCount"`
		VcpuTotal    int64 `db:"VcpuTotal"`
		RamTotalGB   int64 `db:"RamTotalGB"`
	}
	latestQuery := `
SELECT "SnapshotTime","VmCount","VcpuTotal","RamTotalGB"
FROM vcenter_latest_totals
WHERE "Vcenter" = ?
`
	if err := conn.GetContext(ctx, &latest, latestQuery, "vc-a"); err != nil {
		t.Fatalf("failed to query latest totals for vc-a: %v", err)
	}
	if latest.SnapshotTime != 200 || latest.VmCount != 10 || latest.VcpuTotal != 20 || latest.RamTotalGB != 30 {
		t.Fatalf("unexpected latest totals for vc-a: %#v", latest)
	}
}
// TestListVcenterHourlyTotalsSince verifies that the since-cutoff listing
// excludes rows older than the cutoff and rows belonging to other vcenters.
func TestListVcenterHourlyTotalsSince(t *testing.T) {
	ctx := context.Background()
	conn := newTestSQLiteDB(t)
	base := time.Unix(1_700_000_000, 0)

	// One row well before the cutoff, one inside it, and one for another vcenter.
	if err := InsertVcenterTotals(ctx, conn, "vc-a", base.AddDate(0, 0, -60), 1, 2, 3); err != nil {
		t.Fatalf("failed to insert old totals: %v", err)
	}
	if err := InsertVcenterTotals(ctx, conn, "vc-a", base.AddDate(0, 0, -10), 10, 20, 30); err != nil {
		t.Fatalf("failed to insert recent totals: %v", err)
	}
	if err := InsertVcenterTotals(ctx, conn, "vc-b", base.AddDate(0, 0, -5), 100, 200, 300); err != nil {
		t.Fatalf("failed to insert other-vcenter totals: %v", err)
	}

	cutoff := base.AddDate(0, 0, -45)
	rows, err := ListVcenterHourlyTotalsSince(ctx, conn, "vc-a", cutoff)
	if err != nil {
		t.Fatalf("ListVcenterHourlyTotalsSince failed: %v", err)
	}
	if got := len(rows); got != 1 {
		t.Fatalf("expected 1 row for vc-a since cutoff, got %d", got)
	}
	row := rows[0]
	if row.SnapshotTime != base.AddDate(0, 0, -10).Unix() || row.VmCount != 10 {
		t.Fatalf("unexpected row returned: %#v", row)
	}
}
// TestInsertVcenterTotalsUpsertsHourlyAggregate verifies that inserting
// vcenter totals also upserts a matching row into the hourly aggregate cache.
func TestInsertVcenterTotalsUpsertsHourlyAggregate(t *testing.T) {
	ctx := context.Background()
	conn := newTestSQLiteDB(t)
	when := time.Unix(1_700_000_500, 0)

	if err := InsertVcenterTotals(ctx, conn, "vc-a", when, 12, 24, 48); err != nil {
		t.Fatalf("InsertVcenterTotals failed: %v", err)
	}

	rows, err := ListVcenterAggregateTotals(ctx, conn, "vc-a", "hourly", 10)
	if err != nil {
		t.Fatalf("ListVcenterAggregateTotals failed: %v", err)
	}
	if got := len(rows); got != 1 {
		t.Fatalf("expected 1 hourly aggregate row, got %d", got)
	}
	agg := rows[0]
	mismatch := agg.SnapshotTime != when.Unix() ||
		agg.VmCount != 12 ||
		agg.VcpuTotal != 24 ||
		agg.RamTotalGB != 48
	if mismatch {
		t.Fatalf("unexpected hourly aggregate row: %#v", agg)
	}
}
// TestListVcenterHourlyTotalsSinceUsesAggregateCache verifies that the hourly
// listing is served from a pre-populated aggregate cache row.
func TestListVcenterHourlyTotalsSinceUsesAggregateCache(t *testing.T) {
	ctx := context.Background()
	conn := newTestSQLiteDB(t)
	base := time.Unix(1_700_000_000, 0)

	// Seed the cache directly instead of going through InsertVcenterTotals.
	if err := UpsertVcenterAggregateTotal(ctx, conn, "hourly", "vc-a", base.Unix(), 7, 14, 21); err != nil {
		t.Fatalf("UpsertVcenterAggregateTotal failed: %v", err)
	}

	cutoff := base.Add(-24 * time.Hour)
	rows, err := ListVcenterHourlyTotalsSince(ctx, conn, "vc-a", cutoff)
	if err != nil {
		t.Fatalf("ListVcenterHourlyTotalsSince failed: %v", err)
	}
	if got := len(rows); got != 1 {
		t.Fatalf("expected 1 cached row, got %d", got)
	}
	cached := rows[0]
	if cached.SnapshotTime != base.Unix() || cached.VmCount != 7 || cached.VcpuTotal != 14 || cached.RamTotalGB != 21 {
		t.Fatalf("unexpected cached hourly row: %#v", cached)
	}
}
// TestReplaceVcenterAggregateTotalsFromSummary verifies that a daily summary
// table is rolled up into per-vcenter aggregate totals: rows are counted per
// vcenter and their Avg* columns summed, recorded under the given snapshot time.
func TestReplaceVcenterAggregateTotalsFromSummary(t *testing.T) {
ctx := context.Background()
dbConn := newTestSQLiteDB(t)
summaryTable := "inventory_daily_summary_20260101"
// Minimal summary-table schema; the rollup only reads a subset of these columns.
if _, err := dbConn.ExecContext(ctx, fmt.Sprintf(`
CREATE TABLE %s (
"Vcenter" TEXT NOT NULL,
"Name" TEXT,
"VmId" TEXT,
"VmUuid" TEXT,
"AvgVcpuCount" REAL,
"VcpuCount" BIGINT,
"AvgRamGB" REAL,
"RamGB" BIGINT
)`, summaryTable)); err != nil {
t.Fatalf("failed to create summary table: %v", err)
}
insertSQL := fmt.Sprintf(`
INSERT INTO %s ("Vcenter","Name","VmId","VmUuid","AvgVcpuCount","AvgRamGB")
VALUES (?,?,?,?,?,?)
`, summaryTable)
// Two VMs for vc-a (expected rollup: count 2, vcpu 2+3=5, ram 4+5=9) and one for vc-b.
rows := [][]any{
{"vc-a", "vm-1", "1", "u1", 2.0, 4.0},
{"vc-a", "vm-2", "2", "u2", 3.0, 5.0},
{"vc-b", "vm-3", "3", "u3", 1.0, 2.0},
}
for _, args := range rows {
if _, err := dbConn.ExecContext(ctx, insertSQL, args...); err != nil {
t.Fatalf("failed to insert summary row: %v", err)
}
}
upserted, err := ReplaceVcenterAggregateTotalsFromSummary(ctx, dbConn, summaryTable, "daily", 1_700_010_000)
if err != nil {
t.Fatalf("ReplaceVcenterAggregateTotalsFromSummary failed: %v", err)
}
// One aggregate row per distinct vcenter (vc-a and vc-b).
if upserted != 2 {
t.Fatalf("expected 2 vcenter aggregate rows, got %d", upserted)
}
vcA, err := ListVcenterAggregateTotals(ctx, dbConn, "vc-a", "daily", 10)
if err != nil {
t.Fatalf("ListVcenterAggregateTotals(vc-a) failed: %v", err)
}
if len(vcA) != 1 {
t.Fatalf("expected 1 vc-a daily row, got %d", len(vcA))
}
if vcA[0].SnapshotTime != 1_700_010_000 || vcA[0].VmCount != 2 || vcA[0].VcpuTotal != 5 || vcA[0].RamTotalGB != 9 {
t.Fatalf("unexpected vc-a daily aggregate row: %#v", vcA[0])
}
// Exercise the Since variant too; the cutoff 1_700_009_000 precedes the rollup time.
vcB, err := ListVcenterAggregateTotalsSince(ctx, dbConn, "vc-b", "daily", time.Unix(1_700_009_000, 0))
if err != nil {
t.Fatalf("ListVcenterAggregateTotalsSince(vc-b) failed: %v", err)
}
if len(vcB) != 1 || vcB[0].VmCount != 1 || vcB[0].VcpuTotal != 1 || vcB[0].RamTotalGB != 2 {
t.Fatalf("unexpected vc-b daily aggregate row: %#v", vcB)
}
}
// TestListVcenterTotalsByTypeDailyFallbackWarmsCache verifies that, with no
// cached daily aggregate present, ListVcenterTotalsByType falls back to the
// summary table registered in snapshot_registry and, as a side effect, warms
// the daily aggregate cache with the computed totals.
func TestListVcenterTotalsByTypeDailyFallbackWarmsCache(t *testing.T) {
ctx := context.Background()
dbConn := newTestSQLiteDB(t)
// Registry table mapping snapshot type -> summary table name -> snapshot time.
if _, err := dbConn.ExecContext(ctx, `
CREATE TABLE snapshot_registry (
snapshot_type TEXT,
table_name TEXT,
snapshot_time BIGINT
)`); err != nil {
t.Fatalf("failed to create snapshot_registry: %v", err)
}
summaryTable := "inventory_daily_summary_20260102"
// Minimal summary schema; the Avg* columns feed the computed totals.
if _, err := dbConn.ExecContext(ctx, fmt.Sprintf(`
CREATE TABLE %s (
"Vcenter" TEXT NOT NULL,
"Name" TEXT,
"VmId" TEXT,
"VmUuid" TEXT,
"AvgVcpuCount" REAL,
"VcpuCount" BIGINT,
"AvgRamGB" REAL,
"RamGB" BIGINT
)`, summaryTable)); err != nil {
t.Fatalf("failed to create summary table: %v", err)
}
insertSQL := fmt.Sprintf(`
INSERT INTO %s ("Vcenter","Name","VmId","VmUuid","AvgVcpuCount","AvgRamGB")
VALUES (?,?,?,?,?,?)
`, summaryTable)
// Two vc-a VMs: expected totals are count 2, vcpu 4+2=6, ram 8+6=14.
for _, args := range [][]any{
{"vc-a", "vm-1", "1", "u1", 4.0, 8.0},
{"vc-a", "vm-2", "2", "u2", 2.0, 6.0},
} {
if _, err := dbConn.ExecContext(ctx, insertSQL, args...); err != nil {
t.Fatalf("failed to insert summary row: %v", err)
}
}
// Register the summary table so the daily fallback can discover it.
if _, err := dbConn.ExecContext(ctx, `INSERT INTO snapshot_registry (snapshot_type, table_name, snapshot_time) VALUES (?,?,?)`, "daily", summaryTable, int64(1_700_020_000)); err != nil {
t.Fatalf("failed to insert snapshot_registry row: %v", err)
}
rows, err := ListVcenterTotalsByType(ctx, dbConn, "vc-a", "daily", 10)
if err != nil {
t.Fatalf("ListVcenterTotalsByType failed: %v", err)
}
if len(rows) != 1 {
t.Fatalf("expected 1 daily row, got %d", len(rows))
}
if rows[0].SnapshotTime != 1_700_020_000 || rows[0].VmCount != 2 || rows[0].VcpuTotal != 6 || rows[0].RamTotalGB != 14 {
t.Fatalf("unexpected daily totals row: %#v", rows[0])
}
// The fallback read should have populated the aggregate cache for later reads.
cached, err := ListVcenterAggregateTotals(ctx, dbConn, "vc-a", "daily", 10)
if err != nil {
t.Fatalf("ListVcenterAggregateTotals failed: %v", err)
}
if len(cached) != 1 || cached[0].SnapshotTime != 1_700_020_000 || cached[0].VmCount != 2 {
t.Fatalf("expected warmed daily cache row, got %#v", cached)
}
}
// TestSyncVcenterAggregateTotalsFromRegistry verifies that syncing from the
// snapshot registry rebuilds daily aggregate totals for every registered
// summary table and that both returned counters reflect the work performed.
func TestSyncVcenterAggregateTotalsFromRegistry(t *testing.T) {
ctx := context.Background()
dbConn := newTestSQLiteDB(t)
// Registry table mapping snapshot type -> summary table name -> snapshot time.
if _, err := dbConn.ExecContext(ctx, `
CREATE TABLE snapshot_registry (
snapshot_type TEXT,
table_name TEXT,
snapshot_time BIGINT
)`); err != nil {
t.Fatalf("failed to create snapshot_registry: %v", err)
}
table1 := "inventory_daily_summary_20260103"
table2 := "inventory_daily_summary_20260104"
// Two daily snapshot tables sharing the same minimal schema.
for _, table := range []string{table1, table2} {
if _, err := dbConn.ExecContext(ctx, fmt.Sprintf(`
CREATE TABLE %s (
"Vcenter" TEXT NOT NULL,
"Name" TEXT,
"VmId" TEXT,
"VmUuid" TEXT,
"AvgVcpuCount" REAL,
"VcpuCount" BIGINT,
"AvgRamGB" REAL,
"RamGB" BIGINT
)`, table)); err != nil {
t.Fatalf("failed to create summary table %s: %v", table, err)
}
}
insert1 := fmt.Sprintf(`INSERT INTO %s ("Vcenter","Name","VmId","VmUuid","AvgVcpuCount","AvgRamGB") VALUES (?,?,?,?,?,?)`, table1)
insert2 := fmt.Sprintf(`INSERT INTO %s ("Vcenter","Name","VmId","VmUuid","AvgVcpuCount","AvgRamGB") VALUES (?,?,?,?,?,?)`, table2)
// Snapshot 1 holds one VM each for vc-a and vc-b; snapshot 2 holds a single vc-a VM.
for _, args := range [][]any{
{"vc-a", "vm-1", "1", "u1", 2.0, 4.0},
{"vc-b", "vm-2", "2", "u2", 3.0, 5.0},
} {
if _, err := dbConn.ExecContext(ctx, insert1, args...); err != nil {
t.Fatalf("failed to insert row into %s: %v", table1, err)
}
}
if _, err := dbConn.ExecContext(ctx, insert2, "vc-a", "vm-3", "3", "u3", 4.0, 6.0); err != nil {
t.Fatalf("failed to insert row into %s: %v", table2, err)
}
// Register both summary tables as daily snapshots with distinct times.
if _, err := dbConn.ExecContext(ctx, `INSERT INTO snapshot_registry (snapshot_type, table_name, snapshot_time) VALUES (?,?,?)`, "daily", table1, int64(1_700_030_000)); err != nil {
t.Fatalf("failed to insert snapshot_registry row for table1: %v", err)
}
if _, err := dbConn.ExecContext(ctx, `INSERT INTO snapshot_registry (snapshot_type, table_name, snapshot_time) VALUES (?,?,?)`, "daily", table2, int64(1_700_040_000)); err != nil {
t.Fatalf("failed to insert snapshot_registry row for table2: %v", err)
}
snapshotsRefreshed, rowsUpserted, err := SyncVcenterAggregateTotalsFromRegistry(ctx, dbConn, "daily")
if err != nil {
t.Fatalf("SyncVcenterAggregateTotalsFromRegistry failed: %v", err)
}
if snapshotsRefreshed != 2 {
t.Fatalf("expected 2 snapshots refreshed, got %d", snapshotsRefreshed)
}
// Three aggregate rows total: (vc-a, vc-b) from snapshot 1 plus vc-a from snapshot 2.
if rowsUpserted != 3 {
t.Fatalf("expected 3 rows upserted, got %d", rowsUpserted)
}
rows, err := ListVcenterAggregateTotals(ctx, dbConn, "vc-a", "daily", 10)
if err != nil {
t.Fatalf("ListVcenterAggregateTotals failed: %v", err)
}
if len(rows) != 2 {
t.Fatalf("expected 2 daily rows for vc-a, got %d", len(rows))
}
// Listing returns the newest snapshot first.
if rows[0].SnapshotTime != 1_700_040_000 || rows[0].VmCount != 1 || rows[0].VcpuTotal != 4 || rows[0].RamTotalGB != 6 {
t.Fatalf("unexpected latest vc-a daily row: %#v", rows[0])
}
if rows[1].SnapshotTime != 1_700_030_000 || rows[1].VmCount != 1 || rows[1].VcpuTotal != 2 || rows[1].RamTotalGB != 4 {
t.Fatalf("unexpected older vc-a daily row: %#v", rows[1])
}
}
+34
View File
@@ -0,0 +1,34 @@
package db
import (
"strings"
"testing"
)
// TestBuildDailySummaryInsertDoesNotGroupFinalAggJoin guards against a
// regression where the generated daily-summary INSERT ended with a GROUP BY
// after the final agg/totals join — a form Postgres rejects (SQLSTATE 42803).
func TestBuildDailySummaryInsertDoesNotGroupFinalAggJoin(t *testing.T) {
query, err := BuildDailySummaryInsert("inventory_daily_summary_20260101", "SELECT 1")
if err != nil {
t.Fatalf("BuildDailySummaryInsert failed: %v", err)
}
// The query must end with the plain join (note the terminating semicolon)...
if !strings.Contains(query, `FROM agg
JOIN totals ON totals."Vcenter" = agg."Vcenter";`) {
t.Fatalf("expected final agg/totals join with terminator, query tail changed unexpectedly")
}
// ...and must not follow that join with a GROUP BY clause.
if strings.Contains(query, `FROM agg
JOIN totals ON totals."Vcenter" = agg."Vcenter"
GROUP BY`) {
t.Fatalf("unexpected final GROUP BY after agg/totals join; this breaks Postgres SQLSTATE 42803")
}
}
// TestBuildMonthlySummaryInsertCastsSampleSumToBigInt ensures the monthly
// summary INSERT casts SUM("SamplesPresent") to BIGINT, guarding against the
// Postgres numeric-assignment issue noted in the failure message below.
func TestBuildMonthlySummaryInsertCastsSampleSumToBigInt(t *testing.T) {
query, err := BuildMonthlySummaryInsert("inventory_monthly_summary_202601", "SELECT 1")
if err != nil {
t.Fatalf("BuildMonthlySummaryInsert failed: %v", err)
}
if !strings.Contains(query, `CAST(SUM("SamplesPresent") AS BIGINT) AS "SamplesPresent"`) {
t.Fatalf("expected monthly sample sum cast to BIGINT to avoid Postgres numeric assignment issues")
}
}
+5 -3
View File
@@ -7,7 +7,6 @@ import (
"strings"
"vctp/db/queries"
//_ "github.com/tursodatabase/libsql-client-go/libsql"
"github.com/jmoiron/sqlx"
_ "modernc.org/sqlite"
)
@@ -38,12 +37,15 @@ func (d *LocalDB) Logger() *slog.Logger {
}
func (d *LocalDB) Close() error {
fmt.Println("Shutting database")
d.logger.Debug("test")
//fmt.Println("Shutting database")
d.logger.Debug("Shutting database")
return d.db.Close()
}
func newLocalDB(logger *slog.Logger, dsn string) (*LocalDB, error) {
if looksLikePostgresDSN(dsn) {
return nil, fmt.Errorf("database_driver is sqlite but database_url looks like a postgres DSN; set settings.database_driver=postgres")
}
// TODO - work out if https://kerkour.com/sqlite-for-servers is possible without using sqlx
/*
@@ -0,0 +1,24 @@
-- +goose Up
-- +goose StatementBegin
-- sqlc queries target lowercase table names. Postgres migrations historically
-- created quoted CamelCase tables, so expose lowercase compatibility views.
CREATE OR REPLACE VIEW inventory AS
SELECT * FROM "Inventory";
CREATE OR REPLACE VIEW updates AS
SELECT * FROM "Updates";
CREATE OR REPLACE VIEW events AS
SELECT * FROM "Events";
CREATE OR REPLACE VIEW inventory_history AS
SELECT * FROM "InventoryHistory";
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP VIEW IF EXISTS inventory_history;
DROP VIEW IF EXISTS events;
DROP VIEW IF EXISTS updates;
DROP VIEW IF EXISTS inventory;
-- +goose StatementEnd
+3 -3
View File
@@ -55,7 +55,7 @@ type rebindDBTX struct {
db *sqlx.DB
}
func (r rebindDBTX) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
func (r rebindDBTX) ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) {
return r.db.ExecContext(ctx, rebindQuery(query), args...)
}
@@ -63,11 +63,11 @@ func (r rebindDBTX) PrepareContext(ctx context.Context, query string) (*sql.Stmt
return r.db.PrepareContext(ctx, rebindQuery(query))
}
func (r rebindDBTX) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
func (r rebindDBTX) QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) {
return r.db.QueryContext(ctx, rebindQuery(query), args...)
}
func (r rebindDBTX) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
func (r rebindDBTX) QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row {
return r.db.QueryRowContext(ctx, rebindQuery(query), args...)
}
+86 -21
View File
@@ -1,33 +1,65 @@
-- name: ListInventory :many
SELECT * FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
ORDER BY "Name";
-- name: GetReportInventory :many
SELECT * FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
ORDER BY "CreationTime";
-- name: GetInventoryByName :many
SELECT * FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
WHERE "Name" = ?;
-- name: GetInventoryByVcenter :many
SELECT * FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
WHERE "Vcenter" = ?;
-- name: GetInventoryVmId :one
SELECT * FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
WHERE "VmId" = sqlc.arg('vmId') AND "Datacenter" = sqlc.arg('datacenterName');
-- name: GetInventoryVmUuid :one
SELECT * FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
WHERE "VmUuid" = sqlc.arg('vmUuid') AND "Datacenter" = sqlc.arg('datacenterName');
-- name: GetInventoryVcUrl :many
SELECT * FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
WHERE "Vcenter" = sqlc.arg('vc');
-- name: GetInventoryEventId :one
SELECT * FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
WHERE "CloudId" = ? LIMIT 1;
-- name: CreateInventory :one
@@ -36,7 +68,10 @@ INSERT INTO inventory (
) VALUES(
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
)
RETURNING *;
RETURNING
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid";
-- name: InventoryUpdate :exec
UPDATE inventory
@@ -51,17 +86,26 @@ WHERE "VmId" = sqlc.arg('vmId') AND "Datacenter" = sqlc.arg('datacenterName');
-- name: InventoryCleanup :exec
DELETE FROM inventory
WHERE "VmId" = sqlc.arg('vmId') AND "Datacenter" = sqlc.arg('datacenterName')
RETURNING *;
RETURNING
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid";
-- name: InventoryCleanupVcenter :exec
DELETE FROM inventory
WHERE "Vcenter" = sqlc.arg('vc')
RETURNING *;
RETURNING
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid";
-- name: InventoryCleanupTemplates :exec
DELETE FROM inventory
WHERE "IsTemplate" = 'TRUE'
RETURNING *;
RETURNING
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid";
-- name: CreateUpdate :one
INSERT INTO updates (
@@ -69,25 +113,37 @@ INSERT INTO updates (
) VALUES(
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
)
RETURNING *;
RETURNING
"Uid", "InventoryId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool",
"EventKey", "EventId", "NewProvisionedDisk", "UserName", "PlaceholderChange", "Name", "RawChangeString";
-- name: GetReportUpdates :many
SELECT * FROM updates
SELECT
"Uid", "InventoryId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool",
"EventKey", "EventId", "NewProvisionedDisk", "UserName", "PlaceholderChange", "Name", "RawChangeString"
FROM updates
ORDER BY "UpdateTime";
-- name: GetVmUpdates :many
SELECT * FROM updates
SELECT
"Uid", "InventoryId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool",
"EventKey", "EventId", "NewProvisionedDisk", "UserName", "PlaceholderChange", "Name", "RawChangeString"
FROM updates
WHERE "UpdateType" = sqlc.arg('updateType') AND "InventoryId" = sqlc.arg('InventoryId');
-- name: CleanupUpdates :exec
DELETE FROM updates
WHERE "UpdateType" = sqlc.arg('updateType') AND "UpdateTime" <= sqlc.arg('updateTime')
RETURNING *;
RETURNING
"Uid", "InventoryId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool",
"EventKey", "EventId", "NewProvisionedDisk", "UserName", "PlaceholderChange", "Name", "RawChangeString";
-- name: CleanupUpdatesNullVm :exec
DELETE FROM updates
WHERE "InventoryId" IS NULL
RETURNING *;
RETURNING
"Uid", "InventoryId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool",
"EventKey", "EventId", "NewProvisionedDisk", "UserName", "PlaceholderChange", "Name", "RawChangeString";
-- name: CreateEvent :one
INSERT INTO events (
@@ -95,14 +151,22 @@ INSERT INTO events (
) VALUES(
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
)
RETURNING *;
RETURNING
"Eid", "CloudId", "Source", "EventTime", "ChainId", "VmId", "EventKey", "DatacenterName",
"ComputeResourceName", "UserName", "Processed", "DatacenterId", "ComputeResourceId", "VmName", "EventType";
-- name: ListEvents :many
SELECT * FROM events
SELECT
"Eid", "CloudId", "Source", "EventTime", "ChainId", "VmId", "EventKey", "DatacenterName",
"ComputeResourceName", "UserName", "Processed", "DatacenterId", "ComputeResourceId", "VmName", "EventType"
FROM events
ORDER BY "EventTime";
-- name: ListUnprocessedEvents :many
SELECT * FROM events
SELECT
"Eid", "CloudId", "Source", "EventTime", "ChainId", "VmId", "EventKey", "DatacenterName",
"ComputeResourceName", "UserName", "Processed", "DatacenterId", "ComputeResourceId", "VmName", "EventType"
FROM events
WHERE "Processed" = 0
AND "EventTime" > sqlc.arg('eventTime')
ORDER BY "EventTime";
@@ -118,7 +182,8 @@ INSERT INTO inventory_history (
) VALUES(
?, ?, ?, ?, ?, ?, ?
)
RETURNING *;
RETURNING
"Hid", "InventoryId", "ReportDate", "UpdateTime", "PreviousVcpus", "PreviousRam", "PreviousResourcePool", "PreviousProvisionedDisk";
-- name: SqliteTableExists :one
SELECT COUNT(1) AS count
+86 -21
View File
@@ -13,7 +13,9 @@ import (
const cleanupUpdates = `-- name: CleanupUpdates :exec
DELETE FROM updates
WHERE "UpdateType" = ?1 AND "UpdateTime" <= ?2
RETURNING Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString
RETURNING
"Uid", "InventoryId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool",
"EventKey", "EventId", "NewProvisionedDisk", "UserName", "PlaceholderChange", "Name", "RawChangeString"
`
type CleanupUpdatesParams struct {
@@ -29,7 +31,9 @@ func (q *Queries) CleanupUpdates(ctx context.Context, arg CleanupUpdatesParams)
const cleanupUpdatesNullVm = `-- name: CleanupUpdatesNullVm :exec
DELETE FROM updates
WHERE "InventoryId" IS NULL
RETURNING Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString
RETURNING
"Uid", "InventoryId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool",
"EventKey", "EventId", "NewProvisionedDisk", "UserName", "PlaceholderChange", "Name", "RawChangeString"
`
func (q *Queries) CleanupUpdatesNullVm(ctx context.Context) error {
@@ -43,7 +47,9 @@ INSERT INTO events (
) VALUES(
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
)
RETURNING Eid, CloudId, Source, EventTime, ChainId, VmId, EventKey, DatacenterName, ComputeResourceName, UserName, Processed, DatacenterId, ComputeResourceId, VmName, EventType
RETURNING
"Eid", "CloudId", "Source", "EventTime", "ChainId", "VmId", "EventKey", "DatacenterName",
"ComputeResourceName", "UserName", "Processed", "DatacenterId", "ComputeResourceId", "VmName", "EventType"
`
type CreateEventParams struct {
@@ -105,7 +111,10 @@ INSERT INTO inventory (
) VALUES(
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
)
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
RETURNING
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
`
type CreateInventoryParams struct {
@@ -179,7 +188,8 @@ INSERT INTO inventory_history (
) VALUES(
?, ?, ?, ?, ?, ?, ?
)
RETURNING Hid, InventoryId, ReportDate, UpdateTime, PreviousVcpus, PreviousRam, PreviousResourcePool, PreviousProvisionedDisk
RETURNING
"Hid", "InventoryId", "ReportDate", "UpdateTime", "PreviousVcpus", "PreviousRam", "PreviousResourcePool", "PreviousProvisionedDisk"
`
type CreateInventoryHistoryParams struct {
@@ -222,7 +232,9 @@ INSERT INTO updates (
) VALUES(
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
)
RETURNING Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString
RETURNING
"Uid", "InventoryId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool",
"EventKey", "EventId", "NewProvisionedDisk", "UserName", "PlaceholderChange", "Name", "RawChangeString"
`
type CreateUpdateParams struct {
@@ -278,7 +290,11 @@ func (q *Queries) CreateUpdate(ctx context.Context, arg CreateUpdateParams) (Upd
}
const getInventoryByName = `-- name: GetInventoryByName :many
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
WHERE "Name" = ?
`
@@ -326,7 +342,11 @@ func (q *Queries) GetInventoryByName(ctx context.Context, name string) ([]Invent
}
const getInventoryByVcenter = `-- name: GetInventoryByVcenter :many
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
WHERE "Vcenter" = ?
`
@@ -374,7 +394,11 @@ func (q *Queries) GetInventoryByVcenter(ctx context.Context, vcenter string) ([]
}
const getInventoryEventId = `-- name: GetInventoryEventId :one
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
WHERE "CloudId" = ? LIMIT 1
`
@@ -406,7 +430,11 @@ func (q *Queries) GetInventoryEventId(ctx context.Context, cloudid sql.NullStrin
}
const getInventoryVcUrl = `-- name: GetInventoryVcUrl :many
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
WHERE "Vcenter" = ?1
`
@@ -454,7 +482,11 @@ func (q *Queries) GetInventoryVcUrl(ctx context.Context, vc string) ([]Inventory
}
const getInventoryVmId = `-- name: GetInventoryVmId :one
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
WHERE "VmId" = ?1 AND "Datacenter" = ?2
`
@@ -491,7 +523,11 @@ func (q *Queries) GetInventoryVmId(ctx context.Context, arg GetInventoryVmIdPara
}
const getInventoryVmUuid = `-- name: GetInventoryVmUuid :one
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
WHERE "VmUuid" = ?1 AND "Datacenter" = ?2
`
@@ -528,7 +564,11 @@ func (q *Queries) GetInventoryVmUuid(ctx context.Context, arg GetInventoryVmUuid
}
const getReportInventory = `-- name: GetReportInventory :many
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
ORDER BY "CreationTime"
`
@@ -576,7 +616,10 @@ func (q *Queries) GetReportInventory(ctx context.Context) ([]Inventory, error) {
}
const getReportUpdates = `-- name: GetReportUpdates :many
SELECT Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString FROM updates
SELECT
"Uid", "InventoryId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool",
"EventKey", "EventId", "NewProvisionedDisk", "UserName", "PlaceholderChange", "Name", "RawChangeString"
FROM updates
ORDER BY "UpdateTime"
`
@@ -619,7 +662,10 @@ func (q *Queries) GetReportUpdates(ctx context.Context) ([]Update, error) {
}
const getVmUpdates = `-- name: GetVmUpdates :many
SELECT Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString FROM updates
SELECT
"Uid", "InventoryId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool",
"EventKey", "EventId", "NewProvisionedDisk", "UserName", "PlaceholderChange", "Name", "RawChangeString"
FROM updates
WHERE "UpdateType" = ?1 AND "InventoryId" = ?2
`
@@ -669,7 +715,10 @@ func (q *Queries) GetVmUpdates(ctx context.Context, arg GetVmUpdatesParams) ([]U
const inventoryCleanup = `-- name: InventoryCleanup :exec
DELETE FROM inventory
WHERE "VmId" = ?1 AND "Datacenter" = ?2
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
RETURNING
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
`
type InventoryCleanupParams struct {
@@ -685,7 +734,10 @@ func (q *Queries) InventoryCleanup(ctx context.Context, arg InventoryCleanupPara
const inventoryCleanupTemplates = `-- name: InventoryCleanupTemplates :exec
DELETE FROM inventory
WHERE "IsTemplate" = 'TRUE'
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
RETURNING
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
`
func (q *Queries) InventoryCleanupTemplates(ctx context.Context) error {
@@ -696,7 +748,10 @@ func (q *Queries) InventoryCleanupTemplates(ctx context.Context) error {
const inventoryCleanupVcenter = `-- name: InventoryCleanupVcenter :exec
DELETE FROM inventory
WHERE "Vcenter" = ?1
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
RETURNING
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
`
func (q *Queries) InventoryCleanupVcenter(ctx context.Context, vc string) error {
@@ -739,7 +794,10 @@ func (q *Queries) InventoryUpdate(ctx context.Context, arg InventoryUpdateParams
}
const listEvents = `-- name: ListEvents :many
SELECT Eid, CloudId, Source, EventTime, ChainId, VmId, EventKey, DatacenterName, ComputeResourceName, UserName, Processed, DatacenterId, ComputeResourceId, VmName, EventType FROM events
SELECT
"Eid", "CloudId", "Source", "EventTime", "ChainId", "VmId", "EventKey", "DatacenterName",
"ComputeResourceName", "UserName", "Processed", "DatacenterId", "ComputeResourceId", "VmName", "EventType"
FROM events
ORDER BY "EventTime"
`
@@ -783,7 +841,11 @@ func (q *Queries) ListEvents(ctx context.Context) ([]Event, error) {
}
const listInventory = `-- name: ListInventory :many
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM inventory
ORDER BY "Name"
`
@@ -831,7 +893,10 @@ func (q *Queries) ListInventory(ctx context.Context) ([]Inventory, error) {
}
const listUnprocessedEvents = `-- name: ListUnprocessedEvents :many
SELECT Eid, CloudId, Source, EventTime, ChainId, VmId, EventKey, DatacenterName, ComputeResourceName, UserName, Processed, DatacenterId, ComputeResourceId, VmName, EventType FROM events
SELECT
"Eid", "CloudId", "Source", "EventTime", "ChainId", "VmId", "EventKey", "DatacenterName",
"ComputeResourceName", "UserName", "Processed", "DatacenterId", "ComputeResourceId", "VmName", "EventType"
FROM events
WHERE "Processed" = 0
AND "EventTime" > ?1
ORDER BY "EventTime"
+445
View File
@@ -0,0 +1,445 @@
package db
import (
"context"
"database/sql"
"fmt"
"log/slog"
"sort"
"strconv"
"strings"
"github.com/jmoiron/sqlx"
_ "modernc.org/sqlite"
)
// SQLiteImportStats summarizes a one-shot SQLite-to-Postgres import.
type SQLiteImportStats struct {
	// SourceDSN is the trimmed SQLite path/DSN the import read from.
	SourceDSN string
	// TablesImported counts tables whose rows were copied.
	TablesImported int
	// TablesSkipped counts source tables filtered out before copying
	// (sqlite_* internals, goose migration state, blank names).
	TablesSkipped int
	// RowsImported is the total number of rows copied across all tables.
	RowsImported int64
}
// postgresColumn is one row of information_schema.columns for a destination table.
type postgresColumn struct {
	Name     string `db:"column_name"`
	DataType string `db:"data_type"`
}
// importColumn pairs a SQLite source column with its Postgres destination
// column (matched case-insensitively), carrying the destination data type
// so values can be coerced at insert time.
type importColumn struct {
	SourceName      string
	DestinationName string
	DestinationType string
}
// ImportSQLiteIntoPostgres imports all supported tables from a SQLite database into a configured Postgres database.
//
// The destination must be an open Postgres handle (driver "pgx" or
// "postgres"). Known-but-missing destination tables are auto-created first,
// then every table is copied inside a single transaction, so the row copy is
// all-or-nothing. The returned stats describe what was copied; on error they
// reflect progress made up to the failure.
//
// A nil ctx falls back to context.Background(); a nil logger falls back to
// slog.Default().
func ImportSQLiteIntoPostgres(ctx context.Context, logger *slog.Logger, destination *sqlx.DB, sqliteDSN string) (SQLiteImportStats, error) {
	stats := SQLiteImportStats{SourceDSN: strings.TrimSpace(sqliteDSN)}
	// Tolerate zero-value ctx/logger so callers need no boilerplate.
	if ctx == nil {
		ctx = context.Background()
	}
	if logger == nil {
		logger = slog.Default()
	}
	if destination == nil {
		return stats, fmt.Errorf("destination database is nil")
	}
	// Guard against accidentally pointing the import at a non-Postgres handle.
	driver := strings.ToLower(strings.TrimSpace(destination.DriverName()))
	if driver != "pgx" && driver != "postgres" {
		return stats, fmt.Errorf("sqlite import requires postgres destination; got %s", destination.DriverName())
	}
	if strings.TrimSpace(sqliteDSN) == "" {
		return stats, fmt.Errorf("sqlite source path/DSN is required")
	}
	source, err := sqlx.Open("sqlite", normalizeSqliteDSN(sqliteDSN))
	if err != nil {
		return stats, fmt.Errorf("failed to open sqlite source: %w", err)
	}
	defer source.Close()
	// Open is lazy; ping to surface a bad path/DSN immediately.
	if err := source.PingContext(ctx); err != nil {
		return stats, fmt.Errorf("failed to connect to sqlite source: %w", err)
	}
	tables, err := listSQLiteUserTables(ctx, source)
	if err != nil {
		return stats, err
	}
	sort.Strings(tables)
	if len(tables) == 0 {
		logger.Warn("sqlite import source has no user tables")
		return stats, nil
	}
	// Filter out internal/bookkeeping tables and auto-create any known
	// destination tables up front: all DDL runs before the copy transaction.
	importTables := make([]string, 0, len(tables))
	for _, tableName := range tables {
		if shouldSkipSQLiteImportTable(tableName) {
			stats.TablesSkipped++
			continue
		}
		if err := ensureDestinationImportTable(ctx, destination, tableName); err != nil {
			return stats, err
		}
		importTables = append(importTables, tableName)
	}
	if len(importTables) == 0 {
		logger.Warn("sqlite import found no tables to import after filtering")
		return stats, nil
	}
	// One transaction for every table: a failure part-way rolls back the
	// truncates and inserts done by copySQLiteTableIntoPostgres.
	tx, err := destination.BeginTxx(ctx, nil)
	if err != nil {
		return stats, fmt.Errorf("failed to start postgres import transaction: %w", err)
	}
	defer tx.Rollback() // no-op once Commit has succeeded
	for _, tableName := range importTables {
		rowsCopied, err := copySQLiteTableIntoPostgres(ctx, source, tx, tableName)
		if err != nil {
			return stats, err
		}
		stats.TablesImported++
		stats.RowsImported += rowsCopied
		logger.Info("sqlite import copied table", "table", tableName, "rows", rowsCopied)
	}
	if err := tx.Commit(); err != nil {
		return stats, fmt.Errorf("failed to commit sqlite import transaction: %w", err)
	}
	return stats, nil
}
// listSQLiteUserTables returns the name of every table recorded in the
// source database's sqlite_master catalog, ordered by name. Internal
// filtering (sqlite_* and friends) is the caller's job.
func listSQLiteUserTables(ctx context.Context, source *sqlx.DB) ([]string, error) {
	const query = `
SELECT name
FROM sqlite_master
WHERE type = 'table'
ORDER BY name
`
	tableRows, err := source.QueryxContext(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to list sqlite tables: %w", err)
	}
	defer tableRows.Close()
	var names []string
	for tableRows.Next() {
		var tableName string
		if err := tableRows.Scan(&tableName); err != nil {
			return nil, fmt.Errorf("failed to scan sqlite table name: %w", err)
		}
		names = append(names, tableName)
	}
	if err := tableRows.Err(); err != nil {
		return nil, fmt.Errorf("failed reading sqlite table names: %w", err)
	}
	return names, nil
}
// shouldSkipSQLiteImportTable reports whether a source table must be
// excluded from the import. Blank names, SQLite-internal tables
// (sqlite_* prefix), and the goose migration table are never copied.
// Matching is case-insensitive and ignores surrounding whitespace.
func shouldSkipSQLiteImportTable(tableName string) bool {
	name := strings.ToLower(strings.TrimSpace(tableName))
	switch {
	case name == "":
		return true
	case strings.HasPrefix(name, "sqlite_"):
		return true
	case name == "goose_db_version":
		// Destination migration state is managed independently by goose.
		return true
	default:
		return false
	}
}
// ensureDestinationImportTable creates the Postgres destination table for a
// known source table when it does not already exist. Tables outside the
// known schema set produce an error rather than a guessed CREATE TABLE.
func ensureDestinationImportTable(ctx context.Context, destination *sqlx.DB, tableName string) error {
	if TableExists(ctx, destination, tableName) {
		return nil
	}
	// Prefix-named snapshot/summary tables are created per-table.
	if strings.HasPrefix(tableName, "inventory_hourly_") {
		return EnsureSnapshotTable(ctx, destination, tableName)
	}
	if strings.HasPrefix(tableName, "inventory_daily_summary_") || strings.HasPrefix(tableName, "inventory_monthly_summary_") {
		return EnsureSummaryTable(ctx, destination, tableName)
	}
	// Fixed-name tables map onto their dedicated Ensure helpers.
	switch tableName {
	case "snapshot_runs":
		return EnsureSnapshotRunTable(ctx, destination)
	case "cron_status":
		return EnsureCronStatusTable(ctx, destination)
	case "vm_hourly_stats":
		return EnsureVmHourlyStats(ctx, destination)
	case "vm_lifecycle_cache":
		return EnsureVmLifecycleCache(ctx, destination)
	case "vm_daily_rollup":
		return EnsureVmDailyRollup(ctx, destination)
	case "vm_identity", "vm_renames":
		return EnsureVmIdentityTables(ctx, destination)
	case "vcenter_totals":
		return EnsureVcenterTotalsTable(ctx, destination)
	case "vcenter_latest_totals":
		return EnsureVcenterLatestTotalsTable(ctx, destination)
	case "vcenter_aggregate_totals":
		return EnsureVcenterAggregateTotalsTable(ctx, destination)
	case "vcenter_folder_cache", "vcenter_resource_pool_cache", "vcenter_host_cache":
		return EnsureVcenterReferenceCacheTables(ctx, destination)
	}
	return fmt.Errorf("source table %q does not exist in postgres and cannot be auto-created", tableName)
}
// copySQLiteTableIntoPostgres replaces the contents of one Postgres table
// with the rows of the same-named SQLite table, using the caller's
// destination transaction. Only columns present on both sides (matched
// case-insensitively) are copied; the destination is truncated first and
// its serial sequences are realigned afterwards. Returns the number of
// rows copied (valid even alongside a non-nil error).
func copySQLiteTableIntoPostgres(ctx context.Context, source *sqlx.DB, destinationTX *sqlx.Tx, tableName string) (int64, error) {
	sourceColumns, err := listSQLiteTableColumns(ctx, source, tableName)
	if err != nil {
		return 0, err
	}
	destinationColumns, err := listPostgresTableColumns(ctx, destinationTX, tableName)
	if err != nil {
		return 0, err
	}
	// Copy only the overlap; columns that exist on just one side are ignored.
	columns := intersectImportColumns(sourceColumns, destinationColumns)
	if len(columns) == 0 {
		return 0, fmt.Errorf("no overlapping columns between sqlite and postgres table %q", tableName)
	}
	// Replace, not merge: clear the destination (and dependent rows via
	// CASCADE) inside the import transaction before inserting.
	if _, err := destinationTX.ExecContext(ctx, fmt.Sprintf(`TRUNCATE TABLE %s RESTART IDENTITY CASCADE`, quoteIdentifier(tableName))); err != nil {
		return 0, fmt.Errorf("failed to truncate destination table %q: %w", tableName, err)
	}
	sourceColumnNames := make([]string, 0, len(columns))
	destinationColumnNames := make([]string, 0, len(columns))
	for _, col := range columns {
		sourceColumnNames = append(sourceColumnNames, col.SourceName)
		destinationColumnNames = append(destinationColumnNames, col.DestinationName)
	}
	selectSQL := fmt.Sprintf(
		`SELECT %s FROM %s`,
		joinQuotedIdentifiers(sourceColumnNames),
		quoteIdentifier(tableName),
	)
	rows, err := source.QueryxContext(ctx, selectSQL)
	if err != nil {
		return 0, fmt.Errorf("failed to query source table %q: %w", tableName, err)
	}
	defer rows.Close()
	// One prepared INSERT is reused for every row of the table.
	insertSQL := fmt.Sprintf(
		`INSERT INTO %s (%s) VALUES (%s)`,
		quoteIdentifier(tableName),
		joinQuotedIdentifiers(destinationColumnNames),
		postgresPlaceholders(len(columns)),
	)
	stmt, err := destinationTX.PreparexContext(ctx, insertSQL)
	if err != nil {
		return 0, fmt.Errorf("failed to prepare insert for table %q: %w", tableName, err)
	}
	defer stmt.Close()
	var rowsCopied int64
	for rows.Next() {
		// Scan into []any via per-element pointers so any column type is captured.
		rawValues := make([]any, len(columns))
		scanTargets := make([]any, len(columns))
		for i := range rawValues {
			scanTargets[i] = &rawValues[i]
		}
		if err := rows.Scan(scanTargets...); err != nil {
			return rowsCopied, fmt.Errorf("failed to scan row from sqlite table %q: %w", tableName, err)
		}
		// Adapt SQLite's loose typing to the destination column types
		// (e.g. integer/text flags onto boolean columns).
		args := make([]any, len(columns))
		for i, col := range columns {
			args[i] = coerceSQLiteValueForPostgres(rawValues[i], col.DestinationType)
		}
		if _, err := stmt.ExecContext(ctx, args...); err != nil {
			return rowsCopied, fmt.Errorf("failed to insert row into postgres table %q: %w", tableName, err)
		}
		rowsCopied++
	}
	if err := rows.Err(); err != nil {
		return rowsCopied, fmt.Errorf("failed to read rows from sqlite table %q: %w", tableName, err)
	}
	// Imported rows carry explicit ids, so serial sequences must be bumped
	// past the maximum imported value to avoid future key collisions.
	if err := resetPostgresSerialColumns(ctx, destinationTX, tableName); err != nil {
		return rowsCopied, fmt.Errorf("failed to reset postgres sequences for table %q: %w", tableName, err)
	}
	return rowsCopied, nil
}
// listSQLiteTableColumns returns the column names of a SQLite table, in
// declared order, by reading PRAGMA table_info.
func listSQLiteTableColumns(ctx context.Context, source *sqlx.DB, tableName string) ([]string, error) {
	pragmaSQL := fmt.Sprintf(`PRAGMA table_info(%s)`, quoteIdentifier(tableName))
	rows, err := source.QueryxContext(ctx, pragmaSQL)
	if err != nil {
		return nil, fmt.Errorf("failed to inspect sqlite table %q: %w", tableName, err)
	}
	defer rows.Close()
	columns := make([]string, 0)
	for rows.Next() {
		// PRAGMA table_info yields: cid, name, type, notnull, dflt_value, pk.
		var (
			cid       int
			name      string
			declType  string
			notNull   int
			dfltValue sql.NullString
			pk        int
		)
		if err := rows.Scan(&cid, &name, &declType, &notNull, &dfltValue, &pk); err != nil {
			return nil, fmt.Errorf("failed to scan sqlite columns for %q: %w", tableName, err)
		}
		columns = append(columns, name)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed reading sqlite columns for %q: %w", tableName, err)
	}
	return columns, nil
}
// listPostgresTableColumns returns the name and data type of every column of
// the destination table, in ordinal order, read from information_schema. A
// table with no columns (i.e. one that does not exist) is reported as an error.
func listPostgresTableColumns(ctx context.Context, destinationTX *sqlx.Tx, tableName string) ([]postgresColumn, error) {
	const columnQuery = `
SELECT column_name, data_type
FROM information_schema.columns
WHERE table_schema = 'public' AND table_name = $1
ORDER BY ordinal_position
`
	var columns []postgresColumn
	err := destinationTX.SelectContext(ctx, &columns, columnQuery, tableName)
	if err != nil {
		return nil, fmt.Errorf("failed to inspect postgres columns for %q: %w", tableName, err)
	}
	if len(columns) == 0 {
		return nil, fmt.Errorf("postgres table %q has no columns", tableName)
	}
	return columns, nil
}
// intersectImportColumns matches source columns to destination columns by
// case-insensitive name and returns the overlap in destination column order,
// carrying the destination data type for later value coercion. Columns that
// exist on only one side are dropped.
func intersectImportColumns(sourceColumns []string, destinationColumns []postgresColumn) []importColumn {
	lookup := make(map[string]string, len(sourceColumns))
	for _, name := range sourceColumns {
		lookup[strings.ToLower(name)] = name
	}
	matched := make([]importColumn, 0, len(destinationColumns))
	for _, dest := range destinationColumns {
		src, ok := lookup[strings.ToLower(dest.Name)]
		if !ok {
			continue
		}
		matched = append(matched, importColumn{
			SourceName:      src,
			DestinationName: dest.Name,
			DestinationType: dest.DataType,
		})
	}
	return matched
}
// resetPostgresSerialColumns realigns the sequences behind the serial
// columns of tableName so the next generated value is one past the current
// MAX of each column. Without this, inserts performed after an import would
// collide with imported primary keys.
func resetPostgresSerialColumns(ctx context.Context, destinationTX *sqlx.Tx, tableName string) error {
	type serialColumn struct {
		Name string `db:"column_name"`
	}
	// Serial-style columns are identified by their nextval(...) default.
	var columns []serialColumn
	if err := destinationTX.SelectContext(ctx, &columns, `
SELECT column_name
FROM information_schema.columns
WHERE table_schema = 'public'
AND table_name = $1
AND column_default LIKE 'nextval(%'
ORDER BY ordinal_position
`, tableName); err != nil {
		return err
	}
	tableRef := fmt.Sprintf("public.%s", quoteIdentifier(tableName))
	for _, column := range columns {
		var sequence sql.NullString
		if err := destinationTX.GetContext(ctx, &sequence, `SELECT pg_get_serial_sequence($1, $2)`, tableRef, column.Name); err != nil {
			return err
		}
		// Columns whose default looks serial but that own no sequence are skipped.
		if !sequence.Valid || strings.TrimSpace(sequence.String) == "" {
			continue
		}
		// setval(..., false) makes the NEXT nextval return exactly MAX+1;
		// COALESCE handles an empty table by restarting the sequence at 1.
		setvalSQL := fmt.Sprintf(
			`SELECT setval($1, COALESCE((SELECT MAX(%s) FROM %s), 0) + 1, false)`,
			quoteIdentifier(column.Name),
			quoteIdentifier(tableName),
		)
		if _, err := destinationTX.ExecContext(ctx, setvalSQL, sequence.String); err != nil {
			return err
		}
	}
	return nil
}
// coerceSQLiteValueForPostgres adapts a raw value scanned from SQLite so the
// Postgres driver can bind it to a column of the given data type. Byte
// slices become strings unless the destination is bytea, and truthy scalars
// are mapped onto real booleans for boolean columns. Values that cannot be
// coerced are passed through unchanged; nil stays nil.
func coerceSQLiteValueForPostgres(value any, destinationType string) any {
	if value == nil {
		return nil
	}
	destType := strings.ToLower(strings.TrimSpace(destinationType))
	if raw, isBytes := value.([]byte); isBytes && destType != "bytea" {
		value = string(raw)
	}
	if destType == "boolean" {
		if parsed, ok := coerceBoolValue(value); ok {
			return parsed
		}
	}
	return value
}
// coerceBoolValue attempts to interpret a scanned value as a boolean. The
// second result reports whether the interpretation succeeded.
func coerceBoolValue(value any) (bool, bool) {
	switch typed := value.(type) {
	case bool:
		return typed, true
	case int64:
		return typed != 0, true
	case int:
		return typed != 0, true
	case float64:
		return typed != 0, true
	case string:
		return parseBoolString(typed)
	case []byte:
		return parseBoolString(string(typed))
	}
	return false, false
}
// parseBoolString interprets common textual boolean spellings ("t"/"yes"/
// "on", ...) and, failing that, any base-10 integer where non-zero means
// true. The second result reports whether parsing succeeded.
func parseBoolString(raw string) (bool, bool) {
	normalized := strings.ToLower(strings.TrimSpace(raw))
	switch normalized {
	case "1", "t", "true", "y", "yes", "on":
		return true, true
	case "0", "f", "false", "n", "no", "off":
		return false, true
	}
	if parsed, err := strconv.ParseInt(normalized, 10, 64); err == nil {
		return parsed != 0, true
	}
	return false, false
}
// quoteIdentifier wraps identifier in double quotes, doubling any embedded
// double quotes per the SQL standard, so the name is safe to interpolate
// into statements.
func quoteIdentifier(identifier string) string {
	var b strings.Builder
	b.Grow(len(identifier) + 2)
	b.WriteByte('"')
	b.WriteString(strings.ReplaceAll(identifier, `"`, `""`))
	b.WriteByte('"')
	return b.String()
}
// joinQuotedIdentifiers quotes each identifier and joins them with ", ",
// producing a column list suitable for SELECT/INSERT statements. An empty
// input yields an empty string.
func joinQuotedIdentifiers(identifiers []string) string {
	if len(identifiers) == 0 {
		return ""
	}
	quoted := make([]string, len(identifiers))
	for i, identifier := range identifiers {
		quoted[i] = quoteIdentifier(identifier)
	}
	return strings.Join(quoted, ", ")
}
// postgresPlaceholders renders count positional bind markers ("$1, $2, ...")
// for a Postgres VALUES clause. A non-positive count yields an empty string.
func postgresPlaceholders(count int) string {
	if count <= 0 {
		return ""
	}
	var b strings.Builder
	for i := 1; i <= count; i++ {
		if i > 1 {
			b.WriteString(", ")
		}
		b.WriteByte('$')
		b.WriteString(strconv.Itoa(i))
	}
	return b.String()
}
+80
View File
@@ -0,0 +1,80 @@
package db
import (
"reflect"
"testing"
)
// TestShouldSkipSQLiteImportTable checks the import skip rules for internal,
// bookkeeping, and ordinary table names.
func TestShouldSkipSQLiteImportTable(t *testing.T) {
	cases := map[string]struct {
		tableName string
		wantSkip  bool
	}{
		"empty":           {tableName: "", wantSkip: true},
		"sqlite sequence": {tableName: "sqlite_sequence", wantSkip: true},
		"goose table":     {tableName: "goose_db_version", wantSkip: true},
		"normal table":    {tableName: "Inventory", wantSkip: false},
		"snapshot table":  {tableName: "inventory_hourly_1700000000", wantSkip: false},
	}
	for name, tc := range cases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			if got := shouldSkipSQLiteImportTable(tc.tableName); got != tc.wantSkip {
				t.Fatalf("skip mismatch: got %t want %t", got, tc.wantSkip)
			}
		})
	}
}
// TestIntersectImportColumns checks that only columns present on both sides
// survive, in destination order, with the destination data type attached.
func TestIntersectImportColumns(t *testing.T) {
	sourceColumns := []string{"Iid", "Name", "Vcenter", "CreationTime"}
	destinationColumns := []postgresColumn{
		{Name: "Iid", DataType: "bigint"},
		{Name: "Name", DataType: "text"},
		{Name: "Vcenter", DataType: "text"},
		{Name: "DeletionTime", DataType: "bigint"},
	}
	// CreationTime (source-only) and DeletionTime (destination-only) must drop out.
	want := []importColumn{
		{SourceName: "Iid", DestinationName: "Iid", DestinationType: "bigint"},
		{SourceName: "Name", DestinationName: "Name", DestinationType: "text"},
		{SourceName: "Vcenter", DestinationName: "Vcenter", DestinationType: "text"},
	}
	got := intersectImportColumns(sourceColumns, destinationColumns)
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("intersect mismatch:\n got: %#v\nwant: %#v", got, want)
	}
}
// TestCoerceSQLiteValueForPostgresBoolean checks boolean coercion for string
// and integer inputs, plus []byte-to-string conversion for text columns.
func TestCoerceSQLiteValueForPostgresBoolean(t *testing.T) {
	cases := map[string]struct {
		input           any
		destinationType string
		want            any
	}{
		"string true":  {input: "true", destinationType: "boolean", want: true},
		"string false": {input: "0", destinationType: "boolean", want: false},
		"int true":     {input: int64(1), destinationType: "boolean", want: true},
		"int false":    {input: int64(0), destinationType: "boolean", want: false},
		"bytes text":   {input: []byte("hello"), destinationType: "text", want: "hello"},
	}
	for name, tc := range cases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			got := coerceSQLiteValueForPostgres(tc.input, tc.destinationType)
			if !reflect.DeepEqual(got, tc.want) {
				t.Fatalf("coerce mismatch: got %#v want %#v", got, tc.want)
			}
		})
	}
}
// TestPostgresPlaceholders verifies placeholder rendering for typical counts
// and for the edge cases the original test missed: zero, negative, and a
// single placeholder.
func TestPostgresPlaceholders(t *testing.T) {
	cases := []struct {
		count int
		want  string
	}{
		{count: -1, want: ""},
		{count: 0, want: ""},
		{count: 1, want: "$1"},
		{count: 3, want: "$1, $2, $3"},
	}
	for _, tc := range cases {
		if got := postgresPlaceholders(tc.count); got != tc.want {
			t.Fatalf("postgresPlaceholders(%d) = %q, want %q", tc.count, got, tc.want)
		}
	}
}
+89
View File
@@ -0,0 +1,89 @@
# Design System Inspired by Airtable
## 1. Visual Theme & Atmosphere
Airtable's website is a clean, enterprise-friendly platform that communicates "sophisticated simplicity" through a white canvas with deep navy text (`#181d26`) and Airtable Blue (`#1b61c9`) as the primary interactive accent. The Haas font family (display + text variants) creates a Swiss-precision typography system with positive letter-spacing throughout.
**Key Characteristics:**
- White canvas with deep navy text (`#181d26`)
- Airtable Blue (`#1b61c9`) as primary CTA and link color
- Haas + Haas Groot Disp dual font system
- Positive letter-spacing on body text (0.08px–0.28px)
- 12px radius buttons, 16px–32px for cards
- Multi-layer blue-tinted shadow: `rgba(45,127,249,0.28) 0px 1px 3px`
- Semantic theme tokens: `--theme_*` CSS variable naming
## 2. Color Palette & Roles
### Primary
- **Deep Navy** (`#181d26`): Primary text
- **Airtable Blue** (`#1b61c9`): CTA buttons, links
- **White** (`#ffffff`): Primary surface
- **Spotlight** (`rgba(249,252,255,0.97)`): `--theme_button-text-spotlight`
### Semantic
- **Success Green** (`#006400`): `--theme_success-text`
- **Weak Text** (`rgba(4,14,32,0.69)`): `--theme_text-weak`
- **Secondary Active** (`rgba(7,12,20,0.82)`): `--theme_button-text-secondary-active`
### Neutral
- **Dark Gray** (`#333333`): Secondary text
- **Mid Blue** (`#254fad`): Link/accent blue variant
- **Border** (`#e0e2e6`): Card borders
- **Light Surface** (`#f8fafc`): Subtle surface
### Shadows
- **Blue-tinted** (`rgba(0,0,0,0.32) 0px 0px 1px, rgba(0,0,0,0.08) 0px 0px 2px, rgba(45,127,249,0.28) 0px 1px 3px, rgba(0,0,0,0.06) 0px 0px 0px 0.5px inset`)
- **Soft** (`rgba(15,48,106,0.05) 0px 0px 20px`)
## 3. Typography Rules
### Font Families
- **Primary**: `Haas`, fallbacks: `-apple-system, system-ui, Segoe UI, Roboto`
- **Display**: `Haas Groot Disp`, fallback: `Haas`
### Hierarchy
| Role | Font | Size | Weight | Line Height | Letter Spacing |
|------|------|------|--------|-------------|----------------|
| Display Hero | Haas | 48px | 400 | 1.15 | normal |
| Display Bold | Haas Groot Disp | 48px | 900 | 1.50 | normal |
| Section Heading | Haas | 40px | 400 | 1.25 | normal |
| Sub-heading | Haas | 32px | 400–500 | 1.15–1.25 | normal |
| Card Title | Haas | 24px | 400 | 1.20–1.30 | 0.12px |
| Feature | Haas | 20px | 400 | 1.25–1.50 | 0.1px |
| Body | Haas | 18px | 400 | 1.35 | 0.18px |
| Body Medium | Haas | 16px | 500 | 1.30 | 0.08–0.16px |
| Button | Haas | 16px | 500 | 1.25–1.30 | 0.08px |
| Caption | Haas | 14px | 400–500 | 1.25–1.35 | 0.07–0.28px |
## 4. Component Stylings
### Buttons
- **Primary Blue**: `#1b61c9`, white text, 16px 24px padding, 12px radius
- **White**: white bg, `#181d26` text, 12px radius, 1px border white
- **Cookie Consent**: `#1b61c9` bg, 2px radius (sharp)
### Cards: `1px solid #e0e2e6`, 16px–24px radius
### Inputs: Standard Haas styling
## 5. Layout
- Spacing: 1–48px (8px base)
- Radius: 2px (small), 12px (buttons), 16px (cards), 24px (sections), 32px (large), 50% (circles)
## 6. Depth
- Blue-tinted multi-layer shadow system
- Soft ambient: `rgba(15,48,106,0.05) 0px 0px 20px`
## 7. Do's and Don'ts
### Do: Use Airtable Blue for CTAs, Haas with positive tracking, 12px radius buttons
### Don't: Skip positive letter-spacing, use heavy shadows
## 8. Responsive Behavior
Breakpoints: 425–1664px (2–3 breakpoints)
## 9. Agent Prompt Guide
- Text: Deep Navy (`#181d26`)
- CTA: Airtable Blue (`#1b61c9`)
- Background: White (`#ffffff`)
- Border: `#e0e2e6`
+310
View File
@@ -0,0 +1,310 @@
# Design System Inspired by Cursor
## 1. Visual Theme & Atmosphere
Cursor's website is a study in warm minimalism meets code-editor elegance. The entire experience is built on a warm off-white canvas (`#f2f1ed`) with dark warm-brown text (`#26251e`) -- not pure black, not neutral gray, but a deeply warm near-black with a yellowish undertone that evokes old paper, ink, and craft. This warmth permeates every surface: backgrounds lean toward cream (`#e6e5e0`, `#ebeae5`), borders dissolve into transparent warm overlays using `oklab` color space, and even the error state (`#cf2d56`) carries warmth rather than clinical red. The result feels more like a premium print publication than a tech website.
The custom CursorGothic font is the typographic signature -- a gothic sans-serif with aggressive negative letter-spacing at display sizes (-2.16px at 72px) that creates a compressed, engineered feel. As a secondary voice, the jjannon serif font (with OpenType `"cswh"` contextual swash alternates) provides literary counterpoint for body copy and editorial passages. The monospace voice comes from berkeleyMono, a refined coding font that connects the marketing site to Cursor's core identity as a code editor. This three-font system (gothic display, serif body, mono code) gives Cursor one of the most typographically rich palettes in developer tooling.
The border system is particularly distinctive -- Cursor uses `oklab()` color space for border colors, applying warm brown at various alpha levels (0.1, 0.2, 0.55) to create borders that feel organic rather than mechanical. The signature border color `oklab(0.263084 -0.00230259 0.0124794 / 0.1)` is not a simple rgba value but a perceptually uniform color that maintains visual consistency across different backgrounds.
**Key Characteristics:**
- CursorGothic with aggressive negative letter-spacing (-2.16px at 72px, -0.72px at 36px) for compressed display headings
- jjannon serif for body text with OpenType `"cswh"` (contextual swash alternates)
- berkeleyMono for code and technical labels
- Warm off-white background (`#f2f1ed`) instead of pure white -- the entire system is warm-shifted
- Primary text color `#26251e` (warm near-black with yellow undertone)
- Accent orange `#f54e00` for brand highlight and links
- oklab-space borders at various alpha levels for perceptually uniform edge treatment
- Pill-shaped elements with extreme radius (33.5M px, effectively full-pill)
- 8px base spacing system with fine-grained sub-8px increments (1.5px, 2px, 2.5px, 3px, 4px, 5px, 6px)
- Any box containing text content (cards, panels, table shells, callouts) must use a white background (`#ffffff`) for readability and contrast.
## 2. Color Palette & Roles
### Primary
- **Cursor Dark** (`#26251e`): Primary text, headings, dark UI surfaces. A warm near-black with distinct yellow-brown undertone -- the defining color of the system.
- **Cursor Cream** (`#f2f1ed`): Page background, primary surface. Not white but a warm cream that sets the entire warm tone.
- **Cursor Light** (`#e6e5e0`): Secondary surface, button backgrounds, card fills. A slightly warmer, slightly darker cream.
- **Pure White** (`#ffffff`): Used sparingly for maximum contrast elements and specific surface highlights.
- **True Black** (`#000000`): Minimal use, specific code/console contexts.
### Accent
- **Cursor Orange** (`#f54e00`): Brand accent, `--color-accent`. A vibrant red-orange used for primary CTAs, active links, and brand moments. Warm and urgent.
- **Gold** (`#c08532`): Secondary accent, warm gold for premium or highlighted contexts.
### Semantic
- **Error** (`#cf2d56`): `--color-error`. A warm crimson-rose rather than cold red.
- **Success** (`#1f8a65`): `--color-success`. A muted teal-green, warm-shifted.
### Timeline / Feature Colors
- **Thinking** (`#dfa88f`): Warm peach for "thinking" state in AI timeline.
- **Grep** (`#9fc9a2`): Soft sage green for search/grep operations.
- **Read** (`#9fbbe0`): Soft blue for file reading operations.
- **Edit** (`#c0a8dd`): Soft lavender for editing operations.
### Surface Scale
- **Surface 100** (`#f7f7f4`): Lightest button/card surface, barely tinted.
- **Surface 200** (`#f2f1ed`): Primary page background.
- **Surface 300** (`#ebeae5`): Button default background, subtle emphasis.
- **Surface 400** (`#e6e5e0`): Card backgrounds, secondary surfaces.
- **Surface 500** (`#e1e0db`): Tertiary button background, deeper emphasis.
### Border Colors
- **Border Primary** (`oklab(0.263084 -0.00230259 0.0124794 / 0.1)`): Standard border, 10% warm brown in oklab space.
- **Border Medium** (`oklab(0.263084 -0.00230259 0.0124794 / 0.2)`): Emphasized border, 20% warm brown.
- **Border Strong** (`rgba(38, 37, 30, 0.55)`): Strong borders, table rules.
- **Border Solid** (`#26251e`): Full-opacity dark border for maximum contrast.
- **Border Light** (`#f2f1ed`): Light border matching page background.
### Shadows & Depth
- **Card Shadow** (`rgba(0,0,0,0.14) 0px 28px 70px, rgba(0,0,0,0.1) 0px 14px 32px, oklab(0.263084 -0.00230259 0.0124794 / 0.1) 0px 0px 0px 1px`): Heavy elevated card with warm oklab border ring.
- **Ambient Shadow** (`rgba(0,0,0,0.02) 0px 0px 16px, rgba(0,0,0,0.008) 0px 0px 8px`): Subtle ambient glow for floating elements.
## 3. Typography Rules
### Font Family
- **Display/Headlines**: `CursorGothic`, with fallbacks: `CursorGothic Fallback, system-ui, Helvetica Neue, Helvetica, Arial`
- **Body/Editorial**: `jjannon`, with fallbacks: `Iowan Old Style, Palatino Linotype, URW Palladio L, P052, ui-serif, Georgia, Cambria, Times New Roman, Times`
- **Code/Technical**: `berkeleyMono`, with fallbacks: `ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, Liberation Mono, Courier New`
- **UI/System**: `system-ui`, with fallbacks: `-apple-system, Segoe UI, Helvetica Neue, Arial`
- **Icons**: `CursorIcons16` (icon font at 14px and 12px)
- **OpenType Features**: `"cswh"` on jjannon body text, `"ss09"` on CursorGothic buttons/captions
### Hierarchy
| Role | Font | Size | Weight | Line Height | Letter Spacing | Notes |
|------|------|------|--------|-------------|----------------|-------|
| Display Hero | CursorGothic | 72px (4.50rem) | 400 | 1.10 (tight) | -2.16px | Maximum compression, hero statements |
| Section Heading | CursorGothic | 36px (2.25rem) | 400 | 1.20 (tight) | -0.72px | Feature sections, CTA headlines |
| Sub-heading | CursorGothic | 26px (1.63rem) | 400 | 1.25 (tight) | -0.325px | Card headings, sub-sections |
| Title Small | CursorGothic | 22px (1.38rem) | 400 | 1.30 (tight) | -0.11px | Smaller titles, list headings |
| Body Serif | jjannon | 19.2px (1.20rem) | 500 | 1.50 | normal | Editorial body with `"cswh"` |
| Body Serif SM | jjannon | 17.28px (1.08rem) | 400 | 1.35 | normal | Standard body text, descriptions |
| Body Sans | CursorGothic | 16px (1.00rem) | 400 | 1.50 | normal/0.08px | UI body text |
| Button Label | CursorGothic | 14px (0.88rem) | 400 | 1.00 (tight) | normal | Primary button text |
| Button Caption | CursorGothic | 14px (0.88rem) | 400 | 1.50 | 0.14px | Secondary button with `"ss09"` |
| Caption | CursorGothic | 11px (0.69rem) | 400-500 | 1.50 | normal | Small captions, metadata |
| System Heading | system-ui | 20px (1.25rem) | 700 | 1.55 | normal | System UI headings |
| System Caption | system-ui | 13px (0.81rem) | 500-600 | 1.33 | normal | System UI labels |
| System Micro | system-ui | 11px (0.69rem) | 500 | 1.27 (tight) | 0.048px | Uppercase micro labels |
| Mono Body | berkeleyMono | 12px (0.75rem) | 400 | 1.67 (relaxed) | normal | Code blocks |
| Mono Small | berkeleyMono | 11px (0.69rem) | 400 | 1.33 | -0.275px | Inline code, terminal |
| Lato Heading | Lato | 16px (1.00rem) | 600 | 1.33 | normal | Lato section headings |
| Lato Caption | Lato | 14px (0.88rem) | 400-600 | 1.33 | normal | Lato captions |
| Lato Micro | Lato | 12px (0.75rem) | 400-600 | 1.27 (tight) | 0.053px | Lato small labels |
### Principles
- **Gothic compression for impact**: CursorGothic at display sizes uses -2.16px letter-spacing at 72px, progressively relaxing: -0.72px at 36px, -0.325px at 26px, -0.11px at 22px, normal at 16px and below. The tracking creates a sense of precision engineering.
- **Serif for soul**: jjannon provides literary warmth. The `"cswh"` feature adds contextual swash alternates that give body text a calligraphic quality.
- **Three typographic voices**: Gothic (display/UI), serif (editorial/body), mono (code/technical). Each serves a distinct communication purpose.
- **Weight restraint**: CursorGothic uses weight 400 almost exclusively, relying on size and tracking for hierarchy rather than weight. System-ui components use 500-700 for functional emphasis.
## 4. Component Stylings
### Buttons
**Primary (Warm Surface)**
- Background: `#ebeae5` (Surface 300)
- Text: `#26251e` (Cursor Dark)
- Padding: 10px 12px 10px 14px
- Radius: 8px
- Outline: none
- Hover: text shifts to `var(--color-error)` (`#cf2d56`)
- Focus shadow: `rgba(0,0,0,0.1) 0px 4px 12px`
- Use: Primary actions, main CTAs
**Secondary Pill**
- Background: `#e6e5e0` (Surface 400)
- Text: `oklab(0.263 / 0.6)` (60% warm brown)
- Padding: 3px 8px
- Radius: full pill (33.5M px)
- Hover: text shifts to `var(--color-error)`
- Use: Tags, filters, secondary actions
**Tertiary Pill**
- Background: `#e1e0db` (Surface 500)
- Text: `oklab(0.263 / 0.6)` (60% warm brown)
- Radius: full pill
- Use: Active filter state, selected tags
**Ghost (Transparent)**
- Background: `rgba(38, 37, 30, 0.06)` (6% warm brown)
- Text: `rgba(38, 37, 30, 0.55)` (55% warm brown)
- Padding: 6px 12px
- Use: Tertiary actions, dismiss buttons
**Light Surface**
- Background: `#f7f7f4` (Surface 100) or `#f2f1ed` (Surface 200)
- Text: `#26251e` or `oklab(0.263 / 0.9)` (90%)
- Padding: 0px 8px 1px 12px
- Use: Dropdown triggers, subtle interactive elements
### Cards & Containers
- Background: `#ffffff` for any text-bearing card or panel
- Border: `1px solid oklab(0.263 / 0.1)` (warm brown at 10%)
- Radius: 8px (standard), 4px (compact), 10px (featured)
- Shadow: `rgba(0,0,0,0.14) 0px 28px 70px, rgba(0,0,0,0.1) 0px 14px 32px` for elevated cards
- Hover: shadow intensification
### Inputs & Forms
- Background: transparent or surface
- Text: `#26251e`
- Padding: 8px 8px 6px (textarea)
- Border: `1px solid oklab(0.263 / 0.1)`
- Focus: border shifts to `oklab(0.263 / 0.2)` or accent orange
### Navigation
- Clean horizontal nav on warm cream background
- Cursor logotype left-aligned (~96x24px)
- Links: 14px CursorGothic or system-ui, weight 500
- CTA button: warm surface with Cursor Dark text
- Tab navigation: bottom border `1px solid oklab(0.263 / 0.1)` with active tab differentiation
### Image Treatment
- Code editor screenshots with `1px solid oklab(0.263 / 0.1)` border
- Rounded corners: 8px standard
- AI chat/timeline screenshots dominate feature sections
- Warm gradient or solid cream backgrounds behind hero images
### Distinctive Components
**AI Timeline**
- Vertical timeline showing AI operations: thinking (peach), grep (sage), read (blue), edit (lavender)
- Each step uses its semantic color with matching text
- Connected with vertical lines
- Core visual metaphor for Cursor's AI-first coding experience
**Code Editor Previews**
- Dark code editor screenshots with warm cream border frame
- berkeleyMono for code text
- Syntax highlighting using timeline colors
**Pricing Cards**
- Warm surface backgrounds with bordered containers
- Feature lists using jjannon serif for readability
- CTA buttons with accent orange or primary dark styling
## 5. Layout Principles
### Spacing System
- Base unit: 8px
- Fine scale: 1.5px, 2px, 2.5px, 3px, 4px, 5px, 6px (sub-8px for micro-adjustments)
- Standard scale: 8px, 10px, 12px, 14px (derived from extraction)
- Extended scale (inferred): 16px, 24px, 32px, 48px, 64px, 96px
- Notable: fine-grained sub-8px increments for precise icon/text alignment
### Grid & Container
- Max content width: approximately 1200px
- Hero: centered single-column with generous top padding (80-120px)
- Feature sections: 2-3 column grids for cards and features
- Full-width sections with warm cream or slightly darker backgrounds
- Sidebar layouts for documentation and settings pages
### Whitespace Philosophy
- **Warm negative space**: The cream background means whitespace has warmth and texture, unlike cold white minimalism. Large empty areas feel cozy rather than clinical.
- **Compressed text, open layout**: Aggressive negative letter-spacing on CursorGothic headlines is balanced by generous surrounding margins. Text is dense; space around it breathes.
- **Section variation**: Alternating surface tones (cream → lighter cream → cream) create subtle section differentiation without harsh boundaries.
### Border Radius Scale
- Micro (1.5px): Fine detail elements
- Small (2px): Inline elements, code spans
- Medium (3px): Small containers, inline badges
- Standard (4px): Cards, images, compact buttons
- Comfortable (8px): Primary buttons, cards, menus
- Featured (10px): Larger containers, featured cards
- Full Pill (33.5M px / 9999px): Pill buttons, tags, badges
## 6. Depth & Elevation
| Level | Treatment | Use |
|-------|-----------|-----|
| Flat (Level 0) | No shadow | Page background, text blocks |
| Border Ring (Level 1) | `oklab(0.263 / 0.1) 0px 0px 0px 1px` | Standard card/container border (warm oklab) |
| Border Medium (Level 1b) | `oklab(0.263 / 0.2) 0px 0px 0px 1px` | Emphasized borders, active states |
| Ambient (Level 2) | `rgba(0,0,0,0.02) 0px 0px 16px, rgba(0,0,0,0.008) 0px 0px 8px` | Floating elements, subtle glow |
| Elevated Card (Level 3) | `rgba(0,0,0,0.14) 0px 28px 70px, rgba(0,0,0,0.1) 0px 14px 32px, oklab ring` | Modals, popovers, elevated cards |
| Focus | `rgba(0,0,0,0.1) 0px 4px 12px` on button focus | Interactive focus feedback |
**Shadow Philosophy**: Cursor's depth system is built around two ideas. First, borders use perceptually uniform oklab color space rather than rgba, ensuring warm brown borders look consistent across different background tones. Second, elevation shadows use dramatically large blur values (28px, 70px) with moderate opacity (0.14, 0.1), creating a diffused, atmospheric lift rather than hard-edged drop shadows. Cards don't feel like they float above the page -- they feel like the page has gently opened a space for them.
### Decorative Depth
- Warm cream surface variations create subtle tonal depth without shadows
- oklab borders at 10% and 20% create a spectrum of edge definition
- No harsh divider lines -- section separation through background tone shifts and spacing
## 7. Interaction & Motion
### Hover States
- Buttons: text color shifts to `--color-error` (`#cf2d56`) on hover -- a distinctive warm crimson that signals interactivity
- Links: color shift to accent orange (`#f54e00`) or underline decoration with `rgba(38, 37, 30, 0.4)`
- Cards: shadow intensification on hover (ambient → elevated)
### Focus States
- Shadow-based focus: `rgba(0,0,0,0.1) 0px 4px 12px` for depth-based focus indication
- Border focus: `oklab(0.263 / 0.2)` (20% border) for input/form focus
- Consistent warm tone in all focus states -- no cold blue focus rings
### Transitions
- Color transitions: 150ms ease for text/background color changes
- Shadow transitions: 200ms ease for elevation changes
- Transform: subtle scale or translate for interactive feedback
## 8. Responsive Behavior
### Breakpoints
| Name | Width | Key Changes |
|------|-------|-------------|
| Mobile | <600px | Single column, reduced padding, stacked navigation |
| Tablet Small | 600-768px | 2-column grids begin |
| Tablet | 768-900px | Expanded card grids, sidebar appears |
| Desktop Small | 900-1279px | Full layout forming |
| Desktop | >1279px | Full layout, maximum content width |
### Touch Targets
- Buttons use comfortable padding (6px-14px vertical, 8px-14px horizontal)
- Pill buttons maintain tap-friendly sizing with 3px-10px padding
- Navigation links at 14px with adequate spacing for touch
### Collapsing Strategy
- Hero: 72px CursorGothic → 36px → 26px on smaller screens, maintaining proportional letter-spacing
- Navigation: horizontal links → hamburger menu on mobile
- Feature cards: 3-column → 2-column → single column stacked
- Code editor screenshots: maintain aspect ratio, may shrink with border treatment preserved
- Timeline visualization: horizontal → vertical stacking
- Section spacing: 80px+ → 48px → 32px on mobile
### Image Behavior
- Editor screenshots maintain warm border treatment at all sizes
- AI timeline adapts from horizontal to vertical layout
- Product screenshots use responsive images with consistent border radius
- Full-width hero images scale proportionally
## 9. Agent Prompt Guide
### Quick Color Reference
- Primary CTA background: `#ebeae5` (warm cream button)
- Page background: `#f2f1ed` (warm off-white)
- Text color: `#26251e` (warm near-black)
- Secondary text: `rgba(38, 37, 30, 0.55)` (55% warm brown)
- Accent: `#f54e00` (orange)
- Error/hover: `#cf2d56` (warm crimson)
- Success: `#1f8a65` (muted teal)
- Border: `oklab(0.263084 -0.00230259 0.0124794 / 0.1)` or `rgba(38, 37, 30, 0.1)` as fallback
### Example Component Prompts
- "Create a hero section on `#f2f1ed` warm cream background. Headline at 72px CursorGothic weight 400, line-height 1.10, letter-spacing -2.16px, color `#26251e`. Subtitle at 17.28px jjannon weight 400, line-height 1.35, color `rgba(38,37,30,0.55)`. Primary CTA button (`#ebeae5` bg, 8px radius, 10px 14px padding) with hover text shift to `#cf2d56`."
- "Design a card: `#e6e5e0` background, border `1px solid rgba(38,37,30,0.1)`. Radius 8px. Title at 22px CursorGothic weight 400, letter-spacing -0.11px. Body at 17.28px jjannon weight 400, color `rgba(38,37,30,0.55)`. Use `#f54e00` for link accents."
- "Build a pill tag: `#e6e5e0` background, `rgba(38,37,30,0.6)` text, full-pill radius (9999px), 3px 8px padding, 14px CursorGothic weight 400."
- "Create navigation: sticky `#f2f1ed` background with backdrop-filter blur. 14px system-ui weight 500 for links, `#26251e` text. CTA button right-aligned with `#ebeae5` bg and 8px radius. Bottom border `1px solid rgba(38,37,30,0.1)`."
- "Design an AI timeline showing four steps: Thinking (`#dfa88f`), Grep (`#9fc9a2`), Read (`#9fbbe0`), Edit (`#c0a8dd`). Each step: 14px system-ui label + 16px CursorGothic description + vertical connecting line in `rgba(38,37,30,0.1)`."
### Iteration Guide
1. Always use warm tones -- `#f2f1ed` background, `#26251e` text, never pure white/black for primary surfaces
2. Letter-spacing scales with font size for CursorGothic: -2.16px at 72px, -0.72px at 36px, -0.325px at 26px, normal at 16px
3. Use `rgba(38, 37, 30, alpha)` as a CSS-compatible fallback for oklab borders
4. Three fonts, three voices: CursorGothic (display/UI), jjannon (editorial), berkeleyMono (code)
5. Pill shapes (9999px radius) for tags and filters; 8px radius for primary buttons and cards
6. Hover states use `#cf2d56` text color -- the warm crimson shift is a signature interaction
7. Shadows use large blur values (28px, 70px) for diffused atmospheric depth
8. The sub-8px spacing scale (1.5, 2, 2.5, 3, 4, 5, 6px) is critical for icon/text micro-alignment
+673 -72
View File
@@ -1,146 +1,747 @@
:root {
/* Theme design tokens. The legacy --web2-* custom properties that were
   previously hard-coded here (#1d9bf0, #0f172a, …) were left behind by a
   mis-merged diff and were immediately shadowed by the alias definitions
   at the bottom of this block; the dead lines have been removed. */
/* Text colors */
--theme_text_primary: #0f1d33;
--theme_text_weak: rgba(15, 29, 51, 0.67);
--theme_text_inverse: #f6faff;
--theme_text_inverse_muted: rgba(246, 250, 255, 0.77);
--theme_text_secondary_active: rgba(13, 24, 42, 0.84);
--theme_text_button_spotlight: #f7fbff;
--theme_text_success: #0f6a35;
--theme_text_placeholder: rgba(15, 29, 51, 0.47);
/* Accent blues */
--theme_accent_blue: #195fc8;
--theme_accent_blue_hover: #144ea7;
--theme_accent_blue_soft: #e8f1ff;
--theme_accent_blue_border: rgba(25, 95, 200, 0.44);
/* Surfaces */
--theme_surface_primary: #ffffff;
--theme_surface_subtle: #f3f8ff;
--theme_surface_shell: #f7fbff;
--theme_surface_section: #ffffff;
--theme_surface_raised: #ffffff;
--theme_surface_code: #ecf3ff;
--theme_surface_table_head: rgba(233, 242, 255, 0.92);
--theme_surface_chart_tooltip: rgba(12, 21, 37, 0.96);
/* Borders */
--theme_border: #d5e0ee;
--theme_border_medium: #bccde4;
--theme_border_strong: #4d6281;
--theme_border_focus: rgba(25, 95, 200, 0.78);
/* Shadows */
--theme_shadow_card: rgba(4, 16, 34, 0.08) 0 8px 22px, rgba(25, 95, 200, 0.09) 0 1px 3px;
--theme_shadow_ambient: rgba(19, 68, 142, 0.08) 0 18px 42px -24px;
--theme_shadow_soft: var(--theme_shadow_ambient);
--theme_shadow_button: rgba(25, 95, 200, 0.26) 0 2px 6px;
--theme_shadow_button_ring: 0 0 0 2px rgba(25, 95, 200, 0.28);
--theme_shadow_table_inset: inset rgba(25, 95, 200, 0.14) 0 0 0 1px;
--theme_shadow_tooltip: rgba(5, 14, 26, 0.28) 0 14px 32px;
/* Gradients */
--theme_gradient_hero_primary: radial-gradient(circle at 8% 4%, rgba(25, 95, 200, 0.09) 0, rgba(25, 95, 200, 0) 31%);
--theme_gradient_hero_secondary: radial-gradient(circle at 90% 12%, rgba(20, 78, 167, 0.08) 0, rgba(20, 78, 167, 0) 29%);
--theme_gradient_card_accent: linear-gradient(180deg, #195fc8 0%, #144ea7 100%);
/* Focus, radii, fonts, tracking, motion */
--theme_focus_outline: 2px solid rgba(25, 95, 200, 0.58);
--theme_radius_button: 12px;
--theme_radius_card: 16px;
--theme_radius_section: 24px;
--theme_radius_large: 30px;
--theme_radius_code: 8px;
--theme_radius_pill: 9999px;
--theme_font_body: "jjannon", "Iowan Old Style", "Palatino Linotype", "URW Palladio L", "P052", ui-serif, Georgia, Cambria, "Times New Roman", Times, serif;
--theme_font_display: "CursorGothic", "CursorGothic Fallback", system-ui, "Helvetica Neue", Helvetica, Arial, sans-serif;
--theme_font_code: "berkeleyMono", ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
--theme_font_ui: system-ui, -apple-system, "Segoe UI", "Helvetica Neue", Arial, sans-serif;
--theme_letter_body: 0.012em;
--theme_letter_caption: 0.085em;
--theme_letter_button: 0.01em;
--theme_transition_fast: 150ms ease;
--theme_transition_base: 200ms ease;
/* Legacy aliases: older --web2-* consumers resolve to the theme tokens. */
--web2-blue: var(--theme_accent_blue);
--web2-slate: var(--theme_text_primary);
--web2-muted: var(--theme_text_weak);
--web2-card: var(--theme_surface_primary);
--web2-border: var(--theme_border);
}
/* Apply border-box sizing to every element and pseudo-element. */
*,
*::before,
*::after {
box-sizing: border-box;
}
/* Base reset: pre-theme system font fallback and zero default spacing.
   The font/color here are overridden by the themed `body` rule below. */
html,
body {
font-family: "Segoe UI", "Helvetica Neue", Arial, sans-serif;
color: var(--web2-slate);
margin: 0;
padding: 0;
}
/* Themed body: serif body stack, tracked text, shell background. */
body {
font-family: var(--theme_font_body);
color: var(--theme_text_primary);
letter-spacing: var(--theme_letter_body);
background: var(--theme_surface_shell);
}
/* Page background: two hero radial gradients layered over the shell surface.
   (A stale pre-theme `background: #ffffff;` left by a mis-merged diff was
   removed — it was immediately overridden by this declaration.) */
.web2-bg {
background:
var(--theme_gradient_hero_primary),
var(--theme_gradient_hero_secondary),
var(--theme_surface_shell);
}
/* Centered content column. Stale duplicate `max-width: 1100px` and
   `padding: 2rem 1.5rem 4rem` (old diff lines) removed; the later
   declarations below were already winning per the cascade. */
.web2-shell {
max-width: 1140px;
margin: 0 auto;
padding: 2.5rem 1.5rem 2.25rem;
}
/* Wider shell variant for dense tables and dashboards. */
.web2-shell-wide {
max-width: min(1760px, calc(100vw - 2rem));
}
/* Page header container: stacked title block plus rows of controls. */
.web2-page-head {
display: flex;
flex-direction: column;
gap: 1rem;
}
/* Header row: copy on the left, actions on the right, wrapping on narrow viewports. */
.web2-page-head-row {
display: flex;
flex-wrap: wrap;
align-items: flex-start;
justify-content: space-between;
gap: 1rem;
}
/* Title/subtitle column, capped at a readable 72ch measure. */
.web2-head-copy {
flex: 1 1 740px;
min-width: 0;
max-width: 72ch;
}
/* Display-face page title; overflow-wrap/word-break/hyphens let very long
   identifiers break instead of overflowing the header. */
.web2-page-title {
margin-top: 0.6rem;
font-family: var(--theme_font_display);
font-size: clamp(1.7rem, 1.1rem + 1.6vw, 2.35rem);
line-height: 1.15;
letter-spacing: -0.325px;
overflow-wrap: anywhere;
word-break: break-word;
hyphens: auto;
}
/* Muted subtitle under the page title. */
.web2-page-subtitle {
margin-top: 0.45rem;
font-size: 1.08rem;
line-height: 1.45;
color: var(--theme_text_weak);
}
/* Right-aligned cluster of action buttons in the page head. */
.web2-actions {
display: flex;
flex-wrap: wrap;
align-items: center;
justify-content: flex-end;
flex: 0 0 auto;
gap: 0.5rem;
}
/* Section header row: title left, controls right. */
.web2-section-head {
display: flex;
flex-wrap: wrap;
align-items: center;
justify-content: space-between;
gap: 0.75rem;
margin-bottom: 1rem;
}
/* KPI grid: single column by default (media queries elsewhere may widen it). */
.web2-kpi-grid {
display: grid;
gap: 1rem;
grid-template-columns: repeat(1, minmax(0, 1fr));
}
/* Uppercase micro-label above each KPI value. */
.web2-kpi-label {
font-size: 0.7rem;
font-family: var(--theme_font_ui);
font-weight: 500;
text-transform: uppercase;
letter-spacing: 0.16em;
color: var(--theme_text_weak);
}
/* Large display-face KPI value. */
.web2-kpi-value {
margin-top: 0.55rem;
font-size: 1.2rem;
font-family: var(--theme_font_display);
font-weight: 400;
line-height: 1.2;
color: var(--theme_text_primary);
letter-spacing: -0.11px;
}
/* Monospace variant for numeric/identifier KPI values. */
.web2-kpi-value-mono {
font-family: var(--theme_font_code);
font-size: 0.98rem;
font-weight: 400;
letter-spacing: -0.015em;
}
/* Single-line ellipsis truncation for long KPI values. */
.web2-kpi-truncate {
display: block;
max-width: 100%;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
/* Plain white overview card surface. */
.web2-card-overview {
background: var(--theme_surface_primary);
}
/* Featured card: stronger border/shadow plus an accent bar (::before below). */
.web2-card-featured {
background: var(--theme_surface_primary);
border-color: var(--theme_border_medium);
box-shadow: var(--theme_shadow_card), var(--theme_shadow_soft);
position: relative;
}
/* 3px gradient accent bar hugging the card's left edge. */
.web2-card-featured::before {
content: "";
position: absolute;
left: 0;
top: 0;
bottom: 0;
width: 3px;
background: var(--theme_gradient_card_accent);
border-top-left-radius: var(--theme_radius_card);
border-bottom-left-radius: var(--theme_radius_card);
}
/* Index page: stacked sections in a single-column grid. */
.web2-index-sections {
display: grid;
gap: 1.35rem;
grid-template-columns: minmax(0, 1fr);
}
.web2-index-overview {
box-shadow: var(--theme_shadow_soft);
}
/* Emphasized index card. */
.web2-index-featured {
margin-top: 0.45rem;
border-width: 1px;
border-color: var(--theme_border_medium);
box-shadow: var(--theme_shadow_card), var(--theme_shadow_soft);
}
/* Span the full grid width. */
.web2-index-wide {
grid-column: 1 / -1;
}
/* Informational note callout on the subtle surface. */
.web2-note {
padding: 0.78rem 0.92rem;
border-radius: var(--theme_radius_card);
border: 1px solid var(--theme_border_medium);
background: var(--theme_surface_subtle);
color: var(--theme_text_secondary_active);
font-family: var(--theme_font_ui);
font-size: 0.86rem;
line-height: 1.45;
}
/* Page header panel. Stale pre-theme declarations (white bg, 4px radius,
   old padding) left by a mis-merged diff removed — the themed declarations
   were already winning per the cascade, so this is behavior-preserving. */
.web2-header {
background: var(--theme_surface_section);
border: 1px solid var(--theme_border);
border-radius: var(--theme_radius_section);
padding: 1.65rem 2rem;
box-shadow: var(--theme_shadow_card), var(--theme_shadow_soft);
}
/* Standard content card; same dedup of stale pre-theme declarations. */
.web2-card {
background: var(--theme_surface_primary);
border: 1px solid var(--theme_border);
border-radius: var(--theme_radius_card);
padding: 1.4rem 1.6rem;
box-shadow: var(--theme_shadow_card), var(--theme_shadow_soft);
}
/* Section titles (standalone or as a card's h2), with room on the left for
   the accent bar drawn by the ::before rule below. */
.web2-card h2,
.web2-section-title {
position: relative;
margin: 0;
padding-left: 0.9rem;
font-size: 1.36rem;
font-family: var(--theme_font_display);
font-weight: 400;
letter-spacing: -0.11px;
color: var(--theme_text_primary);
}
/* Vertically-centered 4px accent bar at the title's left edge. */
.web2-card h2::before,
.web2-section-title::before {
content: "";
position: absolute;
left: 0;
top: 50%;
transform: translateY(-50%);
width: 4px;
height: 72%;
background: var(--theme_accent_blue);
border-radius: 999px;
box-shadow: none;
}
/* Pill tag/label. Stale pre-theme declarations (#f8fafc bg, 3px radius,
   0.85rem size, 0.02em tracking) left by a mis-merged diff removed; the
   themed declarations below were already overriding them. */
.web2-pill {
display: inline-flex;
align-items: center;
gap: 0.4rem;
background: var(--theme_surface_subtle);
border: 1px solid var(--theme_border);
color: var(--theme_text_weak);
padding: 0.22rem 0.78rem;
border-radius: var(--theme_radius_pill);
font-family: var(--theme_font_display);
font-size: 0.76rem;
font-weight: 400;
letter-spacing: 0;
}
/* Inline code chip: mono face on the code surface with a small radius. */
.web2-code {
font-family: var(--theme_font_code);
background: var(--theme_surface_code);
border: 1px solid var(--theme_border);
border-radius: var(--theme_radius_code);
padding: 0.12rem 0.42rem;
font-size: 0.84em;
color: var(--theme_text_primary);
}
/* Vertical rhythm between consecutive paragraphs. */
.web2-paragraphs p + p {
margin-top: 0.85rem;
}
/* Accent link. Stale duplicate `color`/`font-weight` declarations (old diff
   lines, already overridden by the cascade) removed. */
.web2-link {
color: var(--theme_accent_blue);
text-decoration: none;
font-weight: 500;
transition: color var(--theme_transition_fast), text-decoration-color var(--theme_transition_fast);
}
/* Hover: darker accent plus a soft underline. */
.web2-link:hover {
color: var(--theme_accent_blue_hover);
text-decoration: underline;
text-decoration-color: var(--theme_border_medium);
}
/* Primary button. Stale pre-theme declarations (old bg/color/padding/radius/
   border/box-shadow/font-weight diff residue) removed — the themed
   declarations below were already winning, so rendering is unchanged. */
.web2-button {
background: var(--theme_accent_blue);
color: var(--theme_text_button_spotlight);
padding: 0.62rem 0.9rem 0.6rem 1rem;
border-radius: var(--theme_radius_button);
border: 1px solid var(--theme_accent_blue_border);
box-shadow: var(--theme_shadow_button);
font-family: var(--theme_font_display);
font-weight: 400;
letter-spacing: var(--theme_letter_button);
text-decoration: none;
transition: transform var(--theme_transition_fast), background var(--theme_transition_fast), border-color var(--theme_transition_fast), box-shadow var(--theme_transition_fast);
}
/* Hover: darker accent and a subtle 1px lift. Stale duplicate background
   (#1787d4) removed. */
.web2-button:hover {
background: var(--theme_accent_blue_hover);
color: var(--theme_text_button_spotlight);
border-color: var(--theme_accent_blue_border);
box-shadow: var(--theme_shadow_button);
transform: translateY(-1px);
}
/* Secondary button: raised surface, pill shape, no shadow or lift. */
.web2-button.secondary {
background: var(--theme_surface_raised);
color: var(--theme_text_secondary_active);
border-color: var(--theme_border);
box-shadow: none;
border-radius: var(--theme_radius_pill);
}
/* Hover: subtle surface shift; explicitly cancels the primary's translateY. */
.web2-button.secondary:hover {
background: var(--theme_surface_subtle);
color: var(--theme_text_primary);
border-color: var(--theme_border_medium);
transform: none;
}
/* Wrapping row of buttons; spacing comes from per-button margins below. */
.web2-button-group {
display: flex;
flex-wrap: wrap;
}
.web2-button-group .web2-button {
margin: 0 0.5rem 0.5rem 0;
}
/* Neutral toolbar/filter button. Stale pre-theme declarations (old
   bg/color/padding/radius/border/font-weight and a duplicate `transition`)
   left by a mis-merged diff removed; the themed declarations were already
   overriding them per the cascade. */
.web3-button {
background: var(--theme_surface_primary);
color: var(--theme_text_primary);
padding: 0.52rem 1.02rem;
border-radius: var(--theme_radius_button);
border: 1px solid var(--theme_border);
text-decoration: none;
font-family: var(--theme_font_display);
font-weight: 400;
letter-spacing: var(--theme_letter_button);
transition: background var(--theme_transition_fast), border-color var(--theme_transition_fast), color var(--theme_transition_fast), box-shadow var(--theme_transition_fast);
display: inline-flex;
align-items: center;
gap: 0.35rem;
}
/* Hover: soft accent wash. Stale duplicate background/border-color removed. */
.web3-button:hover {
background: var(--theme_accent_blue_soft);
border-color: var(--theme_border_medium);
color: var(--theme_accent_blue_hover);
}
/* Active/selected state: accent colors plus a focus-style ring. */
.web3-button.active {
background: var(--theme_accent_blue_soft);
border-color: var(--theme_accent_blue_border);
color: var(--theme_accent_blue);
box-shadow: var(--theme_shadow_button_ring);
}
/* Wrapping row of web3 buttons with uniform gaps. */
.web3-button-group {
display: flex;
gap: 0.75rem;
flex-wrap: wrap;
margin-top: 4px;
}
/* FIX: a mis-merged diff dropped the closing brace of the old (pre-theme)
   `.web2-list li` rule here, so `.web2-table-shell` was swallowed inside it
   and never applied. The stale rule is removed entirely — it is fully
   superseded by the themed `.web2-list li` rule below. */
/* Horizontally-scrollable wrapper around wide tables. */
.web2-table-shell {
overflow-x: auto;
overflow-y: hidden;
border: 1px solid var(--theme_border);
border-radius: var(--theme_radius_card);
background: var(--theme_surface_primary);
box-shadow: var(--theme_shadow_table_inset);
}
/* List items styled as small cards. */
.web2-list li {
background: var(--theme_surface_primary);
border: 1px solid var(--theme_border);
border-radius: var(--theme_radius_card);
padding: 0.75rem 1rem;
box-shadow: var(--theme_shadow_card);
}
/* Data table: full-width, collapsed borders; min-width forces horizontal
   scrolling inside .web2-table-shell rather than squeezing columns. */
.web2-table {
width: 100%;
border-collapse: collapse;
font-size: 0.95rem;
letter-spacing: 0.08px;
min-width: 560px;
}
/* Table header cells. Stale pre-theme declarations (old padding,
   font-weight 700, legacy color/border vars) left by a mis-merged diff
   removed throughout this section; the themed declarations were already
   winning per the cascade, so rendering is unchanged. */
.web2-table thead th {
text-align: left;
padding: 0.8rem 0.55rem;
font-family: var(--theme_font_ui);
font-weight: 600;
color: var(--theme_text_weak);
letter-spacing: var(--theme_letter_caption);
border-bottom: 1px solid var(--theme_border);
background: var(--theme_surface_table_head);
}
/* Body cells; duplicate padding/border-bottom removed. */
.web2-table tbody td {
padding: 0.92rem 0.55rem;
border-bottom: 1px solid var(--theme_border);
}
/* Zebra striping: primary surface on odd rows, subtle on even. */
.web2-table tbody tr:nth-child(odd) {
background: var(--theme_surface_primary);
}
.web2-table tbody tr:nth-child(even) {
background: var(--theme_surface_subtle);
}
/* Group/section divider rows inside tables. */
.web2-group-row td {
background: var(--theme_surface_subtle);
color: var(--theme_text_primary);
border-bottom: 1px solid var(--theme_border);
padding: 0.7rem 0.55rem;
}
/* Small pill-shaped badge; stale pre-theme declarations removed. */
.web2-badge {
display: inline-flex;
align-items: center;
gap: 0.25rem;
border: 1px solid var(--theme_border);
padding: 0.16rem 0.5rem;
border-radius: var(--theme_radius_pill);
font-family: var(--theme_font_display);
font-size: 0.74rem;
letter-spacing: var(--theme_letter_caption);
color: var(--theme_text_weak);
background: var(--theme_surface_raised);
}
/* Form layout: single column by default; widened to 3 columns by the
   min-width: 780px media query further down. */
.web2-form-grid {
display: grid;
gap: 0.75rem;
grid-template-columns: repeat(1, minmax(0, 1fr));
}
/* One label + control pair, stacked vertically. */
.web2-field {
display: flex;
flex-direction: column;
gap: 0.32rem;
}
.web2-label {
font-size: 0.82rem;
font-weight: 600;
letter-spacing: 0.1px;
color: var(--theme_text_weak);
}
/* Text input baseline style; hover/focus states follow. */
.web2-input {
width: 100%;
padding: 0.62rem 0.72rem;
border: 1px solid var(--theme_border);
border-radius: var(--theme_radius_button);
background: var(--theme_surface_primary);
color: var(--theme_text_primary);
font: inherit;
letter-spacing: var(--theme_letter_body);
transition: border-color var(--theme_transition_fast), box-shadow var(--theme_transition_fast), background var(--theme_transition_fast);
}
.web2-input::placeholder {
color: var(--theme_text_placeholder);
}
.web2-input:hover {
border-color: var(--theme_border_medium);
}
/* Keyboard focus: suppress the default outline in favour of a ring shadow. */
.web2-input:focus-visible {
outline: none;
border-color: var(--theme_border_focus);
box-shadow: var(--theme_shadow_button_ring);
}
/* Row of submit/reset buttons at the end of a form. */
.web2-form-actions {
display: flex;
flex-wrap: wrap;
gap: 0.5rem;
}
/* Stretch an item across all grid columns. */
.web2-form-actions-full {
grid-column: 1 / -1;
}
/* Small stat card nested inside a larger card. */
.web2-subcard {
padding: 0.95rem 1rem;
border-radius: var(--theme_radius_card);
border: 1px solid var(--theme_border);
background: var(--theme_surface_raised);
}
.web2-subcard-label {
font-size: 0.72rem;
font-family: var(--theme_font_ui);
font-weight: 600;
text-transform: uppercase;
letter-spacing: 0.2em;
color: var(--theme_text_weak);
}
.web2-subcard-value {
margin-top: 0.45rem;
font-size: 1.08rem;
font-family: var(--theme_font_display);
font-weight: 400;
color: var(--theme_text_primary);
}
/* Utility: de-emphasised text. */
.web2-muted {
color: var(--theme_text_weak);
}
/* Utility: small caption text. */
.web2-caption {
font-size: 0.76rem;
letter-spacing: var(--theme_letter_caption);
}
/* Clickable <summary> of a collapsible <details> section. */
.web2-details-summary {
cursor: pointer;
font-size: 0.86rem;
font-family: var(--theme_font_ui);
font-weight: 600;
color: var(--theme_text_secondary_active);
}
/* Vertical stack of cards. */
.web2-card-grid {
display: grid;
gap: 1.4rem;
}
/* Chart container: positioned so the absolutely-placed tooltip can anchor
   to it; min-width keeps the canvas legible (shrunk at <=900px below). */
.web3-chart-frame {
position: relative;
min-width: 760px;
width: 100%;
}
/* The <canvas> itself; the JS renderer sets its pixel size explicitly. */
.web3-chart-canvas {
display: block;
width: 100%;
height: 360px;
background: var(--theme_surface_primary);
border: 1px solid var(--theme_border);
border-radius: var(--theme_radius_card);
}
/* Hover tooltip, positioned by JS via left/top; hidden until .visible. */
.web3-chart-tooltip {
position: absolute;
left: 0;
top: 0;
opacity: 0;
pointer-events: none;
background: var(--theme_surface_chart_tooltip);
color: var(--theme_text_inverse);
padding: 0.55rem 0.65rem;
border-radius: var(--theme_radius_button);
font-size: 0.75rem;
line-height: 1.35;
min-width: 170px;
box-shadow: var(--theme_shadow_tooltip);
z-index: 20;
transition: opacity 80ms linear;
}
.web3-chart-tooltip.visible {
opacity: 1;
}
/* Tooltip heading (the hovered x-axis label). */
.web3-chart-tooltip-title {
font-weight: 700;
color: var(--theme_text_inverse);
margin-bottom: 0.35rem;
}
/* One name/value line inside the tooltip. */
.web3-chart-tooltip-row {
display: flex;
align-items: center;
justify-content: space-between;
gap: 0.65rem;
}
.web3-chart-tooltip-label {
display: inline-flex;
align-items: center;
color: var(--theme_text_inverse_muted);
}
.web3-chart-tooltip-value {
font-weight: 700;
color: var(--theme_text_inverse);
}
/* Coloured dot matching the series line colour (set inline by JS). */
.web3-chart-tooltip-swatch {
display: inline-block;
width: 8px;
height: 8px;
border-radius: 999px;
margin-right: 0.35rem;
}
/* Page footer wrapper. */
.web2-footer {
width: 100%;
padding: 0.65rem 1.5rem 1.25rem;
}
/* Centred footer content, capped to the page width. */
.web2-footer-inner {
max-width: 1140px;
margin: 0 auto;
border-top: 1px solid var(--theme_border);
padding-top: 0.75rem;
text-align: center;
font-size: 0.74rem;
font-style: normal;
letter-spacing: var(--theme_letter_caption);
color: var(--theme_text_weak);
}
/* Shared keyboard-focus ring for every interactive element. */
a:focus-visible,
button:focus-visible,
summary:focus-visible,
.web2-button:focus-visible,
.web3-button:focus-visible,
.web2-link:focus-visible {
outline: var(--theme_focus_outline);
outline-offset: 2px;
}
/* Narrow screens: tighter spacing, smaller type, stretched action buttons. */
@media (max-width: 900px) {
.web2-shell {
padding: 1.5rem 1rem 1.25rem;
}
.web2-header {
border-radius: var(--theme_radius_card);
padding: 1.2rem 1rem;
}
.web2-card {
padding: 1.1rem 1rem;
}
.web3-chart-frame {
min-width: 640px;
}
.web2-footer {
padding: 0.5rem 1rem 1rem;
}
.web2-page-title {
font-size: 1.82rem;
}
.web2-page-subtitle {
font-size: 0.9rem;
}
.web2-index-sections {
gap: 1.15rem;
}
.web2-index-featured {
margin-top: 0.75rem;
}
.web2-actions {
width: 100%;
}
.web2-actions .web2-button {
flex: 1 1 auto;
text-align: center;
}
.web2-table {
min-width: 520px;
}
}
/* Very wide screens: let the wide shell grow but keep a small gutter. */
@media (min-width: 1500px) {
.web2-shell {
padding-left: 1rem;
padding-right: 1rem;
}
.web2-shell-wide {
max-width: min(1860px, calc(100vw - 1.25rem));
}
}
/* Medium screens and up: 3-column KPI and form grids. */
@media (min-width: 780px) {
.web2-kpi-grid {
grid-template-columns: repeat(3, minmax(0, 1fr));
}
.web2-form-grid {
grid-template-columns: repeat(3, minmax(0, 1fr));
}
}
/* Desktop: two-column index layout (nav | featured). */
@media (min-width: 1024px) {
.web2-index-sections {
grid-template-columns: minmax(0, 4fr) minmax(0, 8fr);
gap: 1.5rem;
}
.web2-index-featured {
margin-top: 0;
}
}
+521
View File
@@ -0,0 +1,521 @@
(function () {
"use strict";
// Bound value to the inclusive range [min, max].
function clamp(value, min, max) {
  return Math.min(Math.max(value, min), max);
}
// Coerce value to a finite number, or null when that is impossible
// (NaN, Infinity, non-numeric strings, etc.).
function toNumber(value) {
  var parsed = Number(value);
  if (Number.isFinite(parsed)) {
    return parsed;
  }
  return null;
}
// HTML-escape the five special characters in one regex pass so the result
// is safe to interpolate into innerHTML.
function escapeHTML(value) {
  var entities = {
    "&": "&amp;",
    "<": "&lt;",
    ">": "&gt;",
    '"': "&quot;",
    "'": "&#39;",
  };
  return String(value).replace(/[&<>"']/g, function (ch) {
    return entities[ch];
  });
}
// Render a numeric value for display. null/undefined/NaN become "-";
// "int" rounds, "float1"/"float2" fix decimal places, anything else is
// stringified as-is.
function formatValue(value, format) {
  var missing = value === null || value === undefined || Number.isNaN(value);
  if (missing) {
    return "-";
  }
  if (format === "int") {
    return String(Math.round(value));
  }
  if (format === "float1" || format === "float2") {
    var places = format === "float1" ? 1 : 2;
    return Number(value).toFixed(places);
  }
  return String(value);
}
// Choose roughly `desired` evenly-spaced indices out of [0, total), always
// including the first and last index. Returns a sorted, de-duplicated list.
function pickTickIndices(total, desired) {
  if (total <= 0) {
    return [];
  }
  if (total === 1) {
    return [0];
  }
  var want = Math.max(2, Math.min(total, desired || 6));
  var i;
  if (want >= total) {
    // Few enough points: label every index.
    var every = [];
    for (i = 0; i < total; i++) {
      every.push(i);
    }
    return every;
  }
  // Sample interior indices at a fractional stride, then dedupe (rounding
  // can collide neighbours) and sort numerically.
  var stride = (total - 1) / (want - 1);
  var raw = [0];
  for (i = 1; i < want - 1; i++) {
    raw.push(Math.round(i * stride));
  }
  raw.push(total - 1);
  var used = {};
  var unique = [];
  raw.forEach(function (index) {
    if (!used[index]) {
      used[index] = true;
      unique.push(index);
    }
  });
  unique.sort(function (a, b) {
    return a - b;
  });
  return unique;
}
// Inner plot rectangle for a canvas of the given CSS size. Fixed margins
// reserve room for y-axis labels (left), x labels + legend (bottom), and
// small paddings at the top and right.
function getPlotBounds(width, height) {
  var plot = {};
  plot.left = 52;
  plot.top = 16;
  plot.right = width - 20;
  plot.bottom = height - 78;
  return plot;
}
// Build the data-to-pixel mapping for the chart. The y scale runs from 0
// (plot bottom) to the largest finite value across all series (plot top),
// defaulting to 1 when no positive value exists; the x scale spreads label
// indices evenly across the plot width.
function buildScales(config, plot) {
  var peak = 0;
  config.series.forEach(function (entry) {
    (entry.values || []).forEach(function (raw) {
      var n = Number(raw);
      if (Number.isFinite(n) && n > peak) {
        peak = n;
      }
    });
  });
  if (peak <= 0) {
    peak = 1;
  }
  var count = config.labels.length;
  var xSpan = plot.right - plot.left;
  var ySpan = plot.bottom - plot.top;
  return {
    maxY: peak,
    xForIndex: function (index) {
      // A single point sits at the left edge to avoid division by zero.
      return count <= 1 ? plot.left : plot.left + (index / (count - 1)) * xSpan;
    },
    yForValue: function (value) {
      var n = Number(value);
      if (!Number.isFinite(n)) {
        return null;
      }
      return plot.bottom - (n / peak) * ySpan;
    },
  };
}
// Paint dashed background gridlines: horizontal lines at each y tick and
// vertical lines at each chosen x tick index.
function drawGrid(ctx, plot, config, scales) {
  ctx.save();
  ctx.strokeStyle = "#e2e8f0";
  ctx.lineWidth = 1;
  ctx.setLineDash([2, 4]);
  var rows = Math.max(2, config.yTicks || 5);
  for (var row = 0; row < rows; row++) {
    var y = plot.top + (row / (rows - 1)) * (plot.bottom - plot.top);
    ctx.beginPath();
    ctx.moveTo(plot.left, y);
    ctx.lineTo(plot.right, y);
    ctx.stroke();
  }
  pickTickIndices(config.labels.length, config.xTicks || 6).forEach(function (index) {
    var x = scales.xForIndex(index);
    ctx.beginPath();
    ctx.moveTo(x, plot.top);
    ctx.lineTo(x, plot.bottom);
    ctx.stroke();
  });
  ctx.restore();
}
// Draw the solid x- and y-axis lines along the plot's bottom and left edges.
function drawAxes(ctx, plot) {
  ctx.save();
  ctx.strokeStyle = "#94a3b8";
  ctx.lineWidth = 1.5;
  ctx.setLineDash([]);
  var segments = [
    [plot.left, plot.bottom, plot.right, plot.bottom],
    [plot.left, plot.top, plot.left, plot.bottom],
  ];
  segments.forEach(function (seg) {
    ctx.beginPath();
    ctx.moveTo(seg[0], seg[1]);
    ctx.lineTo(seg[2], seg[3]);
    ctx.stroke();
  });
  ctx.restore();
}
// Draw y-axis tick values, x-axis tick labels, and optional axis titles.
// Canvas text state (align/baseline/font) is mutated between sections, so
// the statement order below matters.
function drawLabels(ctx, plot, config, scales) {
ctx.save();
ctx.fillStyle = "#475569";
ctx.font = "10px sans-serif";
// Y ticks: evenly spaced rows, labelled top-down from maxY to 0.
var yTickCount = Math.max(2, config.yTicks || 5);
for (var i = 0; i < yTickCount; i++) {
var ratio = i / (yTickCount - 1);
var y = plot.top + ratio * (plot.bottom - plot.top);
var value = scales.maxY * (1 - ratio);
ctx.textAlign = "right";
ctx.textBaseline = "middle";
ctx.fillText(formatValue(value, "int"), plot.left - 8, y);
}
// X ticks: a sampled subset of indices; config.tickLabels (short form)
// takes precedence over config.labels when present.
var xIndices = pickTickIndices(config.labels.length, config.xTicks || 6);
for (var j = 0; j < xIndices.length; j++) {
var idx = xIndices[j];
var tick = (config.tickLabels && config.tickLabels[idx]) || config.labels[idx] || "";
ctx.textAlign = "center";
ctx.textBaseline = "top";
ctx.fillText(tick, scales.xForIndex(idx), plot.bottom + 12);
}
// Optional y-axis title, rotated 90° counter-clockwise along the left edge.
if (config.yLabel) {
ctx.save();
ctx.translate(16, plot.top + (plot.bottom-plot.top)/2);
ctx.rotate(-Math.PI / 2);
ctx.textAlign = "center";
ctx.textBaseline = "top";
ctx.font = "12px sans-serif";
ctx.fillText(config.yLabel, 0, 0);
ctx.restore();
}
// Optional x-axis title, centred below the tick labels.
if (config.xLabel) {
ctx.textAlign = "center";
ctx.textBaseline = "top";
ctx.font = "12px sans-serif";
ctx.fillText(config.xLabel, plot.left + (plot.right-plot.left)/2, plot.bottom + 48);
}
ctx.restore();
}
// Stroke one polyline per series, skipping points whose value is not a
// finite number (the line restarts its moveTo at the first valid point).
function drawSeries(ctx, plot, config, scales) {
  config.series.forEach(function (series) {
    var points = series.values || [];
    if (points.length === 0) {
      return;
    }
    ctx.save();
    ctx.strokeStyle = series.color || "#2563eb";
    ctx.lineWidth = series.lineWidth || 2.5;
    ctx.setLineDash(Array.isArray(series.dash) ? series.dash : []);
    ctx.beginPath();
    var started = false;
    for (var i = 0; i < points.length; i++) {
      var y = scales.yForValue(points[i]);
      if (y === null) {
        continue;
      }
      var x = scales.xForIndex(i);
      if (started) {
        ctx.lineTo(x, y);
      } else {
        ctx.moveTo(x, y);
        started = true;
      }
    }
    ctx.stroke();
    ctx.restore();
  });
}
// Draw the series legend near the bottom of the canvas: a short line sample
// in each series' colour/dash followed by its name, wrapping to a new row
// when the running x position nears the right edge.
function drawLegend(ctx, config, width, height) {
var x = 52;
var y = height - 32;
ctx.save();
ctx.font = "12px sans-serif";
ctx.textBaseline = "middle";
for (var i = 0; i < config.series.length; i++) {
var series = config.series[i];
var label = series.name || "Series";
ctx.strokeStyle = series.color || "#2563eb";
ctx.fillStyle = "#475569";
ctx.lineWidth = series.lineWidth || 2.5;
ctx.setLineDash(Array.isArray(series.dash) ? series.dash : []);
ctx.beginPath();
ctx.moveTo(x, y);
ctx.lineTo(x + 16, y);
ctx.stroke();
// Reset the dash so it does not leak into later strokes.
ctx.setLineDash([]);
ctx.fillText(label, x + 22, y);
// Advance past sample + measured label width + padding; wrap near the
// right edge.
x += 22 + ctx.measureText(label).width + 18;
if (x > width - 160) {
x = 52;
y += 18;
}
}
ctx.restore();
}
// Rebuild and position the HTML tooltip for the current hover index. When
// nothing is hovered (or no tooltip element exists) the tooltip is hidden
// and marked aria-hidden for assistive tech.
function updateTooltip(state, config) {
  if (!state.tooltip) {
    return;
  }
  if (state.hoverIndex === null) {
    state.tooltip.classList.remove("visible");
    state.tooltip.setAttribute("aria-hidden", "true");
    return;
  }
  var idx = state.hoverIndex;
  var rows = [];
  rows.push('<div class="web3-chart-tooltip-title">' + escapeHTML(config.labels[idx] || "") + "</div>");
  // One row per visible series: colour swatch, name, formatted value.
  for (var i = 0; i < config.series.length; i++) {
    var series = config.series[i];
    if (series.tooltipHidden) {
      continue;
    }
    var seriesValues = series.values || [];
    var value = toNumber(seriesValues[idx]);
    var valueLabel = formatValue(value, series.tooltipFormat || "int");
    rows.push(
      '<div class="web3-chart-tooltip-row">' +
      '<span class="web3-chart-tooltip-label"><span class="web3-chart-tooltip-swatch" style="background:' + escapeHTML(series.color || "#2563eb") + '"></span>' +
      escapeHTML(series.name || "Series") +
      "</span>" +
      '<span class="web3-chart-tooltip-value">' + escapeHTML(valueLabel) + "</span>" +
      "</div>"
    );
  }
  // Extra free-form rows (pre-formatted strings) supplied via the config.
  // Fix: this loop previously redeclared `var values`, shadowing the series
  // loop's variable within the shared function scope (no-redeclare).
  var hoverRows = config.hoverRows || [];
  for (var j = 0; j < hoverRows.length; j++) {
    var hover = hoverRows[j];
    var hoverValues = hover.values || [];
    var label = hoverValues[idx] || "-";
    rows.push(
      '<div class="web3-chart-tooltip-row">' +
      '<span class="web3-chart-tooltip-label">' + escapeHTML(hover.name || "Value") + "</span>" +
      '<span class="web3-chart-tooltip-value">' + escapeHTML(label) + "</span>" +
      "</div>"
    );
  }
  state.tooltip.innerHTML = rows.join("");
  state.tooltip.classList.add("visible");
  state.tooltip.setAttribute("aria-hidden", "false");
  // Keep the tooltip inside the wrapper box, offset 14px from the cursor.
  var box = state.wrapper.getBoundingClientRect();
  var tooltipBox = state.tooltip.getBoundingClientRect();
  var left = clamp(state.mouseX + 14, 4, box.width - tooltipBox.width - 4);
  var top = clamp(state.mouseY + 14, 4, box.height - tooltipBox.height - 4);
  state.tooltip.style.left = left + "px";
  state.tooltip.style.top = top + "px";
}
// Highlight the hovered column: a dashed vertical guide line plus a white
// marker dot (outlined in the series colour) on each series at that index.
function drawHover(ctx, plot, config, scales, hoverIndex) {
  if (hoverIndex === null) {
    return;
  }
  var x = scales.xForIndex(hoverIndex);
  ctx.save();
  ctx.strokeStyle = "#94a3b8";
  ctx.lineWidth = 1;
  ctx.setLineDash([3, 4]);
  ctx.beginPath();
  ctx.moveTo(x, plot.top);
  ctx.lineTo(x, plot.bottom);
  ctx.stroke();
  ctx.setLineDash([]);
  config.series.forEach(function (series) {
    var y = scales.yForValue((series.values || [])[hoverIndex]);
    if (y === null) {
      return;
    }
    ctx.fillStyle = "#ffffff";
    ctx.strokeStyle = series.color || "#2563eb";
    ctx.lineWidth = 1.5;
    ctx.beginPath();
    ctx.arc(x, y, 3.5, 0, Math.PI * 2);
    ctx.fill();
    ctx.stroke();
  });
  ctx.restore();
}
// Top-level renderer: wires a canvas + optional tooltip element to a chart
// config ({labels, series, tickLabels?, hoverRows?, height?, xTicks?,
// yTicks?, xLabel?, yLabel?}), handles device-pixel-ratio scaling, hover
// tracking and responsive resize. Silently returns on missing/invalid input.
function renderLineChart(options) {
if (!options || !options.canvasId || !options.config) {
return;
}
var canvas = document.getElementById(options.canvasId);
if (!canvas) {
return;
}
var config = options.config;
if (!Array.isArray(config.labels) || config.labels.length === 0 || !Array.isArray(config.series) || config.series.length === 0) {
return;
}
var wrapper = canvas.parentElement;
var tooltip = options.tooltipId ? document.getElementById(options.tooltipId) : null;
var ctx = canvas.getContext("2d");
if (!ctx) {
return;
}
// Mutable render state shared by redraw/resize and the event handlers.
var state = {
canvas: canvas,
wrapper: wrapper,
tooltip: tooltip,
hoverIndex: null,
mouseX: 0,
mouseY: 0,
scales: null,
plot: null,
cssWidth: 0,
cssHeight: config.height || 360,
};
// Full repaint in CSS-pixel coordinates (the DPR transform maps them to
// device pixels). Order: clear, background, grid, axes, lines, labels,
// legend, hover markers, then the HTML tooltip.
function redraw() {
ctx.setTransform(1, 0, 0, 1, 0, 0);
ctx.clearRect(0, 0, canvas.width, canvas.height);
var dpr = window.devicePixelRatio || 1;
ctx.setTransform(dpr, 0, 0, dpr, 0, 0);
ctx.fillStyle = "#ffffff";
ctx.fillRect(0, 0, state.cssWidth, state.cssHeight);
drawGrid(ctx, state.plot, config, state.scales);
drawAxes(ctx, state.plot);
drawSeries(ctx, state.plot, config, state.scales);
drawLabels(ctx, state.plot, config, state.scales);
drawLegend(ctx, config, state.cssWidth, state.cssHeight);
drawHover(ctx, state.plot, config, state.scales, state.hoverIndex);
updateTooltip(state, config);
}
// Resize the backing store to match layout size × devicePixelRatio,
// recompute plot bounds and scales, then repaint.
function resize() {
var rect = canvas.getBoundingClientRect();
var width = Math.max(320, Math.floor(rect.width));
var height = config.height || 360;
var dpr = window.devicePixelRatio || 1;
canvas.width = Math.round(width * dpr);
canvas.height = Math.round(height * dpr);
canvas.style.height = height + "px";
state.cssWidth = width;
state.cssHeight = height;
state.plot = getPlotBounds(width, height);
state.scales = buildScales(config, state.plot);
redraw();
}
// Map cursor position to the nearest label index; clear hover outside the
// plot rectangle.
canvas.addEventListener("mousemove", function (event) {
var rect = canvas.getBoundingClientRect();
var x = event.clientX - rect.left;
var y = event.clientY - rect.top;
state.mouseX = x;
state.mouseY = y;
if (!state.plot || config.labels.length === 0) {
return;
}
if (x < state.plot.left || x > state.plot.right || y < state.plot.top || y > state.plot.bottom) {
state.hoverIndex = null;
redraw();
return;
}
var ratio = (x - state.plot.left) / (state.plot.right - state.plot.left);
var idx = Math.round(ratio * (config.labels.length - 1));
state.hoverIndex = clamp(idx, 0, config.labels.length - 1);
redraw();
});
canvas.addEventListener("mouseleave", function () {
state.hoverIndex = null;
redraw();
});
// Track both window resizes and wrapper size changes (when supported).
window.addEventListener("resize", resize);
if (window.ResizeObserver) {
var observer = new ResizeObserver(function () {
resize();
});
observer.observe(wrapper);
}
resize();
}
// Read a JSON chart config from an inline element (by id) and render it.
// Any parse or render error is swallowed so the page keeps working.
function renderFromScript(options) {
  if (!options || !options.configId) {
    return;
  }
  var configNode = document.getElementById(options.configId);
  if (!configNode) {
    return;
  }
  var payload = configNode.textContent || "";
  if (payload.trim() === "") {
    return;
  }
  try {
    renderLineChart({
      canvasId: options.canvasId,
      tooltipId: options.tooltipId,
      config: JSON.parse(payload),
    });
  } catch (error) {
    // Leave page functional even when chart config is malformed.
  }
}
// Read a JSON chart config from the canvas's data-chart-config attribute
// and render it. Any parse or render error is swallowed so the page keeps
// working.
function renderFromDataset(options) {
  if (!options || !options.canvasId) {
    return;
  }
  var canvas = document.getElementById(options.canvasId);
  if (!canvas) {
    return;
  }
  var payload = canvas.dataset.chartConfig || "";
  if (payload.trim() === "") {
    return;
  }
  try {
    renderLineChart({
      canvasId: options.canvasId,
      tooltipId: options.tooltipId,
      config: JSON.parse(payload),
    });
  } catch (error) {
    // Leave page functional even when chart config is malformed.
  }
}
// Public API, exposed on window for inline <script> callers.
window.Web3Charts = {
renderLineChart: renderLineChart,
renderFromScript: renderFromScript,
renderFromDataset: renderFromDataset,
};
})();
+49 -31
View File
@@ -1,45 +1,60 @@
module vctp
go 1.25.5
go 1.26.2
require (
github.com/a-h/templ v0.3.977
github.com/go-co-op/gocron/v2 v2.19.0
github.com/jackc/pgx/v5 v5.8.0
github.com/a-h/templ v0.3.1001
github.com/go-co-op/gocron/v2 v2.21.0
github.com/go-ldap/ldap/v3 v3.4.13
github.com/jackc/pgx/v5 v5.9.1
github.com/jmoiron/sqlx v1.4.0
github.com/pressly/goose/v3 v3.26.0
github.com/prometheus/client_golang v1.19.0
github.com/pressly/goose/v3 v3.27.0
github.com/prometheus/client_golang v1.23.2
github.com/swaggo/swag v1.16.6
github.com/vmware/govmomi v0.52.0
github.com/xuri/excelize/v2 v2.10.0
gopkg.in/yaml.v2 v2.4.0
modernc.org/sqlite v1.44.0
github.com/vmware/govmomi v0.53.0
github.com/xuri/excelize/v2 v2.10.1
gopkg.in/yaml.v3 v3.0.1
modernc.org/sqlite v1.48.2
)
require (
github.com/Azure/go-ntlmssp v0.1.0 // indirect
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/purell v1.2.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.6 // indirect
github.com/go-openapi/spec v0.20.4 // indirect
github.com/go-openapi/swag v0.19.15 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
github.com/go-openapi/jsonpointer v0.22.5 // indirect
github.com/go-openapi/jsonreference v0.21.5 // indirect
github.com/go-openapi/spec v0.22.4 // indirect
github.com/go-openapi/swag v0.25.5 // indirect
github.com/go-openapi/swag/cmdutils v0.25.5 // indirect
github.com/go-openapi/swag/conv v0.25.5 // indirect
github.com/go-openapi/swag/fileutils v0.25.5 // indirect
github.com/go-openapi/swag/jsonname v0.25.5 // indirect
github.com/go-openapi/swag/jsonutils v0.25.5 // indirect
github.com/go-openapi/swag/loading v0.25.5 // indirect
github.com/go-openapi/swag/mangling v0.25.5 // indirect
github.com/go-openapi/swag/netutils v0.25.5 // indirect
github.com/go-openapi/swag/stringutils v0.25.5 // indirect
github.com/go-openapi/swag/typeutils v0.25.5 // indirect
github.com/go-openapi/swag/yamlutils v0.25.5 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/jonboulle/clockwork v0.5.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mailru/easyjson v0.9.2 // indirect
github.com/mattn/go-isatty v0.0.21 // indirect
github.com/mfridman/interpolate v0.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/ncruces/go-strftime v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.67.5 // indirect
github.com/prometheus/procfs v0.20.1 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/richardlehane/mscfb v1.0.6 // indirect
github.com/richardlehane/msoleps v1.0.6 // indirect
@@ -49,16 +64,19 @@ require (
github.com/xuri/efp v0.0.1 // indirect
github.com/xuri/nfp v0.0.2-0.20250530014748-2ddeb826f9a9 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.47.0 // indirect
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect
golang.org/x/mod v0.32.0 // indirect
golang.org/x/net v0.49.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/text v0.33.0 // indirect
golang.org/x/tools v0.41.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
modernc.org/libc v1.67.4 // indirect
go.yaml.in/yaml/v2 v2.4.4 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.50.0 // indirect
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 // indirect
golang.org/x/mod v0.34.0 // indirect
golang.org/x/net v0.53.0 // indirect
golang.org/x/sync v0.20.0 // indirect
golang.org/x/sys v0.43.0 // indirect
golang.org/x/text v0.36.0 // indirect
golang.org/x/tools v0.43.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
modernc.org/libc v1.72.0 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.11.0 // indirect
)
+172
View File
@@ -1,35 +1,116 @@
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
filippo.io/edwards25519 v1.2.0 h1:crnVqOiS4jqYleHd9vaKZ+HKtHfllngJIiOpNpoJsjo=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/Azure/go-ntlmssp v0.1.0 h1:DjFo6YtWzNqNvQdrwEyr/e4nhU3vRiwenz5QX7sFz+A=
github.com/Azure/go-ntlmssp v0.1.0/go.mod h1:NYqdhxd/8aAct/s4qSYZEerdPuH1liG2/X9DiVTbhpk=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.2.1 h1:QsZ4TjvwiMpat6gBCBxEQI0rcS9ehtkKtSpiUnd9N28=
github.com/PuerkitoBio/purell v1.2.1/go.mod h1:ZwHcC/82TOaovDi//J/804umJFFmbOHPngi8iYYv/Eo=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/a-h/templ v0.3.977 h1:kiKAPXTZE2Iaf8JbtM21r54A8bCNsncrfnokZZSrSDg=
github.com/a-h/templ v0.3.977/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo=
github.com/a-h/templ v0.3.1001 h1:yHDTgexACdJttyiyamcTHXr2QkIeVF1MukLy44EAhMY=
github.com/a-h/templ v0.3.1001/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-co-op/gocron/v2 v2.19.0 h1:OKf2y6LXPs/BgBI2fl8PxUpNAI1DA9Mg+hSeGOS38OU=
github.com/go-co-op/gocron/v2 v2.19.0/go.mod h1:5lEiCKk1oVJV39Zg7/YG10OnaVrDAV5GGR6O0663k6U=
github.com/go-co-op/gocron/v2 v2.19.1 h1:B4iLeA0NB/2iO3EKQ7NfKn5KsQgZfjb2fkvoZJU3yBI=
github.com/go-co-op/gocron/v2 v2.19.1/go.mod h1:5lEiCKk1oVJV39Zg7/YG10OnaVrDAV5GGR6O0663k6U=
github.com/go-co-op/gocron/v2 v2.21.0 h1:e1nt9AEFglarRH9/9y9q0V5sblwxlknpHPjttEajrwQ=
github.com/go-co-op/gocron/v2 v2.21.0/go.mod h1:5lEiCKk1oVJV39Zg7/YG10OnaVrDAV5GGR6O0663k6U=
github.com/go-ldap/ldap/v3 v3.4.12 h1:1b81mv7MagXZ7+1r7cLTWmyuTqVqdwbtJSjC0DAp9s4=
github.com/go-ldap/ldap/v3 v3.4.12/go.mod h1:+SPAGcTtOfmGsCb3h1RFiq4xpp4N636G75OEace8lNo=
github.com/go-ldap/ldap/v3 v3.4.13 h1:+x1nG9h+MZN7h/lUi5Q3UZ0fJ1GyDQYbPvbuH38baDQ=
github.com/go-ldap/ldap/v3 v3.4.13/go.mod h1:LxsGZV6vbaK0sIvYfsv47rfh4ca0JXokCoKjZxsszv0=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=
github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=
github.com/go-openapi/jsonpointer v0.22.5 h1:8on/0Yp4uTb9f4XvTrM2+1CPrV05QPZXu+rvu2o9jcA=
github.com/go-openapi/jsonpointer v0.22.5/go.mod h1:gyUR3sCvGSWchA2sUBJGluYMbe1zazrYWIkWPjjMUY0=
github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs=
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8=
github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4=
github.com/go-openapi/jsonreference v0.21.5 h1:6uCGVXU/aNF13AQNggxfysJ+5ZcU4nEAe+pJyVWRdiE=
github.com/go-openapi/jsonreference v0.21.5/go.mod h1:u25Bw85sX4E2jzFodh1FOKMTZLcfifd1Q+iKKOUxExw=
github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M=
github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
github.com/go-openapi/spec v0.22.3 h1:qRSmj6Smz2rEBxMnLRBMeBWxbbOvuOoElvSvObIgwQc=
github.com/go-openapi/spec v0.22.3/go.mod h1:iIImLODL2loCh3Vnox8TY2YWYJZjMAKYyLH2Mu8lOZs=
github.com/go-openapi/spec v0.22.4 h1:4pxGjipMKu0FzFiu/DPwN3CTBRlVM2yLf/YTWorYfDQ=
github.com/go-openapi/spec v0.22.4/go.mod h1:WQ6Ai0VPWMZgMT4XySjlRIE6GP1bGQOtEThn3gcWLtQ=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
github.com/go-openapi/swag v0.25.5 h1:pNkwbUEeGwMtcgxDr+2GBPAk4kT+kJ+AaB+TMKAg+TU=
github.com/go-openapi/swag v0.25.5/go.mod h1:B3RT6l8q7X803JRxa2e59tHOiZlX1t8viplOcs9CwTA=
github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
github.com/go-openapi/swag/cmdutils v0.25.5 h1:yh5hHrpgsw4NwM9KAEtaDTXILYzdXh/I8Whhx9hKj7c=
github.com/go-openapi/swag/cmdutils v0.25.5/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
github.com/go-openapi/swag/conv v0.25.5 h1:wAXBYEXJjoKwE5+vc9YHhpQOFj2JYBMF2DUi+tGu97g=
github.com/go-openapi/swag/conv v0.25.5/go.mod h1:CuJ1eWvh1c4ORKx7unQnFGyvBbNlRKbnRyAvDvzWA4k=
github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
github.com/go-openapi/swag/fileutils v0.25.5 h1:B6JTdOcs2c0dBIs9HnkyTW+5gC+8NIhVBUwERkFhMWk=
github.com/go-openapi/swag/fileutils v0.25.5/go.mod h1:V3cT9UdMQIaH4WiTrUc9EPtVA4txS0TOmRURmhGF4kc=
github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
github.com/go-openapi/swag/jsonname v0.25.5 h1:8p150i44rv/Drip4vWI3kGi9+4W9TdI3US3uUYSFhSo=
github.com/go-openapi/swag/jsonname v0.25.5/go.mod h1:jNqqikyiAK56uS7n8sLkdaNY/uq6+D2m2LANat09pKU=
github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
github.com/go-openapi/swag/jsonutils v0.25.5 h1:XUZF8awQr75MXeC+/iaw5usY/iM7nXPDwdG3Jbl9vYo=
github.com/go-openapi/swag/jsonutils v0.25.5/go.mod h1:48FXUaz8YsDAA9s5AnaUvAmry1UcLcNVWUjY42XkrN4=
github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
github.com/go-openapi/swag/loading v0.25.5 h1:odQ/umlIZ1ZVRteI6ckSrvP6e2w9UTF5qgNdemJHjuU=
github.com/go-openapi/swag/loading v0.25.5/go.mod h1:I8A8RaaQ4DApxhPSWLNYWh9NvmX2YKMoB9nwvv6oW6g=
github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
github.com/go-openapi/swag/mangling v0.25.5 h1:hyrnvbQRS7vKePQPHHDso+k6CGn5ZBs5232UqWZmJZw=
github.com/go-openapi/swag/mangling v0.25.5/go.mod h1:6hadXM/o312N/h98RwByLg088U61TPGiltQn71Iw0NY=
github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
github.com/go-openapi/swag/netutils v0.25.5 h1:LZq2Xc2QI8+7838elRAaPCeqJnHODfSyOa7ZGfxDKlU=
github.com/go-openapi/swag/netutils v0.25.5/go.mod h1:lHbtmj4m57APG/8H7ZcMMSWzNqIQcu0RFiXrPUara14=
github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
github.com/go-openapi/swag/stringutils v0.25.5 h1:NVkoDOA8YBgtAR/zvCx5rhJKtZF3IzXcDdwOsYzrB6M=
github.com/go-openapi/swag/stringutils v0.25.5/go.mod h1:PKK8EZdu4QJq8iezt17HM8RXnLAzY7gW0O1KKarrZII=
github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
github.com/go-openapi/swag/typeutils v0.25.5 h1:EFJ+PCga2HfHGdo8s8VJXEVbeXRCYwzzr9u4rJk7L7E=
github.com/go-openapi/swag/typeutils v0.25.5/go.mod h1:itmFmScAYE1bSD8C4rS0W+0InZUBrB2xSPbWt6DLGuc=
github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
github.com/go-openapi/swag/yamlutils v0.25.5 h1:kASCIS+oIeoc55j28T4o8KwlV2S4ZLPT6G0iq2SSbVQ=
github.com/go-openapi/swag/yamlutils v0.25.5/go.mod h1:Gek1/SjjfbYvM+Iq4QGwa/2lEXde9n2j4a3wI3pNuOQ=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
@@ -47,6 +128,8 @@ github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7Ulw
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo=
github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw=
github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc=
github.com/jackc/pgx/v5 v5.9.1/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
@@ -68,12 +151,20 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8=
github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/mailru/easyjson v0.9.2 h1:dX8U45hQsZpxd80nLvDGihsQ/OxlvTkVUXH2r/8cb2M=
github.com/mailru/easyjson v0.9.2/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.21 h1:xYae+lCNBP7QuW4PUnNG61ffM4hVIfm+zUzDuSzYLGs=
github.com/mattn/go-isatty v0.0.21/go.mod h1:ZXfXG4SQHsB/w3ZeOYbR0PrPwLy+n6xiMrJlRFqopa4=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY=
github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
@@ -81,14 +172,26 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.26.0 h1:KJakav68jdH0WDvoAcj8+n61WqOIaPGgH0bJWS6jpmM=
github.com/pressly/goose/v3 v3.26.0/go.mod h1:4hC1KrritdCxtuFsqgs1R4AU5bWtTAf+cnWvfhf2DNY=
github.com/pressly/goose/v3 v3.27.0 h1:/D30gVTuQhu0WsNZYbJi4DMOsx1lNq+6SkLe+Wp59BM=
github.com/pressly/goose/v3 v3.27.0/go.mod h1:3ZBeCXqzkgIRvrEMDkYh1guvtoJTU5oMMuDdkutoM78=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/procfs v0.20.0 h1:AA7aCvjxwAquZAlonN7888f2u4IN8WVeFgBi4k82M4Q=
github.com/prometheus/procfs v0.20.0/go.mod h1:o9EMBZGRyvDrSPH1RqdxhojkuXstoe4UlK79eF5TGGo=
github.com/prometheus/procfs v0.20.1 h1:XwbrGOIplXW/AU3YhIhLODXMJYyC1isLFfYCsTEycfc=
github.com/prometheus/procfs v0.20.1/go.mod h1:o9EMBZGRyvDrSPH1RqdxhojkuXstoe4UlK79eF5TGGo=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/richardlehane/mscfb v1.0.6 h1:eN3bvvZCp00bs7Zf52bxNwAx5lJDBK1tCuH19qq5aC8=
@@ -97,6 +200,7 @@ github.com/richardlehane/msoleps v1.0.6 h1:9BvkpjvD+iUBalUY4esMwv6uBkfOip/Lzvd93
github.com/richardlehane/msoleps v1.0.6/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE=
@@ -113,44 +217,94 @@ github.com/tiendc/go-deepcopy v1.7.2 h1:Ut2yYR7W9tWjTQitganoIue4UGxZwCcJy3orjrrI
github.com/tiendc/go-deepcopy v1.7.2/go.mod h1:4bKjNC2r7boYOkD2IOuZpYjmlDdzjbpTRyCx+goBCJQ=
github.com/vmware/govmomi v0.52.0 h1:JyxQ1IQdllrY7PJbv2am9mRsv3p9xWlIQ66bv+XnyLw=
github.com/vmware/govmomi v0.52.0/go.mod h1:Yuc9xjznU3BH0rr6g7MNS1QGvxnJlE1vOvTJ7Lx7dqI=
github.com/vmware/govmomi v0.53.0 h1:e1bZCotAq7wm4xy95ePN2uoWwz28pNp/ewZZhpBY7/4=
github.com/vmware/govmomi v0.53.0/go.mod h1:EWfuzPfxT5NV+aS2we02SLFdhvJkgeY7t7+TszgBSMY=
github.com/xuri/efp v0.0.1 h1:fws5Rv3myXyYni8uwj2qKjVaRP30PdjeYe2Y6FDsCL8=
github.com/xuri/efp v0.0.1/go.mod h1:ybY/Jr0T0GTCnYjKqmdwxyxn2BQf2RcQIIvex5QldPI=
github.com/xuri/excelize/v2 v2.10.0 h1:8aKsP7JD39iKLc6dH5Tw3dgV3sPRh8uRVXu/fMstfW4=
github.com/xuri/excelize/v2 v2.10.0/go.mod h1:SC5TzhQkaOsTWpANfm+7bJCldzcnU/jrhqkTi/iBHBU=
github.com/xuri/excelize/v2 v2.10.1 h1:V62UlqopMqha3kOpnlHy2CcRVw1V8E63jFoWUmMzxN0=
github.com/xuri/excelize/v2 v2.10.1/go.mod h1:iG5tARpgaEeIhTqt3/fgXCGoBRt4hNXgCp3tfXKoOIc=
github.com/xuri/nfp v0.0.2-0.20250530014748-2ddeb826f9a9 h1:+C0TIdyyYmzadGaL/HBLbf3WdLgC29pgyhTjAT/0nuE=
github.com/xuri/nfp v0.0.2-0.20250530014748-2ddeb826f9a9/go.mod h1:WwHg+CVyzlv/TX9xqBFXEZAuxOPxn2k1GNHwG41IIUQ=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ=
go.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI=
golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa h1:Zt3DZoOFFYkKhDT3v7Lm9FDMEV06GpzjG2jrqW+QTE0=
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA=
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA=
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ=
golang.org/x/image v0.25.0 h1:Y6uW6rH1y5y/LK1J8BPWZtr6yZ7hrsy6hFrXjgsc2fQ=
golang.org/x/image v0.25.0/go.mod h1:tCAmOEGthTtkalusGp1g3xa2gke8J6c2N565dTyl9Rs=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI=
golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo=
golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y=
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA=
golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=
golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg=
golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s=
golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -165,18 +319,30 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/cc/v4 v4.27.3 h1:uNCgn37E5U09mTv1XgskEVUJ8ADKpmFMPxzGJ0TSo+U=
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
modernc.org/ccgo/v4 v4.30.2 h1:4yPaaq9dXYXZ2V8s1UgrC3KIj580l2N4ClrLwnbv2so=
modernc.org/ccgo/v4 v4.32.0 h1:hjG66bI/kqIPX1b2yT6fr/jt+QedtP2fqojG2VrFuVw=
modernc.org/ccgo/v4 v4.32.4 h1:L5OB8rpEX4ZsXEQwGozRfJyJSFHbbNVOoQ59DU9/KuU=
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
modernc.org/fileutil v1.4.0 h1:j6ZzNTftVS054gi281TyLjHPp6CPHr2KCxEXjEbD6SM=
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
modernc.org/gc/v3 v3.1.2 h1:ZtDCnhonXSZexk/AYsegNRV1lJGgaNZJuKjJSWKyEqo=
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
modernc.org/libc v1.67.4 h1:zZGmCMUVPORtKv95c2ReQN5VDjvkoRm9GWPTEPuvlWg=
modernc.org/libc v1.67.4/go.mod h1:QvvnnJ5P7aitu0ReNpVIEyesuhmDLQ8kaEoyMjIFZJA=
modernc.org/libc v1.68.0 h1:PJ5ikFOV5pwpW+VqCK1hKJuEWsonkIJhhIXyuF/91pQ=
modernc.org/libc v1.68.0/go.mod h1:NnKCYeoYgsEqnY3PgvNgAeaJnso968ygU8Z0DxjoEc0=
modernc.org/libc v1.70.0 h1:U58NawXqXbgpZ/dcdS9kMshu08aiA6b7gusEusqzNkw=
modernc.org/libc v1.70.0/go.mod h1:OVmxFGP1CI/Z4L3E0Q3Mf1PDE0BucwMkcXjjLntvHJo=
modernc.org/libc v1.72.0 h1:IEu559v9a0XWjw0DPoVKtXpO2qt5NVLAnFaBbjq+n8c=
modernc.org/libc v1.72.0/go.mod h1:tTU8DL8A+XLVkEY3x5E/tO7s2Q/q42EtnNWda/L5QhQ=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
@@ -187,6 +353,12 @@ modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.44.0 h1:YjCKJnzZde2mLVy0cMKTSL4PxCmbIguOq9lGp8ZvGOc=
modernc.org/sqlite v1.44.0/go.mod h1:2Dq41ir5/qri7QJJJKNZcP4UF7TsX/KNeykYgPDtGhE=
modernc.org/sqlite v1.46.1 h1:eFJ2ShBLIEnUWlLy12raN0Z1plqmFX9Qe3rjQTKt6sU=
modernc.org/sqlite v1.46.1/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA=
modernc.org/sqlite v1.47.0 h1:R1XyaNpoW4Et9yly+I2EeX7pBza/w+pmYee/0HJDyKk=
modernc.org/sqlite v1.47.0/go.mod h1:hWjRO6Tj/5Ik8ieqxQybiEOUXy0NJFNp2tpvVpKlvig=
modernc.org/sqlite v1.48.2 h1:5CnW4uP8joZtA0LedVqLbZV5GD7F/0x91AXeSyjoh5c=
modernc.org/sqlite v1.48.2/go.mod h1:hWjRO6Tj/5Ik8ieqxQybiEOUXy0NJFNp2tpvVpKlvig=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
+292
View File
@@ -0,0 +1,292 @@
package auth
import (
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
)
const (
	// jwtAlgHS256 is the only signing algorithm this package issues and accepts.
	jwtAlgHS256 = "HS256"
	// jwtTyp is the JOSE header "typ" value expected on tokens.
	jwtTyp = "JWT"
)
var (
	// ErrInvalidJWTConfig reports an unusable JWTConfig (missing issuer or
	// audience, non-positive lifespan, negative skew, or a bad signing key).
	ErrInvalidJWTConfig = errors.New("invalid jwt config")
	// ErrInvalidJWTToken reports a malformed token, an unsupported header,
	// or a signature mismatch.
	ErrInvalidJWTToken = errors.New("invalid jwt token")
	// ErrInvalidJWTClaims reports structurally invalid claims (missing
	// sub/jti, issuer/audience mismatch, or inconsistent timestamps).
	ErrInvalidJWTClaims = errors.New("invalid jwt claims")
	// ErrExpiredJWTToken reports a token past its exp, beyond clock skew.
	ErrExpiredJWTToken = errors.New("jwt token expired")
	// ErrNotYetValidJWTToken reports a token whose nbf is still in the
	// future, beyond clock skew.
	ErrNotYetValidJWTToken = errors.New("jwt token is not yet valid")
)
// JWTConfig carries the settings used to construct a JWTService.
type JWTConfig struct {
	// SigningKeyBase64 is the HMAC signing key, base64-encoded in any of
	// the four standard encodings (std/raw-std/url/raw-url).
	SigningKeyBase64 string
	// Issuer is written to, and required in, the "iss" claim.
	Issuer string
	// Audience is written to, and required in, the "aud" claim.
	Audience string
	// TokenLifespan is how long issued tokens remain valid; must be > 0.
	TokenLifespan time.Duration
	// ClockSkew is the leeway allowed when checking iat/nbf/exp; must be >= 0.
	ClockSkew time.Duration
}
// Claims is the JWT payload this service issues and validates. Timestamps
// are Unix seconds, matching the registered claim names (RFC 7519).
type Claims struct {
	Subject   string   `json:"sub"`
	Roles     []string `json:"roles,omitempty"`
	Groups    []string `json:"groups,omitempty"`
	Issuer    string   `json:"iss"`
	Audience  string   `json:"aud"`
	IssuedAt  int64    `json:"iat"`
	ExpiresAt int64    `json:"exp"`
	NotBefore int64    `json:"nbf"`
	ID        string   `json:"jti"`
}
// JWTService issues and verifies HS256-signed JWTs. Construct it with
// NewJWTService; the zero value is not usable.
type JWTService struct {
	signingKey    []byte
	issuer        string
	audience      string
	tokenLifespan time.Duration
	clockSkew     time.Duration
	// now is the clock source; tests override it for deterministic times.
	now func() time.Time
}
// jwtHeader models the JOSE header of a compact JWT.
type jwtHeader struct {
	Algorithm string `json:"alg"`
	Type      string `json:"typ"`
}
// NewJWTService validates cfg and returns a JWTService that issues and
// verifies HS256-signed tokens.
//
// Issuer, audience, and a positive token lifespan are required; clock skew
// must be non-negative. The signing key is decoded from base64 (any of the
// four standard variants) and must be non-empty. Every failure wraps
// ErrInvalidJWTConfig so callers can detect it with errors.Is.
func NewJWTService(cfg JWTConfig) (*JWTService, error) {
	issuer := strings.TrimSpace(cfg.Issuer)
	audience := strings.TrimSpace(cfg.Audience)
	if issuer == "" {
		return nil, fmt.Errorf("%w: issuer is required", ErrInvalidJWTConfig)
	}
	if audience == "" {
		return nil, fmt.Errorf("%w: audience is required", ErrInvalidJWTConfig)
	}
	if cfg.TokenLifespan <= 0 {
		return nil, fmt.Errorf("%w: token lifespan must be greater than zero", ErrInvalidJWTConfig)
	}
	if cfg.ClockSkew < 0 {
		return nil, fmt.Errorf("%w: clock skew cannot be negative", ErrInvalidJWTConfig)
	}
	signingKey, err := decodeBase64Key(strings.TrimSpace(cfg.SigningKeyBase64))
	if err != nil {
		// Include the decode failure instead of discarding it; the sentinel
		// stays first so errors.Is(err, ErrInvalidJWTConfig) still matches.
		return nil, fmt.Errorf("%w: signing key must be valid base64: %v", ErrInvalidJWTConfig, err)
	}
	if len(signingKey) == 0 {
		return nil, fmt.Errorf("%w: signing key cannot be empty", ErrInvalidJWTConfig)
	}
	return &JWTService{
		signingKey:    signingKey,
		issuer:        issuer,
		audience:      audience,
		tokenLifespan: cfg.TokenLifespan,
		clockSkew:     cfg.ClockSkew,
		now:           time.Now, // overridable in tests via the unexported field
	}, nil
}
// IssueToken mints a signed token for subject, embedding the trimmed,
// non-empty roles and groups. It returns the compact token, the claims it
// contains, or an error wrapping ErrInvalidJWTClaims when subject is blank.
func (s *JWTService) IssueToken(subject string, roles []string, groups []string) (string, Claims, error) {
	subject = strings.TrimSpace(subject)
	if subject == "" {
		return "", Claims{}, fmt.Errorf("%w: subject is required", ErrInvalidJWTClaims)
	}
	issuedAt := s.now().UTC()
	claims := Claims{
		Subject:   subject,
		Roles:     compactTrimmedStrings(roles),
		Groups:    compactTrimmedStrings(groups),
		Issuer:    s.issuer,
		Audience:  s.audience,
		IssuedAt:  issuedAt.Unix(),
		ExpiresAt: issuedAt.Add(s.tokenLifespan).Unix(),
		NotBefore: issuedAt.Unix(),
		ID:        newTokenID(),
	}
	// Self-check the freshly built claims so issuance and verification
	// share one validation path.
	if err := validateClaims(claims, issuedAt, s.issuer, s.audience, s.clockSkew); err != nil {
		return "", Claims{}, err
	}
	signed, err := encodeSignedJWT(claims, s.signingKey)
	if err != nil {
		return "", Claims{}, err
	}
	return signed, claims, nil
}
// VerifyToken parses and authenticates token, returning its claims. It
// rejects tokens whose header declares an algorithm other than HS256, whose
// signature does not match the service key, or whose claims fail
// issuer/audience/time-window validation.
func (s *JWTService) VerifyToken(token string) (Claims, error) {
	header, claims, signingInput, signature, err := parseJWT(token)
	if err != nil {
		return Claims{}, err
	}
	// Pin the algorithm to prevent algorithm-confusion attacks.
	if header.Algorithm != jwtAlgHS256 {
		return Claims{}, fmt.Errorf("%w: unsupported algorithm", ErrInvalidJWTToken)
	}
	if header.Type != "" && header.Type != jwtTyp {
		return Claims{}, fmt.Errorf("%w: invalid token type", ErrInvalidJWTToken)
	}
	// hmac.Equal is a constant-time comparison of the recomputed MAC.
	if !hmac.Equal(signature, signPayload(signingInput, s.signingKey)) {
		return Claims{}, fmt.Errorf("%w: signature mismatch", ErrInvalidJWTToken)
	}
	if err := validateClaims(claims, s.now().UTC(), s.issuer, s.audience, s.clockSkew); err != nil {
		return Claims{}, err
	}
	return claims, nil
}
// validateClaims checks the structural claim requirements and then the
// token's validity window against now, allowing clockSkew of leeway in
// either direction. Structural failures wrap ErrInvalidJWTClaims; window
// failures return ErrNotYetValidJWTToken or ErrExpiredJWTToken.
func validateClaims(claims Claims, now time.Time, expectedIssuer string, expectedAudience string, clockSkew time.Duration) error {
	// Structural checks, in the same order issuance relies on.
	switch {
	case strings.TrimSpace(claims.Subject) == "":
		return fmt.Errorf("%w: subject is required", ErrInvalidJWTClaims)
	case strings.TrimSpace(claims.ID) == "":
		return fmt.Errorf("%w: jti is required", ErrInvalidJWTClaims)
	case claims.Issuer != expectedIssuer:
		return fmt.Errorf("%w: issuer mismatch", ErrInvalidJWTClaims)
	case claims.Audience != expectedAudience:
		return fmt.Errorf("%w: audience mismatch", ErrInvalidJWTClaims)
	case claims.IssuedAt <= 0:
		return fmt.Errorf("%w: iat is required", ErrInvalidJWTClaims)
	case claims.NotBefore <= 0:
		return fmt.Errorf("%w: nbf is required", ErrInvalidJWTClaims)
	case claims.ExpiresAt <= 0:
		return fmt.Errorf("%w: exp is required", ErrInvalidJWTClaims)
	case claims.ExpiresAt <= claims.IssuedAt:
		return fmt.Errorf("%w: exp must be greater than iat", ErrInvalidJWTClaims)
	case claims.NotBefore > claims.ExpiresAt:
		return fmt.Errorf("%w: nbf cannot be greater than exp", ErrInvalidJWTClaims)
	}
	// Time-window checks with skew leeway (truncated to whole seconds to
	// match the Unix-second claim resolution).
	reference := now.Unix()
	leeway := int64(clockSkew / time.Second)
	switch {
	case claims.IssuedAt > reference+leeway:
		return fmt.Errorf("%w: iat is in the future", ErrInvalidJWTClaims)
	case claims.NotBefore > reference+leeway:
		return ErrNotYetValidJWTToken
	case reference > claims.ExpiresAt+leeway:
		return ErrExpiredJWTToken
	}
	return nil
}
// encodeSignedJWT serializes claims as a compact JWS string
// (header.payload.signature), signed with HMAC-SHA256 under signingKey.
func encodeSignedJWT(claims Claims, signingKey []byte) (string, error) {
	rawHeader, err := json.Marshal(jwtHeader{Algorithm: jwtAlgHS256, Type: jwtTyp})
	if err != nil {
		return "", fmt.Errorf("marshal jwt header: %w", err)
	}
	rawClaims, err := json.Marshal(claims)
	if err != nil {
		return "", fmt.Errorf("marshal jwt claims: %w", err)
	}
	// JWTs use unpadded URL-safe base64 for all three segments.
	enc := base64.RawURLEncoding
	signingInput := enc.EncodeToString(rawHeader) + "." + enc.EncodeToString(rawClaims)
	return signingInput + "." + enc.EncodeToString(signPayload(signingInput, signingKey)), nil
}
// parseJWT splits token into its three dot-separated segments and decodes
// them, returning the header, the claims, the "header.payload" signing
// input, and the raw signature bytes. Every failure wraps ErrInvalidJWTToken.
func parseJWT(token string) (jwtHeader, Claims, string, []byte, error) {
	fail := func(reason string) (jwtHeader, Claims, string, []byte, error) {
		return jwtHeader{}, Claims{}, "", nil, fmt.Errorf("%w: %s", ErrInvalidJWTToken, reason)
	}
	segments := strings.Split(token, ".")
	if len(segments) != 3 || segments[0] == "" || segments[1] == "" || segments[2] == "" {
		return fail("malformed token")
	}
	rawHeader, err := base64.RawURLEncoding.DecodeString(segments[0])
	if err != nil {
		return fail("invalid header encoding")
	}
	rawPayload, err := base64.RawURLEncoding.DecodeString(segments[1])
	if err != nil {
		return fail("invalid payload encoding")
	}
	signature, err := base64.RawURLEncoding.DecodeString(segments[2])
	if err != nil {
		return fail("invalid signature encoding")
	}
	var header jwtHeader
	if err := json.Unmarshal(rawHeader, &header); err != nil {
		return fail("invalid header json")
	}
	var claims Claims
	if err := json.Unmarshal(rawPayload, &claims); err != nil {
		return fail("invalid claims json")
	}
	return header, claims, segments[0] + "." + segments[1], signature, nil
}
// signPayload returns the HMAC-SHA256 digest of payload under signingKey.
func signPayload(payload string, signingKey []byte) []byte {
	digest := hmac.New(sha256.New, signingKey)
	_, _ = digest.Write([]byte(payload)) // hash.Hash.Write never returns an error
	return digest.Sum(nil)
}
// newTokenID returns a random 128-bit identifier as 32 lowercase hex
// characters. If the system randomness source fails, it degrades to a
// timestamp-derived value rather than failing token issuance.
func newTokenID() string {
	var buf [16]byte
	if _, err := rand.Read(buf[:]); err != nil {
		return fmt.Sprintf("fallback-%d", time.Now().UTC().UnixNano())
	}
	return hex.EncodeToString(buf[:])
}
// decodeBase64Key decodes value, accepting any of the four standard base64
// variants (padded/unpadded, standard/URL alphabets). The first variant
// that decodes cleanly wins; an error is returned only when none do.
func decodeBase64Key(value string) ([]byte, error) {
	for _, enc := range []*base64.Encoding{
		base64.StdEncoding,
		base64.RawStdEncoding,
		base64.URLEncoding,
		base64.RawURLEncoding,
	} {
		if decoded, err := enc.DecodeString(value); err == nil {
			return decoded, nil
		}
	}
	return nil, errors.New("invalid base64 encoding")
}
// compactTrimmedStrings trims each value and drops the blanks, returning
// nil (not an empty slice) when nothing survives so omitempty JSON fields
// are elided.
func compactTrimmedStrings(values []string) []string {
	var cleaned []string
	for _, raw := range values {
		if trimmed := strings.TrimSpace(raw); trimmed != "" {
			cleaned = append(cleaned, trimmed)
		}
	}
	return cleaned
}
+247
View File
@@ -0,0 +1,247 @@
package auth
import (
"encoding/base64"
"errors"
"strings"
"testing"
"time"
)
func TestNewJWTServiceRejectsBadConfig(t *testing.T) {
_, err := NewJWTService(JWTConfig{
SigningKeyBase64: "!!!",
Issuer: "vctp",
Audience: "vctp-api",
TokenLifespan: time.Hour,
ClockSkew: time.Minute,
})
if err == nil {
t.Fatal("expected invalid base64 signing key to fail")
}
if !errors.Is(err, ErrInvalidJWTConfig) {
t.Fatalf("expected ErrInvalidJWTConfig, got: %v", err)
}
}
// TestIssueAndVerifyTokenRoundTrip issues a token under a frozen clock,
// checks every issued claim against the expected deterministic values,
// then verifies the same token and confirms the claims round-trip intact.
func TestIssueAndVerifyTokenRoundTrip(t *testing.T) {
	now := time.Unix(1_700_000_000, 0).UTC()
	svc := mustJWTService(t)
	// Freeze the clock so iat/nbf/exp are deterministic.
	svc.now = func() time.Time { return now }
	// " viewer " exercises role trimming; the group is a realistic DN.
	token, issuedClaims, err := svc.IssueToken("alice", []string{"admin", " viewer "}, []string{"cn=vctp-admins,dc=example,dc=com"})
	if err != nil {
		t.Fatalf("IssueToken returned error: %v", err)
	}
	if token == "" {
		t.Fatal("expected non-empty token")
	}
	if issuedClaims.Subject != "alice" {
		t.Fatalf("expected subject alice, got %q", issuedClaims.Subject)
	}
	if issuedClaims.Issuer != "vctp" {
		t.Fatalf("expected issuer vctp, got %q", issuedClaims.Issuer)
	}
	if issuedClaims.Audience != "vctp-api" {
		t.Fatalf("expected audience vctp-api, got %q", issuedClaims.Audience)
	}
	if issuedClaims.IssuedAt != now.Unix() {
		t.Fatalf("unexpected iat: %d", issuedClaims.IssuedAt)
	}
	if issuedClaims.NotBefore != now.Unix() {
		t.Fatalf("unexpected nbf: %d", issuedClaims.NotBefore)
	}
	// 2h matches the TokenLifespan configured by mustJWTService.
	if issuedClaims.ExpiresAt != now.Add(2*time.Hour).Unix() {
		t.Fatalf("unexpected exp: %d", issuedClaims.ExpiresAt)
	}
	if issuedClaims.ID == "" {
		t.Fatal("expected jti to be populated")
	}
	// Verify with the same (still frozen) clock and compare claims.
	verifiedClaims, err := svc.VerifyToken(token)
	if err != nil {
		t.Fatalf("VerifyToken returned error: %v", err)
	}
	if verifiedClaims.Subject != issuedClaims.Subject {
		t.Fatalf("subject mismatch: got %q want %q", verifiedClaims.Subject, issuedClaims.Subject)
	}
	if verifiedClaims.ID != issuedClaims.ID {
		t.Fatalf("jti mismatch: got %q want %q", verifiedClaims.ID, issuedClaims.ID)
	}
}
// TestVerifyTokenRejectsInvalidSignature proves a token minted under one
// key fails verification under a different key with ErrInvalidJWTToken.
func TestVerifyTokenRejectsInvalidSignature(t *testing.T) {
	frozen := time.Unix(1_700_000_000, 0).UTC()
	issuerSvc := mustJWTService(t)
	issuerSvc.now = func() time.Time { return frozen }
	token, _, err := issuerSvc.IssueToken("alice", []string{"admin"}, nil)
	if err != nil {
		t.Fatalf("IssueToken returned error: %v", err)
	}
	// Same issuer/audience/clock, different signing key.
	verifier := mustJWTServiceWithKey(t, base64.StdEncoding.EncodeToString([]byte("a different secret key")))
	verifier.now = issuerSvc.now
	_, err = verifier.VerifyToken(token)
	switch {
	case err == nil:
		t.Fatal("expected signature mismatch to fail")
	case !errors.Is(err, ErrInvalidJWTToken):
		t.Fatalf("expected ErrInvalidJWTToken, got: %v", err)
	}
}
// TestVerifyTokenRejectsIssuerAndAudienceMismatch issues one valid token,
// then verifies it against two services sharing the signing key but
// differing in issuer (stage 1) and audience (stage 2); both must fail
// with an ErrInvalidJWTClaims that names the mismatched claim.
func TestVerifyTokenRejectsIssuerAndAudienceMismatch(t *testing.T) {
	issuerSvc := mustJWTService(t)
	// Freeze the clock so only the iss/aud checks can fail.
	issuerSvc.now = func() time.Time { return time.Unix(1_700_000_000, 0).UTC() }
	token, _, err := issuerSvc.IssueToken("alice", nil, nil)
	if err != nil {
		t.Fatalf("IssueToken returned error: %v", err)
	}
	// Stage 1: same key and audience, different issuer.
	wrongIssuer, err := NewJWTService(JWTConfig{
		SigningKeyBase64: base64.StdEncoding.EncodeToString([]byte("super-secret-signing-key")),
		Issuer:           "other-issuer",
		Audience:         "vctp-api",
		TokenLifespan:    2 * time.Hour,
		ClockSkew:        time.Minute,
	})
	if err != nil {
		t.Fatalf("failed to create verifier with wrong issuer: %v", err)
	}
	wrongIssuer.now = issuerSvc.now
	_, err = wrongIssuer.VerifyToken(token)
	if err == nil {
		t.Fatal("expected issuer mismatch to fail")
	}
	if !errors.Is(err, ErrInvalidJWTClaims) {
		t.Fatalf("expected ErrInvalidJWTClaims, got: %v", err)
	}
	if !strings.Contains(strings.ToLower(err.Error()), "issuer") {
		t.Fatalf("expected issuer mismatch error, got: %v", err)
	}
	// Stage 2: same key and issuer, different audience.
	wrongAudience, err := NewJWTService(JWTConfig{
		SigningKeyBase64: base64.StdEncoding.EncodeToString([]byte("super-secret-signing-key")),
		Issuer:           "vctp",
		Audience:         "other-audience",
		TokenLifespan:    2 * time.Hour,
		ClockSkew:        time.Minute,
	})
	if err != nil {
		t.Fatalf("failed to create verifier with wrong audience: %v", err)
	}
	wrongAudience.now = issuerSvc.now
	_, err = wrongAudience.VerifyToken(token)
	if err == nil {
		t.Fatal("expected audience mismatch to fail")
	}
	if !errors.Is(err, ErrInvalidJWTClaims) {
		t.Fatalf("expected ErrInvalidJWTClaims, got: %v", err)
	}
	if !strings.Contains(strings.ToLower(err.Error()), "audience") {
		t.Fatalf("expected audience mismatch error, got: %v", err)
	}
}
// TestVerifyTokenRejectsExpiredNotBeforeAndFutureIssuedAt covers the three
// time-window rejections: an expired token, a token whose nbf is still in
// the future, and a token whose iat is in the future. The latter two are
// built by re-signing hand-edited claims with the service's own key.
// NOTE: the test mutates svc.now between stages, so the order matters.
func TestVerifyTokenRejectsExpiredNotBeforeAndFutureIssuedAt(t *testing.T) {
	base := time.Unix(1_700_000_000, 0).UTC()
	svc := mustJWTService(t)
	svc.now = func() time.Time { return base }
	token, claims, err := svc.IssueToken("alice", nil, nil)
	if err != nil {
		t.Fatalf("IssueToken returned error: %v", err)
	}
	// Stage 1: jump past the 2h lifespan (plus skew) -> expired.
	svc.now = func() time.Time { return base.Add(3 * time.Hour) }
	_, err = svc.VerifyToken(token)
	if !errors.Is(err, ErrExpiredJWTToken) {
		t.Fatalf("expected ErrExpiredJWTToken, got: %v", err)
	}
	// Stage 2: re-sign claims with nbf 10m in the future of the clock.
	notBeforeClaims := claims
	notBeforeClaims.NotBefore = base.Add(10 * time.Minute).Unix()
	notBeforeClaims.IssuedAt = base.Unix()
	notBeforeClaims.ExpiresAt = base.Add(2 * time.Hour).Unix()
	notBeforeClaims.ID = "forced-jti-1"
	notBeforeToken, err := encodeSignedJWT(notBeforeClaims, svc.signingKey)
	if err != nil {
		t.Fatalf("failed to create token with future nbf: %v", err)
	}
	// Rewind the clock to base so the forged nbf is ahead of "now".
	svc.now = func() time.Time { return base }
	_, err = svc.VerifyToken(notBeforeToken)
	if !errors.Is(err, ErrNotYetValidJWTToken) {
		t.Fatalf("expected ErrNotYetValidJWTToken, got: %v", err)
	}
	// Stage 3: re-sign claims with iat 20m in the future of the clock.
	futureIatClaims := claims
	futureIatClaims.IssuedAt = base.Add(20 * time.Minute).Unix()
	futureIatClaims.NotBefore = base.Unix()
	futureIatClaims.ExpiresAt = base.Add(3 * time.Hour).Unix()
	futureIatClaims.ID = "forced-jti-2"
	futureIatToken, err := encodeSignedJWT(futureIatClaims, svc.signingKey)
	if err != nil {
		t.Fatalf("failed to create token with future iat: %v", err)
	}
	_, err = svc.VerifyToken(futureIatToken)
	if err == nil {
		t.Fatal("expected future iat validation to fail")
	}
	if !errors.Is(err, ErrInvalidJWTClaims) {
		t.Fatalf("expected ErrInvalidJWTClaims for future iat, got: %v", err)
	}
}
// TestVerifyTokenRejectsMissingJTI re-signs an issued token's claims with
// the jti blanked out and checks verification fails naming "jti".
func TestVerifyTokenRejectsMissingJTI(t *testing.T) {
	frozen := time.Unix(1_700_000_000, 0).UTC()
	svc := mustJWTService(t)
	svc.now = func() time.Time { return frozen }
	token, claims, err := svc.IssueToken("alice", nil, nil)
	if err != nil {
		t.Fatalf("IssueToken returned error: %v", err)
	}
	if token == "" {
		t.Fatal("expected non-empty token")
	}
	// Re-sign the same claims with the service's own key, minus the jti.
	claims.ID = ""
	tampered, err := encodeSignedJWT(claims, svc.signingKey)
	if err != nil {
		t.Fatalf("failed to create token without jti: %v", err)
	}
	_, err = svc.VerifyToken(tampered)
	if err == nil {
		t.Fatal("expected missing jti token to fail")
	}
	if !errors.Is(err, ErrInvalidJWTClaims) {
		t.Fatalf("expected ErrInvalidJWTClaims, got: %v", err)
	}
	if !strings.Contains(strings.ToLower(err.Error()), "jti") {
		t.Fatalf("expected jti validation error, got: %v", err)
	}
}
// mustJWTService builds a JWTService with a fixed, well-known test signing
// key, failing the test if construction fails.
func mustJWTService(t *testing.T) *JWTService {
	t.Helper()
	key := base64.StdEncoding.EncodeToString([]byte("super-secret-signing-key"))
	return mustJWTServiceWithKey(t, key)
}
// mustJWTServiceWithKey builds a JWTService from the supplied base64-encoded
// signing key using fixed test issuer/audience/lifespan values, failing the
// test if construction fails.
func mustJWTServiceWithKey(t *testing.T, keyBase64 string) *JWTService {
	t.Helper()
	cfg := JWTConfig{
		SigningKeyBase64: keyBase64,
		Issuer:           "vctp",
		Audience:         "vctp-api",
		TokenLifespan:    2 * time.Hour,
		ClockSkew:        time.Minute,
	}
	svc, err := NewJWTService(cfg)
	if err != nil {
		t.Fatalf("failed to create jwt service: %v", err)
	}
	return svc
}
+354
View File
@@ -0,0 +1,354 @@
package auth
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"net/url"
"os"
"sort"
"strings"
"time"
"github.com/go-ldap/ldap/v3"
)
// Sentinel errors returned by the LDAP authenticator; callers match them
// with errors.Is.
var (
	// ErrInvalidLDAPConfig indicates NewLDAPAuthenticator was given an
	// incomplete or malformed LDAPConfig.
	ErrInvalidLDAPConfig = errors.New("invalid ldap config")
	// ErrLDAPInvalidCredentials indicates the username/password pair was
	// empty or was rejected by the directory.
	ErrLDAPInvalidCredentials = errors.New("invalid ldap credentials")
	// ErrLDAPOperationFailed wraps connection, bind, and search failures.
	ErrLDAPOperationFailed = errors.New("ldap operation failed")
)
// LDAPConfig carries the connection settings for an LDAPAuthenticator.
type LDAPConfig struct {
	BindAddress       string        // LDAP server URL, e.g. ldap://host:389 or ldaps://host:636 (required)
	BaseDN            string        // search base for user and group lookups (required)
	TrustCertFile     string        // optional PEM bundle that replaces the system trust roots
	DisableValidation bool          // skip TLS certificate validation
	Insecure          bool          // skip certificate validation AND the StartTLS upgrade on ldap://
	DialTimeout       time.Duration // dial/operation timeout; defaults to 10s when <= 0
}

// LDAPIdentity is the result of a successful authentication.
type LDAPIdentity struct {
	Username string   // resolved account attribute (falls back to the login name)
	UserDN   string   // distinguished name of the user entry (falls back to the login name)
	Groups   []string // sorted, de-duplicated DNs of groups the user belongs to
}

// LDAPAuthenticator authenticates users against an LDAP directory and
// resolves their group memberships. Construct with NewLDAPAuthenticator.
type LDAPAuthenticator struct {
	bindAddress       string
	baseDN            string
	trustCertFile     string
	disableValidation bool
	insecure          bool
	dialTimeout       time.Duration
}
// NewLDAPAuthenticator validates cfg and returns a ready-to-use
// authenticator. The bind address must be a non-empty, parseable URL and the
// base DN must be non-empty; the dial timeout defaults to 10 seconds when
// unset or non-positive. Returns ErrInvalidLDAPConfig on invalid input.
func NewLDAPAuthenticator(cfg LDAPConfig) (*LDAPAuthenticator, error) {
	addr := strings.TrimSpace(cfg.BindAddress)
	base := strings.TrimSpace(cfg.BaseDN)
	switch {
	case addr == "":
		return nil, fmt.Errorf("%w: bind address is required", ErrInvalidLDAPConfig)
	case base == "":
		return nil, fmt.Errorf("%w: base DN is required", ErrInvalidLDAPConfig)
	}
	if _, err := url.ParseRequestURI(addr); err != nil {
		return nil, fmt.Errorf("%w: bind address must be a valid URL: %v", ErrInvalidLDAPConfig, err)
	}
	timeout := cfg.DialTimeout
	if timeout <= 0 {
		timeout = 10 * time.Second
	}
	auth := &LDAPAuthenticator{
		bindAddress:       addr,
		baseDN:            base,
		trustCertFile:     strings.TrimSpace(cfg.TrustCertFile),
		disableValidation: cfg.DisableValidation,
		insecure:          cfg.Insecure,
		dialTimeout:       timeout,
	}
	return auth, nil
}
// AuthenticateAndFetchGroups verifies username/password by binding to the
// directory as the user, then resolves the user's entry and group
// memberships. It returns ErrLDAPInvalidCredentials for empty or rejected
// credentials and ErrLDAPOperationFailed for connection/bind failures.
//
// NOTE(review): ctx is only consulted between steps; the individual LDAP
// calls are bounded by the configured dial timeout, not by ctx.
func (a *LDAPAuthenticator) AuthenticateAndFetchGroups(ctx context.Context, username string, password string) (LDAPIdentity, error) {
	username = strings.TrimSpace(username)
	// Reject empty credentials up front; an empty password must never reach
	// the bind call.
	if username == "" || password == "" {
		return LDAPIdentity{}, ErrLDAPInvalidCredentials
	}
	if err := ctxErr(ctx); err != nil {
		return LDAPIdentity{}, err
	}
	conn, err := a.connect()
	if err != nil {
		return LDAPIdentity{}, err
	}
	defer conn.Close()
	// Bind as the user; the login name may be a full DN or a bare account
	// name depending on the directory's bind rules.
	if err := conn.Bind(username, password); err != nil {
		if ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials) {
			return LDAPIdentity{}, ErrLDAPInvalidCredentials
		}
		return LDAPIdentity{}, fmt.Errorf("%w: bind failed: %v", ErrLDAPOperationFailed, err)
	}
	if err := ctxErr(ctx); err != nil {
		return LDAPIdentity{}, err
	}
	// Start from the login name; both fields are refined below if the user
	// entry can be located.
	identity := LDAPIdentity{
		Username: username,
		UserDN:   username,
	}
	entry, err := a.lookupUserEntry(conn, username)
	if err != nil {
		return LDAPIdentity{}, err
	}
	if entry != nil {
		if strings.TrimSpace(entry.DN) != "" {
			identity.UserDN = entry.DN
		}
		// Prefer the canonical account attribute over whatever the user typed.
		if v := firstNonEmpty(
			entry.GetAttributeValue("uid"),
			entry.GetAttributeValue("sAMAccountName"),
			entry.GetAttributeValue("userPrincipalName"),
			entry.GetAttributeValue("cn"),
		); v != "" {
			identity.Username = v
		}
	}
	// Collect groups from the entry's memberOf attribute first ...
	groupSet := make(map[string]struct{})
	if entry != nil {
		for _, groupDN := range entry.GetAttributeValues("memberOf") {
			groupDN = strings.TrimSpace(groupDN)
			if groupDN == "" {
				continue
			}
			groupSet[groupDN] = struct{}{}
		}
	}
	// ... then union in a subtree search for groups that reference the user
	// via member/uniqueMember/memberUid, covering directories that do not
	// maintain memberOf.
	groupEntries, err := conn.Search(ldap.NewSearchRequest(
		a.baseDN,
		ldap.ScopeWholeSubtree,
		ldap.NeverDerefAliases,
		0,
		0,
		false,
		fmt.Sprintf("(|(member=%s)(uniqueMember=%s)(memberUid=%s))",
			ldap.EscapeFilter(identity.UserDN),
			ldap.EscapeFilter(identity.UserDN),
			ldap.EscapeFilter(username),
		),
		[]string{"dn"},
		nil,
	))
	// Best-effort: a failed group search is deliberately ignored rather than
	// failing the whole authentication; memberOf-derived groups still apply.
	if err == nil {
		for _, e := range groupEntries.Entries {
			if dn := strings.TrimSpace(e.DN); dn != "" {
				groupSet[dn] = struct{}{}
			}
		}
	}
	identity.Groups = mapKeysSorted(groupSet)
	return identity, nil
}
// ResolveRoles maps the supplied group DNs onto application roles via the
// configured group→role mappings. Matching is case-insensitive and tolerant
// of surrounding whitespace on both sides; entries with an empty group DN or
// role are skipped. The result is a sorted, de-duplicated role list, or nil
// when either input is empty or nothing matches.
func ResolveRoles(groupDNs []string, groupRoleMappings map[string]string) []string {
	if len(groupDNs) == 0 || len(groupRoleMappings) == 0 {
		return nil
	}
	// Canonicalise mapping keys and role values once, up front.
	byGroup := make(map[string]string, len(groupRoleMappings))
	for dn, role := range groupRoleMappings {
		key := strings.ToLower(strings.TrimSpace(dn))
		value := strings.ToLower(strings.TrimSpace(role))
		if key != "" && value != "" {
			byGroup[key] = value
		}
	}
	seen := make(map[string]struct{})
	for _, dn := range groupDNs {
		if role, ok := byGroup[strings.ToLower(strings.TrimSpace(dn))]; ok {
			seen[role] = struct{}{}
		}
	}
	if len(seen) == 0 {
		return nil
	}
	roles := make([]string, 0, len(seen))
	for role := range seen {
		roles = append(roles, role)
	}
	sort.Strings(roles)
	return roles
}
// HasAnyGroup reports whether any of groupDNs appears in requiredGroupDNs,
// comparing case-insensitively and ignoring surrounding whitespace. When the
// requirement list is empty (after dropping blank entries) access is allowed
// and the function returns true.
func HasAnyGroup(groupDNs []string, requiredGroupDNs []string) bool {
	required := make(map[string]struct{}, len(requiredGroupDNs))
	for _, dn := range requiredGroupDNs {
		if trimmed := strings.TrimSpace(dn); trimmed != "" {
			required[strings.ToLower(trimmed)] = struct{}{}
		}
	}
	// No effective requirements means everyone passes.
	if len(required) == 0 {
		return true
	}
	for _, dn := range groupDNs {
		if _, ok := required[strings.ToLower(strings.TrimSpace(dn))]; ok {
			return true
		}
	}
	return false
}
// connect dials the configured LDAP URL and returns an open connection.
// ldaps:// connections use the TLS config directly via DialURL; plain ldap://
// connections are upgraded with StartTLS unless insecure is set. The upgrade
// is mandatory despite the inline comment's wording — a StartTLS failure
// closes the connection and returns an error.
func (a *LDAPAuthenticator) connect() (*ldap.Conn, error) {
	tlsConfig, err := a.buildTLSConfig()
	if err != nil {
		return nil, err
	}
	// Parsed only to inspect the scheme below; DialURL re-parses the address.
	parsedURL, err := url.Parse(a.bindAddress)
	if err != nil {
		return nil, fmt.Errorf("%w: invalid bind address: %v", ErrInvalidLDAPConfig, err)
	}
	options := []ldap.DialOpt{
		ldap.DialWithDialer(&net.Dialer{Timeout: a.dialTimeout}),
		ldap.DialWithTLSConfig(tlsConfig),
	}
	conn, err := ldap.DialURL(a.bindAddress, options...)
	if err != nil {
		return nil, fmt.Errorf("%w: unable to connect: %v", ErrLDAPOperationFailed, err)
	}
	// Apply the same bound to individual LDAP operations on this connection.
	conn.SetTimeout(a.dialTimeout)
	// For ldap://, opportunistically upgrade to TLS unless explicitly configured as insecure.
	if parsedURL.Scheme == "ldap" && !a.insecure {
		if err := conn.StartTLS(tlsConfig); err != nil {
			conn.Close()
			return nil, fmt.Errorf("%w: starttls failed: %v", ErrLDAPOperationFailed, err)
		}
	}
	return conn, nil
}
// buildTLSConfig assembles the TLS settings used for ldaps:// connections and
// StartTLS upgrades. Certificate verification is skipped when either the
// insecure or disableValidation flag is set; when a trust-cert file is
// configured, its PEM certificates replace the system roots.
func (a *LDAPAuthenticator) buildTLSConfig() (*tls.Config, error) {
	cfg := &tls.Config{
		MinVersion:         tls.VersionTLS12,
		InsecureSkipVerify: a.insecure || a.disableValidation, //nolint:gosec // controlled by explicit config flags
	}
	if a.trustCertFile == "" {
		return cfg, nil
	}
	pemBytes, err := os.ReadFile(a.trustCertFile)
	if err != nil {
		return nil, fmt.Errorf("%w: failed to read ldap trust cert file: %v", ErrInvalidLDAPConfig, err)
	}
	pool := x509.NewCertPool()
	if ok := pool.AppendCertsFromPEM(pemBytes); !ok {
		return nil, fmt.Errorf("%w: ldap trust cert file contains no valid certificates", ErrInvalidLDAPConfig)
	}
	cfg.RootCAs = pool
	return cfg, nil
}
// lookupUserEntry loads the directory entry for username. A login name that
// already looks like a DN is read directly with a base-scoped search;
// otherwise a subtree search under the base DN matches the common account
// attributes (uid, cn, sAMAccountName, userPrincipalName). Returns (nil, nil)
// when no entry is found. When multiple entries match, the first is used —
// the size limit of 2 presumably exists to allow detecting ambiguity, but
// ambiguous matches are not currently rejected (TODO confirm intent).
func (a *LDAPAuthenticator) lookupUserEntry(conn *ldap.Conn, username string) (*ldap.Entry, error) {
	if looksLikeDN(username) {
		// Base-scoped read of the DN itself; size limit 1.
		searchRes, err := conn.Search(ldap.NewSearchRequest(
			username,
			ldap.ScopeBaseObject,
			ldap.NeverDerefAliases,
			1,
			0,
			false,
			"(objectClass=*)",
			[]string{"uid", "sAMAccountName", "userPrincipalName", "cn", "memberOf"},
			nil,
		))
		if err != nil {
			return nil, fmt.Errorf("%w: unable to load user entry: %v", ErrLDAPOperationFailed, err)
		}
		if len(searchRes.Entries) == 0 {
			return nil, nil
		}
		return searchRes.Entries[0], nil
	}
	// Subtree search matching any of the usual account-name attributes; the
	// username is escaped to prevent LDAP filter injection.
	searchRes, err := conn.Search(ldap.NewSearchRequest(
		a.baseDN,
		ldap.ScopeWholeSubtree,
		ldap.NeverDerefAliases,
		2,
		0,
		false,
		fmt.Sprintf("(|(uid=%s)(cn=%s)(sAMAccountName=%s)(userPrincipalName=%s))",
			ldap.EscapeFilter(username),
			ldap.EscapeFilter(username),
			ldap.EscapeFilter(username),
			ldap.EscapeFilter(username),
		),
		[]string{"uid", "sAMAccountName", "userPrincipalName", "cn", "memberOf"},
		nil,
	))
	if err != nil {
		return nil, fmt.Errorf("%w: user lookup failed: %v", ErrLDAPOperationFailed, err)
	}
	if len(searchRes.Entries) == 0 {
		return nil, nil
	}
	return searchRes.Entries[0], nil
}
// normalizeDN canonicalises a DN for case-insensitive comparison: surrounding
// whitespace is removed and the remainder is lower-cased.
func normalizeDN(value string) string {
	trimmed := strings.TrimSpace(value)
	return strings.ToLower(trimmed)
}
// mapKeysSorted returns m's keys in ascending order, or nil for an empty map.
func mapKeysSorted[K ~string, V any](m map[K]V) []K {
	if len(m) == 0 {
		return nil
	}
	keys := make([]K, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(a, b int) bool { return keys[a] < keys[b] })
	return keys
}
// firstNonEmpty returns the first candidate that is non-empty after trimming
// whitespace (returned trimmed), or "" when every candidate is blank.
func firstNonEmpty(values ...string) string {
	for _, candidate := range values {
		if trimmed := strings.TrimSpace(candidate); trimmed != "" {
			return trimmed
		}
	}
	return ""
}
// looksLikeDN heuristically detects a distinguished name: after trimming, the
// value must contain both an '=' (attribute assignment) and a ',' (RDN
// separator).
func looksLikeDN(value string) bool {
	v := strings.TrimSpace(value)
	return strings.ContainsRune(v, '=') && strings.ContainsRune(v, ',')
}
func ctxErr(ctx context.Context) error {
if ctx == nil {
return nil
}
select {
case <-ctx.Done():
return ctx.Err()
default:
return nil
}
}
+39
View File
@@ -0,0 +1,39 @@
package auth
import "testing"
// TestResolveRoles checks that group→role resolution is case-insensitive,
// tolerates surrounding whitespace, and returns roles in sorted order.
func TestResolveRoles(t *testing.T) {
	roles := ResolveRoles(
		[]string{
			"cn=vctp-admins,ou=groups,dc=example,dc=com",
			// Mixed case and padding must still match the mapping below.
			" CN=VCTP-VIEWERS,OU=GROUPS,DC=EXAMPLE,DC=COM ",
		},
		map[string]string{
			"cn=vctp-admins,ou=groups,dc=example,dc=com":  "admin",
			"cn=vctp-viewers,ou=groups,dc=example,dc=com": "viewer",
		},
	)
	if len(roles) != 2 {
		t.Fatalf("expected 2 roles, got %d (%#v)", len(roles), roles)
	}
	// The result is sorted, so the order is deterministic.
	if roles[0] != "admin" || roles[1] != "viewer" {
		t.Fatalf("unexpected resolved roles: %#v", roles)
	}
}
// TestHasAnyGroup covers the three interesting intersection cases: a
// whitespace-padded match, no overlap, and an empty requirement list (which
// allows access).
func TestHasAnyGroup(t *testing.T) {
	groups := []string{
		"cn=vctp-admins,ou=groups,dc=example,dc=com",
	}
	if !HasAnyGroup(groups, []string{" cn=vctp-admins,ou=groups,dc=example,dc=com "}) {
		t.Fatal("expected group intersection to match")
	}
	if HasAnyGroup(groups, []string{"cn=vctp-operators,ou=groups,dc=example,dc=com"}) {
		t.Fatal("expected no intersection")
	}
	if !HasAnyGroup(groups, nil) {
		t.Fatal("expected empty required groups to allow")
	}
}
+14 -2
View File
@@ -238,7 +238,7 @@ func CreateUpdatesReport(logger *slog.Logger, Database db.Database, ctx context.
}
// Helper function to get the actual value of sql.Null types
func getFieldValue(field reflect.Value) interface{} {
func getFieldValue(field reflect.Value) any {
switch field.Kind() {
case reflect.Struct:
// Handle sql.Null types based on their concrete type
@@ -279,6 +279,8 @@ func SetColAutoWidth(xlsx *excelize.File, sheetName string) error {
if err != nil {
return err
}
const minColWidth = 10
const maxColWidth = 80
for idx, col := range cols {
largestWidth := 0
for _, rowCell := range col {
@@ -287,12 +289,22 @@ func SetColAutoWidth(xlsx *excelize.File, sheetName string) error {
largestWidth = cellWidth
}
}
// Keep a sane minimum so sheets that rely on computed content
// (for example pivot output populated by Excel) don't collapse to width 0.
if largestWidth < minColWidth {
largestWidth = minColWidth
}
if largestWidth > maxColWidth {
largestWidth = maxColWidth
}
//fmt.Printf("SetColAutoWidth calculated largest width for column index '%d' is '%d'\n", idx, largestWidth)
name, err := excelize.ColumnNumberToName(idx + 1)
if err != nil {
return err
}
xlsx.SetColWidth(sheetName, name, name, float64(largestWidth))
if err := xlsx.SetColWidth(sheetName, name, name, float64(largestWidth)); err != nil {
return err
}
}
// No errors at this point
return nil
File diff suppressed because it is too large Load Diff
+77
View File
@@ -0,0 +1,77 @@
package report
import (
"io"
"log/slog"
"strings"
"testing"
"github.com/xuri/excelize/v2"
)
// TestAddSummaryPivotSheetCreatesPivotTables builds a minimal one-row data
// sheet, runs addSummaryPivotSheet over it, and asserts that all four
// expected pivot tables are created on the "Summary" sheet, that their sheet
// references are unquoted, and that the AvgVcpu pivot uses exactly
// Datacenter as its row field and ResourcePool as its column field.
func TestAddSummaryPivotSheetCreatesPivotTables(t *testing.T) {
	xlsx := excelize.NewFile()
	const dataSheet = "Snapshot Report"
	if err := xlsx.SetSheetName("Sheet1", dataSheet); err != nil {
		t.Fatalf("SetSheetName failed: %v", err)
	}
	headers := []string{"Name", "Datacenter", "ResourcePool", "AvgVcpuCount", "AvgRamGB", "AvgIsPresent"}
	if err := xlsx.SetSheetRow(dataSheet, "A1", &headers); err != nil {
		t.Fatalf("SetSheetRow header failed: %v", err)
	}
	// A single data row is enough for the pivot definitions to be generated.
	row1 := []any{"vm-1", "dc-1", "pool-1", 4.0, 16.0, 1.0}
	if err := xlsx.SetSheetRow(dataSheet, "A2", &row1); err != nil {
		t.Fatalf("SetSheetRow data failed: %v", err)
	}
	logger := slog.New(slog.NewTextHandler(io.Discard, nil))
	addSummaryPivotSheet(logger, xlsx, dataSheet, headers, 1, "inventory_daily_summary_20260215", nil)
	pivots, err := xlsx.GetPivotTables("Summary")
	if err != nil {
		t.Fatalf("GetPivotTables failed: %v", err)
	}
	if len(pivots) != 4 {
		t.Fatalf("expected 4 pivot tables, got %d", len(pivots))
	}
	// Track which of the expected pivot names were actually seen.
	expectedNames := map[string]bool{
		"PivotAvgVcpu":         false,
		"PivotAvgRam":          false,
		"PivotProratedVmCount": false,
		"PivotVmNameCount":     false,
	}
	var avgVcpuPivot excelize.PivotTableOptions
	avgVcpuFound := false
	for _, pivot := range pivots {
		if _, ok := expectedNames[pivot.Name]; ok {
			expectedNames[pivot.Name] = true
		}
		if pivot.Name == "PivotAvgVcpu" {
			avgVcpuPivot = pivot
			avgVcpuFound = true
		}
		// The helper is expected to emit unquoted sheet references; guard
		// against regressions that reintroduce quoting.
		if strings.Contains(pivot.DataRange, "'") {
			t.Fatalf("pivot %q has quoted DataRange %q; expected unquoted sheet reference", pivot.Name, pivot.DataRange)
		}
		if strings.Contains(pivot.PivotTableRange, "'") {
			t.Fatalf("pivot %q has quoted PivotTableRange %q; expected unquoted sheet reference", pivot.Name, pivot.PivotTableRange)
		}
	}
	for name, seen := range expectedNames {
		if !seen {
			t.Fatalf("missing expected pivot table %q", name)
		}
	}
	if !avgVcpuFound {
		t.Fatal("missing PivotAvgVcpu definition")
	}
	if len(avgVcpuPivot.Rows) != 1 || avgVcpuPivot.Rows[0].Data != "Datacenter" {
		t.Fatalf("PivotAvgVcpu rows = %#v; expected Datacenter only", avgVcpuPivot.Rows)
	}
	if len(avgVcpuPivot.Columns) != 1 || avgVcpuPivot.Columns[0].Data != "ResourcePool" {
		t.Fatalf("PivotAvgVcpu columns = %#v; expected ResourcePool only", avgVcpuPivot.Columns)
	}
}
+4
View File
@@ -5,6 +5,7 @@ import (
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"fmt"
"io"
"log/slog"
)
@@ -68,6 +69,9 @@ func (s *Secrets) Decrypt(base64CipherText string) ([]byte, error) {
// Extract the nonce from the ciphertext
nonceSize := gcm.NonceSize()
if len(cipherText) < nonceSize {
return nil, fmt.Errorf("ciphertext is too short")
}
nonce, cipherText := cipherText[:nonceSize], cipherText[nonceSize:]
// Decrypt the ciphertext
+27
View File
@@ -0,0 +1,27 @@
package secrets
import (
"encoding/base64"
"io"
"log/slog"
"strings"
"testing"
)
// testLogger returns a logger whose output is discarded, keeping test runs
// quiet while still exercising logging code paths.
func testLogger() *slog.Logger {
	handler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(handler)
}
// TestDecryptRejectsShortCiphertext ensures Decrypt fails cleanly when the
// decoded payload is shorter than the GCM nonce, instead of slicing out of
// range.
func TestDecryptRejectsShortCiphertext(t *testing.T) {
	key := []byte("0123456789abcdef0123456789abcdef") // 32 bytes
	s := New(testLogger(), key)
	// Three bytes is well below gcm.NonceSize.
	encoded := base64.StdEncoding.EncodeToString([]byte{1, 2, 3})
	_, err := s.Decrypt(encoded)
	if err == nil {
		t.Fatal("expected error for short ciphertext, got nil")
	}
	if !strings.Contains(err.Error(), "ciphertext is too short") {
		t.Fatalf("unexpected error: %v", err)
	}
}
+28
View File
@@ -0,0 +1,28 @@
package settings
import "context"
type reloadedContextKey struct{}
// MarkReloadedInContext records that cfg has already been refreshed within
// this context flow, so downstream code can skip a redundant reload. A nil
// ctx is replaced with context.Background(); a nil cfg leaves ctx unchanged.
func MarkReloadedInContext(ctx context.Context, cfg *Settings) context.Context {
	if ctx == nil {
		ctx = context.Background()
	}
	if cfg != nil {
		ctx = context.WithValue(ctx, reloadedContextKey{}, cfg)
	}
	return ctx
}
// IsReloadedInContext reports whether MarkReloadedInContext was called on
// this context flow with the exact same Settings pointer.
func IsReloadedInContext(ctx context.Context, cfg *Settings) bool {
	if ctx == nil || cfg == nil {
		return false
	}
	stored, ok := ctx.Value(reloadedContextKey{}).(*Settings)
	return ok && stored == cfg
}
+290 -36
View File
@@ -1,14 +1,39 @@
package settings
import (
"encoding/base64"
"errors"
"fmt"
"log/slog"
"os"
"path/filepath"
"regexp"
"strings"
"vctp/internal/utils"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
)
var (
postgresURIUserInfoPasswordPattern = regexp.MustCompile(`(?i)(postgres(?:ql)?://[^@/\s]*:)([^@/\s]*)(@)`)
postgresKVPasswordPattern = regexp.MustCompile(`(?i)(\bpassword\s*=\s*)(?:'[^']*'|"[^"]*"|[^\s]+)`)
)
const (
authModeDisabled = "disabled"
authModeOptional = "optional"
authModeRequired = "required"
authRoleAdmin = "admin"
authRoleViewer = "viewer"
defaultAuthTokenLifespanMinutes = 120
defaultAuthJWTIssuer = "vctp"
defaultAuthJWTAudience = "vctp-api"
defaultAuthClockSkewSeconds = 60
scheduledAggregationEngineGo = "go"
scheduledAggregationEngineSQL = "sql"
)
type Settings struct {
@@ -17,42 +42,77 @@ type Settings struct {
Values *SettingsYML
}
type ReportSummaryPivot struct {
Metric string `yaml:"metric"`
Title string `yaml:"title"`
PivotName string `yaml:"pivot_name"`
PivotRange string `yaml:"pivot_range"`
TitleCell string `yaml:"title_cell"`
}
// SettingsYML struct holds various runtime data that is too cumbersome to specify via command line, eg replacement properties
type SettingsYML struct {
Settings struct {
LogLevel string `yaml:"log_level"`
LogOutput string `yaml:"log_output"`
DatabaseDriver string `yaml:"database_driver"`
DatabaseURL string `yaml:"database_url"`
BindIP string `yaml:"bind_ip"`
BindPort int `yaml:"bind_port"`
BindDisableTLS bool `yaml:"bind_disable_tls"`
TLSCertFilename string `yaml:"tls_cert_filename"`
TLSKeyFilename string `yaml:"tls_key_filename"`
VcenterUsername string `yaml:"vcenter_username"`
VcenterPassword string `yaml:"vcenter_password"`
VcenterInsecure bool `yaml:"vcenter_insecure"`
VcenterEventPollingSeconds int `yaml:"vcenter_event_polling_seconds"`
VcenterInventoryPollingSeconds int `yaml:"vcenter_inventory_polling_seconds"`
VcenterInventorySnapshotSeconds int `yaml:"vcenter_inventory_snapshot_seconds"`
VcenterInventoryAggregateSeconds int `yaml:"vcenter_inventory_aggregate_seconds"`
HourlySnapshotConcurrency int `yaml:"hourly_snapshot_concurrency"`
HourlySnapshotMaxAgeDays int `yaml:"hourly_snapshot_max_age_days"`
DailySnapshotMaxAgeMonths int `yaml:"daily_snapshot_max_age_months"`
SnapshotCleanupCron string `yaml:"snapshot_cleanup_cron"`
ReportsDir string `yaml:"reports_dir"`
HourlyJobTimeoutSeconds int `yaml:"hourly_job_timeout_seconds"`
HourlySnapshotTimeoutSeconds int `yaml:"hourly_snapshot_timeout_seconds"`
HourlySnapshotRetrySeconds int `yaml:"hourly_snapshot_retry_seconds"`
HourlySnapshotMaxRetries int `yaml:"hourly_snapshot_max_retries"`
DailyJobTimeoutSeconds int `yaml:"daily_job_timeout_seconds"`
MonthlyJobTimeoutSeconds int `yaml:"monthly_job_timeout_seconds"`
CleanupJobTimeoutSeconds int `yaml:"cleanup_job_timeout_seconds"`
TenantsToFilter []string `yaml:"tenants_to_filter"`
NodeChargeClusters []string `yaml:"node_charge_clusters"`
SrmActiveActiveVms []string `yaml:"srm_activeactive_vms"`
VcenterAddresses []string `yaml:"vcenter_addresses"`
PostgresWorkMemMB int `yaml:"postgres_work_mem_mb"`
LogLevel string `yaml:"log_level"`
LogOutput string `yaml:"log_output"`
DatabaseDriver string `yaml:"database_driver"`
DatabaseURL string `yaml:"database_url"`
EnableExperimentalPostgres bool `yaml:"enable_experimental_postgres"`
BindIP string `yaml:"bind_ip"`
BindPort int `yaml:"bind_port"`
BindDisableTLS bool `yaml:"bind_disable_tls"`
TLSCertFilename string `yaml:"tls_cert_filename"`
TLSKeyFilename string `yaml:"tls_key_filename"`
EncryptionKey string `yaml:"encryption_key"`
VcenterUsername string `yaml:"vcenter_username"`
VcenterPassword string `yaml:"vcenter_password"`
VcenterInsecure bool `yaml:"vcenter_insecure"`
EnableLegacyAPI bool `yaml:"enable_legacy_api"`
AuthEnabled bool `yaml:"auth_enabled"`
AuthMode string `yaml:"auth_mode"`
AuthJWTSigningKey string `yaml:"auth_jwt_signing_key"`
AuthTokenLifespanMinutes int `yaml:"auth_token_lifespan_minutes"`
AuthJWTIssuer string `yaml:"auth_jwt_issuer"`
AuthJWTAudience string `yaml:"auth_jwt_audience"`
AuthClockSkewSeconds int `yaml:"auth_clock_skew_seconds"`
AuthGroupRoleMappings map[string]string `yaml:"auth_group_role_mappings"`
LDAPGroups []string `yaml:"ldap_groups"`
LDAPBindAddress string `yaml:"ldap_bind_address"`
LDAPBaseDN string `yaml:"ldap_base_dn"`
LDAPTrustCertFile string `yaml:"ldap_trust_cert_file"`
LDAPDisableValidation bool `yaml:"ldap_disable_validation"`
LDAPInsecure bool `yaml:"ldap_insecure"`
EnablePprof bool `yaml:"enable_pprof"`
VcenterEventPollingSeconds int `yaml:"vcenter_event_polling_seconds"`
VcenterInventoryPollingSeconds int `yaml:"vcenter_inventory_polling_seconds"`
VcenterInventorySnapshotSeconds int `yaml:"vcenter_inventory_snapshot_seconds"`
VcenterInventoryAggregateSeconds int `yaml:"vcenter_inventory_aggregate_seconds"`
HourlySnapshotConcurrency int `yaml:"hourly_snapshot_concurrency"`
HourlySnapshotMaxAgeDays int `yaml:"hourly_snapshot_max_age_days"`
DailySnapshotMaxAgeMonths int `yaml:"daily_snapshot_max_age_months"`
HourlyIndexMaxAgeDays int `yaml:"hourly_index_max_age_days"`
SnapshotCleanupCron string `yaml:"snapshot_cleanup_cron"`
ReportsDir string `yaml:"reports_dir"`
HourlyJobTimeoutSeconds int `yaml:"hourly_job_timeout_seconds"`
HourlySnapshotTimeoutSeconds int `yaml:"hourly_snapshot_timeout_seconds"`
HourlySnapshotRetrySeconds int `yaml:"hourly_snapshot_retry_seconds"`
HourlySnapshotMaxRetries int `yaml:"hourly_snapshot_max_retries"`
CaptureWriteBatchSize int `yaml:"capture_write_batch_size"`
SnapshotTableCompatMode *bool `yaml:"snapshot_table_compat_mode"`
AsyncReportGeneration *bool `yaml:"async_report_generation"`
PostgresVmHourlyPartitioning *bool `yaml:"postgres_vm_hourly_partitioning_enabled"`
ScheduledAggregationEngine string `yaml:"scheduled_aggregation_engine"`
DailyJobTimeoutSeconds int `yaml:"daily_job_timeout_seconds"`
MonthlyJobTimeoutSeconds int `yaml:"monthly_job_timeout_seconds"`
MonthlyAggregationGranularity string `yaml:"monthly_aggregation_granularity"`
MonthlyAggregationCron string `yaml:"monthly_aggregation_cron"`
CleanupJobTimeoutSeconds int `yaml:"cleanup_job_timeout_seconds"`
TenantsToFilter []string `yaml:"tenants_to_filter"`
NodeChargeClusters []string `yaml:"node_charge_clusters"`
SrmActiveActiveVms []string `yaml:"srm_activeactive_vms"`
VcenterAddresses []string `yaml:"vcenter_addresses"`
PostgresWorkMemMB int `yaml:"postgres_work_mem_mb"`
ReportSummaryPivots []ReportSummaryPivot `yaml:"report_summary_pivots"`
} `yaml:"settings"`
}
@@ -84,21 +144,43 @@ func (s *Settings) ReadYMLSettings() error {
// Init new YAML decode
d := yaml.NewDecoder(file)
d.KnownFields(true)
// Start YAML decoding from file
if err := d.Decode(&settings); err != nil {
return fmt.Errorf("unable to decode settings file : '%s'", err)
}
if err := applyDefaultsAndValidateSettings(&settings); err != nil {
return fmt.Errorf("invalid settings file: %w", err)
}
// Avoid logging sensitive fields (e.g., credentials).
redacted := settings
redacted.Settings.VcenterPassword = "REDACTED"
if redacted.Settings.EncryptionKey != "" {
redacted.Settings.EncryptionKey = "REDACTED"
}
if redacted.Settings.AuthJWTSigningKey != "" {
redacted.Settings.AuthJWTSigningKey = "REDACTED"
}
if redacted.Settings.DatabaseURL != "" {
redacted.Settings.DatabaseURL = redactDatabaseURL(redacted.Settings.DatabaseURL)
}
s.Logger.Debug("Updating settings", "settings", redacted)
s.Values = &settings
return nil
}
// redactDatabaseURL masks password material in a database connection string
// before it is logged. Both URI-style credentials (postgres://user:pass@host)
// and key/value-style credentials (password=..., quoted or bare) are replaced
// with REDACTED; blank input is returned untouched.
func redactDatabaseURL(databaseURL string) string {
	if strings.TrimSpace(databaseURL) == "" {
		return databaseURL
	}
	masked := postgresURIUserInfoPasswordPattern.ReplaceAllString(databaseURL, `${1}REDACTED${3}`)
	masked = postgresKVPasswordPattern.ReplaceAllString(masked, `${1}REDACTED`)
	return masked
}
func (s *Settings) WriteYMLSettings() error {
if s.Values == nil {
return errors.New("settings are not loaded")
@@ -112,9 +194,9 @@ func (s *Settings) WriteYMLSettings() error {
return fmt.Errorf("unable to encode settings file: %w", err)
}
mode := os.FileMode(0o644)
mode := os.FileMode(0o600)
if info, err := os.Stat(s.SettingsPath); err == nil {
mode = info.Mode().Perm()
mode = secureSettingsFileMode(info.Mode().Perm())
}
dir := filepath.Dir(s.SettingsPath)
@@ -144,3 +226,175 @@ func (s *Settings) WriteYMLSettings() error {
return nil
}
// secureSettingsFileMode clamps a settings-file permission mode: world and
// execute bits are always stripped (mask 0o660) while the owner is guaranteed
// read/write (OR 0o600).
func secureSettingsFileMode(mode os.FileMode) os.FileMode {
	return (mode & 0o660) | 0o600
}
// applyDefaultsAndValidateSettings normalises the freshly decoded settings in
// place (trimming, case folding, defaults) and then validates them. The
// generic defaults and role-mapping normalisation always run; the requirement
// checks after the auth_enabled guard apply only when auth is turned on.
func applyDefaultsAndValidateSettings(cfg *SettingsYML) error {
	if cfg == nil {
		return errors.New("settings config is nil")
	}
	s := &cfg.Settings
	// --- defaults and normalisation ---
	// Auth mode is compared case-insensitively and defaults to "disabled".
	s.AuthMode = strings.ToLower(strings.TrimSpace(s.AuthMode))
	if s.AuthMode == "" {
		s.AuthMode = authModeDisabled
	}
	if s.AuthTokenLifespanMinutes == 0 {
		s.AuthTokenLifespanMinutes = defaultAuthTokenLifespanMinutes
	}
	s.AuthJWTIssuer = strings.TrimSpace(s.AuthJWTIssuer)
	if s.AuthJWTIssuer == "" {
		s.AuthJWTIssuer = defaultAuthJWTIssuer
	}
	s.AuthJWTAudience = strings.TrimSpace(s.AuthJWTAudience)
	if s.AuthJWTAudience == "" {
		s.AuthJWTAudience = defaultAuthJWTAudience
	}
	if s.AuthClockSkewSeconds == 0 {
		s.AuthClockSkewSeconds = defaultAuthClockSkewSeconds
	}
	if s.CaptureWriteBatchSize <= 0 {
		s.CaptureWriteBatchSize = 1000
	}
	// Tri-state booleans use pointers so "unset in YAML" is distinguishable
	// from an explicit false; fill in the defaults here.
	if s.SnapshotTableCompatMode == nil {
		v := true
		s.SnapshotTableCompatMode = &v
	}
	if s.AsyncReportGeneration == nil {
		v := true
		s.AsyncReportGeneration = &v
	}
	if s.PostgresVmHourlyPartitioning == nil {
		v := false
		s.PostgresVmHourlyPartitioning = &v
	}
	s.ScheduledAggregationEngine = strings.ToLower(strings.TrimSpace(s.ScheduledAggregationEngine))
	if s.ScheduledAggregationEngine == "" {
		s.ScheduledAggregationEngine = scheduledAggregationEngineGo
	}
	s.MonthlyAggregationGranularity = strings.ToLower(strings.TrimSpace(s.MonthlyAggregationGranularity))
	if s.MonthlyAggregationGranularity == "" {
		s.MonthlyAggregationGranularity = "daily"
	}
	// Trim the free-form string settings before validating them.
	s.AuthJWTSigningKey = strings.TrimSpace(s.AuthJWTSigningKey)
	s.LDAPBindAddress = strings.TrimSpace(s.LDAPBindAddress)
	s.LDAPBaseDN = strings.TrimSpace(s.LDAPBaseDN)
	s.LDAPTrustCertFile = strings.TrimSpace(s.LDAPTrustCertFile)
	s.LDAPGroups = compactTrimmedStrings(s.LDAPGroups)
	// --- validation that applies regardless of auth_enabled ---
	if !isValidAuthMode(s.AuthMode) {
		return fmt.Errorf("settings.auth_mode must be one of %q, %q, %q", authModeDisabled, authModeOptional, authModeRequired)
	}
	if s.AuthTokenLifespanMinutes <= 0 {
		return errors.New("settings.auth_token_lifespan_minutes must be greater than 0")
	}
	if s.AuthClockSkewSeconds < 0 {
		return errors.New("settings.auth_clock_skew_seconds must be >= 0")
	}
	switch s.ScheduledAggregationEngine {
	case scheduledAggregationEngineGo, scheduledAggregationEngineSQL:
	default:
		return fmt.Errorf("settings.scheduled_aggregation_engine must be %q or %q", scheduledAggregationEngineGo, scheduledAggregationEngineSQL)
	}
	// Normalise group→role mappings: keys are trimmed, roles lower-cased,
	// and each role must be one the application understands.
	if len(s.AuthGroupRoleMappings) > 0 {
		normalized := make(map[string]string, len(s.AuthGroupRoleMappings))
		for groupDN, role := range s.AuthGroupRoleMappings {
			groupDN = strings.TrimSpace(groupDN)
			role = strings.ToLower(strings.TrimSpace(role))
			if groupDN == "" {
				return errors.New("settings.auth_group_role_mappings contains an empty group DN key")
			}
			if !isValidAuthRole(role) {
				return fmt.Errorf("settings.auth_group_role_mappings[%q] has unsupported role %q", groupDN, role)
			}
			normalized[groupDN] = role
		}
		s.AuthGroupRoleMappings = normalized
	}
	// --- requirements that only apply when auth is enabled ---
	if !s.AuthEnabled {
		return nil
	}
	if s.AuthMode == authModeDisabled {
		return errors.New("settings.auth_mode must be optional or required when settings.auth_enabled=true")
	}
	if s.AuthJWTSigningKey == "" {
		return errors.New("settings.auth_jwt_signing_key is required when settings.auth_enabled=true")
	}
	// The signing key must be decodable base64 and non-empty once decoded.
	decodedKey, err := decodeBase64(s.AuthJWTSigningKey)
	if err != nil {
		return errors.New("settings.auth_jwt_signing_key must be valid base64")
	}
	if len(decodedKey) == 0 {
		return errors.New("settings.auth_jwt_signing_key cannot decode to an empty value")
	}
	if s.LDAPBindAddress == "" {
		return errors.New("settings.ldap_bind_address is required when settings.auth_enabled=true")
	}
	if s.LDAPBaseDN == "" {
		return errors.New("settings.ldap_base_dn is required when settings.auth_enabled=true")
	}
	if len(s.AuthGroupRoleMappings) == 0 {
		return errors.New("settings.auth_group_role_mappings must define at least one mapping when settings.auth_enabled=true")
	}
	return nil
}
// isValidAuthMode reports whether mode is one of the supported auth modes
// (disabled, optional, required). Callers normalise case before calling.
func isValidAuthMode(mode string) bool {
	return mode == authModeDisabled ||
		mode == authModeOptional ||
		mode == authModeRequired
}
// isValidAuthRole reports whether role is a role the application understands
// (admin or viewer). Callers normalise case before calling.
func isValidAuthRole(role string) bool {
	return role == authRoleAdmin || role == authRoleViewer
}
// decodeBase64 decodes value, accepting any of the four common base64
// variants (standard/URL-safe, padded/unpadded). The first variant that
// decodes cleanly wins; an error is returned only when none do.
func decodeBase64(value string) ([]byte, error) {
	for _, enc := range []*base64.Encoding{
		base64.StdEncoding,
		base64.RawStdEncoding,
		base64.URLEncoding,
		base64.RawURLEncoding,
	} {
		if decoded, err := enc.DecodeString(value); err == nil {
			return decoded, nil
		}
	}
	return nil, errors.New("invalid base64 encoding")
}
// compactTrimmedStrings trims whitespace from every entry and drops blanks,
// returning nil when the input is empty or nothing survives.
func compactTrimmedStrings(values []string) []string {
	var kept []string
	for _, raw := range values {
		if trimmed := strings.TrimSpace(raw); trimmed != "" {
			kept = append(kept, trimmed)
		}
	}
	// A nil slice is returned both for empty input and all-blank input.
	return kept
}
@@ -0,0 +1,62 @@
package settings
import (
"bytes"
"log/slog"
"os"
"path/filepath"
"strings"
"testing"
)
// TestRedactDatabaseURL_PostgresURI checks that the password portion of a
// URI-style connection string (including percent-encoded characters) is
// masked while the rest of the URL is left intact.
func TestRedactDatabaseURL_PostgresURI(t *testing.T) {
	input := "postgres://vctp_user:Secr3tP%40ss@db-host:5432/vctp?sslmode=disable"
	got := redactDatabaseURL(input)
	want := "postgres://vctp_user:REDACTED@db-host:5432/vctp?sslmode=disable"
	if got != want {
		t.Fatalf("unexpected redaction result\nwant: %s\ngot: %s", want, got)
	}
}
// TestRedactDatabaseURL_PostgresKeyValue checks that a quoted password in a
// key/value-style connection string is masked (quotes included) while the
// other key/value pairs are preserved.
func TestRedactDatabaseURL_PostgresKeyValue(t *testing.T) {
	input := "host=db-host port=5432 dbname=vctp user=vctp_user password='P@ss:w0rd#%' sslmode=disable"
	got := redactDatabaseURL(input)
	want := "host=db-host port=5432 dbname=vctp user=vctp_user password=REDACTED sslmode=disable"
	if got != want {
		t.Fatalf("unexpected redaction result\nwant: %s\ngot: %s", want, got)
	}
}
// TestRedactDatabaseURL_UnchangedWhenNoPassword ensures redaction is a no-op
// for connection strings that contain no password material.
func TestRedactDatabaseURL_UnchangedWhenNoPassword(t *testing.T) {
	input := "host=db-host port=5432 dbname=vctp user=vctp_user sslmode=disable"
	got := redactDatabaseURL(input)
	if got != input {
		t.Fatalf("expected input to remain unchanged\nwant: %s\ngot: %s", input, got)
	}
}
// TestReadYMLSettingsRedactsAuthJWTSigningKey ensures the debug log emitted
// while loading settings never contains the raw auth_jwt_signing_key value,
// only the REDACTED marker.
func TestReadYMLSettingsRedactsAuthJWTSigningKey(t *testing.T) {
	tmpDir := t.TempDir()
	settingsPath := filepath.Join(tmpDir, "vctp.yml")
	content := `settings:
  auth_jwt_signing_key: "c2VjcmV0"
`
	if err := os.WriteFile(settingsPath, []byte(content), 0o600); err != nil {
		t.Fatalf("failed to write settings file: %v", err)
	}
	// Capture debug output so the "Updating settings" log line can be inspected.
	var output bytes.Buffer
	logger := slog.New(slog.NewTextHandler(&output, &slog.HandlerOptions{Level: slog.LevelDebug}))
	s := New(logger, settingsPath)
	if err := s.ReadYMLSettings(); err != nil {
		t.Fatalf("expected settings to load, got error: %v", err)
	}
	logged := output.String()
	if strings.Contains(logged, "c2VjcmV0") {
		t.Fatalf("expected auth_jwt_signing_key to be redacted in logs, got log output: %s", logged)
	}
	if !strings.Contains(logged, "REDACTED") {
		t.Fatalf("expected redacted marker in logs, got log output: %s", logged)
	}
}
+222
View File
@@ -0,0 +1,222 @@
package settings
import (
"io"
"log/slog"
"os"
"path/filepath"
"strings"
"testing"
)
// TestReadYMLSettingsRejectsUnknownField ensures strict YAML decoding is in
// effect: an unrecognised key must fail the load, and the error must name the
// offending key.
func TestReadYMLSettingsRejectsUnknownField(t *testing.T) {
	tmpDir := t.TempDir()
	settingsPath := filepath.Join(tmpDir, "vctp.yml")
	content := `settings:
  log_level: "info"
  unknown_field: true
`
	if err := os.WriteFile(settingsPath, []byte(content), 0o600); err != nil {
		t.Fatalf("failed to write settings file: %v", err)
	}
	logger := slog.New(slog.NewTextHandler(io.Discard, nil))
	s := New(logger, settingsPath)
	err := s.ReadYMLSettings()
	if err == nil {
		t.Fatal("expected unknown field decode error")
	}
	if !strings.Contains(strings.ToLower(err.Error()), "unknown_field") {
		t.Fatalf("expected error to mention unknown field, got: %v", err)
	}
}
// TestReadYMLSettingsAppliesAuthDefaults loads a minimal settings file and
// checks every default the loader is expected to fill in: auth mode/token
// lifetime/JWT issuer+audience/clock skew, capture batch size, snapshot and
// async-report flags, partitioning, aggregation engine, and granularity.
func TestReadYMLSettingsAppliesAuthDefaults(t *testing.T) {
	cfgPath := filepath.Join(t.TempDir(), "vctp.yml")
	yml := `settings:
  log_level: "info"
`
	if err := os.WriteFile(cfgPath, []byte(yml), 0o600); err != nil {
		t.Fatalf("failed to write settings file: %v", err)
	}
	s := New(slog.New(slog.NewTextHandler(io.Discard, nil)), cfgPath)
	if err := s.ReadYMLSettings(); err != nil {
		t.Fatalf("expected settings to load, got error: %v", err)
	}
	cfg := s.Values.Settings
	if cfg.AuthMode != authModeDisabled {
		t.Fatalf("expected default auth_mode=%q, got %q", authModeDisabled, cfg.AuthMode)
	}
	if cfg.AuthTokenLifespanMinutes != defaultAuthTokenLifespanMinutes {
		t.Fatalf("expected default auth_token_lifespan_minutes=%d, got %d", defaultAuthTokenLifespanMinutes, cfg.AuthTokenLifespanMinutes)
	}
	if cfg.AuthJWTIssuer != defaultAuthJWTIssuer {
		t.Fatalf("expected default auth_jwt_issuer=%q, got %q", defaultAuthJWTIssuer, cfg.AuthJWTIssuer)
	}
	if cfg.AuthJWTAudience != defaultAuthJWTAudience {
		t.Fatalf("expected default auth_jwt_audience=%q, got %q", defaultAuthJWTAudience, cfg.AuthJWTAudience)
	}
	if cfg.AuthClockSkewSeconds != defaultAuthClockSkewSeconds {
		t.Fatalf("expected default auth_clock_skew_seconds=%d, got %d", defaultAuthClockSkewSeconds, cfg.AuthClockSkewSeconds)
	}
	if cfg.CaptureWriteBatchSize != 1000 {
		t.Fatalf("expected default capture_write_batch_size=1000, got %d", cfg.CaptureWriteBatchSize)
	}
	// Pointer-typed booleans must be non-nil and carry their documented default.
	if cfg.SnapshotTableCompatMode == nil || !*cfg.SnapshotTableCompatMode {
		t.Fatalf("expected default snapshot_table_compat_mode=true, got %#v", cfg.SnapshotTableCompatMode)
	}
	if cfg.AsyncReportGeneration == nil || !*cfg.AsyncReportGeneration {
		t.Fatalf("expected default async_report_generation=true, got %#v", cfg.AsyncReportGeneration)
	}
	if cfg.PostgresVmHourlyPartitioning == nil || *cfg.PostgresVmHourlyPartitioning {
		t.Fatalf("expected default postgres_vm_hourly_partitioning_enabled=false, got %#v", cfg.PostgresVmHourlyPartitioning)
	}
	if cfg.ScheduledAggregationEngine != scheduledAggregationEngineGo {
		t.Fatalf("expected default scheduled_aggregation_engine=%q, got %q", scheduledAggregationEngineGo, cfg.ScheduledAggregationEngine)
	}
	if cfg.MonthlyAggregationGranularity != "daily" {
		t.Fatalf("expected default monthly_aggregation_granularity=daily, got %q", cfg.MonthlyAggregationGranularity)
	}
}
// TestReadYMLSettingsRejectsInvalidScheduledAggregationEngine verifies that an
// unsupported engine value fails validation with an error naming the key.
func TestReadYMLSettingsRejectsInvalidScheduledAggregationEngine(t *testing.T) {
	cfgPath := filepath.Join(t.TempDir(), "vctp.yml")
	yml := `settings:
  scheduled_aggregation_engine: "hybrid"
`
	if err := os.WriteFile(cfgPath, []byte(yml), 0o600); err != nil {
		t.Fatalf("failed to write settings file: %v", err)
	}
	s := New(slog.New(slog.NewTextHandler(io.Discard, nil)), cfgPath)
	err := s.ReadYMLSettings()
	if err == nil {
		t.Fatal("expected invalid scheduled_aggregation_engine to fail")
	}
	if !strings.Contains(strings.ToLower(err.Error()), "scheduled_aggregation_engine") {
		t.Fatalf("expected error to mention scheduled_aggregation_engine, got: %v", err)
	}
}
// TestReadYMLSettingsRejectsInvalidAuthMode verifies that an auth_mode outside
// the supported set fails validation with an error naming the key.
func TestReadYMLSettingsRejectsInvalidAuthMode(t *testing.T) {
	cfgPath := filepath.Join(t.TempDir(), "vctp.yml")
	yml := `settings:
  auth_mode: "sometimes"
`
	if err := os.WriteFile(cfgPath, []byte(yml), 0o600); err != nil {
		t.Fatalf("failed to write settings file: %v", err)
	}
	s := New(slog.New(slog.NewTextHandler(io.Discard, nil)), cfgPath)
	err := s.ReadYMLSettings()
	if err == nil {
		t.Fatal("expected invalid auth_mode to fail")
	}
	if !strings.Contains(strings.ToLower(err.Error()), "auth_mode") {
		t.Fatalf("expected error to mention auth_mode, got: %v", err)
	}
}
// TestReadYMLSettingsRejectsAuthEnabledWithoutSigningKey verifies that enabling
// auth while omitting auth_jwt_signing_key is a fatal configuration error,
// even when LDAP and role-mapping settings are otherwise complete.
func TestReadYMLSettingsRejectsAuthEnabledWithoutSigningKey(t *testing.T) {
	cfgPath := filepath.Join(t.TempDir(), "vctp.yml")
	yml := `settings:
  auth_enabled: true
  auth_mode: "required"
  ldap_bind_address: "ldaps://ldap.example.com:636"
  ldap_base_dn: "dc=example,dc=com"
  auth_group_role_mappings:
    "cn=vctp-admin,ou=groups,dc=example,dc=com": "admin"
`
	if err := os.WriteFile(cfgPath, []byte(yml), 0o600); err != nil {
		t.Fatalf("failed to write settings file: %v", err)
	}
	s := New(slog.New(slog.NewTextHandler(io.Discard, nil)), cfgPath)
	err := s.ReadYMLSettings()
	if err == nil {
		t.Fatal("expected auth_enabled=true without signing key to fail")
	}
	if !strings.Contains(strings.ToLower(err.Error()), "auth_jwt_signing_key") {
		t.Fatalf("expected error to mention auth_jwt_signing_key, got: %v", err)
	}
}
// TestReadYMLSettingsAcceptsValidAuthConfigAndNormalizesMappings loads a fully
// populated auth configuration and verifies the loader normalizes it: mode is
// lowercased, issuer/audience are trimmed, ldap_groups entries are trimmed,
// and group-role mapping keys/values are trimmed with roles canonicalized.
func TestReadYMLSettingsAcceptsValidAuthConfigAndNormalizesMappings(t *testing.T) {
	cfgPath := filepath.Join(t.TempDir(), "vctp.yml")
	yml := `settings:
  auth_enabled: true
  auth_mode: "REQUIRED"
  auth_jwt_signing_key: "c2VjcmV0"
  auth_token_lifespan_minutes: 90
  auth_jwt_issuer: " custom-issuer "
  auth_jwt_audience: " custom-audience "
  auth_clock_skew_seconds: 15
  ldap_bind_address: "ldaps://ldap.example.com:636"
  ldap_base_dn: "dc=example,dc=com"
  ldap_groups:
    - " cn=vctp-viewers,ou=groups,dc=example,dc=com "
  auth_group_role_mappings:
    " cn=vctp-admins,ou=groups,dc=example,dc=com ": " ADMIN "
    "cn=vctp-viewers,ou=groups,dc=example,dc=com": "viewer"
`
	if err := os.WriteFile(cfgPath, []byte(yml), 0o600); err != nil {
		t.Fatalf("failed to write settings file: %v", err)
	}
	s := New(slog.New(slog.NewTextHandler(io.Discard, nil)), cfgPath)
	if err := s.ReadYMLSettings(); err != nil {
		t.Fatalf("expected valid auth config, got error: %v", err)
	}
	cfg := s.Values.Settings
	if cfg.AuthMode != authModeRequired {
		t.Fatalf("expected normalized auth_mode=%q, got %q", authModeRequired, cfg.AuthMode)
	}
	if cfg.AuthJWTIssuer != "custom-issuer" {
		t.Fatalf("expected trimmed auth_jwt_issuer, got %q", cfg.AuthJWTIssuer)
	}
	if cfg.AuthJWTAudience != "custom-audience" {
		t.Fatalf("expected trimmed auth_jwt_audience, got %q", cfg.AuthJWTAudience)
	}
	if len(cfg.LDAPGroups) != 1 || cfg.LDAPGroups[0] != "cn=vctp-viewers,ou=groups,dc=example,dc=com" {
		t.Fatalf("expected ldap_groups to be compacted+trimmed, got %#v", cfg.LDAPGroups)
	}
	if cfg.AuthGroupRoleMappings["cn=vctp-admins,ou=groups,dc=example,dc=com"] != authRoleAdmin {
		t.Fatalf("expected admin mapping to normalize role to %q, got %#v", authRoleAdmin, cfg.AuthGroupRoleMappings)
	}
}
// TestSecureSettingsFileMode table-tests the permission-clamping helper for
// the settings file: strict modes pass through, world access and exec bits are
// stripped, and an empty mode gains owner read/write.
func TestSecureSettingsFileMode(t *testing.T) {
	for _, tc := range []struct {
		name     string
		in, want os.FileMode
	}{
		{name: "already strict", in: 0o600, want: 0o600},
		{name: "group read allowed", in: 0o640, want: 0o640},
		{name: "too open world", in: 0o666, want: 0o660},
		{name: "exec bits stripped", in: 0o755, want: 0o640},
		{name: "no perms gets owner rw", in: 0o000, want: 0o600},
	} {
		t.Run(tc.name, func(t *testing.T) {
			if got := secureSettingsFileMode(tc.in); got != tc.want {
				t.Fatalf("unexpected mode conversion: in=%#o got=%#o want=%#o", tc.in, got, tc.want)
			}
		})
	}
}
+369
View File
@@ -0,0 +1,369 @@
package tasks
import (
	"context"
	"database/sql"
	"fmt"
	"log/slog"
	"slices"
	"time"

	"vctp/db"

	"github.com/jmoiron/sqlx"
)
// AggregationBenchmarkStats summarizes wall-clock durations across the runs of
// one benchmark mode (Go or SQL core) within one phase.
type AggregationBenchmarkStats struct {
	Runs   int           // number of runs folded into these statistics
	Min    time.Duration // fastest run
	Median time.Duration // middle run (mean of the two middle runs when Runs is even)
	Avg    time.Duration // arithmetic mean over all runs
	Max    time.Duration // slowest run
}
// AggregationBenchmarkReport is the result of RunCanonicalAggregationBenchmark:
// per-phase (daily/monthly) timing statistics for both aggregation cores.
// A phase that was skipped leaves its window fields at their zero value.
type AggregationBenchmarkReport struct {
	Runs                 int       // requested runs per mode per phase
	DailyWindowStart     time.Time // start of the benchmarked daily window (zero if skipped)
	DailyWindowEnd       time.Time
	DailyGo              AggregationBenchmarkStats
	DailySQL             AggregationBenchmarkStats
	DailyGoRowsWritten   int64 // rows written by the last daily Go run
	DailySQLRowsWritten  int64 // rows written by the last daily SQL run
	MonthlyWindowStart   time.Time // start of the benchmarked monthly window (zero if skipped)
	MonthlyWindowEnd     time.Time
	MonthlyGo            AggregationBenchmarkStats
	MonthlySQL           AggregationBenchmarkStats
	MonthlyGoRowsWritten  int64 // rows written by the last monthly Go run
	MonthlySQLRowsWritten int64 // rows written by the last monthly SQL run
}
// RunCanonicalAggregationBenchmark compares Go and SQL aggregation cores on canonical cache tables.
//
// For each phase — daily (over vm_hourly_stats) and monthly (over
// vm_daily_rollup) — it selects the most recent complete window, executes both
// the Go core and the SQL core `runs` times, and records per-mode duration
// statistics plus the row count written by the final run. A phase with no
// benchmarkable window is skipped with a log message; an error is returned if
// both phases are skipped. runs <= 0 defaults to 3.
func (c *CronTask) RunCanonicalAggregationBenchmark(ctx context.Context, runs int) (AggregationBenchmarkReport, error) {
	if runs <= 0 {
		runs = 3
	}
	report := AggregationBenchmarkReport{Runs: runs}
	dbConn := c.Database.DB()
	logger := loggerFromCtx(ctx, c.Logger)

	hourlyStart, hourlyEnd, err := latestDailyWindowFromHourlyCache(ctx, dbConn)
	if err != nil {
		return report, err
	}
	if !hourlyStart.IsZero() {
		if logger != nil {
			logger.Info("canonical benchmark phase starting", "phase", "daily", "window_start", hourlyStart.Format(time.RFC3339), "window_end", hourlyEnd.Format(time.RFC3339), "runs", runs)
		}
		report.DailyWindowStart = hourlyStart
		report.DailyWindowEnd = hourlyEnd
		goStats, sqlStats, goRows, sqlRows, phaseErr := benchmarkPhase(ctx, logger, "daily", runs,
			func(ctx context.Context) (time.Duration, int64, error) {
				return c.benchmarkDailyGoCore(ctx, hourlyStart, hourlyEnd)
			},
			func(ctx context.Context) (time.Duration, int64, error) {
				return c.benchmarkDailySQLCore(ctx, hourlyStart, hourlyEnd)
			})
		if phaseErr != nil {
			return report, phaseErr
		}
		report.DailyGo = goStats
		report.DailySQL = sqlStats
		report.DailyGoRowsWritten = goRows
		report.DailySQLRowsWritten = sqlRows
		if logger != nil {
			logger.Info("canonical benchmark phase complete", "phase", "daily", "runs", runs)
		}
	} else if logger != nil {
		logger.Info("canonical benchmark phase skipped", "phase", "daily", "reason", "no benchmarkable window found in vm_hourly_stats")
	}

	monthlyStart, monthlyEnd, err := latestMonthlyWindowFromDailyRollup(ctx, dbConn)
	if err != nil {
		return report, err
	}
	if !monthlyStart.IsZero() {
		if logger != nil {
			logger.Info("canonical benchmark phase starting", "phase", "monthly", "window_start", monthlyStart.Format(time.RFC3339), "window_end", monthlyEnd.Format(time.RFC3339), "runs", runs)
		}
		report.MonthlyWindowStart = monthlyStart
		report.MonthlyWindowEnd = monthlyEnd
		goStats, sqlStats, goRows, sqlRows, phaseErr := benchmarkPhase(ctx, logger, "monthly", runs,
			func(ctx context.Context) (time.Duration, int64, error) {
				return c.benchmarkMonthlyGoCore(ctx, monthlyStart, monthlyEnd)
			},
			func(ctx context.Context) (time.Duration, int64, error) {
				return c.benchmarkMonthlySQLCore(ctx, monthlyStart, monthlyEnd)
			})
		if phaseErr != nil {
			return report, phaseErr
		}
		report.MonthlyGo = goStats
		report.MonthlySQL = sqlStats
		report.MonthlyGoRowsWritten = goRows
		report.MonthlySQLRowsWritten = sqlRows
		if logger != nil {
			logger.Info("canonical benchmark phase complete", "phase", "monthly", "runs", runs)
		}
	} else if logger != nil {
		logger.Info("canonical benchmark phase skipped", "phase", "monthly", "reason", "no benchmarkable window found in vm_daily_rollup")
	}

	if report.DailyWindowStart.IsZero() && report.MonthlyWindowStart.IsZero() {
		return report, fmt.Errorf("no benchmarkable canonical windows found (vm_hourly_stats/vm_daily_rollup are empty)")
	}
	return report, nil
}

// benchmarkCoreFunc executes one timed aggregation pass and reports its
// duration and the number of rows it wrote.
type benchmarkCoreFunc func(ctx context.Context) (time.Duration, int64, error)

// benchmarkPhase runs the go and sql cores `runs` times each, interleaved
// (go then sql per iteration, as the original inline loops did), and returns
// duration statistics per mode plus the row counts of the final run of each.
func benchmarkPhase(ctx context.Context, logger *slog.Logger, phase string, runs int, goCore, sqlCore benchmarkCoreFunc) (goStats, sqlStats AggregationBenchmarkStats, goRows, sqlRows int64, err error) {
	goDurations := make([]time.Duration, 0, runs)
	sqlDurations := make([]time.Duration, 0, runs)
	for run := 1; run <= runs; run++ {
		var dur time.Duration
		var rows int64
		dur, rows, err = benchmarkSingleRun(ctx, logger, phase, "go", run, runs, goCore)
		if err != nil {
			return
		}
		goDurations = append(goDurations, dur)
		goRows = rows // deliberately keeps only the last run's count
		dur, rows, err = benchmarkSingleRun(ctx, logger, phase, "sql", run, runs, sqlCore)
		if err != nil {
			return
		}
		sqlDurations = append(sqlDurations, dur)
		sqlRows = rows
	}
	return summarizeDurations(goDurations), summarizeDurations(sqlDurations), goRows, sqlRows, nil
}

// benchmarkSingleRun logs, times, and error-wraps a single core execution.
// The wrapped error reads e.g. "daily go benchmark run 2 failed: ...",
// matching the previous inline format.
func benchmarkSingleRun(ctx context.Context, logger *slog.Logger, phase, mode string, run, runs int, core benchmarkCoreFunc) (time.Duration, int64, error) {
	if logger != nil {
		logger.Info("canonical benchmark run starting", "phase", phase, "mode", mode, "run", run, "runs", runs)
	}
	dur, rows, err := core(ctx)
	if err != nil {
		return 0, 0, fmt.Errorf("%s %s benchmark run %d failed: %w", phase, mode, run, err)
	}
	if logger != nil {
		logger.Info("canonical benchmark run complete", "phase", phase, "mode", mode, "run", run, "runs", runs, "duration", dur, "rows", rows)
	}
	return dur, rows, nil
}
// benchmarkDailyGoCore times one Go-side daily aggregation pass: scan the
// canonical hourly cache for [dayStart, dayEnd) and insert the aggregates into
// a throwaway summary table. Returns elapsed time and rows written; the
// temporary table is dropped on exit.
func (c *CronTask) benchmarkDailyGoCore(ctx context.Context, dayStart, dayEnd time.Time) (time.Duration, int64, error) {
	table, err := benchmarkSummaryTableName("benchmark_daily_go")
	if err != nil {
		return 0, 0, err
	}
	conn := c.Database.DB()
	if err = db.EnsureSummaryTable(ctx, conn, table); err != nil {
		return 0, 0, err
	}
	defer dropSnapshotTable(ctx, conn, table)

	begin := time.Now()
	aggMap, snapTimes, err := c.scanHourlyCache(ctx, dayStart, dayEnd)
	if err != nil {
		return 0, 0, err
	}
	if len(aggMap) == 0 || len(snapTimes) == 0 {
		return 0, 0, fmt.Errorf("no daily rows found in canonical hourly cache")
	}
	perVcenter := sampleCountsByVcenter(aggMap)
	if err = c.insertDailyAggregates(ctx, table, aggMap, len(snapTimes), perVcenter); err != nil {
		return 0, 0, err
	}
	elapsed := time.Since(begin)

	written, err := db.TableRowCount(ctx, conn, table)
	if err != nil {
		return 0, 0, err
	}
	return elapsed, written, nil
}
// benchmarkDailySQLCore times one SQL-side daily aggregation pass: a single
// INSERT...SELECT over the canonical hourly cache into a throwaway summary
// table. Query construction happens outside the timed region.
func (c *CronTask) benchmarkDailySQLCore(ctx context.Context, dayStart, dayEnd time.Time) (time.Duration, int64, error) {
	table, err := benchmarkSummaryTableName("benchmark_daily_sql")
	if err != nil {
		return 0, 0, err
	}
	conn := c.Database.DB()
	if err = db.EnsureSummaryTable(ctx, conn, table); err != nil {
		return 0, 0, err
	}
	defer dropSnapshotTable(ctx, conn, table)

	insertQuery, err := db.BuildDailySummaryInsert(table, buildCanonicalHourlySummaryUnion(dayStart, dayEnd))
	if err != nil {
		return 0, 0, err
	}
	begin := time.Now()
	if _, execErr := conn.ExecContext(ctx, insertQuery); execErr != nil {
		return 0, 0, execErr
	}
	elapsed := time.Since(begin)

	written, err := db.TableRowCount(ctx, conn, table)
	if err != nil {
		return 0, 0, err
	}
	return elapsed, written, nil
}
// benchmarkMonthlyGoCore times one Go-side monthly aggregation pass: scan the
// canonical daily rollup for [monthStart, monthEnd) and insert the aggregates
// into a throwaway summary table, which is dropped on exit.
func (c *CronTask) benchmarkMonthlyGoCore(ctx context.Context, monthStart, monthEnd time.Time) (time.Duration, int64, error) {
	table, err := benchmarkSummaryTableName("benchmark_monthly_go")
	if err != nil {
		return 0, 0, err
	}
	conn := c.Database.DB()
	if err = db.EnsureSummaryTable(ctx, conn, table); err != nil {
		return 0, 0, err
	}
	defer dropSnapshotTable(ctx, conn, table)

	begin := time.Now()
	aggMap, err := c.scanDailyRollup(ctx, monthStart, monthEnd)
	if err != nil {
		return 0, 0, err
	}
	if len(aggMap) == 0 {
		return 0, 0, fmt.Errorf("no monthly rows found in canonical daily rollup")
	}
	if err = c.insertMonthlyAggregates(ctx, table, aggMap); err != nil {
		return 0, 0, err
	}
	elapsed := time.Since(begin)

	written, err := db.TableRowCount(ctx, conn, table)
	if err != nil {
		return 0, 0, err
	}
	return elapsed, written, nil
}
// benchmarkMonthlySQLCore times one SQL-side monthly aggregation pass: a
// single INSERT...SELECT over the canonical daily rollup into a throwaway
// summary table. Query construction happens outside the timed region.
func (c *CronTask) benchmarkMonthlySQLCore(ctx context.Context, monthStart, monthEnd time.Time) (time.Duration, int64, error) {
	table, err := benchmarkSummaryTableName("benchmark_monthly_sql")
	if err != nil {
		return 0, 0, err
	}
	conn := c.Database.DB()
	if err = db.EnsureSummaryTable(ctx, conn, table); err != nil {
		return 0, 0, err
	}
	defer dropSnapshotTable(ctx, conn, table)

	insertQuery, err := db.BuildMonthlySummaryInsert(table, buildCanonicalDailyRollupSummaryUnion(monthStart, monthEnd))
	if err != nil {
		return 0, 0, err
	}
	begin := time.Now()
	if _, execErr := conn.ExecContext(ctx, insertQuery); execErr != nil {
		return 0, 0, execErr
	}
	elapsed := time.Since(begin)

	written, err := db.TableRowCount(ctx, conn, table)
	if err != nil {
		return 0, 0, err
	}
	return elapsed, written, nil
}
// benchmarkSummaryTableName derives a unique, validated name for a throwaway
// benchmark summary table by suffixing prefix with a UTC nanosecond timestamp.
func benchmarkSummaryTableName(prefix string) (string, error) {
	suffix := time.Now().UTC().UnixNano()
	return db.SafeTableName(fmt.Sprintf("%s_%d", prefix, suffix))
}
// latestDailyWindowFromHourlyCache returns the UTC day window [start, end)
// containing the newest SnapshotTime in vm_hourly_stats. Zero times with a nil
// error mean there is nothing to benchmark (table missing or no usable rows).
func latestDailyWindowFromHourlyCache(ctx context.Context, dbConn *sqlx.DB) (time.Time, time.Time, error) {
	var none time.Time
	if !db.TableExists(ctx, dbConn, "vm_hourly_stats") {
		return none, none, nil
	}
	maxQuery := dbConn.Rebind(`
		SELECT MAX("SnapshotTime")
		FROM vm_hourly_stats
		WHERE "SnapshotTime" > ?
	`)
	var newest sql.NullInt64
	if err := dbConn.GetContext(ctx, &newest, maxQuery, 0); err != nil {
		return none, none, err
	}
	if !newest.Valid || newest.Int64 <= 0 {
		return none, none, nil
	}
	// Truncate the newest snapshot to its UTC day boundary.
	ts := time.Unix(newest.Int64, 0).UTC()
	start := time.Date(ts.Year(), ts.Month(), ts.Day(), 0, 0, 0, 0, time.UTC)
	end := start.AddDate(0, 0, 1)
	countQuery := dbConn.Rebind(`
		SELECT COUNT(1)
		FROM vm_hourly_stats
		WHERE "SnapshotTime" >= ? AND "SnapshotTime" < ?
	`)
	var rows int64
	if err := dbConn.GetContext(ctx, &rows, countQuery, start.Unix(), end.Unix()); err != nil {
		return none, none, err
	}
	if rows == 0 {
		return none, none, nil
	}
	return start, end, nil
}
// latestMonthlyWindowFromDailyRollup returns the UTC month window [start, end)
// containing the newest Date in vm_daily_rollup. Zero times with a nil error
// mean there is nothing to benchmark (table missing or no usable rows).
func latestMonthlyWindowFromDailyRollup(ctx context.Context, dbConn *sqlx.DB) (time.Time, time.Time, error) {
	var none time.Time
	if !db.TableExists(ctx, dbConn, "vm_daily_rollup") {
		return none, none, nil
	}
	maxQuery := dbConn.Rebind(`
		SELECT MAX("Date")
		FROM vm_daily_rollup
		WHERE "Date" > ?
	`)
	var newest sql.NullInt64
	if err := dbConn.GetContext(ctx, &newest, maxQuery, 0); err != nil {
		return none, none, err
	}
	if !newest.Valid || newest.Int64 <= 0 {
		return none, none, nil
	}
	// Truncate the newest rollup date to the first of its UTC month.
	ts := time.Unix(newest.Int64, 0).UTC()
	start := time.Date(ts.Year(), ts.Month(), 1, 0, 0, 0, 0, time.UTC)
	end := start.AddDate(0, 1, 0)
	countQuery := dbConn.Rebind(`
		SELECT COUNT(1)
		FROM vm_daily_rollup
		WHERE "Date" >= ? AND "Date" < ?
	`)
	var rows int64
	if err := dbConn.GetContext(ctx, &rows, countQuery, start.Unix(), end.Unix()); err != nil {
		return none, none, err
	}
	if rows == 0 {
		return none, none, nil
	}
	return start, end, nil
}
// summarizeDurations computes min/median/avg/max statistics over a set of run
// durations. The input is not mutated; an empty input yields the zero value.
func summarizeDurations(values []time.Duration) AggregationBenchmarkStats {
	n := len(values)
	if n == 0 {
		return AggregationBenchmarkStats{}
	}
	sorted := slices.Clone(values)
	slices.Sort(sorted)
	var total time.Duration
	for _, d := range sorted {
		total += d
	}
	var median time.Duration
	if n%2 == 0 {
		median = (sorted[n/2-1] + sorted[n/2]) / 2
	} else {
		median = sorted[n/2]
	}
	return AggregationBenchmarkStats{
		Runs:   n,
		Min:    sorted[0],
		Median: median,
		Avg:    total / time.Duration(n),
		Max:    sorted[n-1],
	}
}
+589
View File
@@ -0,0 +1,589 @@
package tasks
import (
"context"
"fmt"
"io"
"log/slog"
"math"
"testing"
"time"
"vctp/db"
"github.com/jmoiron/sqlx"
)
// tasksTestDatabase is a minimal test double for the database dependency that
// CronTask consumes; it wraps a raw *sqlx.DB plus an optional logger.
type tasksTestDatabase struct {
	dbConn *sqlx.DB
	logger *slog.Logger
}

// DB returns the wrapped connection.
func (d *tasksTestDatabase) DB() *sqlx.DB { return d.dbConn }

// Queries returns nil: these tests drive raw SQL only, never the query layer.
func (d *tasksTestDatabase) Queries() db.Querier { return nil }

// Logger returns the configured logger, or a discard logger when unset.
func (d *tasksTestDatabase) Logger() *slog.Logger {
	if d.logger != nil {
		return d.logger
	}
	return slog.New(slog.NewTextHandler(io.Discard, nil))
}

// Close closes the wrapped connection.
func (d *tasksTestDatabase) Close() error { return d.dbConn.Close() }
// dailySummaryRow mirrors the columns read back from a daily summary table in
// the golden-parity tests; db tags match the quoted column names.
type dailySummaryRow struct {
	Name               string  `db:"Name"`
	Vcenter            string  `db:"Vcenter"`
	VmId               string  `db:"VmId"`
	VmUuid             string  `db:"VmUuid"`
	ResourcePool       string  `db:"ResourcePool"`
	CreationTime       int64   `db:"CreationTime"`
	DeletionTime       int64   `db:"DeletionTime"`
	SnapshotTime       int64   `db:"SnapshotTime"`
	SamplesPresent     int64   `db:"SamplesPresent"`
	AvgVcpuCount       float64 `db:"AvgVcpuCount"`
	AvgRamGB           float64 `db:"AvgRamGB"`
	AvgProvisionedDisk float64 `db:"AvgProvisionedDisk"`
	AvgIsPresent       float64 `db:"AvgIsPresent"`
	PoolTinPct         float64 `db:"PoolTinPct"`
	PoolBronzePct      float64 `db:"PoolBronzePct"`
	PoolSilverPct      float64 `db:"PoolSilverPct"`
	PoolGoldPct        float64 `db:"PoolGoldPct"`
}
// monthlySummaryRow mirrors the columns read back from a monthly summary table
// in the golden-parity tests. Identical to dailySummaryRow except monthly
// summaries carry no per-day SnapshotTime column.
type monthlySummaryRow struct {
	Name               string  `db:"Name"`
	Vcenter            string  `db:"Vcenter"`
	VmId               string  `db:"VmId"`
	VmUuid             string  `db:"VmUuid"`
	ResourcePool       string  `db:"ResourcePool"`
	CreationTime       int64   `db:"CreationTime"`
	DeletionTime       int64   `db:"DeletionTime"`
	SamplesPresent     int64   `db:"SamplesPresent"`
	AvgVcpuCount       float64 `db:"AvgVcpuCount"`
	AvgRamGB           float64 `db:"AvgRamGB"`
	AvgProvisionedDisk float64 `db:"AvgProvisionedDisk"`
	AvgIsPresent       float64 `db:"AvgIsPresent"`
	PoolTinPct         float64 `db:"PoolTinPct"`
	PoolBronzePct      float64 `db:"PoolBronzePct"`
	PoolSilverPct      float64 `db:"PoolSilverPct"`
	PoolGoldPct        float64 `db:"PoolGoldPct"`
}
// hourlySeedRow is the fixture input written into both vm_hourly_stats and the
// per-hour snapshot tables when building parity test data. Boolean-ish columns
// (IsTemplate, PoweredOn, SrmPlaceholder) are seeded as "TRUE"/"FALSE" strings,
// matching how the fixtures populate them.
type hourlySeedRow struct {
	SnapshotTime    int64 // unix seconds of the snapshot hour
	Name            string
	Vcenter         string
	VmID            string
	VmUUID          string
	ResourcePool    string
	Datacenter      string
	Cluster         string
	Folder          string
	ProvisionedDisk float64
	VcpuCount       int64
	RamGB           int64
	CreationTime    int64 // unix seconds; zero means unknown/unset
	DeletionTime    int64 // unix seconds; zero means not deleted
	IsTemplate      string
	PoweredOn       string
	SrmPlaceholder  string
}
// dailySeedRow carries one day of pre-aggregated values for a VM, used to seed
// both a daily summary table and the vm_daily_rollup cache in the monthly
// parity test. The Avg*/Pool*Pct fields mirror daily summary columns, while
// the trailing running-total fields (TotalSamples, Sum*, *Hits) presumably
// feed the rollup path — TODO(review): confirm against the column lists in
// insertDailySummarySeedRow/insertDailyRollupSeedRow.
type dailySeedRow struct {
	SnapshotTime    int64 // unix seconds of the day being seeded
	Name            string
	Vcenter         string
	VmID            string
	VmUUID          string
	ResourcePool    string
	Datacenter      string
	Cluster         string
	Folder          string
	ProvisionedDisk float64
	VcpuCount       int64
	RamGB           int64
	CreationTime    int64
	DeletionTime    int64
	IsTemplate      string
	PoweredOn       string
	SrmPlaceholder  string
	// Daily-summary-style aggregates.
	SamplesPresent     int64
	AvgVcpuCount       float64
	AvgRamGB           float64
	AvgProvisionedDisk float64
	AvgIsPresent       float64
	PoolTinPct         float64
	PoolBronzePct      float64
	PoolSilverPct      float64
	PoolGoldPct        float64
	Tin                float64
	Bronze             float64
	Silver             float64
	Gold               float64
	// Rollup-style running totals.
	TotalSamples int64
	SumVcpu      int64
	SumRam       int64
	SumDisk      float64
	TinHits      int64
	BronzeHits   int64
	SilverHits   int64
	GoldHits     int64
}
// TestDailyGoldenParity_SQLUnionVsGoCanonical seeds identical hourly data into
// both the canonical vm_hourly_stats cache and the legacy per-hour snapshot
// tables, runs the SQL-union daily aggregation against the snapshot tables and
// the Go cache-scan aggregation against vm_hourly_stats, and asserts the two
// summary tables agree row-for-row. It then spot-checks a few derived values.
func TestDailyGoldenParity_SQLUnionVsGoCanonical(t *testing.T) {
	ctx := context.Background()
	dbConn := newTasksTestDB(t)
	task := newTasksTestCronTask(dbConn)
	if err := db.EnsureVmHourlyStats(ctx, dbConn); err != nil {
		t.Fatalf("failed to ensure vm_hourly_stats: %v", err)
	}
	// Three snapshot hours within one UTC day.
	dayStart := time.Date(2026, time.January, 15, 0, 0, 0, 0, time.UTC)
	dayEnd := dayStart.AddDate(0, 0, 1)
	t1 := dayStart.Add(1 * time.Hour).Unix()
	t2 := dayStart.Add(2 * time.Hour).Unix()
	t3 := dayStart.Add(3 * time.Hour).Unix()
	// Edge cases covered: alpha changes pool/size between snapshots and gains a
	// CreationTime; bravo is deleted mid-day; charlie is stable across two
	// snapshots; vm-template is a template and must be excluded by the filter.
	rows := []hourlySeedRow{
		{SnapshotTime: t1, Name: "vm-alpha", Vcenter: "vc-a", VmID: "vm-1", VmUUID: "uuid-1", ResourcePool: "Tin", Datacenter: "dc-1", Cluster: "cluster-1", Folder: "/prod", ProvisionedDisk: 100, VcpuCount: 2, RamGB: 8, CreationTime: 0, IsTemplate: "FALSE", PoweredOn: "TRUE", SrmPlaceholder: "FALSE"},
		{SnapshotTime: t3, Name: "vm-alpha", Vcenter: "vc-a", VmID: "vm-1", VmUUID: "uuid-1", ResourcePool: "Gold", Datacenter: "dc-1", Cluster: "cluster-1", Folder: "/prod", ProvisionedDisk: 120, VcpuCount: 4, RamGB: 16, CreationTime: dayStart.Add(30 * time.Minute).Unix(), IsTemplate: "FALSE", PoweredOn: "TRUE", SrmPlaceholder: "FALSE"},
		{SnapshotTime: t2, Name: "vm-bravo", Vcenter: "vc-a", VmID: "vm-2", VmUUID: "uuid-2", ResourcePool: "Bronze", Datacenter: "dc-1", Cluster: "cluster-1", Folder: "/prod", ProvisionedDisk: 30, VcpuCount: 1, RamGB: 2, CreationTime: dayStart.Add(-2 * time.Hour).Unix(), DeletionTime: dayStart.Add(4 * time.Hour).Unix(), IsTemplate: "FALSE", PoweredOn: "TRUE", SrmPlaceholder: "FALSE"},
		{SnapshotTime: t1, Name: "vm-charlie", Vcenter: "vc-a", VmID: "vm-3", VmUUID: "uuid-3", ResourcePool: "Silver", Datacenter: "dc-1", Cluster: "cluster-2", Folder: "/prod2", ProvisionedDisk: 50, VcpuCount: 2, RamGB: 4, CreationTime: dayStart.Add(-5 * time.Hour).Unix(), IsTemplate: "FALSE", PoweredOn: "TRUE", SrmPlaceholder: "FALSE"},
		{SnapshotTime: t3, Name: "vm-charlie", Vcenter: "vc-a", VmID: "vm-3", VmUUID: "uuid-3", ResourcePool: "Silver", Datacenter: "dc-1", Cluster: "cluster-2", Folder: "/prod2", ProvisionedDisk: 50, VcpuCount: 2, RamGB: 4, CreationTime: dayStart.Add(-5 * time.Hour).Unix(), IsTemplate: "FALSE", PoweredOn: "TRUE", SrmPlaceholder: "FALSE"},
		{SnapshotTime: t3, Name: "vm-template", Vcenter: "vc-a", VmID: "vm-t", VmUUID: "uuid-t", ResourcePool: "Tin", Datacenter: "dc-1", Cluster: "cluster-3", Folder: "/templates", ProvisionedDisk: 500, VcpuCount: 16, RamGB: 64, CreationTime: dayStart.Add(-10 * time.Hour).Unix(), IsTemplate: "TRUE", PoweredOn: "FALSE", SrmPlaceholder: "FALSE"},
	}
	// Seed the canonical cache (Go path input).
	for _, row := range rows {
		if err := insertHourlyCacheSeedRow(ctx, dbConn, row); err != nil {
			t.Fatalf("failed to insert vm_hourly_stats row: %v", err)
		}
	}
	// Create the per-hour snapshot tables (SQL path input) and seed them with
	// the same rows.
	hourlyTableTimes := []int64{t1, t2, t3}
	hourlyTables := make([]string, 0, len(hourlyTableTimes))
	for _, ts := range hourlyTableTimes {
		tableName, err := hourlyInventoryTableName(time.Unix(ts, 0).UTC())
		if err != nil {
			t.Fatalf("failed to build hourly table name: %v", err)
		}
		hourlyTables = append(hourlyTables, tableName)
		if err := db.EnsureSnapshotTable(ctx, dbConn, tableName); err != nil {
			t.Fatalf("failed to ensure snapshot table %s: %v", tableName, err)
		}
	}
	for _, row := range rows {
		tableName, err := hourlyInventoryTableName(time.Unix(row.SnapshotTime, 0).UTC())
		if err != nil {
			t.Fatalf("failed to build per-row hourly table name: %v", err)
		}
		if err := insertHourlySnapshotSeedRow(ctx, dbConn, tableName, row); err != nil {
			t.Fatalf("failed to insert snapshot row for table %s: %v", tableName, err)
		}
	}
	// Two output tables: one per aggregation path.
	oldSummaryTable, err := db.SafeTableName("test_daily_sql_union_summary")
	if err != nil {
		t.Fatalf("failed to build old summary table name: %v", err)
	}
	newSummaryTable, err := db.SafeTableName("test_daily_go_cache_summary")
	if err != nil {
		t.Fatalf("failed to build new summary table name: %v", err)
	}
	if err := db.EnsureSummaryTable(ctx, dbConn, oldSummaryTable); err != nil {
		t.Fatalf("failed to ensure old summary table: %v", err)
	}
	if err := db.EnsureSummaryTable(ctx, dbConn, newSummaryTable); err != nil {
		t.Fatalf("failed to ensure new summary table: %v", err)
	}
	// Legacy SQL path: UNION the snapshot tables, then INSERT...SELECT.
	unionQuery, err := buildUnionQuery(hourlyTables, summaryUnionColumns, templateExclusionFilter())
	if err != nil {
		t.Fatalf("failed to build union query: %v", err)
	}
	insertSQL, err := db.BuildDailySummaryInsert(oldSummaryTable, unionQuery)
	if err != nil {
		t.Fatalf("failed to build daily sql insert: %v", err)
	}
	if _, err := dbConn.ExecContext(ctx, insertSQL); err != nil {
		t.Fatalf("failed to execute daily sql insert: %v", err)
	}
	// Go path: scan the canonical cache and insert aggregates.
	aggMap, snapTimes, err := task.scanHourlyCache(ctx, dayStart, dayEnd)
	if err != nil {
		t.Fatalf("scanHourlyCache failed: %v", err)
	}
	totalSamplesByVcenter := sampleCountsByVcenter(aggMap)
	if err := task.insertDailyAggregates(ctx, newSummaryTable, aggMap, len(snapTimes), totalSamplesByVcenter); err != nil {
		t.Fatalf("insertDailyAggregates failed: %v", err)
	}
	// Full-table parity between the two paths.
	oldRows, err := loadDailySummaryRows(ctx, dbConn, oldSummaryTable)
	if err != nil {
		t.Fatalf("failed to load old daily rows: %v", err)
	}
	newRows, err := loadDailySummaryRows(ctx, dbConn, newSummaryTable)
	if err != nil {
		t.Fatalf("failed to load new daily rows: %v", err)
	}
	assertDailySummaryParity(t, oldRows, newRows)
	// Spot-checks on derived values for alpha (present 2 of 3 snapshots, pool
	// split 50/50 Tin/Gold, latest CreationTime/ResourcePool/SnapshotTime win).
	byKey := mapRowsByKeyDaily(newRows)
	alpha := byKey["vc-a|vm-1|uuid-1|vm-alpha"]
	if !approxEqual(alpha.AvgIsPresent, 2.0/3.0, 1e-9) {
		t.Fatalf("unexpected alpha AvgIsPresent: got %.12f want %.12f", alpha.AvgIsPresent, 2.0/3.0)
	}
	if alpha.CreationTime != dayStart.Add(30*time.Minute).Unix() {
		t.Fatalf("unexpected alpha CreationTime: got %d want %d", alpha.CreationTime, dayStart.Add(30*time.Minute).Unix())
	}
	if alpha.ResourcePool != "Gold" {
		t.Fatalf("unexpected alpha ResourcePool: got %q want %q", alpha.ResourcePool, "Gold")
	}
	if alpha.SnapshotTime != t3 {
		t.Fatalf("unexpected alpha SnapshotTime: got %d want %d", alpha.SnapshotTime, t3)
	}
	if !approxEqual(alpha.PoolTinPct, 50.0, 1e-9) || !approxEqual(alpha.PoolGoldPct, 50.0, 1e-9) {
		t.Fatalf("unexpected alpha pool mix: tin=%.6f gold=%.6f", alpha.PoolTinPct, alpha.PoolGoldPct)
	}
	// Spot-checks for bravo (deleted mid-day, present 1 of 3 snapshots).
	bravo := byKey["vc-a|vm-2|uuid-2|vm-bravo"]
	if bravo.DeletionTime != dayStart.Add(4*time.Hour).Unix() {
		t.Fatalf("unexpected bravo DeletionTime: got %d want %d", bravo.DeletionTime, dayStart.Add(4*time.Hour).Unix())
	}
	if !approxEqual(bravo.AvgIsPresent, 1.0/3.0, 1e-9) {
		t.Fatalf("unexpected bravo AvgIsPresent: got %.12f want %.12f", bravo.AvgIsPresent, 1.0/3.0)
	}
}
// TestMonthlyGoldenParity_SQLDailyUnionVsGoDailyRollup seeds the same two days
// of data into both per-day summary tables (legacy SQL-union input) and the
// canonical vm_daily_rollup cache (Go input), runs both monthly aggregation
// paths, and asserts the resulting monthly summary tables agree row-for-row,
// with spot-checks on the averaged/pool-mix values.
func TestMonthlyGoldenParity_SQLDailyUnionVsGoDailyRollup(t *testing.T) {
	ctx := context.Background()
	dbConn := newTasksTestDB(t)
	task := newTasksTestCronTask(dbConn)
	if err := db.EnsureVmDailyRollup(ctx, dbConn); err != nil {
		t.Fatalf("failed to ensure vm_daily_rollup: %v", err)
	}
	// Two days of data inside one UTC month.
	monthStart := time.Date(2026, time.February, 1, 0, 0, 0, 0, time.UTC)
	monthEnd := monthStart.AddDate(0, 1, 0)
	day1 := time.Date(2026, time.February, 3, 0, 0, 0, 0, time.UTC)
	day2 := day1.AddDate(0, 0, 1)
	day1Table, err := dailySummaryTableName(day1)
	if err != nil {
		t.Fatalf("failed to build day1 table name: %v", err)
	}
	day2Table, err := dailySummaryTableName(day2)
	if err != nil {
		t.Fatalf("failed to build day2 table name: %v", err)
	}
	for _, table := range []string{day1Table, day2Table} {
		if err := db.EnsureSummaryTable(ctx, dbConn, table); err != nil {
			t.Fatalf("failed to ensure daily summary table %s: %v", table, err)
		}
	}
	// One VM seen on both days, with different pool and vCPU count per day so
	// monthly averaging and pool-mix percentages are exercised.
	seeds := []dailySeedRow{
		{
			SnapshotTime: day1.Unix(), Name: "vm-alpha", Vcenter: "vc-a", VmID: "vm-1", VmUUID: "uuid-1",
			ResourcePool: "Bronze", Datacenter: "dc-1", Cluster: "cluster-1", Folder: "/prod",
			ProvisionedDisk: 100, VcpuCount: 4, RamGB: 8, CreationTime: monthStart.Add(-24 * time.Hour).Unix(), IsTemplate: "FALSE", PoweredOn: "TRUE", SrmPlaceholder: "FALSE",
			SamplesPresent: 2, AvgVcpuCount: 3, AvgRamGB: 6, AvgProvisionedDisk: 90, AvgIsPresent: 1.0,
			PoolBronzePct: 100, Bronze: 100,
			TotalSamples: 2, SumVcpu: 6, SumRam: 12, SumDisk: 180, BronzeHits: 2,
		},
		{
			SnapshotTime: day2.Unix(), Name: "vm-alpha", Vcenter: "vc-a", VmID: "vm-1", VmUUID: "uuid-1",
			ResourcePool: "Tin", Datacenter: "dc-1", Cluster: "cluster-1", Folder: "/prod",
			ProvisionedDisk: 110, VcpuCount: 2, RamGB: 8, CreationTime: monthStart.Add(-24 * time.Hour).Unix(), IsTemplate: "FALSE", PoweredOn: "TRUE", SrmPlaceholder: "FALSE",
			SamplesPresent: 2, AvgVcpuCount: 2, AvgRamGB: 8, AvgProvisionedDisk: 110, AvgIsPresent: 1.0,
			PoolTinPct: 100, Tin: 100,
			TotalSamples: 2, SumVcpu: 4, SumRam: 16, SumDisk: 220, TinHits: 2,
		},
	}
	// Seed both inputs with identical data.
	for _, seed := range seeds {
		targetTable := day1Table
		if seed.SnapshotTime == day2.Unix() {
			targetTable = day2Table
		}
		if err := insertDailySummarySeedRow(ctx, dbConn, targetTable, seed); err != nil {
			t.Fatalf("failed to insert daily summary seed row: %v", err)
		}
		if err := insertDailyRollupSeedRow(ctx, dbConn, seed); err != nil {
			t.Fatalf("failed to insert daily rollup seed row: %v", err)
		}
	}
	// Two output tables: one per aggregation path.
	oldMonthlyTable, err := db.SafeTableName("test_monthly_sql_union_summary")
	if err != nil {
		t.Fatalf("failed to build old monthly table name: %v", err)
	}
	newMonthlyTable, err := db.SafeTableName("test_monthly_go_rollup_summary")
	if err != nil {
		t.Fatalf("failed to build new monthly table name: %v", err)
	}
	if err := db.EnsureSummaryTable(ctx, dbConn, oldMonthlyTable); err != nil {
		t.Fatalf("failed to ensure old monthly table: %v", err)
	}
	if err := db.EnsureSummaryTable(ctx, dbConn, newMonthlyTable); err != nil {
		t.Fatalf("failed to ensure new monthly table: %v", err)
	}
	// Legacy SQL path: UNION the daily summary tables, then INSERT...SELECT.
	unionQuery, err := buildUnionQuery([]string{day1Table, day2Table}, monthlyUnionColumns, templateExclusionFilter())
	if err != nil {
		t.Fatalf("failed to build monthly union query: %v", err)
	}
	insertSQL, err := db.BuildMonthlySummaryInsert(oldMonthlyTable, unionQuery)
	if err != nil {
		t.Fatalf("failed to build monthly sql insert: %v", err)
	}
	if _, err := dbConn.ExecContext(ctx, insertSQL); err != nil {
		t.Fatalf("failed to execute monthly sql insert: %v", err)
	}
	// Go path: scan the canonical rollup and insert monthly aggregates.
	aggMap, err := task.scanDailyRollup(ctx, monthStart, monthEnd)
	if err != nil {
		t.Fatalf("scanDailyRollup failed: %v", err)
	}
	if err := task.insertMonthlyAggregates(ctx, newMonthlyTable, aggMap); err != nil {
		t.Fatalf("insertMonthlyAggregates failed: %v", err)
	}
	// Full-table parity between the two paths.
	oldRows, err := loadMonthlySummaryRows(ctx, dbConn, oldMonthlyTable)
	if err != nil {
		t.Fatalf("failed to load old monthly rows: %v", err)
	}
	newRows, err := loadMonthlySummaryRows(ctx, dbConn, newMonthlyTable)
	if err != nil {
		t.Fatalf("failed to load new monthly rows: %v", err)
	}
	assertMonthlySummaryParity(t, oldRows, newRows)
	// Spot-checks: vCPU averaged across days, latest pool wins, 50/50 pool mix.
	byKey := mapRowsByKeyMonthly(newRows)
	alpha := byKey["vc-a|vm-1|uuid-1|vm-alpha"]
	if !approxEqual(alpha.AvgVcpuCount, 2.5, 1e-9) {
		t.Fatalf("unexpected alpha AvgVcpuCount: got %.6f want %.6f", alpha.AvgVcpuCount, 2.5)
	}
	if !approxEqual(alpha.AvgIsPresent, 1.0, 1e-9) {
		t.Fatalf("unexpected alpha AvgIsPresent: got %.6f want %.6f", alpha.AvgIsPresent, 1.0)
	}
	if alpha.ResourcePool != "Tin" {
		t.Fatalf("unexpected alpha ResourcePool: got %q want %q", alpha.ResourcePool, "Tin")
	}
	if !approxEqual(alpha.PoolTinPct, 50.0, 1e-9) || !approxEqual(alpha.PoolBronzePct, 50.0, 1e-9) {
		t.Fatalf("unexpected alpha monthly pool mix: tin=%.6f bronze=%.6f", alpha.PoolTinPct, alpha.PoolBronzePct)
	}
}
// newTasksTestCronTask builds a CronTask wired to the given test database,
// with all log output discarded.
func newTasksTestCronTask(dbConn *sqlx.DB) *CronTask {
	discard := slog.New(slog.NewTextHandler(io.Discard, nil))
	task := &CronTask{Logger: discard}
	task.Database = &tasksTestDatabase{dbConn: dbConn, logger: discard}
	return task
}
// insertHourlyCacheSeedRow seeds one row into the shared vm_hourly_stats
// cache table. The `?` placeholders are used as-is (the test driver accepts
// them directly), and the args below must stay 1:1 with the 17 quoted columns.
func insertHourlyCacheSeedRow(ctx context.Context, dbConn *sqlx.DB, row hourlySeedRow) error {
_, err := dbConn.ExecContext(ctx, `
INSERT INTO vm_hourly_stats (
"SnapshotTime","Vcenter","VmId","VmUuid","Name","CreationTime","DeletionTime","ResourcePool",
"Datacenter","Cluster","Folder","ProvisionedDisk","VcpuCount","RamGB","IsTemplate","PoweredOn","SrmPlaceholder"
) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
`,
row.SnapshotTime, row.Vcenter, row.VmID, row.VmUUID, row.Name, row.CreationTime, row.DeletionTime, row.ResourcePool,
row.Datacenter, row.Cluster, row.Folder, row.ProvisionedDisk, row.VcpuCount, row.RamGB, row.IsTemplate, row.PoweredOn, row.SrmPlaceholder,
)
return err
}
// insertHourlySnapshotSeedRow seeds one row into the named hourly snapshot
// table; EventKey and CloudId are inserted as NULL.
func insertHourlySnapshotSeedRow(ctx context.Context, dbConn *sqlx.DB, table string, row hourlySeedRow) error {
	stmt := fmt.Sprintf(`
INSERT INTO %s (
"Name","Vcenter","VmId","VmUuid","EventKey","CloudId","CreationTime","DeletionTime","ResourcePool",
"Datacenter","Cluster","Folder","ProvisionedDisk","VcpuCount","RamGB","IsTemplate","PoweredOn","SrmPlaceholder","SnapshotTime"
) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
`, table)
	args := []any{
		row.Name, row.Vcenter, row.VmID, row.VmUUID, nil, nil, row.CreationTime, row.DeletionTime, row.ResourcePool,
		row.Datacenter, row.Cluster, row.Folder, row.ProvisionedDisk, row.VcpuCount, row.RamGB, row.IsTemplate, row.PoweredOn, row.SrmPlaceholder, row.SnapshotTime,
	}
	_, err := dbConn.ExecContext(ctx, stmt, args...)
	return err
}
// insertDailySummarySeedRow seeds one row into the named daily summary table.
// EventKey and CloudId are inserted as NULL; the remaining args must stay
// aligned 1:1 with the 32 quoted columns below.
func insertDailySummarySeedRow(ctx context.Context, dbConn *sqlx.DB, table string, row dailySeedRow) error {
sql := fmt.Sprintf(`
INSERT INTO %s (
"Name","Vcenter","VmId","VmUuid","EventKey","CloudId","CreationTime","DeletionTime","ResourcePool",
"Datacenter","Cluster","Folder","ProvisionedDisk","VcpuCount","RamGB","IsTemplate","PoweredOn","SrmPlaceholder",
"SnapshotTime","SamplesPresent","AvgVcpuCount","AvgRamGB","AvgProvisionedDisk","AvgIsPresent",
"PoolTinPct","PoolBronzePct","PoolSilverPct","PoolGoldPct","Tin","Bronze","Silver","Gold"
) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
`, table)
_, err := dbConn.ExecContext(ctx, sql,
row.Name, row.Vcenter, row.VmID, row.VmUUID, nil, nil, row.CreationTime, row.DeletionTime, row.ResourcePool,
row.Datacenter, row.Cluster, row.Folder, row.ProvisionedDisk, row.VcpuCount, row.RamGB, row.IsTemplate, row.PoweredOn, row.SrmPlaceholder,
row.SnapshotTime, row.SamplesPresent, row.AvgVcpuCount, row.AvgRamGB, row.AvgProvisionedDisk, row.AvgIsPresent,
row.PoolTinPct, row.PoolBronzePct, row.PoolSilverPct, row.PoolGoldPct, row.Tin, row.Bronze, row.Silver, row.Gold,
)
return err
}
// insertDailyRollupSeedRow seeds one row into vm_daily_rollup.
// Note the field-to-column remapping: row.SnapshotTime feeds "Date", and the
// row's ResourcePool/Datacenter/Cluster/Folder/ProvisionedDisk/VcpuCount/RamGB
// feed the "Last*"-prefixed columns.
func insertDailyRollupSeedRow(ctx context.Context, dbConn *sqlx.DB, row dailySeedRow) error {
_, err := dbConn.ExecContext(ctx, `
INSERT INTO vm_daily_rollup (
"Date","Vcenter","VmId","VmUuid","Name","CreationTime","DeletionTime","SamplesPresent","TotalSamples",
"SumVcpu","SumRam","SumDisk","TinHits","BronzeHits","SilverHits","GoldHits",
"LastResourcePool","LastDatacenter","LastCluster","LastFolder","LastProvisionedDisk","LastVcpuCount","LastRamGB","IsTemplate","PoweredOn","SrmPlaceholder"
) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
`,
row.SnapshotTime, row.Vcenter, row.VmID, row.VmUUID, row.Name, row.CreationTime, row.DeletionTime, row.SamplesPresent, row.TotalSamples,
row.SumVcpu, row.SumRam, row.SumDisk, row.TinHits, row.BronzeHits, row.SilverHits, row.GoldHits,
row.ResourcePool, row.Datacenter, row.Cluster, row.Folder, row.ProvisionedDisk, row.VcpuCount, row.RamGB, row.IsTemplate, row.PoweredOn, row.SrmPlaceholder,
)
return err
}
// loadDailySummaryRows reads every row of the named daily summary table in a
// deterministic order, coalescing NULLs to zero values for comparison.
func loadDailySummaryRows(ctx context.Context, dbConn *sqlx.DB, table string) ([]dailySummaryRow, error) {
	query := fmt.Sprintf(`
SELECT
COALESCE("Name",'') AS "Name",
COALESCE("Vcenter",'') AS "Vcenter",
COALESCE("VmId",'') AS "VmId",
COALESCE("VmUuid",'') AS "VmUuid",
COALESCE("ResourcePool",'') AS "ResourcePool",
COALESCE("CreationTime",0) AS "CreationTime",
COALESCE("DeletionTime",0) AS "DeletionTime",
COALESCE("SnapshotTime",0) AS "SnapshotTime",
COALESCE("SamplesPresent",0) AS "SamplesPresent",
COALESCE("AvgVcpuCount",0) AS "AvgVcpuCount",
COALESCE("AvgRamGB",0) AS "AvgRamGB",
COALESCE("AvgProvisionedDisk",0) AS "AvgProvisionedDisk",
COALESCE("AvgIsPresent",0) AS "AvgIsPresent",
COALESCE("PoolTinPct",0) AS "PoolTinPct",
COALESCE("PoolBronzePct",0) AS "PoolBronzePct",
COALESCE("PoolSilverPct",0) AS "PoolSilverPct",
COALESCE("PoolGoldPct",0) AS "PoolGoldPct"
FROM %s
ORDER BY "Vcenter", "VmId", "VmUuid", "Name"
`, table)
	var rows []dailySummaryRow
	err := dbConn.SelectContext(ctx, &rows, query)
	return rows, err
}
// loadMonthlySummaryRows reads every row of the named monthly summary table
// in a deterministic order, coalescing NULLs to zero values for comparison.
func loadMonthlySummaryRows(ctx context.Context, dbConn *sqlx.DB, table string) ([]monthlySummaryRow, error) {
	query := fmt.Sprintf(`
SELECT
COALESCE("Name",'') AS "Name",
COALESCE("Vcenter",'') AS "Vcenter",
COALESCE("VmId",'') AS "VmId",
COALESCE("VmUuid",'') AS "VmUuid",
COALESCE("ResourcePool",'') AS "ResourcePool",
COALESCE("CreationTime",0) AS "CreationTime",
COALESCE("DeletionTime",0) AS "DeletionTime",
COALESCE("SamplesPresent",0) AS "SamplesPresent",
COALESCE("AvgVcpuCount",0) AS "AvgVcpuCount",
COALESCE("AvgRamGB",0) AS "AvgRamGB",
COALESCE("AvgProvisionedDisk",0) AS "AvgProvisionedDisk",
COALESCE("AvgIsPresent",0) AS "AvgIsPresent",
COALESCE("PoolTinPct",0) AS "PoolTinPct",
COALESCE("PoolBronzePct",0) AS "PoolBronzePct",
COALESCE("PoolSilverPct",0) AS "PoolSilverPct",
COALESCE("PoolGoldPct",0) AS "PoolGoldPct"
FROM %s
ORDER BY "Vcenter", "VmId", "VmUuid", "Name"
`, table)
	var rows []monthlySummaryRow
	err := dbConn.SelectContext(ctx, &rows, query)
	return rows, err
}
// mapRowsByKeyDaily indexes daily rows by their composite identity key.
func mapRowsByKeyDaily(rows []dailySummaryRow) map[string]dailySummaryRow {
	byKey := make(map[string]dailySummaryRow, len(rows))
	for i := range rows {
		byKey[dailyRowKey(rows[i])] = rows[i]
	}
	return byKey
}
// mapRowsByKeyMonthly indexes monthly rows by their composite identity key.
func mapRowsByKeyMonthly(rows []monthlySummaryRow) map[string]monthlySummaryRow {
	byKey := make(map[string]monthlySummaryRow, len(rows))
	for i := range rows {
		byKey[monthlyRowKey(rows[i])] = rows[i]
	}
	return byKey
}
// dailyRowKey builds the pipe-separated identity key for a daily row.
func dailyRowKey(r dailySummaryRow) string {
	return r.Vcenter + "|" + r.VmId + "|" + r.VmUuid + "|" + r.Name
}
// monthlyRowKey builds the pipe-separated identity key for a monthly row.
func monthlyRowKey(r monthlySummaryRow) string {
	return r.Vcenter + "|" + r.VmId + "|" + r.VmUuid + "|" + r.Name
}
// assertDailySummaryParity fails the test unless oldRows and newRows agree
// row-for-row, keyed by vcenter|vmid|uuid|name: exact equality for scalar
// columns, epsilon comparison for the float aggregates.
func assertDailySummaryParity(t *testing.T, oldRows, newRows []dailySummaryRow) {
t.Helper()
if len(oldRows) != len(newRows) {
t.Fatalf("daily row count mismatch: old=%d new=%d", len(oldRows), len(newRows))
}
oldByKey := mapRowsByKeyDaily(oldRows)
newByKey := mapRowsByKeyDaily(newRows)
// Equal lengths plus every old key present implies the key sets match.
for key, oldRow := range oldByKey {
newRow, ok := newByKey[key]
if !ok {
t.Fatalf("missing key in new daily output: %s", key)
}
if oldRow.ResourcePool != newRow.ResourcePool ||
oldRow.CreationTime != newRow.CreationTime ||
oldRow.DeletionTime != newRow.DeletionTime ||
oldRow.SnapshotTime != newRow.SnapshotTime ||
oldRow.SamplesPresent != newRow.SamplesPresent {
t.Fatalf("daily scalar mismatch key=%s old=%+v new=%+v", key, oldRow, newRow)
}
assertFloatClose(t, "AvgVcpuCount", key, oldRow.AvgVcpuCount, newRow.AvgVcpuCount, 1e-9)
assertFloatClose(t, "AvgRamGB", key, oldRow.AvgRamGB, newRow.AvgRamGB, 1e-9)
assertFloatClose(t, "AvgProvisionedDisk", key, oldRow.AvgProvisionedDisk, newRow.AvgProvisionedDisk, 1e-9)
assertFloatClose(t, "AvgIsPresent", key, oldRow.AvgIsPresent, newRow.AvgIsPresent, 1e-9)
assertFloatClose(t, "PoolTinPct", key, oldRow.PoolTinPct, newRow.PoolTinPct, 1e-9)
assertFloatClose(t, "PoolBronzePct", key, oldRow.PoolBronzePct, newRow.PoolBronzePct, 1e-9)
assertFloatClose(t, "PoolSilverPct", key, oldRow.PoolSilverPct, newRow.PoolSilverPct, 1e-9)
assertFloatClose(t, "PoolGoldPct", key, oldRow.PoolGoldPct, newRow.PoolGoldPct, 1e-9)
}
}
// assertMonthlySummaryParity fails the test unless oldRows and newRows agree
// row-for-row, keyed by vcenter|vmid|uuid|name. Unlike the daily variant it
// does not compare SnapshotTime (monthly rows are not snapshot-anchored).
func assertMonthlySummaryParity(t *testing.T, oldRows, newRows []monthlySummaryRow) {
t.Helper()
if len(oldRows) != len(newRows) {
t.Fatalf("monthly row count mismatch: old=%d new=%d", len(oldRows), len(newRows))
}
oldByKey := mapRowsByKeyMonthly(oldRows)
newByKey := mapRowsByKeyMonthly(newRows)
// Equal lengths plus every old key present implies the key sets match.
for key, oldRow := range oldByKey {
newRow, ok := newByKey[key]
if !ok {
t.Fatalf("missing key in new monthly output: %s", key)
}
if oldRow.ResourcePool != newRow.ResourcePool ||
oldRow.CreationTime != newRow.CreationTime ||
oldRow.DeletionTime != newRow.DeletionTime ||
oldRow.SamplesPresent != newRow.SamplesPresent {
t.Fatalf("monthly scalar mismatch key=%s old=%+v new=%+v", key, oldRow, newRow)
}
assertFloatClose(t, "AvgVcpuCount", key, oldRow.AvgVcpuCount, newRow.AvgVcpuCount, 1e-9)
assertFloatClose(t, "AvgRamGB", key, oldRow.AvgRamGB, newRow.AvgRamGB, 1e-9)
assertFloatClose(t, "AvgProvisionedDisk", key, oldRow.AvgProvisionedDisk, newRow.AvgProvisionedDisk, 1e-9)
assertFloatClose(t, "AvgIsPresent", key, oldRow.AvgIsPresent, newRow.AvgIsPresent, 1e-9)
assertFloatClose(t, "PoolTinPct", key, oldRow.PoolTinPct, newRow.PoolTinPct, 1e-9)
assertFloatClose(t, "PoolBronzePct", key, oldRow.PoolBronzePct, newRow.PoolBronzePct, 1e-9)
assertFloatClose(t, "PoolSilverPct", key, oldRow.PoolSilverPct, newRow.PoolSilverPct, 1e-9)
assertFloatClose(t, "PoolGoldPct", key, oldRow.PoolGoldPct, newRow.PoolGoldPct, 1e-9)
}
}
// assertFloatClose fails the test when oldVal and newVal differ by more than eps.
func assertFloatClose(t *testing.T, field, key string, oldVal, newVal, eps float64) {
	t.Helper()
	if approxEqual(oldVal, newVal, eps) {
		return
	}
	t.Fatalf("%s mismatch key=%s old=%.12f new=%.12f", field, key, oldVal, newVal)
}
// approxEqual reports whether a and b differ by at most eps.
func approxEqual(a, b, eps float64) bool {
	delta := math.Abs(a - b)
	return delta <= eps
}
+39
View File
@@ -0,0 +1,39 @@
package tasks
import (
"strings"
"testing"
"time"
)
// TestBuildCanonicalHourlySummaryUnionCastsInventoryIDToBigInt verifies the
// canonical hourly union query casts its NULL identity columns to explicit
// SQL types so the UNION branches agree on column types.
func TestBuildCanonicalHourlySummaryUnionCastsInventoryIDToBigInt(t *testing.T) {
	start := time.Unix(1700000000, 0).UTC()
	query := buildCanonicalHourlySummaryUnion(start, start.Add(24*time.Hour))
	checks := []struct {
		fragment string
		message  string
	}{
		{`CAST(NULL AS BIGINT) AS "InventoryId"`, "expected InventoryId cast to BIGINT in canonical hourly union query"},
		{`CAST(NULL AS TEXT) AS "EventKey"`, "expected EventKey cast to TEXT in canonical hourly union query"},
		{`CAST(NULL AS TEXT) AS "CloudId"`, "expected CloudId cast to TEXT in canonical hourly union query"},
	}
	for _, c := range checks {
		if !strings.Contains(query, c.fragment) {
			t.Fatal(c.message)
		}
	}
}
// TestBuildCanonicalDailyRollupSummaryUnionCastsInventoryIDToBigInt verifies
// the canonical daily rollup union query casts its NULL identity columns to
// explicit SQL types so the UNION branches agree on column types.
func TestBuildCanonicalDailyRollupSummaryUnionCastsInventoryIDToBigInt(t *testing.T) {
	start := time.Unix(1700000000, 0).UTC()
	query := buildCanonicalDailyRollupSummaryUnion(start, start.AddDate(0, 1, 0))
	checks := []struct {
		fragment string
		message  string
	}{
		{`CAST(NULL AS BIGINT) AS "InventoryId"`, "expected InventoryId cast to BIGINT in canonical daily rollup union query"},
		{`CAST(NULL AS TEXT) AS "EventKey"`, "expected EventKey cast to TEXT in canonical daily rollup union query"},
		{`CAST(NULL AS TEXT) AS "CloudId"`, "expected CloudId cast to TEXT in canonical daily rollup union query"},
	}
	for _, c := range checks {
		if !strings.Contains(query, c.fragment) {
			t.Fatal(c.message)
		}
	}
}
+35 -7
View File
@@ -2,18 +2,13 @@ package tasks
import (
"context"
"strings"
"time"
"vctp/db"
"github.com/jmoiron/sqlx"
)
// CronTracker manages re-entry protection and status recording for cron jobs.
type CronTracker struct {
db db.Database // backing store for the cron_status table
bindType int // sqlx bind type for the driver; NOTE(review): not set or read in the visible code — confirm usage
}
func NewCronTracker(database db.Database) *CronTracker {
return &CronTracker{
db: database,
@@ -30,6 +25,39 @@ func (c *CronTracker) ClearAllInProgress(ctx context.Context) error {
return err
}
// ClearStale resets in_progress for a specific job if it has been running longer than maxAge.
func (c *CronTracker) ClearStale(ctx context.Context, job string, maxAge time.Duration) error {
if err := c.ensureTable(ctx); err != nil {
return err
}
driver := strings.ToLower(c.db.DB().DriverName())
var query string
switch driver {
case "sqlite":
// strftime('%s','now') yields the current unix epoch in sqlite.
query = `
UPDATE cron_status
SET in_progress = FALSE
WHERE job_name = ?
AND in_progress = TRUE
AND started_at > 0
AND (strftime('%s','now') - started_at) > ?
`
case "pgx", "postgres":
// EXTRACT(EPOCH FROM now()) is the postgres equivalent; cast to BIGINT
// so the subtraction against started_at stays integral.
query = `
UPDATE cron_status
SET in_progress = FALSE
WHERE job_name = $1
AND in_progress = TRUE
AND started_at > 0
AND (EXTRACT(EPOCH FROM now())::BIGINT - started_at) > $2
`
default:
// Unknown drivers are a deliberate no-op: stale cleanup is best-effort.
return nil
}
_, err := c.db.DB().ExecContext(ctx, query, job, int64(maxAge.Seconds()))
return err
}
func (c *CronTracker) ensureTable(ctx context.Context) error {
conn := c.db.DB()
driver := conn.DriverName()
@@ -152,7 +180,7 @@ WHERE job_name = ?
return err
}
func nullableString(s string) interface{} {
func nullableString(s string) any {
if s == "" {
return nil
}
File diff suppressed because it is too large Load Diff
+381
View File
@@ -0,0 +1,381 @@
package tasks
import (
"context"
"fmt"
"strconv"
"strings"
"vctp/db"
"github.com/jmoiron/sqlx"
)
// insertHourlyCache upserts snapshot rows into the shared vm_hourly_stats
// cache table, keyed on ("Vcenter","VmId","SnapshotTime").
//
// Postgres takes a dedicated multi-row path after ensuring the partition for
// the batch's snapshot time exists. sqlite uses INSERT OR REPLACE; any other
// driver falls back to a generic ON CONFLICT upsert executed one prepared
// statement at a time inside a single transaction.
//
// Fix: the postgres branch previously re-checked len(rows) > 0 even though
// the guard at the top of the function already returns on an empty batch;
// the redundant check is removed.
func insertHourlyCache(ctx context.Context, dbConn *sqlx.DB, rows []InventorySnapshotRow) error {
	if len(rows) == 0 {
		return nil
	}
	if err := db.EnsureVmHourlyStats(ctx, dbConn); err != nil {
		return err
	}
	driver := strings.ToLower(dbConn.DriverName())
	if isPostgresDriver(driver) {
		// rows is guaranteed non-empty here, so rows[0] is safe to use for
		// routing the batch to its partition.
		if err := db.EnsureVmHourlyStatsPartitionForSnapshot(ctx, dbConn, rows[0].SnapshotTime); err != nil {
			return err
		}
		return insertHourlyCachePostgresMultiRow(ctx, dbConn, rows)
	}
	conflict := ""
	verb := "INSERT INTO"
	if driver == "sqlite" {
		verb = "INSERT OR REPLACE INTO"
	} else {
		conflict = ` ON CONFLICT ("Vcenter","VmId","SnapshotTime") DO UPDATE SET
"VmUuid"=EXCLUDED."VmUuid",
"Name"=EXCLUDED."Name",
"CreationTime"=EXCLUDED."CreationTime",
"DeletionTime"=EXCLUDED."DeletionTime",
"ResourcePool"=EXCLUDED."ResourcePool",
"Datacenter"=EXCLUDED."Datacenter",
"Cluster"=EXCLUDED."Cluster",
"Folder"=EXCLUDED."Folder",
"ProvisionedDisk"=EXCLUDED."ProvisionedDisk",
"VcpuCount"=EXCLUDED."VcpuCount",
"RamGB"=EXCLUDED."RamGB",
"IsTemplate"=EXCLUDED."IsTemplate",
"PoweredOn"=EXCLUDED."PoweredOn",
"SrmPlaceholder"=EXCLUDED."SrmPlaceholder"`
	}
	cols := []string{
		"SnapshotTime", "Vcenter", "VmId", "VmUuid", "Name", "CreationTime", "DeletionTime", "ResourcePool",
		"Datacenter", "Cluster", "Folder", "ProvisionedDisk", "VcpuCount", "RamGB", "IsTemplate", "PoweredOn", "SrmPlaceholder",
	}
	bind := sqlx.BindType(dbConn.DriverName())
	placeholders := strings.TrimRight(strings.Repeat("?, ", len(cols)), ", ")
	stmtText := fmt.Sprintf(`%s vm_hourly_stats ("%s") VALUES (%s)%s`, verb, strings.Join(cols, `","`), placeholders, conflict)
	stmtText = sqlx.Rebind(bind, stmtText)
	tx, err := dbConn.BeginTxx(ctx, nil)
	if err != nil {
		return err
	}
	stmt, err := tx.PreparexContext(ctx, stmtText)
	if err != nil {
		tx.Rollback()
		return err
	}
	defer stmt.Close()
	for _, r := range rows {
		// Argument order must match cols exactly.
		args := []any{
			r.SnapshotTime, r.Vcenter, r.VmId, r.VmUuid, r.Name, r.CreationTime, r.DeletionTime, r.ResourcePool,
			r.Datacenter, r.Cluster, r.Folder, r.ProvisionedDisk, r.VcpuCount, r.RamGB, r.IsTemplate, r.PoweredOn, r.SrmPlaceholder,
		}
		if _, err := stmt.ExecContext(ctx, args...); err != nil {
			tx.Rollback()
			return err
		}
	}
	return tx.Commit()
}
// insertHourlyCachePostgresMultiRow upserts rows into vm_hourly_stats using
// multi-row VALUES statements, chunked so each statement stays under the
// postgres bind-parameter limit. All chunks run in one transaction.
func insertHourlyCachePostgresMultiRow(ctx context.Context, dbConn *sqlx.DB, rows []InventorySnapshotRow) error {
cols := []string{
"SnapshotTime", "Vcenter", "VmId", "VmUuid", "Name", "CreationTime", "DeletionTime", "ResourcePool",
"Datacenter", "Cluster", "Folder", "ProvisionedDisk", "VcpuCount", "RamGB", "IsTemplate", "PoweredOn", "SrmPlaceholder",
}
conflict := ` ON CONFLICT ("Vcenter","VmId","SnapshotTime") DO UPDATE SET
"VmUuid"=EXCLUDED."VmUuid",
"Name"=EXCLUDED."Name",
"CreationTime"=EXCLUDED."CreationTime",
"DeletionTime"=EXCLUDED."DeletionTime",
"ResourcePool"=EXCLUDED."ResourcePool",
"Datacenter"=EXCLUDED."Datacenter",
"Cluster"=EXCLUDED."Cluster",
"Folder"=EXCLUDED."Folder",
"ProvisionedDisk"=EXCLUDED."ProvisionedDisk",
"VcpuCount"=EXCLUDED."VcpuCount",
"RamGB"=EXCLUDED."RamGB",
"IsTemplate"=EXCLUDED."IsTemplate",
"PoweredOn"=EXCLUDED."PoweredOn",
"SrmPlaceholder"=EXCLUDED."SrmPlaceholder"`
tx, err := dbConn.BeginTxx(ctx, nil)
if err != nil {
return err
}
maxRows := postgresMaxRowsPerStatement(len(cols))
for start := 0; start < len(rows); start += maxRows {
end := min(start+maxRows, len(rows))
chunk := rows[start:end]
// Flatten the chunk; argument order must match cols exactly.
args := make([]any, 0, len(chunk)*len(cols))
for _, row := range chunk {
args = append(args,
row.SnapshotTime, row.Vcenter, row.VmId, row.VmUuid, row.Name, row.CreationTime, row.DeletionTime, row.ResourcePool,
row.Datacenter, row.Cluster, row.Folder, row.ProvisionedDisk, row.VcpuCount, row.RamGB, row.IsTemplate, row.PoweredOn, row.SrmPlaceholder,
)
}
stmt := buildPostgresMultiRowInsertSQL("vm_hourly_stats", cols, len(chunk), conflict)
if _, err := tx.ExecContext(ctx, stmt, args...); err != nil {
tx.Rollback()
return err
}
}
return tx.Commit()
}
// insertHourlyBatch inserts snapshot rows into the named hourly snapshot
// table inside a single transaction. Postgres is delegated to the multi-row
// path; other drivers prepare a single-row INSERT and execute it per row.
// If preparing against the modern column set fails, the function retries with
// the legacy "IsPresent" column appended (older tables still carry it),
// supplying the string "TRUE" for that column on every row.
func insertHourlyBatch(ctx context.Context, dbConn *sqlx.DB, tableName string, rows []InventorySnapshotRow) error {
if len(rows) == 0 {
return nil
}
if _, err := db.SafeTableName(tableName); err != nil {
return err
}
driver := strings.ToLower(dbConn.DriverName())
if isPostgresDriver(driver) {
return insertHourlyBatchPostgresMultiRow(ctx, dbConn, tableName, rows)
}
tx, err := dbConn.BeginTxx(ctx, nil)
if err != nil {
return err
}
baseCols := []string{
"InventoryId", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "VcpuCount",
"RamGB", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid", "SnapshotTime",
}
bind := sqlx.BindType(dbConn.DriverName())
buildStmt := func(cols []string) (*sqlx.Stmt, error) {
colList := `"` + strings.Join(cols, `", "`) + `"`
placeholders := strings.TrimRight(strings.Repeat("?, ", len(cols)), ", ")
return tx.PreparexContext(ctx, sqlx.Rebind(bind, fmt.Sprintf(`INSERT INTO %s (%s) VALUES (%s)`, tableName, colList, placeholders)))
}
stmt, err := buildStmt(baseCols)
if err != nil {
// Fallback for legacy tables that still have IsPresent.
withLegacy := append(append([]string{}, baseCols...), "IsPresent")
stmt, err = buildStmt(withLegacy)
if err != nil {
tx.Rollback()
return err
}
defer stmt.Close()
// Legacy path: same argument order as baseCols plus the trailing
// "TRUE" for IsPresent.
for _, row := range rows {
args := []any{
row.InventoryId,
row.Name,
row.Vcenter,
row.VmId,
row.EventKey,
row.CloudId,
row.CreationTime,
row.DeletionTime,
row.ResourcePool,
row.Datacenter,
row.Cluster,
row.Folder,
row.ProvisionedDisk,
row.VcpuCount,
row.RamGB,
row.IsTemplate,
row.PoweredOn,
row.SrmPlaceholder,
row.VmUuid,
row.SnapshotTime,
"TRUE",
}
if _, err := stmt.ExecContext(ctx, args...); err != nil {
tx.Rollback()
return err
}
}
return tx.Commit()
}
defer stmt.Close()
// Modern path: argument order must match baseCols exactly.
for _, row := range rows {
args := []any{
row.InventoryId,
row.Name,
row.Vcenter,
row.VmId,
row.EventKey,
row.CloudId,
row.CreationTime,
row.DeletionTime,
row.ResourcePool,
row.Datacenter,
row.Cluster,
row.Folder,
row.ProvisionedDisk,
row.VcpuCount,
row.RamGB,
row.IsTemplate,
row.PoweredOn,
row.SrmPlaceholder,
row.VmUuid,
row.SnapshotTime,
}
if _, err := stmt.ExecContext(ctx, args...); err != nil {
tx.Rollback()
return err
}
}
return tx.Commit()
}
// insertHourlyBatchPostgresMultiRow inserts snapshot rows into the named
// table via multi-row VALUES statements, retrying with the legacy IsPresent
// column appended when the target table still carries it.
func insertHourlyBatchPostgresMultiRow(ctx context.Context, dbConn *sqlx.DB, tableName string, rows []InventorySnapshotRow) error {
	baseCols := []string{
		"InventoryId", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
		"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "VcpuCount",
		"RamGB", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid", "SnapshotTime",
	}
	err := execHourlySnapshotInsertPostgres(ctx, dbConn, tableName, baseCols, rows, false)
	switch {
	case err == nil:
		return nil
	case !isLegacyIsPresentError(err):
		return err
	}
	legacyCols := append(append([]string{}, baseCols...), "IsPresent")
	return execHourlySnapshotInsertPostgres(ctx, dbConn, tableName, legacyCols, rows, true)
}
// execHourlySnapshotInsertPostgres performs chunked multi-row inserts into
// tableName inside one transaction, keeping each statement under the postgres
// bind-parameter limit. When includeLegacyIsPresent is true, cols must carry
// the trailing "IsPresent" column and each row gets "TRUE" appended for it.
func execHourlySnapshotInsertPostgres(ctx context.Context, dbConn *sqlx.DB, tableName string, cols []string, rows []InventorySnapshotRow, includeLegacyIsPresent bool) error {
tx, err := dbConn.BeginTxx(ctx, nil)
if err != nil {
return err
}
maxRows := postgresMaxRowsPerStatement(len(cols))
for start := 0; start < len(rows); start += maxRows {
end := min(start+maxRows, len(rows))
chunk := rows[start:end]
// Flatten the chunk; argument order must match cols exactly.
args := make([]any, 0, len(chunk)*len(cols))
for _, row := range chunk {
args = append(args,
row.InventoryId,
row.Name,
row.Vcenter,
row.VmId,
row.EventKey,
row.CloudId,
row.CreationTime,
row.DeletionTime,
row.ResourcePool,
row.Datacenter,
row.Cluster,
row.Folder,
row.ProvisionedDisk,
row.VcpuCount,
row.RamGB,
row.IsTemplate,
row.PoweredOn,
row.SrmPlaceholder,
row.VmUuid,
row.SnapshotTime,
)
if includeLegacyIsPresent {
args = append(args, "TRUE")
}
}
stmt := buildPostgresMultiRowInsertSQL(tableName, cols, len(chunk), "")
if _, err := tx.ExecContext(ctx, stmt, args...); err != nil {
tx.Rollback()
return err
}
}
return tx.Commit()
}
// isPostgresDriver reports whether the (possibly padded, mixed-case) driver
// name identifies a postgres connection.
func isPostgresDriver(driver string) bool {
	name := strings.ToLower(strings.TrimSpace(driver))
	return name == "pgx" || name == "postgres"
}
// postgresMaxRowsPerStatement returns how many rows of colCount columns fit
// into one statement under the 65535 bind-parameter limit, with a floor of 1.
func postgresMaxRowsPerStatement(colCount int) int {
	const maxBindParams = 65535
	if colCount <= 0 {
		return 1
	}
	if perStmt := maxBindParams / colCount; perStmt > 0 {
		return perStmt
	}
	return 1
}
// buildPostgresMultiRowInsertSQL renders a multi-row INSERT with numbered $n
// placeholders (left-to-right, row-major) and an optional trailing suffix
// such as an ON CONFLICT clause. Returns "" for a non-positive rowCount.
func buildPostgresMultiRowInsertSQL(tableName string, cols []string, rowCount int, suffix string) string {
	if rowCount <= 0 {
		return ""
	}
	groups := make([]string, 0, rowCount)
	next := 1
	for r := 0; r < rowCount; r++ {
		placeholders := make([]string, len(cols))
		for c := range cols {
			placeholders[c] = "$" + strconv.Itoa(next)
			next++
		}
		groups = append(groups, "("+strings.Join(placeholders, ",")+")")
	}
	return `INSERT INTO ` + tableName + ` ("` + strings.Join(cols, `","`) + `") VALUES ` + strings.Join(groups, ",") + suffix
}
func isLegacyIsPresentError(err error) bool {
if err == nil {
return false
}
return strings.Contains(strings.ToLower(err.Error()), "ispresent")
}
// dropSnapshotTable drops the named snapshot table if it exists, after
// validating the name to prevent SQL injection via table identifiers.
func dropSnapshotTable(ctx context.Context, dbConn *sqlx.DB, table string) error {
	if _, err := db.SafeTableName(table); err != nil {
		return err
	}
	stmt := fmt.Sprintf("DROP TABLE IF EXISTS %s", table)
	_, err := dbConn.ExecContext(ctx, stmt)
	return err
}
// clearTable deletes every row from the named table, after validating the
// name to prevent SQL injection via table identifiers.
func clearTable(ctx context.Context, dbConn *sqlx.DB, table string) error {
	if _, err := db.SafeTableName(table); err != nil {
		return err
	}
	stmt := fmt.Sprintf("DELETE FROM %s", table)
	if _, err := dbConn.ExecContext(ctx, stmt); err != nil {
		return fmt.Errorf("failed to clear table %s: %w", table, err)
	}
	return nil
}
+53
View File
@@ -0,0 +1,53 @@
package tasks
import "testing"
// TestPostgresMaxRowsPerStatement pins the rows-per-statement math against
// the column counts actually used by the hourly insert paths.
func TestPostgresMaxRowsPerStatement(t *testing.T) {
	cases := []struct {
		name string
		cols int
		want int
	}{
		{name: "zero columns", cols: 0, want: 1},
		{name: "hourly cache columns", cols: 17, want: 3855},
		{name: "hourly snapshot columns", cols: 20, want: 3276},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := postgresMaxRowsPerStatement(tc.cols); got != tc.want {
				t.Fatalf("unexpected max rows: cols=%d got=%d want=%d", tc.cols, got, tc.want)
			}
		})
	}
}
// TestBuildPostgresMultiRowInsertSQL pins the rendered statement text, with
// and without a trailing conflict-clause suffix.
func TestBuildPostgresMultiRowInsertSQL(t *testing.T) {
	cases := []struct {
		cols   []string
		rows   int
		suffix string
		want   string
	}{
		{[]string{"A", "B"}, 2, "", `INSERT INTO vm_hourly_stats ("A","B") VALUES ($1,$2),($3,$4)`},
		{[]string{"A"}, 1, ` ON CONFLICT ("A") DO NOTHING`, `INSERT INTO vm_hourly_stats ("A") VALUES ($1) ON CONFLICT ("A") DO NOTHING`},
	}
	for _, tc := range cases {
		got := buildPostgresMultiRowInsertSQL("vm_hourly_stats", tc.cols, tc.rows, tc.suffix)
		if got != tc.want {
			t.Fatalf("unexpected SQL\nwant: %s\ngot: %s", tc.want, got)
		}
	}
}
func TestIsLegacyIsPresentError(t *testing.T) {
if !isLegacyIsPresentError(assertErr(`null value in column "IsPresent" violates not-null constraint`)) {
t.Fatal("expected legacy IsPresent error to be detected")
}
if isLegacyIsPresentError(assertErr("duplicate key value violates unique constraint")) {
t.Fatal("expected non-IsPresent errors to be ignored")
}
}
// testErr is a minimal string-backed error implementation for exercising
// error-classification helpers in tests.
type testErr string
// Error returns the underlying message.
func (e testErr) Error() string { return string(e) }
// assertErr wraps msg in a testErr so tests can hand helpers a real error value.
func assertErr(msg string) error { return testErr(msg) }
+553
View File
@@ -0,0 +1,553 @@
package tasks
import (
"context"
"database/sql"
"errors"
"fmt"
"log/slog"
"strconv"
"strings"
"time"
"vctp/db"
"vctp/db/queries"
"github.com/jmoiron/sqlx"
)
var snapshotProbeLimiter = make(chan struct{}, 1)
func acquireSnapshotProbe(ctx context.Context) (func(), error) {
select {
case snapshotProbeLimiter <- struct{}{}:
return func() { <-snapshotProbeLimiter }, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
// boolStringFromInterface normalizes a driver-provided value into the string
// form used by the snapshot tables: booleans and integers become
// "TRUE"/"FALSE", strings and byte slices pass through, nil becomes "",
// and anything else is formatted with fmt.Sprint.
func boolStringFromInterface(value any) string {
	boolWord := func(b bool) string {
		if b {
			return "TRUE"
		}
		return "FALSE"
	}
	switch v := value.(type) {
	case nil:
		return ""
	case string:
		return v
	case []byte:
		return string(v)
	case bool:
		return boolWord(v)
	case int:
		return boolWord(v != 0)
	case int64:
		return boolWord(v != 0)
	default:
		return fmt.Sprint(v)
	}
}
// latestHourlySnapshotBefore finds the most recent hourly snapshot table prior to the given time, skipping empty tables.
func latestHourlySnapshotBefore(ctx context.Context, dbConn *sqlx.DB, cutoff time.Time, logger *slog.Logger) (string, error) {
	candidates, err := listLatestHourlyWithRows(ctx, dbConn, "", cutoff.Unix(), 1, logger)
	switch {
	case err != nil:
		return "", err
	case len(candidates) == 0:
		return "", nil
	default:
		return candidates[0].Table, nil
	}
}
// parseSnapshotTime extracts the unix suffix from an inventory_hourly table name.
// It returns (0, false) when the name lacks the "inventory_hourly_" prefix or
// the suffix is not a base-10 integer.
func parseSnapshotTime(table string) (int64, bool) {
	// strings.CutPrefix replaces the HasPrefix+TrimPrefix pair in one call.
	suffix, ok := strings.CutPrefix(table, "inventory_hourly_")
	if !ok {
		return 0, false
	}
	ts, err := strconv.ParseInt(suffix, 10, 64)
	if err != nil {
		return 0, false
	}
	return ts, true
}
// listLatestHourlyWithRows returns recent hourly snapshot tables (ordered desc by time) that have rows, optionally filtered by vcenter.
//
// Candidates come from snapshot_registry. Tables with a recorded count of 0
// are skipped outright; a NULL count is accepted optimistically to avoid
// heavy probes. When a vcenter filter is given, each surviving candidate is
// probed for at least one matching row — bounded to a single in-flight probe
// (snapshotProbeLimiter) with a 2s timeout per probe.
//
// Fix: the iteration error from the registry cursor is now surfaced via
// rows.Err(); previously a mid-iteration failure silently returned a
// truncated candidate list as a clean result.
func listLatestHourlyWithRows(ctx context.Context, dbConn *sqlx.DB, vcenter string, beforeUnix int64, limit int, logger *slog.Logger) ([]snapshotTable, error) {
	if limit <= 0 {
		limit = 50
	}
	query := dbConn.Rebind(`
SELECT table_name, snapshot_time, snapshot_count
FROM snapshot_registry
WHERE snapshot_type = 'hourly' AND snapshot_time < ?
ORDER BY snapshot_time DESC
LIMIT ?
`)
	rows, err := dbConn.QueryxContext(ctx, query, beforeUnix, limit)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var out []snapshotTable
	for rows.Next() {
		var name string
		var ts int64
		var count sql.NullInt64
		if scanErr := rows.Scan(&name, &ts, &count); scanErr != nil {
			continue
		}
		// Skip entries whose table name does not pass validation.
		if err := db.ValidateTableName(name); err != nil {
			continue
		}
		if count.Valid && count.Int64 == 0 {
			if logger != nil {
				logger.Debug("skipping snapshot table with zero count", "table", name, "snapshot_time", ts, "vcenter", vcenter)
			}
			continue
		}
		probed := false
		var probeErr error
		probeTimeout := false
		// If count is known and >0, trust it; if NULL, accept optimistically to avoid heavy probes.
		hasRows := !count.Valid || count.Int64 > 0
		start := time.Now()
		if vcenter != "" && hasRows {
			probed = true
			probeCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
			release, err := acquireSnapshotProbe(probeCtx)
			if err != nil {
				probeErr = err
				hasRows = false
				cancel()
			} else {
				vrows, qerr := querySnapshotRows(probeCtx, dbConn, name, []string{"VmId"}, `"Vcenter" = ? LIMIT 1`, vcenter)
				if qerr == nil {
					hasRows = vrows.Next()
					vrows.Close()
				} else {
					probeErr = qerr
					hasRows = false
				}
				release()
				cancel()
			}
			probeTimeout = errors.Is(probeErr, context.DeadlineExceeded) || errors.Is(probeErr, context.Canceled)
		}
		elapsed := time.Since(start)
		if logger != nil {
			logger.Debug("evaluated snapshot table", "table", name, "snapshot_time", ts, "snapshot_count", count, "probed", probed, "has_rows", hasRows, "elapsed", elapsed, "vcenter", vcenter, "probe_error", probeErr, "probe_timeout", probeTimeout)
		}
		if !hasRows {
			continue
		}
		out = append(out, snapshotTable{Table: name, Time: ts, Count: count})
	}
	// Surface cursor iteration errors instead of returning a silently
	// truncated list.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return out, nil
}
// SnapshotTooSoon reports whether the gap between prev and curr is significantly shorter than expected.
// Zero timestamps or a non-positive expectation disable the check.
func SnapshotTooSoon(prevUnix, currUnix int64, expectedSeconds int64) bool {
	switch {
	case prevUnix == 0, currUnix == 0, expectedSeconds <= 0:
		return false
	default:
		return currUnix-prevUnix < expectedSeconds
	}
}
// querySnapshotRows builds a SELECT with proper rebind for the given table/columns/where.
// Empty columns select *; an empty/blank where clause is omitted.
func querySnapshotRows(ctx context.Context, dbConn *sqlx.DB, table string, columns []string, where string, args ...any) (*sqlx.Rows, error) {
	if err := db.ValidateTableName(table); err != nil {
		return nil, err
	}
	selectList := "*"
	if len(columns) > 0 {
		selectList = `"` + strings.Join(columns, `","`) + `"`
	}
	stmt := fmt.Sprintf(`SELECT %s FROM %s`, selectList, table)
	if strings.TrimSpace(where) != "" {
		stmt = fmt.Sprintf(`%s WHERE %s`, stmt, where)
	}
	stmt = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), stmt)
	return dbConn.QueryxContext(ctx, stmt, args...)
}
// updateDeletionTimeInSnapshot stamps deletionUnix onto matching rows of the
// given snapshot table. The match column is chosen by priority VmId > VmUuid
// > Name; with no identifier at all it is a no-op. Only rows whose existing
// DeletionTime is NULL, zero, or later than deletionUnix are touched.
// Returns the number of rows updated.
func updateDeletionTimeInSnapshot(ctx context.Context, dbConn *sqlx.DB, table, vcenter, vmID, vmUUID, name string, deletionUnix int64) (int64, error) {
	if err := db.ValidateTableName(table); err != nil {
		return 0, err
	}
	var column, value string
	switch {
	case vmID != "":
		column, value = "VmId", vmID
	case vmUUID != "":
		column, value = "VmUuid", vmUUID
	case name != "":
		column, value = "Name", name
	default:
		return 0, nil
	}
	stmt := fmt.Sprintf(`UPDATE %s SET "DeletionTime" = ? WHERE "Vcenter" = ? AND "%s" = ? AND ("DeletionTime" IS NULL OR "DeletionTime" = 0 OR "DeletionTime" > ?)`, table, column)
	stmt = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), stmt)
	res, err := dbConn.ExecContext(ctx, stmt, deletionUnix, vcenter, value, deletionUnix)
	if err != nil {
		return 0, err
	}
	affected, err := res.RowsAffected()
	if err != nil {
		return 0, err
	}
	return affected, nil
}
// updateDeletionTimeInHourlyCache stamps deletionUnix onto the matching
// vm_hourly_stats rows for the given snapshot time. The match column is
// chosen by priority VmId > VmUuid > Name; with no identifier, or a
// non-positive snapshot time, it is a no-op. Only rows whose existing
// DeletionTime is NULL, zero, or later than deletionUnix are touched.
// Returns the number of rows updated.
func updateDeletionTimeInHourlyCache(ctx context.Context, dbConn *sqlx.DB, vcenter, vmID, vmUUID, name string, snapshotUnix, deletionUnix int64) (int64, error) {
	if snapshotUnix <= 0 {
		return 0, nil
	}
	var column, value string
	switch {
	case vmID != "":
		column, value = "VmId", vmID
	case vmUUID != "":
		column, value = "VmUuid", vmUUID
	case name != "":
		column, value = "Name", name
	default:
		return 0, nil
	}
	stmt := fmt.Sprintf(`UPDATE vm_hourly_stats SET "DeletionTime" = ? WHERE "Vcenter" = ? AND "SnapshotTime" = ? AND "%s" = ? AND ("DeletionTime" IS NULL OR "DeletionTime" = 0 OR "DeletionTime" > ?)`, column)
	stmt = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), stmt)
	res, err := dbConn.ExecContext(ctx, stmt, deletionUnix, vcenter, snapshotUnix, value, deletionUnix)
	if err != nil {
		return 0, err
	}
	affected, err := res.RowsAffected()
	if err != nil {
		return 0, err
	}
	return affected, nil
}
// markMissingFromPrevious marks VMs that were present in the previous snapshot but missing now.
// When updateCompatSnapshot is true, legacy hourly snapshot tables are updated as well.
//
// It reads prevTable for the given vcenter and checks each row's presence in the
// current snapshot by VmId, then VmUuid, then Name. A rename is tolerated: a row
// whose UUID+Cluster still matches a live inventory record is treated as present.
// For each genuinely missing VM, a deletion time is stamped into the inventory
// (only when not already set), the lifecycle cache, the hourly stats cache, and
// optionally the legacy snapshot table itself.
//
// Returns the number of VMs detected as missing and whether prevTable was updated.
// All failures are logged and handled best-effort; a read failure yields (0, false).
func (c *CronTask) markMissingFromPrevious(ctx context.Context, dbConn *sqlx.DB, prevTable string, vcenter string, snapshotTime time.Time,
	currentByID map[string]InventorySnapshotRow, currentByUuid map[string]struct{}, currentByName map[string]struct{},
	invByID map[string]queries.Inventory, invByUuid map[string]queries.Inventory, invByName map[string]queries.Inventory, updateCompatSnapshot bool) (int, bool) {
	if err := db.ValidateTableName(prevTable); err != nil {
		return 0, false
	}
	// Best-effort: a table name that doesn't encode a snapshot time leaves prevSnapUnix at 0,
	// which skips the hourly-cache update below.
	prevSnapUnix, _ := parseSnapshotTime(prevTable)
	type prevRow struct {
		VmId         sql.NullString `db:"VmId"`
		VmUuid       sql.NullString `db:"VmUuid"`
		Name         string         `db:"Name"`
		Cluster      sql.NullString `db:"Cluster"`
		Datacenter   sql.NullString `db:"Datacenter"`
		DeletionTime sql.NullInt64  `db:"DeletionTime"`
	}
	rows, err := querySnapshotRows(ctx, dbConn, prevTable, []string{"VmId", "VmUuid", "Name", "Cluster", "Datacenter", "DeletionTime"}, `"Vcenter" = ?`, vcenter)
	if err != nil {
		c.Logger.Warn("failed to read previous snapshot for deletion detection", "error", err, "table", prevTable, "vcenter", vcenter)
		return 0, false
	}
	defer rows.Close()
	missing := 0
	tableUpdated := false
	for rows.Next() {
		var r prevRow
		if err := rows.StructScan(&r); err != nil {
			continue // skip unreadable rows rather than aborting the whole scan
		}
		vmID := r.VmId.String
		uuid := r.VmUuid.String
		name := r.Name
		cluster := r.Cluster.String
		// Presence check against the current snapshot: id, then uuid, then name.
		found := false
		if vmID != "" {
			if _, ok := currentByID[vmID]; ok {
				found = true
			}
		}
		if !found && uuid != "" {
			if _, ok := currentByUuid[uuid]; ok {
				found = true
			}
		}
		if !found && name != "" {
			if _, ok := currentByName[name]; ok {
				found = true
			}
		}
		// If the name is missing but UUID+Cluster still exists in inventory/current, treat it as present (rename, not delete).
		if !found && uuid != "" && cluster != "" {
			if inv, ok := invByUuid[uuid]; ok && strings.EqualFold(inv.Cluster.String, cluster) {
				found = true
			}
		}
		if found {
			continue
		}
		// Resolve the inventory record for the missing VM so its deletion can be recorded.
		var inv queries.Inventory
		var ok bool
		if vmID != "" {
			inv, ok = invByID[vmID]
		}
		if !ok && uuid != "" {
			inv, ok = invByUuid[uuid]
		}
		if !ok && name != "" {
			inv, ok = invByName[name]
		}
		if !ok {
			// Not in inventory at all; nothing we can mark.
			continue
		}
		delTime := inv.DeletionTime
		if !delTime.Valid {
			// First time the deletion is noticed: stamp the current snapshot time.
			delTime = sql.NullInt64{Int64: snapshotTime.Unix(), Valid: true}
			if err := c.Database.Queries().InventoryMarkDeleted(ctx, queries.InventoryMarkDeletedParams{
				DeletionTime:   delTime,
				VmId:           inv.VmId,
				DatacenterName: inv.Datacenter,
			}); err != nil {
				c.Logger.Warn("failed to mark inventory record deleted from previous snapshot", "error", err, "vm_id", inv.VmId.String)
			}
		}
		// Also update lifecycle cache so deletion time is available for rollups.
		vmUUID := ""
		if inv.VmUuid.Valid {
			vmUUID = inv.VmUuid.String
		}
		if err := db.MarkVmDeletedWithDetails(ctx, dbConn, vcenter, inv.VmId.String, vmUUID, inv.Name, inv.Cluster.String, delTime.Int64); err != nil {
			c.Logger.Warn("failed to mark lifecycle cache deleted from previous snapshot", "error", err, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter)
		}
		// Backfill the deletion time into the hourly stats cache for the previous snapshot hour.
		if prevSnapUnix > 0 {
			if cacheRows, err := updateDeletionTimeInHourlyCache(ctx, dbConn, vcenter, inv.VmId.String, vmUUID, inv.Name, prevSnapUnix, delTime.Int64); err != nil {
				c.Logger.Warn("failed to update hourly cache deletion time", "error", err, "snapshot_time", prevSnapUnix, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter)
			} else if cacheRows > 0 {
				c.Logger.Debug("updated hourly cache deletion time", "snapshot_time", prevSnapUnix, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter, "deletion_time", delTime.Int64)
			}
		}
		// Optionally mirror the deletion time into the legacy per-hour snapshot table.
		if updateCompatSnapshot {
			if rowsAffected, err := updateDeletionTimeInSnapshot(ctx, dbConn, prevTable, vcenter, inv.VmId.String, vmUUID, inv.Name, delTime.Int64); err != nil {
				c.Logger.Warn("failed to update hourly snapshot deletion time", "error", err, "table", prevTable, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter)
			} else if rowsAffected > 0 {
				tableUpdated = true
				c.Logger.Debug("updated hourly snapshot deletion time", "table", prevTable, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter, "deletion_time", delTime.Int64)
			}
		}
		c.Logger.Debug("Detected VM missing compared to previous snapshot", "name", inv.Name, "vm_id", inv.VmId.String, "vm_uuid", inv.VmUuid.String, "vcenter", vcenter, "snapshot_time", snapshotTime, "prev_table", prevTable)
		missing++
	}
	return missing, tableUpdated
}
// countNewFromPrevious returns how many VMs are present in the current snapshot
// but not in the previous snapshot. A VM is considered "not new" when its VmId,
// VmUuid, or Name appears in the previous snapshot for the same vCenter.
// Every failure mode (invalid table name, query error, partial read) falls back
// to the conservative answer that all current VMs are new.
func countNewFromPrevious(ctx context.Context, dbConn *sqlx.DB, prevTable string, vcenter string, current map[string]InventorySnapshotRow) int {
	if err := db.ValidateTableName(prevTable); err != nil {
		return len(current)
	}
	query := fmt.Sprintf(`SELECT "VmId","VmUuid","Name" FROM %s WHERE "Vcenter" = ?`, prevTable)
	query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
	rows, err := dbConn.QueryxContext(ctx, query, vcenter)
	if err != nil {
		return len(current)
	}
	defer rows.Close()
	prevIDs := make(map[string]struct{})
	prevUUIDs := make(map[string]struct{})
	prevNames := make(map[string]struct{})
	for rows.Next() {
		// Scan into NullString: VmId/VmUuid are nullable elsewhere in this file
		// (see the prevRow struct), and scanning NULL into a plain string errors,
		// which silently dropped the whole row and made that VM look "new".
		var vmID, vmUUID, name sql.NullString
		if scanErr := rows.Scan(&vmID, &vmUUID, &name); scanErr != nil {
			continue
		}
		if vmID.String != "" {
			prevIDs[vmID.String] = struct{}{}
		}
		if vmUUID.String != "" {
			prevUUIDs[vmUUID.String] = struct{}{}
		}
		if name.String != "" {
			prevNames[name.String] = struct{}{}
		}
	}
	if err := rows.Err(); err != nil {
		// Partial read of the previous snapshot: fall back to the same
		// conservative answer as the query-error path rather than over-counting
		// against an incomplete key set.
		return len(current)
	}
	newCount := 0
	for _, cur := range current {
		if id := cur.VmId.String; id != "" {
			if _, ok := prevIDs[id]; ok {
				continue
			}
		}
		if uuid := cur.VmUuid.String; uuid != "" {
			if _, ok := prevUUIDs[uuid]; ok {
				continue
			}
		}
		if name := cur.Name; name != "" {
			if _, ok := prevNames[name]; ok {
				continue
			}
		}
		newCount++
	}
	return newCount
}
// listNewFromPrevious returns the rows present now but not in the previous snapshot.
// Matching and failure semantics mirror countNewFromPrevious: a VM is "not new"
// if its VmId, VmUuid, or Name appears in the previous snapshot, and any failure
// reading the previous snapshot returns every current row.
func listNewFromPrevious(ctx context.Context, dbConn *sqlx.DB, prevTable string, vcenter string, current map[string]InventorySnapshotRow) []InventorySnapshotRow {
	// allCurrent is the conservative fallback used on every failure path.
	allCurrent := func() []InventorySnapshotRow {
		all := make([]InventorySnapshotRow, 0, len(current))
		for _, cur := range current {
			all = append(all, cur)
		}
		return all
	}
	if err := db.ValidateTableName(prevTable); err != nil {
		return allCurrent()
	}
	query := fmt.Sprintf(`SELECT "VmId","VmUuid","Name" FROM %s WHERE "Vcenter" = ?`, prevTable)
	query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
	rows, err := dbConn.QueryxContext(ctx, query, vcenter)
	if err != nil {
		return allCurrent()
	}
	defer rows.Close()
	prevIDs := make(map[string]struct{})
	prevUUIDs := make(map[string]struct{})
	prevNames := make(map[string]struct{})
	for rows.Next() {
		// NullString scan: plain strings error on NULL VmId/VmUuid, silently
		// skipping the row and misclassifying that VM as new.
		var vmID, vmUUID, name sql.NullString
		if scanErr := rows.Scan(&vmID, &vmUUID, &name); scanErr != nil {
			continue
		}
		if vmID.String != "" {
			prevIDs[vmID.String] = struct{}{}
		}
		if vmUUID.String != "" {
			prevUUIDs[vmUUID.String] = struct{}{}
		}
		if name.String != "" {
			prevNames[name.String] = struct{}{}
		}
	}
	if err := rows.Err(); err != nil {
		// Partial read: treat like the query-error path so an incomplete key set
		// never shrinks the result arbitrarily.
		return allCurrent()
	}
	newRows := make([]InventorySnapshotRow, 0)
	for _, cur := range current {
		if id := cur.VmId.String; id != "" {
			if _, ok := prevIDs[id]; ok {
				continue
			}
		}
		if uuid := cur.VmUuid.String; uuid != "" {
			if _, ok := prevUUIDs[uuid]; ok {
				continue
			}
		}
		if name := cur.Name; name != "" {
			if _, ok := prevNames[name]; ok {
				continue
			}
		}
		newRows = append(newRows, cur)
	}
	return newRows
}
// findVMInHourlySnapshots searches recent hourly snapshot tables for a VM by ID for the given vCenter.
// extraTables are searched first (e.g., known previous snapshot tables), then up to
// ten of the most recent hourly tables from snapshot_registry. It returns the row,
// the table it was found in, and whether a match was made.
func findVMInHourlySnapshots(ctx context.Context, dbConn *sqlx.DB, vcenter string, vmID string, extraTables ...string) (InventorySnapshotRow, string, bool) {
	if vmID == "" {
		return InventorySnapshotRow{}, "", false
	}
	// Use a short timeout to avoid hanging if the DB is busy.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	// First search any explicit tables provided.
	for _, table := range extraTables {
		if row, ok := lookupVMInSnapshotTable(ctx, dbConn, table, vcenter, vmID); ok {
			return row, table, true
		}
	}
	// Try a handful of most recent hourly tables from the registry.
	rows, err := dbConn.QueryxContext(ctx, `
		SELECT table_name
		FROM snapshot_registry
		WHERE snapshot_type = 'hourly'
		ORDER BY snapshot_time DESC
		LIMIT 20
	`)
	if err != nil {
		return InventorySnapshotRow{}, "", false
	}
	defer rows.Close()
	checked := 0
	for rows.Next() {
		var table string
		if scanErr := rows.Scan(&table); scanErr != nil {
			continue
		}
		// Invalid names neither get probed nor consume the probe budget.
		if err := db.ValidateTableName(table); err != nil {
			continue
		}
		if row, ok := lookupVMInSnapshotTable(ctx, dbConn, table, vcenter, vmID); ok {
			return row, table, true
		}
		checked++
		if checked >= 10 { // limit work
			break
		}
	}
	return InventorySnapshotRow{}, "", false
}

// lookupVMInSnapshotTable probes one snapshot table for the VM. It validates the
// table name and returns (zero row, false) on an empty/invalid name, a query
// error, or a miss — failures are deliberately silent (best-effort search).
func lookupVMInSnapshotTable(ctx context.Context, dbConn *sqlx.DB, table, vcenter, vmID string) (InventorySnapshotRow, bool) {
	if table == "" {
		return InventorySnapshotRow{}, false
	}
	if err := db.ValidateTableName(table); err != nil {
		return InventorySnapshotRow{}, false
	}
	query := fmt.Sprintf(`SELECT "VmId","VmUuid","Name","Datacenter","Cluster" FROM %s WHERE "Vcenter" = ? AND "VmId" = ? LIMIT 1`, table)
	query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
	var row InventorySnapshotRow
	if err := dbConn.QueryRowxContext(ctx, query, vcenter, vmID).Scan(&row.VmId, &row.VmUuid, &row.Name, &row.Datacenter, &row.Cluster); err != nil {
		return InventorySnapshotRow{}, false
	}
	return row, true
}
+295
View File
@@ -0,0 +1,295 @@
package tasks
import (
"context"
"database/sql"
"log/slog"
"time"
"vctp/db"
"github.com/jmoiron/sqlx"
)
// presenceKeys builds lookup keys for vm presence comparison. Each non-empty
// identifier contributes one namespaced key ("id:", "uuid:", "name:") so a VM
// can be matched on any identity axis without cross-namespace collisions.
func presenceKeys(vmID, vmUUID, name string) []string {
	pairs := [...]struct {
		prefix string
		value  string
	}{
		{"id:", vmID},
		{"uuid:", vmUUID},
		{"name:", name},
	}
	out := make([]string, 0, len(pairs))
	for _, p := range pairs {
		if p.value == "" {
			continue
		}
		out = append(out, p.prefix+p.value)
	}
	return out
}
// backfillLifecycleDeletionsToday looks for VMs in the lifecycle cache that are not in the current inventory,
// have no DeletedAt, and determines their deletion time from today's hourly snapshots, optionally checking the next snapshot (next day) to confirm.
// It returns any hourly snapshot tables that were updated with deletion times.
//
// For each candidate, findDeletionInTables walks the day's hourly tables; a
// deletion is confirmed in-day by two consecutive misses, while a single
// trailing miss is only accepted when the VM is also absent from the first
// snapshot after dayEnd. Confirmed deletions are written to the lifecycle
// cache, the hourly stats cache, and (when updateCompatSnapshot is set) the
// legacy snapshot table the VM was last seen in.
func backfillLifecycleDeletionsToday(ctx context.Context, logger *slog.Logger, dbConn *sqlx.DB, vcenter string, snapshotTime time.Time, present map[string]InventorySnapshotRow, updateCompatSnapshot bool) ([]string, error) {
	dayStart := truncateDate(snapshotTime)
	dayEnd := dayStart.Add(24 * time.Hour)
	candidates, err := loadLifecycleCandidates(ctx, dbConn, vcenter, present)
	if err != nil || len(candidates) == 0 {
		return nil, err
	}
	tables, err := listHourlyTablesForDay(ctx, dbConn, dayStart, dayEnd)
	if err != nil {
		return nil, err
	}
	if len(tables) == 0 {
		return nil, nil
	}
	// Presence keys from the next day's first qualifying snapshot, used to
	// confirm cross-day deletions. Empty when no such snapshot exists or it
	// cannot be read.
	nextPresence := make(map[string]struct{})
	if nextTable, nextErr := nextSnapshotAfter(ctx, dbConn, dayEnd, vcenter); nextErr == nil && nextTable != "" {
		nextPresence = loadPresenceKeys(ctx, dbConn, nextTable, vcenter)
	}
	updatedTables := make(map[string]struct{})
	for i := range candidates {
		// Take a pointer: findDeletionInTables may enrich the candidate's
		// uuid/name/cluster from snapshot rows as it scans.
		cand := &candidates[i]
		deletion, firstMiss, lastSeenTable := findDeletionInTables(ctx, dbConn, tables, vcenter, cand)
		if deletion == 0 && len(nextPresence) > 0 && firstMiss > 0 {
			if !isPresent(nextPresence, *cand) {
				// Single miss at end of day, confirmed by next-day absence.
				deletion = firstMiss
				logger.Debug("cross-day deletion inferred from next snapshot", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "deletion", deletion)
			}
		}
		if deletion > 0 {
			if err := db.MarkVmDeletedWithDetails(ctx, dbConn, vcenter, cand.vmID, cand.vmUUID, cand.name, cand.cluster, deletion); err != nil {
				logger.Warn("lifecycle backfill mark deleted failed", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "cluster", cand.cluster, "deletion", deletion, "error", err)
				continue
			}
			if lastSeenTable != "" {
				// Stamp the deletion time against the hour the VM was last seen.
				if snapUnix, ok := parseSnapshotTime(lastSeenTable); ok {
					if cacheRows, err := updateDeletionTimeInHourlyCache(ctx, dbConn, vcenter, cand.vmID, cand.vmUUID, cand.name, snapUnix, deletion); err != nil {
						logger.Warn("lifecycle backfill failed to update hourly cache deletion time", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "snapshot_time", snapUnix, "deletion", deletion, "error", err)
					} else if cacheRows > 0 {
						logger.Debug("lifecycle backfill updated hourly cache deletion time", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "snapshot_time", snapUnix, "deletion", deletion)
					}
				}
				if updateCompatSnapshot {
					if rowsAffected, err := updateDeletionTimeInSnapshot(ctx, dbConn, lastSeenTable, vcenter, cand.vmID, cand.vmUUID, cand.name, deletion); err != nil {
						logger.Warn("lifecycle backfill failed to update hourly snapshot deletion time", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "cluster", cand.cluster, "table", lastSeenTable, "deletion", deletion, "error", err)
					} else if rowsAffected > 0 {
						updatedTables[lastSeenTable] = struct{}{}
						logger.Debug("lifecycle backfill updated hourly snapshot deletion time", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "cluster", cand.cluster, "table", lastSeenTable, "deletion", deletion)
					}
				}
			}
			logger.Debug("lifecycle backfill applied", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "cluster", cand.cluster, "deletion", deletion)
		}
	}
	if len(updatedTables) == 0 {
		return nil, nil
	}
	tablesUpdated := make([]string, 0, len(updatedTables))
	for table := range updatedTables {
		tablesUpdated = append(tablesUpdated, table)
	}
	return tablesUpdated, nil
}
// lifecycleCandidate identifies a VM found in the lifecycle cache without a
// recorded deletion time and absent from the current inventory, making it a
// candidate for deletion-time backfill. vmID is the primary match key; empty
// vmUUID/name/cluster fields may be filled in later from snapshot rows (see
// findDeletionInTables).
type lifecycleCandidate struct {
	vmID    string
	vmUUID  string
	name    string
	cluster string
}
// loadLifecycleCandidates returns lifecycle-cache rows for the vCenter that have
// no recorded deletion time and whose VmId is absent from the current inventory
// map — i.e. VMs that may have been deleted and need a backfilled deletion time.
func loadLifecycleCandidates(ctx context.Context, dbConn *sqlx.DB, vcenter string, present map[string]InventorySnapshotRow) ([]lifecycleCandidate, error) {
	query := dbConn.Rebind(`
		SELECT "VmId","VmUuid","Name","Cluster"
		FROM vm_lifecycle_cache
		WHERE "Vcenter" = ? AND ("DeletedAt" IS NULL OR "DeletedAt" = 0)
	`)
	rows, err := dbConn.QueryxContext(ctx, query, vcenter)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var cands []lifecycleCandidate
	for rows.Next() {
		var vmID, vmUUID, name, cluster sql.NullString
		if scanErr := rows.Scan(&vmID, &vmUUID, &name, &cluster); scanErr != nil {
			continue
		}
		if vmID.String == "" {
			// Without a VmId we cannot probe snapshot tables, so skip.
			continue
		}
		if _, ok := present[vmID.String]; ok {
			continue // still present, skip
		}
		cands = append(cands, lifecycleCandidate{
			vmID:    vmID.String,
			vmUUID:  vmUUID.String,
			name:    name.String,
			cluster: cluster.String,
		})
	}
	// Surface iteration errors: a partial read must not be mistaken for a
	// complete (shorter) candidate list.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return cands, nil
}
// snapshotTable is one row of snapshot_registry: a concrete snapshot table name
// plus its snapshot time (unix seconds) and an optional cached row count.
type snapshotTable struct {
	Table string        `db:"table_name"`
	Time  int64         `db:"snapshot_time"`
	Count sql.NullInt64 `db:"snapshot_count"`
}
// listHourlyTablesForDay returns the hourly snapshot_registry entries whose
// snapshot_time falls within [dayStart, dayEnd), in ascending time order.
// Entries with an invalid table name or an explicit zero/negative row count
// are skipped; a NULL count is optimistically included to avoid long probes.
func listHourlyTablesForDay(ctx context.Context, dbConn *sqlx.DB, dayStart, dayEnd time.Time) ([]snapshotTable, error) {
	log := loggerFromCtx(ctx, nil)
	query := dbConn.Rebind(`
		SELECT table_name, snapshot_time, snapshot_count
		FROM snapshot_registry
		WHERE snapshot_type = 'hourly' AND snapshot_time >= ? AND snapshot_time < ?
		ORDER BY snapshot_time ASC
	`)
	rows, err := dbConn.QueryxContext(ctx, query, dayStart.Unix(), dayEnd.Unix())
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var tables []snapshotTable
	for rows.Next() {
		var t snapshotTable
		if err := rows.StructScan(&t); err != nil {
			continue
		}
		if err := db.ValidateTableName(t.Table); err != nil {
			continue
		}
		// Trust snapshot_count if present; otherwise optimistically include to avoid long probes.
		if t.Count.Valid && t.Count.Int64 <= 0 {
			if log != nil {
				log.Debug("skipping snapshot table with zero count", "table", t.Table, "snapshot_time", t.Time)
			}
			continue
		}
		tables = append(tables, t)
	}
	// A partial registry read would silently truncate the day's table list, so
	// report iteration errors instead of returning an incomplete result.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return tables, nil
}
// nextSnapshotAfter returns the name of the first hourly snapshot table at or
// after the given time that actually contains rows for the vCenter. It returns
// ("", nil) when no qualifying snapshot exists, and a non-nil error when the
// registry itself cannot be read.
func nextSnapshotAfter(ctx context.Context, dbConn *sqlx.DB, after time.Time, vcenter string) (string, error) {
	query := dbConn.Rebind(`
		SELECT table_name
		FROM snapshot_registry
		WHERE snapshot_type = 'hourly' AND snapshot_time >= ?
		ORDER BY snapshot_time ASC
		LIMIT 1
	`)
	rows, err := dbConn.QueryxContext(ctx, query, after.Unix())
	if err != nil {
		return "", err
	}
	defer rows.Close()
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			continue
		}
		if err := db.ValidateTableName(name); err != nil {
			continue
		}
		// ensure the snapshot table actually has entries for this vcenter
		vrows, qerr := querySnapshotRows(ctx, dbConn, name, []string{"VmId"}, `"Vcenter" = ? LIMIT 1`, vcenter)
		if qerr != nil {
			continue
		}
		hasVcenter := vrows.Next()
		vrows.Close()
		if hasVcenter {
			return name, nil
		}
	}
	// Distinguish "no qualifying snapshot" from a failed registry read so
	// callers don't silently proceed without next-day confirmation data.
	if err := rows.Err(); err != nil {
		return "", err
	}
	return "", nil
}
// loadPresenceKeys reads the given snapshot table and returns the set of
// presence keys (id/uuid/name, via presenceKeys) for every VM row belonging
// to the vCenter. Failures are swallowed: an empty set is returned on error.
func loadPresenceKeys(ctx context.Context, dbConn *sqlx.DB, table, vcenter string) map[string]struct{} {
	keys := make(map[string]struct{})
	rows, err := querySnapshotRows(ctx, dbConn, table, []string{"VmId", "VmUuid", "Name"}, `"Vcenter" = ?`, vcenter)
	if err != nil {
		return keys
	}
	defer rows.Close()
	for rows.Next() {
		var id, uuid, vmName sql.NullString
		if scanErr := rows.Scan(&id, &uuid, &vmName); scanErr != nil {
			continue
		}
		for _, key := range presenceKeys(id.String, uuid.String, vmName.String) {
			keys[key] = struct{}{}
		}
	}
	return keys
}
// isPresent reports whether any of the candidate's identity keys (built by
// presenceKeys from id, uuid, and name) appears in the presence set.
func isPresent(presence map[string]struct{}, cand lifecycleCandidate) bool {
	keys := presenceKeys(cand.vmID, cand.vmUUID, cand.name)
	for i := range keys {
		if _, hit := presence[keys[i]]; hit {
			return true
		}
	}
	return false
}
// findDeletionInTables walks ordered hourly tables for a vCenter and returns the first confirmed deletion time
// (requiring two consecutive misses), the time of the first miss for cross-day handling, and the last table where
// the VM was seen so we can backfill deletion time into that snapshot.
//
// Side effect: cand's vmUUID/name/cluster fields are filled in from snapshot
// rows when they were empty, so callers mark the deletion with the richest
// identity available.
func findDeletionInTables(ctx context.Context, dbConn *sqlx.DB, tables []snapshotTable, vcenter string, cand *lifecycleCandidate) (int64, int64, string) {
	var lastSeen int64
	var lastSeenTable string
	var firstMiss int64
	for i, tbl := range tables {
		rows, err := querySnapshotRows(ctx, dbConn, tbl.Table, []string{"VmId", "VmUuid", "Name", "Cluster"}, `"Vcenter" = ? AND "VmId" = ?`, vcenter, cand.vmID)
		if err != nil {
			continue // unreadable table counts as neither a hit nor a miss
		}
		seen := false
		if rows.Next() {
			var vmId, vmUuid, name, cluster sql.NullString
			if scanErr := rows.Scan(&vmId, &vmUuid, &name, &cluster); scanErr == nil {
				seen = true
				lastSeen = tbl.Time
				lastSeenTable = tbl.Table
				// Enrich missing identity fields from the snapshot row.
				if cand.vmUUID == "" && vmUuid.Valid {
					cand.vmUUID = vmUuid.String
				}
				if cand.name == "" && name.Valid {
					cand.name = name.String
				}
				if cand.cluster == "" && cluster.Valid {
					cand.cluster = cluster.String
				}
			}
		}
		rows.Close()
		// A miss only counts once the VM has been seen earlier in the sequence,
		// and only the first miss is recorded.
		if lastSeen > 0 && !seen && firstMiss == 0 {
			firstMiss = tbl.Time
			if i+1 < len(tables) {
				// Two consecutive misses confirm the deletion at the first
				// miss's snapshot time.
				if seen2, _ := candSeenInTable(ctx, dbConn, tables[i+1].Table, vcenter, cand.vmID); !seen2 {
					return firstMiss, firstMiss, lastSeenTable
				}
			}
		}
	}
	// No in-day confirmation; firstMiss (possibly 0) lets the caller apply
	// cross-day confirmation against the next day's snapshot.
	return 0, firstMiss, lastSeenTable
}
// candSeenInTable reports whether the VM appears in the given snapshot table
// for the vCenter. Query and iteration failures are returned as errors rather
// than being conflated with "not seen".
func candSeenInTable(ctx context.Context, dbConn *sqlx.DB, table, vcenter, vmID string) (bool, error) {
	rows, err := querySnapshotRows(ctx, dbConn, table, []string{"VmId"}, `"Vcenter" = ? AND "VmId" = ? LIMIT 1`, vcenter, vmID)
	if err != nil {
		return false, err
	}
	defer rows.Close()
	seen := rows.Next()
	if !seen {
		// rows.Next() returns false both for "no row" and for a failed fetch;
		// surface the latter instead of reporting a clean miss.
		if err := rows.Err(); err != nil {
			return false, err
		}
	}
	return seen, nil
}
File diff suppressed because it is too large Load Diff
+122
View File
@@ -0,0 +1,122 @@
package tasks
import (
"context"
"database/sql"
"testing"
"vctp/db"
"github.com/jmoiron/sqlx"
_ "modernc.org/sqlite"
)
// newTasksTestDB opens an in-memory sqlite database for a test and registers a
// cleanup that closes it when the test (and its subtests) finish.
func newTasksTestDB(t *testing.T) *sqlx.DB {
	t.Helper()
	conn, openErr := sqlx.Open("sqlite", ":memory:")
	if openErr != nil {
		t.Fatalf("failed to open sqlite test db: %v", openErr)
	}
	t.Cleanup(func() {
		_ = conn.Close()
	})
	return conn
}
// TestBackfillSnapshotRowFromHourlyCache seeds one vm_hourly_stats row and
// verifies that backfillSnapshotRowFromHourlyCache fills every missing field
// of a sparse snapshot row (creation time, placement, sizing, uuid, SRM flag)
// from the cache, leaving the values the row already had untouched.
func TestBackfillSnapshotRowFromHourlyCache(t *testing.T) {
	ctx := context.Background()
	dbConn := newTasksTestDB(t)
	if err := db.EnsureVmHourlyStats(ctx, dbConn); err != nil {
		t.Fatalf("failed to ensure vm_hourly_stats: %v", err)
	}
	// Golden cache row the backfill is expected to draw from.
	insertSQL := `
	INSERT INTO vm_hourly_stats (
		"SnapshotTime","Vcenter","VmId","VmUuid","Name","CreationTime","DeletionTime","ResourcePool",
		"Datacenter","Cluster","Folder","ProvisionedDisk","VcpuCount","RamGB","IsTemplate","PoweredOn","SrmPlaceholder"
	) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
	`
	if _, err := dbConn.ExecContext(ctx, insertSQL,
		int64(1000), "vc-a", "vm-1", "uuid-1", "demo-vm", int64(900), int64(0), "Tin",
		"dc-1", "cluster-1", "/Prod", 123.4, int64(4), int64(16), "FALSE", "TRUE", "FALSE",
	); err != nil {
		t.Fatalf("failed to insert cache row: %v", err)
	}
	// Sparse row: only identity, snapshot time, and resource pool are set.
	row := InventorySnapshotRow{
		Vcenter:        "vc-a",
		VmId:           sql.NullString{String: "vm-1", Valid: true},
		Name:           "demo-vm",
		SnapshotTime:   2000,
		ResourcePool:   sql.NullString{String: "Tin", Valid: true},
		SrmPlaceholder: "",
	}
	if !needsSnapshotBackfill(row) {
		t.Fatal("expected sparse row to require backfill")
	}
	changed := backfillSnapshotRowFromHourlyCache(ctx, dbConn, &row)
	if !changed {
		t.Fatal("expected cache backfill to update the row")
	}
	// Every previously-missing field should now mirror the seeded cache row.
	if !row.CreationTime.Valid || row.CreationTime.Int64 != 900 {
		t.Fatalf("unexpected CreationTime after backfill: %#v", row.CreationTime)
	}
	if !row.Cluster.Valid || row.Cluster.String != "cluster-1" {
		t.Fatalf("unexpected Cluster after backfill: %#v", row.Cluster)
	}
	if !row.Datacenter.Valid || row.Datacenter.String != "dc-1" {
		t.Fatalf("unexpected Datacenter after backfill: %#v", row.Datacenter)
	}
	if !row.ProvisionedDisk.Valid || row.ProvisionedDisk.Float64 != 123.4 {
		t.Fatalf("unexpected ProvisionedDisk after backfill: %#v", row.ProvisionedDisk)
	}
	if !row.VcpuCount.Valid || row.VcpuCount.Int64 != 4 {
		t.Fatalf("unexpected VcpuCount after backfill: %#v", row.VcpuCount)
	}
	if !row.RamGB.Valid || row.RamGB.Int64 != 16 {
		t.Fatalf("unexpected RamGB after backfill: %#v", row.RamGB)
	}
	if row.SrmPlaceholder != "FALSE" {
		t.Fatalf("unexpected SrmPlaceholder after backfill: %q", row.SrmPlaceholder)
	}
	if !row.VmUuid.Valid || row.VmUuid.String != "uuid-1" {
		t.Fatalf("unexpected VmUuid after backfill: %#v", row.VmUuid)
	}
}
// TestBackfillSnapshotRowFromHourlyCacheNoMatch verifies that a VM with no
// matching vm_hourly_stats row leaves the sparse snapshot row unchanged.
func TestBackfillSnapshotRowFromHourlyCacheNoMatch(t *testing.T) {
	ctx := context.Background()
	conn := newTasksTestDB(t)
	if err := db.EnsureVmHourlyStats(ctx, conn); err != nil {
		t.Fatalf("failed to ensure vm_hourly_stats: %v", err)
	}
	sparse := InventorySnapshotRow{
		Vcenter: "vc-a",
		VmId:    sql.NullString{String: "vm-missing", Valid: true},
	}
	if backfillSnapshotRowFromHourlyCache(ctx, conn, &sparse) {
		t.Fatal("expected no backfill change for missing VM")
	}
}
// TestNeedsSnapshotBackfillIgnoresDiskOnlyGap verifies that a row complete in
// every field except ProvisionedDisk is not classified as sparse.
func TestNeedsSnapshotBackfillIgnoresDiskOnlyGap(t *testing.T) {
	// ProvisionedDisk is intentionally left invalid to model the disk-only gap.
	completeExceptDisk := InventorySnapshotRow{
		CreationTime:   sql.NullInt64{Int64: 100, Valid: true},
		VcpuCount:      sql.NullInt64{Int64: 2, Valid: true},
		RamGB:          sql.NullInt64{Int64: 8, Valid: true},
		Cluster:        sql.NullString{String: "cluster-a", Valid: true},
		Datacenter:     sql.NullString{String: "dc-a", Valid: true},
		SrmPlaceholder: "FALSE",
		VmUuid:         sql.NullString{String: "uuid-1", Valid: true},
	}
	if got := needsSnapshotBackfill(completeExceptDisk); got {
		t.Fatal("expected disk-only gap to be non-critical for sparse-row detection")
	}
}
+7 -107
View File
@@ -3,11 +3,8 @@ package tasks
import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"log/slog"
"runtime"
"strings"
"time"
"vctp/db/queries"
@@ -18,103 +15,11 @@ import (
"github.com/vmware/govmomi/vim25/types"
)
// use gocron to check vcenters for VMs or updates we don't know about
// RunVcenterPoll is intentionally disabled.
// The legacy inventory polling flow has been retired in favor of hourly snapshots.
func (c *CronTask) RunVcenterPoll(ctx context.Context, logger *slog.Logger) error {
startedAt := time.Now()
defer func() {
logger.Info("Vcenter poll job finished", "duration", time.Since(startedAt))
}()
var matchFound bool
// reload settings in case vcenter list has changed
c.Settings.ReadYMLSettings()
for _, url := range c.Settings.Values.Settings.VcenterAddresses {
c.Logger.Debug("connecting to vcenter", "url", url)
vc := vcenter.New(c.Logger, c.VcCreds)
vc.Login(url)
// Get list of VMs from vcenter
vcVms, err := vc.GetAllVmReferences()
// Get list of VMs from inventory table
c.Logger.Debug("Querying inventory table")
results, err := c.Database.Queries().GetInventoryByVcenter(ctx, url)
if err != nil {
c.Logger.Error("Unable to query inventory table", "error", err)
return err
}
if len(results) == 0 {
c.Logger.Error("Empty inventory results")
return fmt.Errorf("Empty inventory results")
}
// Iterate VMs from vcenter and see if they were in the database
for _, vm := range vcVms {
matchFound = false
// Skip any vCLS VMs
if strings.HasPrefix(vm.Name(), "vCLS-") {
//c.Logger.Debug("Skipping internal VM", "vm_name", vm.Name())
continue
}
// TODO - should we compare the UUID as well?
for _, dbvm := range results {
if dbvm.VmId.String == vm.Reference().Value {
//c.Logger.Debug("Found match for VM", "vm_name", dbvm.Name, "id", dbvm.VmId.String)
matchFound = true
// Get the full VM object
vmObj, err := vc.ConvertObjToMoVM(vm)
if err != nil {
c.Logger.Error("Failed to find VM in vcenter", "vm_id", dbvm.VmId.String, "error", err)
continue
}
if vmObj.Config == nil {
c.Logger.Error("VM has no config properties", "vm_id", dbvm.VmId.String, "vm_name", vmObj.Name)
continue
}
// Check that this is definitely the right VM
if dbvm.VmUuid.String == vmObj.Config.Uuid {
// TODO - compare database against current values, create update record if not matching
err = c.UpdateVmInventory(vmObj, vc, ctx, dbvm)
} else {
c.Logger.Error("VM uuid doesn't match database record", "vm_name", dbvm.Name, "id", dbvm.VmId.String, "vc_uuid", vmObj.Config.Uuid, "db_uuid", dbvm.VmUuid.String)
}
break
}
}
if !matchFound {
c.Logger.Debug("Need to add VM to inventory table", "MoRef", vm.Reference())
vmObj, err := vc.ConvertObjToMoVM(vm)
if err != nil {
c.Logger.Error("Received error getting vm maangedobject", "error", err)
continue
}
// retrieve VM properties and insert into inventory
err = c.AddVmToInventory(vmObj, vc, ctx)
if err != nil {
c.Logger.Error("Received error with VM add", "error", err)
continue
}
// add sleep to slow down mass VM additions
utils.SleepWithContext(ctx, (10 * time.Millisecond))
}
}
c.Logger.Debug("Finished checking vcenter", "url", url)
vc.Logout()
}
c.Logger.Debug("Finished polling vcenters")
_ = ctx
logger.Info("legacy vcenter polling task is disabled")
return nil
}
@@ -130,8 +35,6 @@ func (c *CronTask) UpdateVmInventory(vmObj *mo.VirtualMachine, vc *vcenter.Vcent
existingUpdateFound bool
)
// TODO - how to prevent creating a new record every polling cycle?
params := queries.CreateUpdateParams{
InventoryId: sql.NullInt64{Int64: dbVm.Iid, Valid: dbVm.Iid > 0},
}
@@ -181,12 +84,8 @@ func (c *CronTask) UpdateVmInventory(vmObj *mo.VirtualMachine, vc *vcenter.Vcent
}
}
// TODO - should we bother to check if disk space has changed?
if updateType != "unknown" {
// TODO query updates table to see if there is already an update of this type and the new value
// Check if we already have an existing update record for this same change
checkParams := queries.GetVmUpdatesParams{
InventoryId: sql.NullInt64{Int64: dbVm.Iid, Valid: dbVm.Iid > 0},
UpdateType: updateType,
@@ -241,7 +140,6 @@ func (c *CronTask) UpdateVmInventory(vmObj *mo.VirtualMachine, vc *vcenter.Vcent
// add sleep to slow down mass VM additions
utils.SleepWithContext(ctx, (10 * time.Millisecond))
}
}
return nil
@@ -409,6 +307,7 @@ func (c *CronTask) AddVmToInventory(vmObject *mo.VirtualMachine, vc *vcenter.Vce
return nil
}
/*
// prettyPrint comes from https://gist.github.com/sfate/9d45f6c5405dc4c9bf63bf95fe6d1a7c
func prettyPrint(args ...interface{}) {
var caller string
@@ -436,3 +335,4 @@ func prettyPrint(args ...interface{}) {
fmt.Printf("%s%s\n", prefix, string(s))
}
}
*/
+775 -24
View File
@@ -2,18 +2,29 @@ package tasks
import (
"context"
"database/sql"
"fmt"
"log/slog"
"os"
"runtime"
"slices"
"strings"
"sync"
"time"
"vctp/db"
"vctp/internal/metrics"
"vctp/internal/report"
"vctp/internal/settings"
)
// RunVcenterMonthlyAggregate summarizes the previous month's daily snapshots.
func (c *CronTask) RunVcenterMonthlyAggregate(ctx context.Context, logger *slog.Logger) (err error) {
jobTimeout := durationFromSeconds(c.Settings.Values.Settings.MonthlyJobTimeoutSeconds, 20*time.Minute)
return c.runAggregateJob(ctx, "monthly_aggregate", jobTimeout, func(jobCtx context.Context) error {
if err := c.Settings.ReadYMLSettings(); err != nil {
return err
}
jobCtx = settings.MarkReloadedInContext(jobCtx, c.Settings)
startedAt := time.Now()
defer func() {
logger.Info("Monthly summary job finished", "duration", time.Since(startedAt))
@@ -21,33 +32,75 @@ func (c *CronTask) RunVcenterMonthlyAggregate(ctx context.Context, logger *slog.
now := time.Now()
firstOfThisMonth := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location())
targetMonth := firstOfThisMonth.AddDate(0, -1, 0)
return c.aggregateMonthlySummary(jobCtx, targetMonth, false)
return c.aggregateMonthlySummaryWithMode(jobCtx, targetMonth, false, true)
})
}
func (c *CronTask) AggregateMonthlySummary(ctx context.Context, month time.Time, force bool) error {
return c.aggregateMonthlySummary(ctx, month, force)
return c.aggregateMonthlySummaryWithMode(ctx, month, force, false)
}
func (c *CronTask) aggregateMonthlySummary(ctx context.Context, targetMonth time.Time, force bool) error {
func (c *CronTask) aggregateMonthlySummaryWithMode(ctx context.Context, targetMonth time.Time, force bool, scheduled bool) error {
jobStart := time.Now()
if err := report.EnsureSnapshotRegistry(ctx, c.Database); err != nil {
return err
}
granularity := strings.ToLower(strings.TrimSpace(c.Settings.Values.Settings.MonthlyAggregationGranularity))
if granularity == "" {
granularity = "daily"
}
if scheduled {
granularity = "daily"
}
if granularity != "hourly" && granularity != "daily" {
c.Logger.Warn("unknown monthly aggregation granularity; defaulting to daily", "granularity", granularity)
granularity = "daily"
}
monthStart := time.Date(targetMonth.Year(), targetMonth.Month(), 1, 0, 0, 0, 0, targetMonth.Location())
monthEnd := monthStart.AddDate(0, 1, 0)
dailySnapshots, err := report.SnapshotRecordsWithFallback(ctx, c.Database, "daily", "inventory_daily_summary_", "20060102", monthStart, monthEnd)
if err != nil {
return err
}
dailySnapshots = filterRecordsInRange(dailySnapshots, monthStart, monthEnd)
dbConn := c.Database.DB()
db.SetPostgresWorkMem(ctx, dbConn, c.Settings.Values.Settings.PostgresWorkMemMB)
dailySnapshots = filterSnapshotsWithRows(ctx, dbConn, dailySnapshots)
if len(dailySnapshots) == 0 {
return fmt.Errorf("no hourly snapshot tables found for %s", targetMonth.Format("2006-01"))
driver := strings.ToLower(dbConn.DriverName())
// Canonical Go aggregation is the default for both scheduled and manual runs.
// Legacy SQL/union aggregation stays available as a manual fallback/backfill path.
forceGoAgg := os.Getenv("MONTHLY_AGG_GO") == "1"
forceSQLAgg := !scheduled && os.Getenv("MONTHLY_AGG_SQL") == "1"
useGoAgg := scheduled || forceGoAgg || !forceSQLAgg
if forceSQLAgg && !forceGoAgg {
c.Logger.Info("MONTHLY_AGG_SQL=1 enabled; using SQL fallback path for manual monthly aggregation")
}
if !useGoAgg && granularity == "hourly" && driver == "sqlite" {
c.Logger.Warn("SQL monthly aggregation is slow on sqlite; overriding to Go path", "granularity", granularity)
useGoAgg = true
}
var snapshots []report.SnapshotRecord
var unionColumns []string
if !scheduled {
if granularity == "daily" {
dailySnapshots, err := report.SnapshotRecordsWithFallback(ctx, c.Database, "daily", "inventory_daily_summary_", "20060102", monthStart, monthEnd)
if err != nil {
return err
}
dailySnapshots = filterRecordsInRange(dailySnapshots, monthStart, monthEnd)
dailySnapshots = filterSnapshotsWithRows(ctx, dbConn, dailySnapshots)
snapshots = dailySnapshots
unionColumns = monthlyUnionColumns
} else {
hourlySnapshots, err := report.SnapshotRecordsWithFallback(ctx, c.Database, "hourly", "inventory_hourly_", "epoch", monthStart, monthEnd)
if err != nil {
return err
}
hourlySnapshots = filterRecordsInRange(hourlySnapshots, monthStart, monthEnd)
hourlySnapshots = filterSnapshotsWithRows(ctx, dbConn, hourlySnapshots)
snapshots = hourlySnapshots
unionColumns = summaryUnionColumns
}
}
if !scheduled && len(snapshots) == 0 {
return fmt.Errorf("no %s snapshot tables found for %s", granularity, targetMonth.Format("2006-01"))
}
monthlyTable, err := monthlySummaryTableName(targetMonth)
@@ -69,11 +122,57 @@ func (c *CronTask) aggregateMonthlySummary(ctx context.Context, targetMonth time
}
}
dailyTables := make([]string, 0, len(dailySnapshots))
for _, snapshot := range dailySnapshots {
dailyTables = append(dailyTables, snapshot.TableName)
if scheduled && c.scheduledAggregationEngine() == "sql" {
c.Logger.Info("scheduled_aggregation_engine=sql enabled; using canonical SQL monthly aggregation path")
if err := c.aggregateMonthlySummarySQLCanonical(ctx, monthStart, monthEnd, monthlyTable); err != nil {
c.Logger.Warn("scheduled canonical SQL monthly aggregation failed; falling back to go path", "error", err)
} else {
metrics.RecordMonthlyAggregation(time.Since(jobStart), nil)
c.Logger.Debug("Finished monthly inventory aggregation (SQL canonical path)", "summary_table", monthlyTable)
return nil
}
}
unionQuery, err := buildUnionQuery(dailyTables, summaryUnionColumns, templateExclusionFilter())
// Optional Go-based aggregation path.
if useGoAgg {
switch granularity {
case "daily":
c.Logger.Debug("Using go implementation of monthly aggregation (daily)")
if err := c.aggregateMonthlySummaryGo(ctx, monthStart, monthEnd, monthlyTable, snapshots, scheduled); err != nil {
if scheduled {
return err
}
c.Logger.Warn("go-based monthly aggregation failed, falling back to SQL path", "error", err)
} else {
metrics.RecordMonthlyAggregation(time.Since(jobStart), nil)
c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", monthlyTable)
return nil
}
case "hourly":
if scheduled {
return fmt.Errorf("scheduled monthly aggregation does not support hourly source mode")
}
c.Logger.Debug("Using go implementation of monthly aggregation (hourly)")
if err := c.aggregateMonthlySummaryGoHourly(ctx, monthStart, monthEnd, monthlyTable, snapshots); err != nil {
c.Logger.Warn("go-based monthly aggregation failed, falling back to SQL path", "error", err)
} else {
metrics.RecordMonthlyAggregation(time.Since(jobStart), nil)
c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", monthlyTable)
return nil
}
default:
c.Logger.Warn("MONTHLY_AGG_GO is set but granularity is unsupported; using SQL path", "granularity", granularity)
}
}
if scheduled {
return fmt.Errorf("scheduled monthly aggregation requires go daily-rollup path")
}
tables := make([]string, 0, len(snapshots))
for _, snapshot := range snapshots {
tables = append(tables, snapshot.TableName)
}
unionQuery, err := buildUnionQuery(tables, unionColumns, templateExclusionFilter())
if err != nil {
return err
}
@@ -91,7 +190,12 @@ func (c *CronTask) aggregateMonthlySummary(ctx context.Context, targetMonth time
)
}
insertQuery, err := db.BuildMonthlySummaryInsert(monthlyTable, unionQuery)
var insertQuery string
if granularity == "daily" {
insertQuery, err = db.BuildMonthlySummaryInsert(monthlyTable, unionQuery)
} else {
insertQuery, err = db.BuildDailySummaryInsert(monthlyTable, unionQuery)
}
if err != nil {
return err
}
@@ -100,12 +204,13 @@ func (c *CronTask) aggregateMonthlySummary(ctx context.Context, targetMonth time
c.Logger.Error("failed to aggregate monthly inventory", "error", err, "month", targetMonth.Format("2006-01"))
return err
}
// Backfill missing creation times to the start of the month for rows lacking creation info.
if _, err := dbConn.ExecContext(ctx,
`UPDATE `+monthlyTable+` SET "CreationTime" = $1 WHERE "CreationTime" IS NULL OR "CreationTime" = 0`,
monthStart.Unix(),
); err != nil {
c.Logger.Warn("failed to normalize creation times for monthly summary", "error", err, "table", monthlyTable)
if applied, err := db.ApplyLifecycleDeletionToSummary(ctx, dbConn, monthlyTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
c.Logger.Warn("failed to apply lifecycle deletions to monthly summary", "error", err, "table", monthlyTable)
} else {
c.Logger.Info("Monthly aggregation deletion times", "source_lifecycle_cache", applied)
}
if err := db.UpdateSummaryPresenceByWindow(ctx, dbConn, monthlyTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
c.Logger.Warn("failed to update monthly AvgIsPresent from lifecycle window", "error", err, "table", monthlyTable)
}
rowCount, err := db.TableRowCount(ctx, dbConn, monthlyTable)
if err != nil {
@@ -117,7 +222,7 @@ func (c *CronTask) aggregateMonthlySummary(ctx context.Context, targetMonth time
db.AnalyzeTableIfPostgres(ctx, dbConn, monthlyTable)
if err := c.generateReport(ctx, monthlyTable); err != nil {
if err := c.generateReportWithPolicy(ctx, monthlyTable); err != nil {
c.Logger.Warn("failed to generate monthly report", "error", err, "table", monthlyTable)
metrics.RecordMonthlyAggregation(time.Since(jobStart), err)
return err
@@ -131,3 +236,649 @@ func (c *CronTask) aggregateMonthlySummary(ctx context.Context, targetMonth time
// monthlySummaryTableName derives the validated monthly summary table name
// (inventory_monthly_summary_YYYYMM) for the month containing t.
// db.SafeTableName performs the validation and returns an error for unsafe names.
func monthlySummaryTableName(t time.Time) (string, error) {
	name := "inventory_monthly_summary_" + t.Format("200601")
	return db.SafeTableName(name)
}
// aggregateMonthlySummarySQLCanonical rebuilds the monthly summary table entirely in
// SQL from the vm_daily_rollup cache for the half-open window [monthStart, monthEnd).
// Steps: insert averaged rows via BuildMonthlySummaryInsert over the canonical rollup
// union, apply best-effort lifecycle refinements (deletions, creation/deletion bounds,
// AvgIsPresent), register the snapshot, and generate the report. Returns an error when
// the rollup cache is missing, the insert fails, zero rows were produced, or report
// generation fails; the intermediate refinement steps only log on failure.
func (c *CronTask) aggregateMonthlySummarySQLCanonical(ctx context.Context, monthStart, monthEnd time.Time, summaryTable string) error {
	jobStart := time.Now()
	dbConn := c.Database.DB()
	// This path is only valid when the rollup cache table exists.
	if !db.TableExists(ctx, dbConn, "vm_daily_rollup") {
		return fmt.Errorf("vm_daily_rollup table not found for canonical SQL monthly aggregation")
	}
	unionQuery := buildCanonicalDailyRollupSummaryUnion(monthStart, monthEnd)
	insertQuery, err := db.BuildMonthlySummaryInsert(summaryTable, unionQuery)
	if err != nil {
		return err
	}
	if _, err := dbConn.ExecContext(ctx, insertQuery); err != nil {
		return err
	}
	// Best-effort refinement 1: deletion times from the lifecycle cache.
	if applied, err := db.ApplyLifecycleDeletionToSummary(ctx, dbConn, summaryTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
		c.Logger.Warn("failed to apply lifecycle deletions to monthly summary (SQL canonical)", "error", err, "table", summaryTable)
	} else {
		c.Logger.Info("Monthly aggregation deletion times", "source_lifecycle_cache", applied)
	}
	// Best-effort refinement 2: creation/deletion bounds from the rollup lifecycle union.
	if err := db.RefineCreationDeletionFromUnion(ctx, dbConn, summaryTable, buildDailyRollupLifecycleUnion(monthStart, monthEnd)); err != nil {
		c.Logger.Warn("failed to refine creation/deletion times (monthly SQL canonical)", "error", err, "table", summaryTable)
	}
	// Best-effort refinement 3: recompute AvgIsPresent from the lifecycle window.
	if err := db.UpdateSummaryPresenceByWindow(ctx, dbConn, summaryTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
		c.Logger.Warn("failed to update monthly AvgIsPresent from lifecycle window (SQL canonical)", "error", err, "table", summaryTable)
	}
	db.AnalyzeTableIfPostgres(ctx, dbConn, summaryTable)
	rowCount, err := db.TableRowCount(ctx, dbConn, summaryTable)
	if err != nil {
		c.Logger.Warn("unable to count monthly summary rows (SQL canonical)", "error", err, "table", summaryTable)
	}
	// Zero rows means the cache had no usable data for the month; fail so the
	// caller can fall back to another aggregation path.
	if rowCount == 0 {
		return fmt.Errorf("no VM records aggregated for %s", monthStart.Format("2006-01"))
	}
	if err := report.RegisterSnapshot(ctx, c.Database, "monthly", summaryTable, monthStart, rowCount); err != nil {
		c.Logger.Warn("failed to register monthly snapshot (SQL canonical)", "error", err, "table", summaryTable)
	}
	// Report generation failure is the only post-insert step that fails the job.
	if err := c.generateReportWithPolicy(ctx, summaryTable); err != nil {
		c.Logger.Warn("failed to generate monthly report (SQL canonical)", "error", err, "table", summaryTable)
		return err
	}
	c.Logger.Debug("Finished monthly inventory aggregation (SQL canonical path)", "summary_table", summaryTable, "duration", time.Since(jobStart))
	return nil
}
// aggregateMonthlySummaryGoHourly aggregates hourly snapshots directly into the monthly
// summary table for [monthStart, monthEnd). It prefers the vm_hourly_stats cache when
// available, falling back to parallel scans of the hourly snapshot tables. Deletion
// times are layered in three passes (lifecycle cache, inventory, inference from missed
// snapshots) before rows are written via insertDailyAggregates. The table is cleared
// up front, so a failure mid-way leaves it partially populated.
func (c *CronTask) aggregateMonthlySummaryGoHourly(ctx context.Context, monthStart, monthEnd time.Time, summaryTable string, hourlySnapshots []report.SnapshotRecord) error {
	jobStart := time.Now()
	dbConn := c.Database.DB()
	if err := clearTable(ctx, dbConn, summaryTable); err != nil {
		return err
	}
	if len(hourlySnapshots) == 0 {
		return fmt.Errorf("no hourly snapshot tables found for %s", monthStart.Format("2006-01"))
	}
	// Default denominator: one sample per hourly snapshot table; overridden below
	// when the hourly cache supplies its own snapshot times.
	totalSamples := len(hourlySnapshots)
	var (
		aggMap    map[dailyAggKey]*dailyAggVal
		snapTimes []int64
	)
	// Preferred source: the vm_hourly_stats cache (avoids scanning every table).
	if db.TableExists(ctx, dbConn, "vm_hourly_stats") {
		cacheAgg, cacheTimes, cacheErr := c.scanHourlyCache(ctx, monthStart, monthEnd)
		if cacheErr != nil {
			c.Logger.Warn("failed to use hourly cache, falling back to table scans", "error", cacheErr)
		} else if len(cacheAgg) > 0 {
			c.Logger.Debug("using hourly cache for monthly aggregation", "month", monthStart.Format("2006-01"), "snapshots", len(cacheTimes), "vm_count", len(cacheAgg))
			aggMap = cacheAgg
			snapTimes = cacheTimes
			totalSamples = len(cacheTimes)
		}
	}
	// Fallback source: scan each hourly snapshot table in parallel.
	if aggMap == nil {
		var errScan error
		aggMap, errScan = c.scanHourlyTablesParallel(ctx, hourlySnapshots)
		if errScan != nil {
			return errScan
		}
		c.Logger.Debug("scanned hourly tables for monthly aggregation", "month", monthStart.Format("2006-01"), "tables", len(hourlySnapshots), "vm_count", len(aggMap))
		if len(aggMap) == 0 {
			return fmt.Errorf("no VM records aggregated for %s", monthStart.Format("2006-01"))
		}
		// Snapshot times come from the registry records; sorted for the
		// chronological walk in the inference pass below.
		snapTimes = make([]int64, 0, len(hourlySnapshots))
		for _, snap := range hourlySnapshots {
			snapTimes = append(snapTimes, snap.SnapshotTime.Unix())
		}
		slices.Sort(snapTimes)
	}
	// Deletion pass 1: authoritative lifecycle cache.
	lifecycleDeletions := c.applyLifecycleDeletions(ctx, aggMap, monthStart, monthEnd)
	c.Logger.Info("Monthly aggregation deletion times", "source_lifecycle_cache", lifecycleDeletions)
	// Deletion pass 2: inventory records.
	inventoryDeletions := c.applyInventoryDeletions(ctx, aggMap, monthStart, monthEnd)
	c.Logger.Info("Monthly aggregation deletion times", "source_inventory", inventoryDeletions)
	// Deletion pass 3: infer a deletion when a VM misses two consecutive snapshots
	// after its last sighting; the deletion time is the first missed snapshot.
	if len(snapTimes) > 0 {
		maxSnap := snapTimes[len(snapTimes)-1]
		inferredDeletions := 0
		for _, v := range aggMap {
			if v.deletion != 0 {
				continue
			}
			consecutiveMisses := 0
			firstMiss := int64(0)
			for _, t := range snapTimes {
				// Only snapshots after the VM was last seen count as misses.
				if t <= v.lastSeen {
					continue
				}
				// A sighting resets the miss streak.
				if _, ok := v.seen[t]; ok {
					consecutiveMisses = 0
					firstMiss = 0
					continue
				}
				consecutiveMisses++
				if firstMiss == 0 {
					firstMiss = t
				}
				if consecutiveMisses >= 2 {
					v.deletion = firstMiss
					inferredDeletions++
					break
				}
			}
			// Only one miss so far (streak still open at month end): log it as a
			// pending inference rather than marking the VM deleted.
			if v.deletion == 0 && v.lastSeen < maxSnap && firstMiss > 0 {
				c.Logger.Debug("pending deletion inference (insufficient consecutive misses)", "vm_id", v.key.VmId, "vm_uuid", v.key.VmUuid, "name", v.key.Name, "last_seen", v.lastSeen, "first_missing_snapshot", firstMiss)
			}
		}
		c.Logger.Info("Monthly aggregation deletion times", "source_inferred", inferredDeletions)
	}
	totalSamplesByVcenter := sampleCountsByVcenter(aggMap)
	if err := c.insertDailyAggregates(ctx, summaryTable, aggMap, totalSamples, totalSamplesByVcenter); err != nil {
		return err
	}
	// Best-effort: recompute AvgIsPresent from the lifecycle window.
	if err := db.UpdateSummaryPresenceByWindow(ctx, dbConn, summaryTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
		c.Logger.Warn("failed to update monthly AvgIsPresent from lifecycle window (Go hourly)", "error", err, "table", summaryTable)
	}
	db.AnalyzeTableIfPostgres(ctx, dbConn, summaryTable)
	rowCount, err := db.TableRowCount(ctx, dbConn, summaryTable)
	if err != nil {
		c.Logger.Warn("unable to count monthly summary rows (Go hourly)", "error", err, "table", summaryTable)
	}
	if err := report.RegisterSnapshot(ctx, c.Database, "monthly", summaryTable, monthStart, rowCount); err != nil {
		c.Logger.Warn("failed to register monthly snapshot (Go hourly)", "error", err, "table", summaryTable)
	}
	// Report generation failure is the only post-insert step that fails the job.
	if err := c.generateReportWithPolicy(ctx, summaryTable); err != nil {
		c.Logger.Warn("failed to generate monthly report (Go hourly)", "error", err, "table", summaryTable)
		return err
	}
	c.Logger.Debug("Finished monthly inventory aggregation (Go hourly)",
		"summary_table", summaryTable,
		"duration", time.Since(jobStart),
		"tables_scanned", len(hourlySnapshots),
		"rows_written", rowCount,
		"total_samples", totalSamples,
	)
	return nil
}
// aggregateMonthlySummaryGo mirrors the SQL-based monthly aggregation but performs the work in Go,
// reading daily summaries in parallel and reducing them to a single monthly summary table.
// When canonicalOnly is true, only the vm_daily_rollup cache is consulted; otherwise the
// daily snapshot tables are scanned, with the rollup cache as a fallback when the scans
// yield nothing. The summary table is cleared first, so a mid-way failure leaves it
// partially populated. Lifecycle refinements after the insert are best-effort.
func (c *CronTask) aggregateMonthlySummaryGo(ctx context.Context, monthStart, monthEnd time.Time, summaryTable string, dailySnapshots []report.SnapshotRecord, canonicalOnly bool) error {
	jobStart := time.Now()
	dbConn := c.Database.DB()
	if err := clearTable(ctx, dbConn, summaryTable); err != nil {
		return err
	}
	// unionQuery feeds RefineCreationDeletionFromUnion after the insert; its source
	// depends on which aggregation input was used.
	unionQuery := ""
	var (
		aggMap map[monthlyAggKey]*monthlyAggVal
		err    error
	)
	if canonicalOnly {
		// Canonical mode: rollup cache is the only permitted source.
		aggMap, err = c.scanDailyRollup(ctx, monthStart, monthEnd)
		if err != nil {
			return err
		}
		unionQuery = buildDailyRollupLifecycleUnion(monthStart, monthEnd)
	} else {
		// Build union query for lifecycle refinement after inserts.
		dailyTables := make([]string, 0, len(dailySnapshots))
		for _, snapshot := range dailySnapshots {
			dailyTables = append(dailyTables, snapshot.TableName)
		}
		unionQuery, err = buildUnionQuery(dailyTables, monthlyUnionColumns, templateExclusionFilter())
		if err != nil {
			return err
		}
		aggMap, err = c.scanDailyTablesParallel(ctx, dailySnapshots)
		if err != nil {
			return err
		}
		// Fallback: if the table scans produced nothing, try the rollup cache
		// before giving up; a cache read error is non-fatal here.
		if len(aggMap) == 0 {
			cacheAgg, cacheErr := c.scanDailyRollup(ctx, monthStart, monthEnd)
			if cacheErr == nil && len(cacheAgg) > 0 {
				aggMap = cacheAgg
			} else if cacheErr != nil {
				c.Logger.Warn("failed to read daily rollup cache; using table scan", "error", cacheErr)
			}
		}
	}
	if len(aggMap) == 0 {
		return fmt.Errorf("no VM records aggregated for %s", monthStart.Format("2006-01"))
	}
	if err := c.insertMonthlyAggregates(ctx, summaryTable, aggMap); err != nil {
		return err
	}
	// Best-effort refinements: lifecycle deletions, creation/deletion bounds from
	// the union, then AvgIsPresent — each only logs on failure.
	if applied, err := db.ApplyLifecycleDeletionToSummary(ctx, dbConn, summaryTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
		c.Logger.Warn("failed to apply lifecycle deletions to monthly summary (Go)", "error", err, "table", summaryTable)
	} else {
		c.Logger.Info("Monthly aggregation deletion times", "source_lifecycle_cache", applied)
	}
	if err := db.RefineCreationDeletionFromUnion(ctx, dbConn, summaryTable, unionQuery); err != nil {
		c.Logger.Warn("failed to refine creation/deletion times (monthly Go)", "error", err, "table", summaryTable)
	}
	if err := db.UpdateSummaryPresenceByWindow(ctx, dbConn, summaryTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
		c.Logger.Warn("failed to update monthly AvgIsPresent from lifecycle window (Go)", "error", err, "table", summaryTable)
	}
	db.AnalyzeTableIfPostgres(ctx, dbConn, summaryTable)
	rowCount, err := db.TableRowCount(ctx, dbConn, summaryTable)
	if err != nil {
		c.Logger.Warn("unable to count monthly summary rows", "error", err, "table", summaryTable)
	}
	if err := report.RegisterSnapshot(ctx, c.Database, "monthly", summaryTable, monthStart, rowCount); err != nil {
		c.Logger.Warn("failed to register monthly snapshot", "error", err, "table", summaryTable)
	}
	// Report generation failure is the only post-insert step that fails the job.
	if err := c.generateReportWithPolicy(ctx, summaryTable); err != nil {
		c.Logger.Warn("failed to generate monthly report (Go)", "error", err, "table", summaryTable)
		return err
	}
	c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", summaryTable, "duration", time.Since(jobStart))
	return nil
}
// scanDailyTablesParallel reads every daily summary snapshot concurrently and merges
// the per-table results into one map keyed by VM identity. Worker count is bounded by
// CPU count (minimum 2) and by the number of snapshots. A table that fails to scan is
// logged and skipped; merging is serialized behind a mutex. The error result is
// currently always nil.
func (c *CronTask) scanDailyTablesParallel(ctx context.Context, snapshots []report.SnapshotRecord) (map[monthlyAggKey]*monthlyAggVal, error) {
	merged := make(map[monthlyAggKey]*monthlyAggVal, 1024)
	var mergedMu sync.Mutex
	workerCount := min(max(runtime.NumCPU(), 2), len(snapshots))
	work := make(chan report.SnapshotRecord, len(snapshots))
	var wg sync.WaitGroup
	for w := 0; w < workerCount; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for snap := range work {
				tableAgg, scanErr := c.scanDailyTable(ctx, snap)
				if scanErr != nil {
					c.Logger.Warn("failed to scan daily summary", "table", snap.TableName, "error", scanErr)
					continue
				}
				mergedMu.Lock()
				for key, val := range tableAgg {
					if dst, ok := merged[key]; ok {
						mergeMonthlyAgg(dst, val)
					} else {
						merged[key] = val
					}
				}
				mergedMu.Unlock()
			}
		}()
	}
	// The channel is buffered to hold every job, so this send loop cannot block.
	for _, snap := range snapshots {
		work <- snap
	}
	close(work)
	wg.Wait()
	return merged, nil
}
// mergeMonthlyAgg folds src into dst for the same VM identity. Lifecycle timestamps
// keep the earliest non-zero value; point-in-time attributes are taken from whichever
// side saw the VM in the later snapshot; sample-weighted accumulators are additive.
func mergeMonthlyAgg(dst, src *monthlyAggVal) {
	// Earliest known (non-zero) creation time wins. If no daily summary carries a
	// creation time it stays zero for reports (VM trace handles approximation
	// separately).
	if src.creation > 0 && (dst.creation == 0 || src.creation < dst.creation) {
		dst.creation = src.creation
	}
	// Same rule for deletion: earliest non-zero timestamp.
	if src.deletion > 0 && (dst.deletion == 0 || src.deletion < dst.deletion) {
		dst.deletion = src.deletion
	}
	// Latest snapshot wins for current-state attributes.
	if dst.lastSnapshot.Before(src.lastSnapshot) {
		dst.lastSnapshot = src.lastSnapshot
		// InventoryId is only taken when the newer side actually has one.
		if src.inventoryId != 0 {
			dst.inventoryId = src.inventoryId
		}
		dst.resourcePool, dst.datacenter = src.resourcePool, src.datacenter
		dst.cluster, dst.folder = src.cluster, src.folder
		dst.isTemplate, dst.poweredOn = src.isTemplate, src.poweredOn
		dst.srmPlaceholder = src.srmPlaceholder
		dst.provisioned = src.provisioned
		dst.vcpuCount, dst.ramGB = src.vcpuCount, src.ramGB
		dst.eventKey, dst.cloudId = src.eventKey, src.cloudId
	}
	// Accumulators sum across days; averages are derived later at insert time.
	dst.samplesPresent += src.samplesPresent
	dst.totalSamples += src.totalSamples
	dst.sumVcpu += src.sumVcpu
	dst.sumRam += src.sumRam
	dst.sumDisk += src.sumDisk
	dst.tinWeighted += src.tinWeighted
	dst.bronzeWeighted += src.bronzeWeighted
	dst.silverWeighted += src.silverWeighted
	dst.goldWeighted += src.goldWeighted
}
// scanDailyTable reads a single daily summary snapshot table and returns one
// monthlyAggVal per VM identity, converting the table's per-day averages back into
// sample-weighted sums so that days can be merged additively by mergeMonthlyAgg.
// Template VMs ("IsTemplate" of "true"/"1") are skipped; rows that fail to scan are
// logged and skipped rather than failing the whole table.
func (c *CronTask) scanDailyTable(ctx context.Context, snap report.SnapshotRecord) (map[monthlyAggKey]*monthlyAggVal, error) {
	dbConn := c.Database.DB()
	// Table name comes from the snapshot registry; interpolated directly since
	// identifiers cannot be bound as parameters.
	query := fmt.Sprintf(`
SELECT
"InventoryId",
"Name","Vcenter","VmId","VmUuid","EventKey","CloudId","ResourcePool","Datacenter","Cluster","Folder",
COALESCE("ProvisionedDisk",0) AS disk,
COALESCE("VcpuCount",0) AS vcpu,
COALESCE("RamGB",0) AS ram,
COALESCE("CreationTime",0) AS creation,
COALESCE("DeletionTime",0) AS deletion,
COALESCE("SamplesPresent",0) AS samples_present,
"AvgVcpuCount","AvgRamGB","AvgProvisionedDisk","AvgIsPresent",
"PoolTinPct","PoolBronzePct","PoolSilverPct","PoolGoldPct",
"Tin","Bronze","Silver","Gold","IsTemplate","PoweredOn","SrmPlaceholder"
FROM %s
`, snap.TableName)
	rows, err := dbConn.QueryxContext(ctx, query)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	result := make(map[monthlyAggKey]*monthlyAggVal, 256)
	for rows.Next() {
		var (
			inventoryId sql.NullInt64
			name, vcenter, vmId, vmUuid string
			eventKey, cloudId sql.NullString
			resourcePool, datacenter, cluster, folder sql.NullString
			isTemplate, poweredOn, srmPlaceholder sql.NullString
			disk, avgVcpu, avgRam, avgDisk sql.NullFloat64
			avgIsPresent sql.NullFloat64
			poolTin, poolBronze, poolSilver, poolGold sql.NullFloat64
			tinPct, bronzePct, silverPct, goldPct sql.NullFloat64
			vcpu, ram sql.NullInt64
			creation, deletion sql.NullInt64
			samplesPresent sql.NullInt64
		)
		// Destination order must mirror the SELECT column order above.
		if err := rows.Scan(
			&inventoryId,
			&name, &vcenter, &vmId, &vmUuid, &eventKey, &cloudId, &resourcePool, &datacenter, &cluster, &folder,
			&disk, &vcpu, &ram, &creation, &deletion, &samplesPresent,
			&avgVcpu, &avgRam, &avgDisk, &avgIsPresent,
			&poolTin, &poolBronze, &poolSilver, &poolGold,
			&tinPct, &bronzePct, &silverPct, &goldPct,
			&isTemplate, &poweredOn, &srmPlaceholder,
		); err != nil {
			c.Logger.Warn("failed to scan daily summary row", "table", snap.TableName, "error", err)
			continue
		}
		// Exclude template VMs from monthly aggregation.
		templateVal := strings.TrimSpace(isTemplate.String)
		if strings.EqualFold(templateVal, "true") || templateVal == "1" {
			continue
		}
		key := monthlyAggKey{Vcenter: vcenter, VmId: vmId, VmUuid: vmUuid, Name: name}
		agg := &monthlyAggVal{
			key:            key,
			inventoryId:    inventoryId.Int64,
			eventKey:       eventKey.String,
			cloudId:        cloudId.String,
			resourcePool:   resourcePool.String,
			datacenter:     datacenter.String,
			cluster:        cluster.String,
			folder:         folder.String,
			isTemplate:     isTemplate.String,
			poweredOn:      poweredOn.String,
			srmPlaceholder: srmPlaceholder.String,
			provisioned:    disk.Float64,
			vcpuCount:      vcpu.Int64,
			ramGB:          ram.Int64,
			creation:       creation.Int64,
			deletion:       deletion.Int64,
			lastSnapshot:   snap.SnapshotTime,
			samplesPresent: samplesPresent.Int64,
		}
		// Reconstruct the day's total sample count from SamplesPresent and
		// AvgIsPresent (present/total); falls back to SamplesPresent when
		// AvgIsPresent is NULL or zero.
		totalSamplesDay := float64(samplesPresent.Int64)
		if avgIsPresent.Valid && avgIsPresent.Float64 > 0 {
			totalSamplesDay = float64(samplesPresent.Int64) / avgIsPresent.Float64
		}
		agg.totalSamples = totalSamplesDay
		// Convert averages/percentages back into sample-weighted sums so they can
		// be added across days and re-averaged at insert time.
		if avgVcpu.Valid {
			agg.sumVcpu = avgVcpu.Float64 * totalSamplesDay
		}
		if avgRam.Valid {
			agg.sumRam = avgRam.Float64 * totalSamplesDay
		}
		if avgDisk.Valid {
			agg.sumDisk = avgDisk.Float64 * totalSamplesDay
		}
		if poolTin.Valid {
			agg.tinWeighted = (poolTin.Float64 / 100.0) * totalSamplesDay
		}
		if poolBronze.Valid {
			agg.bronzeWeighted = (poolBronze.Float64 / 100.0) * totalSamplesDay
		}
		if poolSilver.Valid {
			agg.silverWeighted = (poolSilver.Float64 / 100.0) * totalSamplesDay
		}
		if poolGold.Valid {
			agg.goldWeighted = (poolGold.Float64 / 100.0) * totalSamplesDay
		}
		result[key] = agg
	}
	return result, rows.Err()
}
// scanDailyRollup aggregates monthly data from the vm_daily_rollup cache for the
// half-open window [start, end). It returns an empty map (not an error) when the
// cache table does not exist so callers can fall back to table scans. Template VMs
// ("IsTemplate" of "true"/"1") are excluded, and rows for the same VM identity
// across multiple days are merged via mergeMonthlyAgg.
//
// Fix: row scan failures were previously swallowed silently; they are now logged
// (and still skipped), matching scanDailyTable's behavior.
func (c *CronTask) scanDailyRollup(ctx context.Context, start, end time.Time) (map[monthlyAggKey]*monthlyAggVal, error) {
	dbConn := c.Database.DB()
	if !db.TableExists(ctx, dbConn, "vm_daily_rollup") {
		return map[monthlyAggKey]*monthlyAggVal{}, nil
	}
	query := `
SELECT
"Date","Vcenter","VmId","VmUuid","Name","CreationTime","DeletionTime",
"SamplesPresent","TotalSamples","SumVcpu","SumRam","SumDisk",
"TinHits","BronzeHits","SilverHits","GoldHits",
"LastResourcePool","LastDatacenter","LastCluster","LastFolder",
"LastProvisionedDisk","LastVcpuCount","LastRamGB","IsTemplate","PoweredOn","SrmPlaceholder"
FROM vm_daily_rollup
WHERE "Date" >= ? AND "Date" < ?
`
	bind := dbConn.Rebind(query)
	rows, err := dbConn.QueryxContext(ctx, bind, start.Unix(), end.Unix())
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	agg := make(map[monthlyAggKey]*monthlyAggVal, 512)
	for rows.Next() {
		var (
			date sql.NullInt64
			vcenter, vmId, vmUuid, name string
			creation, deletion sql.NullInt64
			samplesPresent, totalSamples sql.NullInt64
			sumVcpu, sumRam, sumDisk sql.NullFloat64
			tinHits, bronzeHits, silverHits, goldHits sql.NullInt64
			lastPool, lastDc, lastCluster, lastFolder sql.NullString
			lastDisk, lastVcpu, lastRam sql.NullFloat64
			isTemplate, poweredOn, srmPlaceholder sql.NullString
		)
		// Destination order must mirror the SELECT column order above.
		if err := rows.Scan(
			&date, &vcenter, &vmId, &vmUuid, &name, &creation, &deletion,
			&samplesPresent, &totalSamples, &sumVcpu, &sumRam, &sumDisk,
			&tinHits, &bronzeHits, &silverHits, &goldHits,
			&lastPool, &lastDc, &lastCluster, &lastFolder,
			&lastDisk, &lastVcpu, &lastRam, &isTemplate, &poweredOn, &srmPlaceholder,
		); err != nil {
			// Log and skip malformed rows rather than aborting the whole month.
			c.Logger.Warn("failed to scan daily rollup row", "error", err)
			continue
		}
		// Exclude template VMs from monthly aggregation.
		templateVal := strings.TrimSpace(isTemplate.String)
		if strings.EqualFold(templateVal, "true") || templateVal == "1" {
			continue
		}
		key := monthlyAggKey{Vcenter: vcenter, VmId: vmId, VmUuid: vmUuid, Name: name}
		val := &monthlyAggVal{
			key:            key,
			resourcePool:   lastPool.String,
			datacenter:     lastDc.String,
			cluster:        lastCluster.String,
			folder:         lastFolder.String,
			isTemplate:     isTemplate.String,
			poweredOn:      poweredOn.String,
			srmPlaceholder: srmPlaceholder.String,
			provisioned:    lastDisk.Float64,
			vcpuCount:      int64(lastVcpu.Float64),
			ramGB:          int64(lastRam.Float64),
			creation:       creation.Int64,
			deletion:       deletion.Int64,
			lastSnapshot:   time.Unix(date.Int64, 0),
			samplesPresent: samplesPresent.Int64,
			totalSamples:   float64(totalSamples.Int64),
			sumVcpu:        sumVcpu.Float64,
			sumRam:         sumRam.Float64,
			sumDisk:        sumDisk.Float64,
			tinWeighted:    float64(tinHits.Int64),
			bronzeWeighted: float64(bronzeHits.Int64),
			silverWeighted: float64(silverHits.Int64),
			goldWeighted:   float64(goldHits.Int64),
		}
		// Multiple rollup rows per VM (one per day) are merged additively.
		if existing, ok := agg[key]; ok {
			mergeMonthlyAgg(existing, val)
		} else {
			agg[key] = val
		}
	}
	return agg, rows.Err()
}
func buildDailyRollupLifecycleUnion(start, end time.Time) string {
return fmt.Sprintf(`
SELECT
"VmId","VmUuid","Name","Vcenter","CreationTime","DeletionTime","Date" AS "SnapshotTime"
FROM vm_daily_rollup
WHERE "Date" >= %d AND "Date" < %d
`, start.Unix(), end.Unix())
}
// buildCanonicalDailyRollupSummaryUnion returns a SELECT over vm_daily_rollup shaped
// like a daily-summary union row: averages are recomputed from the sample-weighted
// sums (Sum*/TotalSamples), pool percentages from hit counts over SamplesPresent,
// "Last*" columns are exposed under their summary names, and InventoryId/EventKey/
// CloudId are emitted as NULL (the rollup cache does not carry them). The window is
// half-open [start, end) in Unix seconds and templateExclusionFilter() is appended
// to the WHERE clause.
func buildCanonicalDailyRollupSummaryUnion(start, end time.Time) string {
	return fmt.Sprintf(`
SELECT
CAST(NULL AS BIGINT) AS "InventoryId",
COALESCE("Name",'') AS "Name",
COALESCE("Vcenter",'') AS "Vcenter",
COALESCE("VmId",'') AS "VmId",
CAST(NULL AS TEXT) AS "EventKey",
CAST(NULL AS TEXT) AS "CloudId",
COALESCE("CreationTime",0) AS "CreationTime",
COALESCE("DeletionTime",0) AS "DeletionTime",
COALESCE("LastResourcePool",'') AS "ResourcePool",
COALESCE("LastDatacenter",'') AS "Datacenter",
COALESCE("LastCluster",'') AS "Cluster",
COALESCE("LastFolder",'') AS "Folder",
COALESCE("LastProvisionedDisk",0) AS "ProvisionedDisk",
COALESCE("LastVcpuCount",0) AS "VcpuCount",
COALESCE("LastRamGB",0) AS "RamGB",
COALESCE("IsTemplate",'') AS "IsTemplate",
COALESCE("PoweredOn",'') AS "PoweredOn",
COALESCE("SrmPlaceholder",'') AS "SrmPlaceholder",
COALESCE("VmUuid",'') AS "VmUuid",
COALESCE("SamplesPresent",0) AS "SamplesPresent",
CASE WHEN COALESCE("TotalSamples",0) > 0 THEN 1.0 * COALESCE("SumVcpu",0) / "TotalSamples" ELSE NULL END AS "AvgVcpuCount",
CASE WHEN COALESCE("TotalSamples",0) > 0 THEN 1.0 * COALESCE("SumRam",0) / "TotalSamples" ELSE NULL END AS "AvgRamGB",
CASE WHEN COALESCE("TotalSamples",0) > 0 THEN 1.0 * COALESCE("SumDisk",0) / "TotalSamples" ELSE NULL END AS "AvgProvisionedDisk",
CASE WHEN COALESCE("TotalSamples",0) > 0 THEN 1.0 * COALESCE("SamplesPresent",0) / "TotalSamples" ELSE NULL END AS "AvgIsPresent",
CASE WHEN COALESCE("SamplesPresent",0) > 0 THEN 100.0 * COALESCE("TinHits",0) / "SamplesPresent" ELSE NULL END AS "PoolTinPct",
CASE WHEN COALESCE("SamplesPresent",0) > 0 THEN 100.0 * COALESCE("BronzeHits",0) / "SamplesPresent" ELSE NULL END AS "PoolBronzePct",
CASE WHEN COALESCE("SamplesPresent",0) > 0 THEN 100.0 * COALESCE("SilverHits",0) / "SamplesPresent" ELSE NULL END AS "PoolSilverPct",
CASE WHEN COALESCE("SamplesPresent",0) > 0 THEN 100.0 * COALESCE("GoldHits",0) / "SamplesPresent" ELSE NULL END AS "PoolGoldPct",
CASE WHEN COALESCE("SamplesPresent",0) > 0 THEN 100.0 * COALESCE("TinHits",0) / "SamplesPresent" ELSE NULL END AS "Tin",
CASE WHEN COALESCE("SamplesPresent",0) > 0 THEN 100.0 * COALESCE("BronzeHits",0) / "SamplesPresent" ELSE NULL END AS "Bronze",
CASE WHEN COALESCE("SamplesPresent",0) > 0 THEN 100.0 * COALESCE("SilverHits",0) / "SamplesPresent" ELSE NULL END AS "Silver",
CASE WHEN COALESCE("SamplesPresent",0) > 0 THEN 100.0 * COALESCE("GoldHits",0) / "SamplesPresent" ELSE NULL END AS "Gold",
"Date" AS "SnapshotTime"
FROM vm_daily_rollup
WHERE "Date" >= %d
AND "Date" < %d
AND %s
`, start.Unix(), end.Unix(), templateExclusionFilter())
}
// insertMonthlyAggregates writes one monthly summary row per aggregated VM into
// summaryTable inside a single transaction, via one prepared INSERT. Averages and
// pool percentages are derived from the sample-weighted sums accumulated during
// scanning; when a VM has no samples (totalSamples == 0) those columns are written
// as NULL. The "Tin".."Gold" columns receive the same values as the Pool*Pct
// columns. Returns the first insert/prepare/commit error encountered.
//
// Fix: transaction cleanup now uses a single deferred Rollback instead of manual
// Rollback calls on each error path, so an early return or panic cannot leak the
// transaction. Rollback after a successful Commit is a no-op (sql.ErrTxDone) and
// its error is deliberately ignored.
func (c *CronTask) insertMonthlyAggregates(ctx context.Context, summaryTable string, aggMap map[monthlyAggKey]*monthlyAggVal) error {
	dbConn := c.Database.DB()
	// Column order must match the argument order passed to ExecContext below.
	columns := []string{
		"InventoryId", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
		"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "VcpuCount",
		"RamGB", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid", "SamplesPresent",
		"AvgVcpuCount", "AvgRamGB", "AvgProvisionedDisk", "AvgIsPresent",
		"PoolTinPct", "PoolBronzePct", "PoolSilverPct", "PoolGoldPct",
		"Tin", "Bronze", "Silver", "Gold",
	}
	quotedColumns := make([]string, len(columns))
	placeholders := make([]string, len(columns))
	for i, col := range columns {
		quotedColumns[i] = fmt.Sprintf(`"%s"`, col)
		placeholders[i] = "?"
	}
	// Rebind converts the ? placeholders to the driver's native style (e.g. $n).
	stmtText := dbConn.Rebind(fmt.Sprintf(`INSERT INTO %s (%s) VALUES (%s)`, summaryTable, strings.Join(quotedColumns, ","), strings.Join(placeholders, ",")))
	tx, err := dbConn.BeginTxx(ctx, nil)
	if err != nil {
		return err
	}
	defer func() { _ = tx.Rollback() }() // no-op once Commit has succeeded
	stmt, err := tx.PreparexContext(ctx, stmtText)
	if err != nil {
		return err
	}
	defer stmt.Close()
	for _, v := range aggMap {
		// NULL InventoryId when the source rows never carried one.
		inventoryVal := sql.NullInt64{}
		if v.inventoryId != 0 {
			inventoryVal = sql.NullInt64{Int64: v.inventoryId, Valid: true}
		}
		// All averages stay NULL unless the VM accumulated at least one sample.
		var avgVcpu, avgRam, avgDisk, avgIsPresent sql.NullFloat64
		var tinPct, bronzePct, silverPct, goldPct sql.NullFloat64
		if v.totalSamples > 0 {
			avgVcpu = sql.NullFloat64{Float64: v.sumVcpu / v.totalSamples, Valid: true}
			avgRam = sql.NullFloat64{Float64: v.sumRam / v.totalSamples, Valid: true}
			avgDisk = sql.NullFloat64{Float64: v.sumDisk / v.totalSamples, Valid: true}
			avgIsPresent = sql.NullFloat64{Float64: float64(v.samplesPresent) / v.totalSamples, Valid: true}
			tinPct = sql.NullFloat64{Float64: 100.0 * v.tinWeighted / v.totalSamples, Valid: true}
			bronzePct = sql.NullFloat64{Float64: 100.0 * v.bronzeWeighted / v.totalSamples, Valid: true}
			silverPct = sql.NullFloat64{Float64: 100.0 * v.silverWeighted / v.totalSamples, Valid: true}
			goldPct = sql.NullFloat64{Float64: 100.0 * v.goldWeighted / v.totalSamples, Valid: true}
		}
		if _, err := stmt.ExecContext(ctx,
			inventoryVal,
			v.key.Name, v.key.Vcenter, v.key.VmId, v.eventKey, v.cloudId, v.creation, v.deletion,
			v.resourcePool, v.datacenter, v.cluster, v.folder, v.provisioned, v.vcpuCount, v.ramGB,
			v.isTemplate, v.poweredOn, v.srmPlaceholder, v.key.VmUuid, v.samplesPresent,
			avgVcpu, avgRam, avgDisk, avgIsPresent,
			tinPct, bronzePct, silverPct, goldPct,
			tinPct, bronzePct, silverPct, goldPct, // "Tin".."Gold" mirror Pool*Pct
		); err != nil {
			return err
		}
	}
	return tx.Commit()
}
+4 -212
View File
@@ -2,220 +2,12 @@ package tasks
import (
"context"
"database/sql"
"log/slog"
"strings"
"time"
"vctp/db/queries"
"vctp/internal/vcenter"
"github.com/vmware/govmomi/vim25/types"
)
// use gocron to check events in the Events table
func (c *CronTask) RunVmCheck(ctx context.Context, logger *slog.Logger) error {
startedAt := time.Now()
defer func() {
logger.Info("Event processing job finished", "duration", time.Since(startedAt))
}()
var (
numVcpus int32
numRam int32
totalDiskGB float64
srmPlaceholder string
foundVm bool
isTemplate string
poweredOn string
folderPath string
rpName string
vmUuid string
)
dateCmp := time.Now().AddDate(0, 0, -1).Unix()
logger.Debug("Started Events processing", "time", time.Now(), "since", dateCmp)
// Query events table
events, err := c.Database.Queries().ListUnprocessedEvents(ctx,
sql.NullInt64{Int64: dateCmp, Valid: dateCmp > 0})
if err != nil {
logger.Error("Unable to query for unprocessed events", "error", err)
return nil // TODO - what to do with this error?
} else {
logger.Debug("Successfully queried for unprocessed events", "count", len(events))
}
for _, evt := range events {
logger.Debug("Checking event", "event", evt)
// TODO - get a list of unique vcenters, then process each event in batches
// to avoid doing unnecessary login/logout of vcenter
//c.Logger.Debug("connecting to vcenter")
vc := vcenter.New(c.Logger, c.VcCreds)
vc.Login(evt.Source)
//datacenter = evt.DatacenterName.String
vmObject, err := vc.FindVMByIDWithDatacenter(evt.VmId.String, evt.DatacenterId.String)
if err != nil {
c.Logger.Error("Can't locate vm in vCenter", "vmID", evt.VmId.String, "error", err)
continue
} else if vmObject == nil {
c.Logger.Debug("didn't find VM", "vm_id", evt.VmId.String)
// TODO - if VM name ends with -tmp or -phVm then we mark this record as processed and stop trying to find a VM that doesnt exist anymore
if strings.HasSuffix(evt.VmName.String, "-phVm") || strings.HasSuffix(evt.VmName.String, "-tmp") {
c.Logger.Info("VM name indicates temporary VM, marking as processed", "vm_name", evt.VmName.String)
err = c.Database.Queries().UpdateEventsProcessed(ctx, evt.Eid)
if err != nil {
c.Logger.Error("Unable to mark this event as processed", "event_id", evt.Eid, "error", err)
} else {
//c.Logger.Debug("Marked event as processed", "event_id", evt.Eid)
}
}
/*
numRam = 0
numVcpus = 0
totalDiskGB = 0
isTemplate = "FALSE"
folderPath = ""
vmUuid = ""
*/
continue
}
if strings.HasPrefix(vmObject.Name, "vCLS-") {
c.Logger.Info("Skipping internal vCLS VM event", "vm_name", vmObject.Name)
if err := c.Database.Queries().UpdateEventsProcessed(ctx, evt.Eid); err != nil {
c.Logger.Error("Unable to mark vCLS event as processed", "event_id", evt.Eid, "error", err)
}
continue
}
//c.Logger.Debug("found VM")
srmPlaceholder = "FALSE" // Default assumption
//prettyPrint(vmObject)
// calculate VM properties we want to store
if vmObject.Config != nil {
numRam = vmObject.Config.Hardware.MemoryMB
numVcpus = vmObject.Config.Hardware.NumCPU
vmUuid = vmObject.Config.Uuid
var totalDiskBytes int64
// Calculate the total disk allocated in GB
for _, device := range vmObject.Config.Hardware.Device {
if disk, ok := device.(*types.VirtualDisk); ok {
// Print the filename of the backing device
if _, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
//c.Logger.Debug("Adding disk", "size_bytes", disk.CapacityInBytes, "backing_file", backing.FileName)
} else {
//c.Logger.Debug("Adding disk, unknown backing type", "size_bytes", disk.CapacityInBytes)
}
totalDiskBytes += disk.CapacityInBytes
//totalDiskGB += float64(disk.CapacityInBytes / 1024 / 1024 / 1024) // Convert from bytes to GB
}
}
totalDiskGB = float64(totalDiskBytes / 1024 / 1024 / 1024)
c.Logger.Debug("Converted total disk size", "bytes", totalDiskBytes, "GB", totalDiskGB)
// Determine if the VM is a normal VM or an SRM placeholder
if vmObject.Config.ManagedBy != nil && vmObject.Config.ManagedBy.ExtensionKey == "com.vmware.vcDr" {
if vmObject.Config.ManagedBy.Type == "placeholderVm" {
c.Logger.Debug("VM is a placeholder")
srmPlaceholder = "TRUE"
} else {
c.Logger.Debug("VM is managed by SRM but not a placeholder", "details", vmObject.Config.ManagedBy)
}
}
if vmObject.Config.Template {
isTemplate = "TRUE"
} else {
isTemplate = "FALSE"
}
// Retrieve the full folder path of the VM
folderPath, err = vc.GetVMFolderPath(*vmObject)
if err != nil {
c.Logger.Error("failed to get vm folder path", "error", err)
folderPath = ""
} else {
c.Logger.Debug("Found vm folder path", "folder_path", folderPath)
}
// Retrieve the resource pool of the VM
rpName, _ = vc.GetVmResourcePool(*vmObject)
foundVm = true
} else {
c.Logger.Error("Empty VM config")
}
//c.Logger.Debug("VM has runtime data", "power_state", vmObject.Runtime.PowerState)
if vmObject.Runtime.PowerState == "poweredOff" {
poweredOn = "FALSE"
} else {
poweredOn = "TRUE"
}
err = vc.Logout()
if err != nil {
c.Logger.Error("unable to logout of vcenter", "error", err)
}
if foundVm {
c.Logger.Debug("Adding to Inventory table", "vm_name", evt.VmName.String, "vcpus", numVcpus, "ram", numRam, "dc", evt.DatacenterId.String)
params := queries.CreateInventoryParams{
Name: vmObject.Name,
Vcenter: evt.Source,
CloudId: sql.NullString{String: evt.CloudId, Valid: evt.CloudId != ""},
EventKey: sql.NullString{String: evt.EventKey.String, Valid: evt.EventKey.Valid},
VmId: sql.NullString{String: evt.VmId.String, Valid: evt.VmId.Valid},
Datacenter: sql.NullString{String: evt.DatacenterName.String, Valid: evt.DatacenterName.Valid},
Cluster: sql.NullString{String: evt.ComputeResourceName.String, Valid: evt.ComputeResourceName.Valid},
CreationTime: sql.NullInt64{Int64: evt.EventTime.Int64, Valid: evt.EventTime.Valid},
InitialVcpus: sql.NullInt64{Int64: int64(numVcpus), Valid: numVcpus > 0},
InitialRam: sql.NullInt64{Int64: int64(numRam), Valid: numRam > 0},
ProvisionedDisk: sql.NullFloat64{Float64: totalDiskGB, Valid: totalDiskGB > 0},
Folder: sql.NullString{String: folderPath, Valid: folderPath != ""},
ResourcePool: sql.NullString{String: rpName, Valid: rpName != ""},
VmUuid: sql.NullString{String: vmUuid, Valid: vmUuid != ""},
SrmPlaceholder: srmPlaceholder,
IsTemplate: isTemplate,
PoweredOn: poweredOn,
}
//c.Logger.Debug("database params", "params", params)
// Insert the new inventory record into the database
_, err := c.Database.Queries().CreateInventory(ctx, params)
if err != nil {
c.Logger.Error("unable to perform database insert", "error", err)
} else {
//c.Logger.Debug("created database record", "insert_result", result)
// mark this event as processed
err = c.Database.Queries().UpdateEventsProcessed(ctx, evt.Eid)
if err != nil {
c.Logger.Error("Unable to mark this event as processed", "event_id", evt.Eid, "error", err)
} else {
//c.Logger.Debug("Marked event as processed", "event_id", evt.Eid)
}
}
} else {
c.Logger.Debug("Not adding to Inventory due to missing vcenter config property", "vm_name", evt.VmName.String)
}
}
//fmt.Printf("processing at %s", time.Now())
// RunVmCheck is intentionally disabled and retained only so existing
// scheduler wiring keeps compiling. The legacy event-processing flow it
// used to drive has been retired in favor of snapshot-based lifecycle
// logic; calling it simply records that fact and succeeds.
func (c *CronTask) RunVmCheck(_ context.Context, lg *slog.Logger) error {
	lg.Info("legacy VM event-processing task is disabled")
	return nil
}
-17
View File
@@ -1,17 +0,0 @@
package tasks
import (
"log/slog"
"vctp/db"
"vctp/internal/settings"
"vctp/internal/vcenter"
)
// CronTask stores runtime information to be used by tasks
type CronTask struct {
	Logger   *slog.Logger // structured logger used by task methods for all task logging
	Database db.Database  // database handle; statements are issued via Database.Queries()
	Settings *settings.Settings // loaded application settings -- contents not visible from this file
	VcCreds  *vcenter.VcenterLogin // vCenter login credentials -- TODO confirm exact fields against vcenter package
	FirstHourlySnapshotCheck bool // initialized to true at startup; presumably gates first-run snapshot behavior -- confirm
}
+123
View File
@@ -0,0 +1,123 @@
package tasks
import (
"database/sql"
"log/slog"
"time"
"vctp/db"
"vctp/internal/settings"
"vctp/internal/vcenter"
)
// CronTask stores runtime information to be used by tasks.
type CronTask struct {
	Logger   *slog.Logger // structured logger used by task methods for all task logging
	Database db.Database  // database handle; statements are issued via Database.Queries()
	Settings *settings.Settings // loaded application settings -- contents not visible from this file
	VcCreds  *vcenter.VcenterLogin // vCenter login credentials -- TODO confirm exact fields against vcenter package
	FirstHourlySnapshotCheck bool // initialized to true at startup; presumably gates first-run snapshot behavior -- confirm
}
// InventorySnapshotRow represents a single VM snapshot row.
//
// sql.Null* fields mirror nullable database columns; plain fields map to
// NOT NULL columns. NOTE(review): IsTemplate, PoweredOn and SrmPlaceholder
// are stored as the strings "TRUE"/"FALSE" elsewhere in this package --
// confirm the same convention applies to rows read into this struct.
type InventorySnapshotRow struct {
	InventoryId     sql.NullInt64
	Name            string
	Vcenter         string
	VmId            sql.NullString
	EventKey        sql.NullString
	CloudId         sql.NullString
	CreationTime    sql.NullInt64 // presumably unix seconds -- TODO confirm
	DeletionTime    sql.NullInt64 // presumably unix seconds; null while the VM still exists -- confirm
	ResourcePool    sql.NullString
	Datacenter      sql.NullString
	Cluster         sql.NullString
	Folder          sql.NullString
	ProvisionedDisk sql.NullFloat64 // disk allocation; computed in GB elsewhere in this package -- confirm units
	VcpuCount       sql.NullInt64
	RamGB           sql.NullInt64
	IsTemplate      string
	PoweredOn       string
	SrmPlaceholder  string
	VmUuid          sql.NullString
	SnapshotTime    int64 // time the snapshot was taken; presumably unix seconds -- confirm
}
// snapshotTotals aliases db.SnapshotTotals so task code can refer to the DB
// package's snapshot totals type under a short local name.
type snapshotTotals = db.SnapshotTotals
// dailyAggKey identifies one VM within a daily aggregation pass.
// NOTE(review): both VmId and VmUuid participate in the key; the precedence
// between them is decided by the aggregation code -- confirm.
type dailyAggKey struct {
	Vcenter string
	VmId    string
	VmUuid  string
	Name    string
}

// dailyAggVal accumulates one VM's hourly samples while building a daily
// rollup. NOTE(review): field roles below are inferred from names; verify
// against the aggregation code that populates and consumes this struct.
type dailyAggVal struct {
	key            dailyAggKey
	resourcePool   string
	datacenter     string
	cluster        string
	folder         string
	isTemplate     string // "TRUE"/"FALSE" convention used elsewhere in this package
	poweredOn      string // "TRUE"/"FALSE" convention used elsewhere in this package
	srmPlaceholder string // "TRUE"/"FALSE" convention used elsewhere in this package
	creation       int64 // presumably unix seconds -- confirm
	firstSeen      int64 // earliest sample time observed -- presumably unix seconds
	lastSeen       int64 // latest sample time observed -- presumably unix seconds
	lastDisk       float64 // value carried from the most recent sample
	lastVcpu       int64
	lastRam        int64
	sumVcpu        int64 // running sums, presumably divided by samples for averages -- confirm
	sumRam         int64
	sumDisk        float64
	samples        int64 // number of samples folded in
	tinHits        int64 // per-tier counters; tier semantics defined elsewhere -- confirm
	bronzeHits     int64
	silverHits     int64
	goldHits       int64
	seen           map[int64]struct{} // presumably sample timestamps already counted, for dedup -- confirm
	deletion       int64
}
// monthlyAggKey identifies one VM within a monthly aggregation pass.
// NOTE(review): same shape as dailyAggKey; confirm whether the two can share
// a definition.
type monthlyAggKey struct {
	Vcenter string
	VmId    string
	VmUuid  string
	Name    string
}

// monthlyAggVal accumulates one VM's daily rollups while building a monthly
// summary. NOTE(review): field roles are inferred from names; verify against
// the aggregation code that populates and consumes this struct.
type monthlyAggVal struct {
	key            monthlyAggKey
	inventoryId    int64
	eventKey       string
	cloudId        string
	resourcePool   string
	datacenter     string
	cluster        string
	folder         string
	isTemplate     string // "TRUE"/"FALSE" convention used elsewhere in this package
	poweredOn      string // "TRUE"/"FALSE" convention used elsewhere in this package
	srmPlaceholder string // "TRUE"/"FALSE" convention used elsewhere in this package
	creation       int64 // presumably unix seconds -- confirm
	deletion       int64 // presumably unix seconds; zero while the VM exists -- confirm
	lastSnapshot   time.Time
	provisioned    float64
	vcpuCount      int64
	ramGB          int64
	samplesPresent int64 // samples in which the VM actually appeared
	totalSamples   float64 // presumably the denominator for weighted averages -- confirm
	sumVcpu        float64
	sumRam         float64
	sumDisk        float64
	tinWeighted    float64 // per-tier weighted tallies; tier semantics defined elsewhere -- confirm
	bronzeWeighted float64
	silverWeighted float64
	goldWeighted   float64
}
// CronTracker manages re-entry protection and status recording for cron jobs.
type CronTracker struct {
	db       db.Database // backing store for cron job status records
	bindType int         // SQL placeholder bind style -- presumably an sqlx bindtype constant; TODO confirm
}
+21 -15
View File
@@ -6,6 +6,7 @@ import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"log"
"math/big"
"net"
@@ -14,7 +15,7 @@ import (
"time"
)
func GenerateCerts(tlsCert string, tlsKey string) {
func GenerateCerts(tlsCert string, tlsKey string) error {
// @see https://shaneutt.com/blog/golang-ca-and-signed-cert-go/
// @see https://golang.org/src/crypto/tls/generate_cert.go
validFrom := ""
@@ -24,7 +25,7 @@ func GenerateCerts(tlsCert string, tlsKey string) {
// Get the hostname
hostname, err := os.Hostname()
if err != nil {
panic(err)
return fmt.Errorf("failed to lookup hostname: %w", err)
}
// Check that the directory exists
@@ -33,13 +34,15 @@ func GenerateCerts(tlsCert string, tlsKey string) {
_, err = os.Stat(relativePath)
if os.IsNotExist(err) {
log.Printf("Certificate path does not exist, creating %s before generating certificate\n", relativePath)
os.MkdirAll(relativePath, os.ModePerm)
if mkErr := os.MkdirAll(relativePath, os.ModePerm); mkErr != nil {
return fmt.Errorf("failed to create certificate directory %s: %w", relativePath, mkErr)
}
}
// Generate a private key
priv, err := rsa.GenerateKey(rand.Reader, rsaBits)
if err != nil {
log.Fatalf("Failed to generate private key: %v", err)
return fmt.Errorf("failed to generate private key: %w", err)
}
var notBefore time.Time
@@ -48,7 +51,7 @@ func GenerateCerts(tlsCert string, tlsKey string) {
} else {
notBefore, err = time.Parse("Jan 2 15:04:05 2006", validFrom)
if err != nil {
log.Fatalf("Failed to parse creation date: %v", err)
return fmt.Errorf("failed to parse creation date: %w", err)
}
}
@@ -57,7 +60,7 @@ func GenerateCerts(tlsCert string, tlsKey string) {
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
log.Fatalf("Failed to generate serial number: %v", err)
return fmt.Errorf("failed to generate serial number: %w", err)
}
template := x509.Certificate{
@@ -105,35 +108,38 @@ func GenerateCerts(tlsCert string, tlsKey string) {
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
if err != nil {
log.Fatalf("Failed to create certificate: %v", err)
return fmt.Errorf("failed to create certificate: %w", err)
}
certOut, err := os.Create(tlsCert)
if err != nil {
log.Fatalf("Failed to open %s for writing: %v", tlsCert, err)
return fmt.Errorf("failed to open %s for writing: %w", tlsCert, err)
}
if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
log.Fatalf("Failed to write data to %s: %v", tlsCert, err)
_ = certOut.Close()
return fmt.Errorf("failed to write certificate data to %s: %w", tlsCert, err)
}
if err := certOut.Close(); err != nil {
log.Fatalf("Error closing %s: %v", tlsCert, err)
return fmt.Errorf("failed to close certificate file %s: %w", tlsCert, err)
}
log.Printf("wrote %s\n", tlsCert)
keyOut, err := os.OpenFile(tlsKey, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
log.Fatalf("Failed to open %s for writing: %v", tlsKey, err)
return
return fmt.Errorf("failed to open %s for writing: %w", tlsKey, err)
}
privBytes, err := x509.MarshalPKCS8PrivateKey(priv)
if err != nil {
log.Fatalf("Unable to marshal private key: %v", err)
_ = keyOut.Close()
return fmt.Errorf("unable to marshal private key: %w", err)
}
if err := pem.Encode(keyOut, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}); err != nil {
log.Fatalf("Failed to write data to %s: %v", tlsKey, err)
_ = keyOut.Close()
return fmt.Errorf("failed to write private key data to %s: %w", tlsKey, err)
}
if err := keyOut.Close(); err != nil {
log.Fatalf("Error closing %s: %v", tlsKey, err)
return fmt.Errorf("failed to close private key file %s: %w", tlsKey, err)
}
log.Printf("wrote %s\n", tlsKey)
return nil
}
File diff suppressed because it is too large Load Diff
+380 -39
View File
@@ -1,6 +1,17 @@
// Package main vCTP API entrypoint.
//
// @title vCTP API
// @version 1.0
// @description vCTP API endpoints for inventory snapshots, reporting, and administration.
// @BasePath /
// @schemes http https
// @securityDefinitions.apikey BearerAuth
// @in header
// @name Authorization
package main
import (
"bytes"
"context"
"flag"
"fmt"
@@ -9,6 +20,7 @@ import (
"strings"
"time"
"vctp/db"
"vctp/internal/report"
"vctp/internal/secrets"
"vctp/internal/settings"
"vctp/internal/tasks"
@@ -19,24 +31,32 @@ import (
"vctp/server/router"
"crypto/sha256"
"github.com/go-co-op/gocron/v2"
"log/slog"
"github.com/go-co-op/gocron/v2"
)
var (
bindDisableTls bool
sha1ver string // sha1 revision used to build the program
buildTime string // when the executable was built
cronFrequency time.Duration
cronInvFrequency time.Duration
cronSnapshotFrequency time.Duration
cronAggregateFrequency time.Duration
)
const fallbackEncryptionKey = "5L1l3B5KvwOCzUHMAlCgsgUTRAYMfSpa"
const (
encryptedVcenterPasswordPrefix = "enc:v1:"
legacyFallbackEncryptionKey = "5L1l3B5KvwOCzUHMAlCgsgUTRAYMfSpa"
)
func main() {
settingsPath := flag.String("settings", "/etc/dtms/vctp.yml", "Path to settings YAML")
runInventory := flag.Bool("run-inventory", false, "Run a single inventory snapshot across all configured vCenters and exit")
dbCleanup := flag.Bool("db-cleanup", false, "Run a one-time cleanup to drop low-value hourly snapshot indexes and exit")
backfillVcenterCache := flag.Bool("backfill-vcenter-cache", false, "Run a one-time backfill for vcenter latest+aggregate cache tables and exit")
importSQLite := flag.String("import-sqlite", "", "Import a SQLite database file/DSN into the configured Postgres database and exit")
benchmarkAggregations := flag.Bool("benchmark-aggregations", false, "Run a one-time canonical aggregation benchmark (Go vs SQL) and exit")
benchmarkRuns := flag.Int("benchmark-runs", 3, "Number of benchmark iterations per mode when -benchmark-aggregations is set")
flag.Parse()
bootstrapLogger := log.New(log.LevelInfo, log.OutputText)
@@ -56,22 +76,31 @@ func main() {
log.ToOutput(strings.ToLower(strings.TrimSpace(s.Values.Settings.LogOutput))),
)
s.Logger = logger
db.SetVmHourlyStatsPostgresPartitioningEnabled(boolWithDefault(s.Values.Settings.PostgresVmHourlyPartitioning, false))
logger.Info("vCTP starting", "build_time", buildTime, "sha1_version", sha1ver, "go_version", runtime.Version(), "settings_file", *settingsPath)
warnDeprecatedPollingSettings(logger, s.Values)
// Configure database
dbDriver := strings.TrimSpace(s.Values.Settings.DatabaseDriver)
if dbDriver == "" {
dbDriver = "sqlite"
}
normalizedDriver := strings.ToLower(strings.TrimSpace(dbDriver))
if normalizedDriver == "" || normalizedDriver == "sqlite3" {
normalizedDriver = "sqlite"
}
dbURL := strings.TrimSpace(s.Values.Settings.DatabaseURL)
normalizedDriver, inferredFromDSN, err := db.ResolveDriver(s.Values.Settings.DatabaseDriver, dbURL)
if err != nil {
logger.Error("Invalid database configuration", "error", err)
os.Exit(1)
}
if inferredFromDSN {
logger.Warn("database_driver is unset; inferred postgres from database_url")
}
if dbURL == "" && normalizedDriver == "sqlite" {
dbURL = utils.GetFilePath("db.sqlite3")
}
logger.Info("Effective database driver resolved", "driver", normalizedDriver)
database, err := db.New(logger, db.Config{Driver: normalizedDriver, DSN: dbURL})
database, err := db.New(logger, db.Config{
Driver: normalizedDriver,
DSN: dbURL,
EnableExperimentalPostgres: s.Values.Settings.EnableExperimentalPostgres,
})
if err != nil {
logger.Error("Failed to create database", "error", err)
os.Exit(1)
@@ -83,6 +112,140 @@ func main() {
logger.Error("failed to migrate database", "error", err)
os.Exit(1)
}
if strings.TrimSpace(*importSQLite) != "" {
if normalizedDriver != "postgres" {
logger.Error("sqlite import requires settings.database_driver=postgres")
os.Exit(1)
}
logger.Info("starting one-time sqlite import into postgres", "sqlite_source", strings.TrimSpace(*importSQLite))
stats, err := db.ImportSQLiteIntoPostgres(ctx, logger, database.DB(), strings.TrimSpace(*importSQLite))
if err != nil {
logger.Error("failed to import sqlite database into postgres", "error", err)
os.Exit(1)
}
logger.Info("completed sqlite import into postgres",
"sqlite_source", stats.SourceDSN,
"tables_imported", stats.TablesImported,
"tables_skipped", stats.TablesSkipped,
"rows_imported", stats.RowsImported,
)
return
}
if *dbCleanup {
dropped, err := db.CleanupHourlySnapshotIndexes(ctx, database.DB())
if err != nil {
logger.Error("failed to cleanup hourly snapshot indexes", "error", err)
os.Exit(1)
}
logger.Info("completed hourly snapshot index cleanup", "indexes_dropped", dropped)
return
}
if *backfillVcenterCache {
logger.Info("starting one-time vcenter cache backfill")
if err := report.EnsureSnapshotRegistry(ctx, database); err != nil {
logger.Error("failed to ensure snapshot registry", "error", err)
os.Exit(1)
}
hourlyRecords, err := report.ListSnapshots(ctx, database, "hourly")
if err != nil {
logger.Error("failed to list hourly snapshots from registry", "error", err)
os.Exit(1)
}
if len(hourlyRecords) == 0 {
logger.Warn("snapshot registry has no hourly entries; attempting registry migration before cache backfill")
stats, err := report.MigrateSnapshotRegistry(ctx, database)
if err != nil {
logger.Error("failed to migrate snapshot registry before cache backfill", "error", err)
os.Exit(1)
}
logger.Info("snapshot registry migration complete",
"hourly_renamed", stats.HourlyRenamed,
"hourly_registered", stats.HourlyRegistered,
"daily_registered", stats.DailyRegistered,
"monthly_registered", stats.MonthlyRegistered,
)
}
if err := db.SyncVcenterTotalsFromSnapshots(ctx, database.DB()); err != nil {
logger.Error("failed to backfill hourly vcenter totals cache", "error", err)
os.Exit(1)
}
latestSynced, err := db.SyncVcenterLatestTotalsFromHistory(ctx, database.DB())
if err != nil {
logger.Error("failed to backfill latest vcenter totals cache", "error", err)
os.Exit(1)
}
dailySnapshots, dailyRows, dailyErr := db.SyncVcenterAggregateTotalsFromRegistry(ctx, database.DB(), "daily")
if dailyErr != nil {
logger.Warn("daily vcenter aggregate cache backfill completed with warnings", "error", dailyErr)
}
monthlySnapshots, monthlyRows, monthlyErr := db.SyncVcenterAggregateTotalsFromRegistry(ctx, database.DB(), "monthly")
if monthlyErr != nil {
logger.Warn("monthly vcenter aggregate cache backfill completed with warnings", "error", monthlyErr)
}
logger.Info("completed one-time vcenter cache backfill",
"latest_rows_synced", latestSynced,
"daily_snapshots_refreshed", dailySnapshots,
"daily_rows_upserted", dailyRows,
"monthly_snapshots_refreshed", monthlySnapshots,
"monthly_rows_upserted", monthlyRows,
)
return
}
if *benchmarkAggregations {
logger.Info("Running one-shot canonical aggregation benchmark",
"runs_per_mode", *benchmarkRuns,
"driver", normalizedDriver,
"scheduled_aggregation_engine", strings.ToLower(strings.TrimSpace(s.Values.Settings.ScheduledAggregationEngine)),
)
ct := &tasks.CronTask{
Logger: logger,
Database: database,
Settings: s,
FirstHourlySnapshotCheck: true,
}
benchReport, err := ct.RunCanonicalAggregationBenchmark(ctx, *benchmarkRuns)
if err != nil {
logger.Error("canonical aggregation benchmark failed", "error", err)
os.Exit(1)
}
if !benchReport.DailyWindowStart.IsZero() {
logger.Info("daily canonical benchmark",
"window_start", benchReport.DailyWindowStart.Format(time.RFC3339),
"window_end", benchReport.DailyWindowEnd.Format(time.RFC3339),
"go_min", benchReport.DailyGo.Min,
"go_median", benchReport.DailyGo.Median,
"go_avg", benchReport.DailyGo.Avg,
"go_max", benchReport.DailyGo.Max,
"go_rows", benchReport.DailyGoRowsWritten,
"sql_min", benchReport.DailySQL.Min,
"sql_median", benchReport.DailySQL.Median,
"sql_avg", benchReport.DailySQL.Avg,
"sql_max", benchReport.DailySQL.Max,
"sql_rows", benchReport.DailySQLRowsWritten,
)
}
if !benchReport.MonthlyWindowStart.IsZero() {
logger.Info("monthly canonical benchmark",
"window_start", benchReport.MonthlyWindowStart.Format(time.RFC3339),
"window_end", benchReport.MonthlyWindowEnd.Format(time.RFC3339),
"go_min", benchReport.MonthlyGo.Min,
"go_median", benchReport.MonthlyGo.Median,
"go_avg", benchReport.MonthlyGo.Avg,
"go_max", benchReport.MonthlyGo.Max,
"go_rows", benchReport.MonthlyGoRowsWritten,
"sql_min", benchReport.MonthlySQL.Min,
"sql_median", benchReport.MonthlySQL.Median,
"sql_avg", benchReport.MonthlySQL.Avg,
"sql_max", benchReport.MonthlySQL.Max,
"sql_rows", benchReport.MonthlySQLRowsWritten,
)
}
logger.Info("Canonical aggregation benchmark complete; exiting")
return
}
// Determine bind IP
bindIP := strings.TrimSpace(s.Values.Settings.BindIP)
@@ -118,27 +281,33 @@ func main() {
// Generate certificate if required
if !(utils.FileExists(tlsCertFilename) && utils.FileExists(tlsKeyFilename)) {
logger.Warn("Specified TLS certificate or private key do not exist", "certificate", tlsCertFilename, "tls-key", tlsKeyFilename)
utils.GenerateCerts(tlsCertFilename, tlsKeyFilename)
if err := utils.GenerateCerts(tlsCertFilename, tlsKeyFilename); err != nil {
logger.Error("failed to generate TLS cert/key", "error", err)
os.Exit(1)
}
}
// Load vcenter credentials from settings, decrypt if required
encKey := deriveEncryptionKey(logger)
// Load vcenter credentials from settings, decrypt if required.
encKey := deriveEncryptionKey(logger, *settingsPath, s.Values.Settings.EncryptionKey)
a := secrets.New(logger, encKey)
legacyDecryptKeys := deriveLegacyDecryptionKeys(*settingsPath, encKey)
vcEp := strings.TrimSpace(s.Values.Settings.VcenterPassword)
if len(vcEp) == 0 {
logger.Error("No vcenter password configured")
os.Exit(1)
}
vcPass, err := a.Decrypt(vcEp)
vcPass, rewrittenCredential, err := resolveVcenterPassword(logger, a, legacyDecryptKeys, vcEp)
if err != nil {
logger.Error("failed to decrypt vcenter credentials. Assuming un-encrypted", "error", err)
vcPass = []byte(vcEp)
if cipherText, encErr := a.Encrypt([]byte(vcEp)); encErr != nil {
logger.Warn("failed to encrypt vcenter credentials", "error", encErr)
logger.Error("failed to resolve vcenter credentials", "error", err)
os.Exit(1)
}
if rewrittenCredential != "" && rewrittenCredential != vcEp {
s.Values.Settings.VcenterPassword = rewrittenCredential
if err := s.WriteYMLSettings(); err != nil {
logger.Warn("failed to update settings with encrypted vcenter password", "error", err)
} else {
s.Values.Settings.VcenterPassword = cipherText
if err := s.WriteYMLSettings(); err != nil {
logger.Warn("failed to update settings with encrypted vcenter password", "error", err)
if strings.HasPrefix(vcEp, encryptedVcenterPasswordPrefix) {
logger.Info("rewrote vcenter password with refreshed encryption format")
} else {
logger.Info("encrypted vcenter password stored in settings file")
}
@@ -155,6 +324,13 @@ func main() {
os.Exit(1)
}
// Set a recognizable User-Agent for vCenter sessions.
ua := "vCTP"
if sha1ver != "" {
ua = fmt.Sprintf("vCTP/%s", sha1ver)
}
vcenter.SetUserAgent(ua)
// Prepare the task scheduler
c, err := gocron.NewScheduler()
if err != nil {
@@ -171,20 +347,28 @@ func main() {
FirstHourlySnapshotCheck: true,
}
// One-shot mode: run a single inventory snapshot across all configured vCenters and exit.
if *runInventory {
logger.Info("Running one-shot inventory snapshot across all vCenters")
if err := ct.RunVcenterSnapshotHourly(ctx, logger, true); err != nil {
logger.Error("One-shot inventory snapshot failed", "error", err)
os.Exit(1)
}
logger.Info("One-shot inventory snapshot complete; exiting")
return
}
cronSnapshotFrequency = durationFromSeconds(s.Values.Settings.VcenterInventorySnapshotSeconds, 3600)
logger.Debug("Setting VM inventory snapshot cronjob frequency to", "frequency", cronSnapshotFrequency)
cronAggregateFrequency = durationFromSeconds(s.Values.Settings.VcenterInventoryAggregateSeconds, 86400)
logger.Debug("Setting VM inventory daily aggregation cronjob frequency to", "frequency", cronAggregateFrequency)
startsAt3 := time.Now().Add(cronSnapshotFrequency)
if cronSnapshotFrequency == time.Hour {
startsAt3 = time.Now().Truncate(time.Hour).Add(time.Hour)
}
startsAt3 := alignStart(time.Now(), cronSnapshotFrequency)
job3, err := c.NewJob(
gocron.DurationJob(cronSnapshotFrequency),
gocron.NewTask(func() {
ct.RunVcenterSnapshotHourly(ctx, logger)
ct.RunVcenterSnapshotHourly(ctx, logger, false)
}), gocron.WithSingletonMode(gocron.LimitModeReschedule),
gocron.WithStartAt(gocron.WithStartDateTime(startsAt3)),
)
@@ -212,7 +396,10 @@ func main() {
}
logger.Debug("Created vcenter inventory aggregation cron job", "job", job4.ID(), "starting_at", startsAt4)
monthlyCron := "0 0 1 * *"
monthlyCron := strings.TrimSpace(s.Values.Settings.MonthlyAggregationCron)
if monthlyCron == "" {
monthlyCron = "10 3 1 * *"
}
logger.Debug("Setting monthly aggregation cron schedule", "cron", monthlyCron)
job5, err := c.NewJob(
gocron.CronJob(monthlyCron, false),
@@ -234,7 +421,7 @@ func main() {
gocron.CronJob(snapshotCleanupCron, false),
gocron.NewTask(func() {
ct.RunSnapshotCleanup(ctx, logger)
if strings.EqualFold(s.Values.Settings.DatabaseDriver, "sqlite") {
if normalizedDriver == "sqlite" {
logger.Info("Performing sqlite VACUUM after snapshot cleanup")
if _, err := ct.Database.DB().ExecContext(ctx, "VACUUM"); err != nil {
logger.Warn("VACUUM failed after snapshot cleanup", "error", err)
@@ -284,11 +471,42 @@ func main() {
)
//logger.Debug("Server configured", "object", svr)
svr.StartAndWait()
if err := svr.StartAndWait(); err != nil {
logger.Error("server terminated with error", "error", err)
os.Exit(1)
}
os.Exit(0)
}
// alignStart snaps the first run to a sensible boundary (hour or 15-minute block) when possible.
func alignStart(now time.Time, freq time.Duration) time.Time {
if freq == time.Hour {
return now.Truncate(time.Hour).Add(time.Hour)
}
quarter := 15 * time.Minute
if freq%quarter == 0 {
return now.Truncate(quarter).Add(quarter)
}
return now.Add(freq)
}
// warnDeprecatedPollingSettings logs a warning for each deprecated polling
// interval that is still set in the loaded configuration. It is a no-op when
// cfg is nil.
func warnDeprecatedPollingSettings(logger *slog.Logger, cfg *settings.SettingsYML) {
	if cfg == nil {
		return
	}
	if v := cfg.Settings.VcenterEventPollingSeconds; v > 0 {
		logger.Warn("vcenter_event_polling_seconds is deprecated and ignored; snapshot lifecycle processing is used instead",
			"value", v,
		)
	}
	if v := cfg.Settings.VcenterInventoryPollingSeconds; v > 0 {
		logger.Warn("vcenter_inventory_polling_seconds is deprecated and ignored; hourly snapshot jobs are used instead",
			"value", v,
		)
	}
}
func durationFromSeconds(value int, fallback int) time.Duration {
if value <= 0 {
return time.Second * time.Duration(fallback)
@@ -296,25 +514,148 @@ func durationFromSeconds(value int, fallback int) time.Duration {
return time.Second * time.Duration(value)
}
func deriveEncryptionKey(logger *slog.Logger) []byte {
// boolWithDefault dereferences an optional boolean setting, substituting
// fallback when the value was never set (nil pointer).
func boolWithDefault(value *bool, fallback bool) bool {
	if value != nil {
		return *value
	}
	return fallback
}
// resolveVcenterPassword turns the configured vcenter password value into a
// plaintext credential, handling three storage formats:
//
//  1. "enc:v1:"-prefixed ciphertext (the current format) -- decrypted with
//     the active key, falling back to legacy keys; a fallback decrypt causes
//     the credential to be re-encrypted under the active key.
//  2. Unprefixed ciphertext from older deployments -- decrypted the same way
//     and rewritten into the prefixed format.
//  3. Plaintext -- accepted (if longer than two characters) and encrypted
//     into the prefixed format.
//
// It returns the plaintext password, a rewritten credential string for the
// settings file ("" when no rewrite is needed), and an error.
func resolveVcenterPassword(logger *slog.Logger, cipher *secrets.Secrets, legacyDecryptKeys [][]byte, raw string) ([]byte, string, error) {
	if strings.TrimSpace(raw) == "" {
		return nil, "", fmt.Errorf("vcenter password is empty")
	}

	// New format: explicit prefix so ciphertext and plaintext are distinguishable.
	if enc, hasPrefix := strings.CutPrefix(raw, encryptedVcenterPasswordPrefix); hasPrefix {
		pass, usedLegacyKey, err := decryptVcenterPasswordWithFallback(logger, cipher, legacyDecryptKeys, enc)
		if err != nil {
			return nil, "", fmt.Errorf("prefixed password decrypt failed: %w", err)
		}
		if !usedLegacyKey {
			return pass, "", nil
		}
		// Decrypted via a legacy key: refresh the stored value under the active key.
		rewrite, rewriteErr := encryptWithPrefix(cipher, pass)
		if rewriteErr != nil {
			logger.Warn("failed to refresh prefixed vcenter password after fallback decrypt", "error", rewriteErr)
			return pass, "", nil
		}
		logger.Info("rewrote prefixed vcenter password using active encryption key")
		return pass, rewrite, nil
	}

	// Backward compatibility: existing deployments may have unprefixed ciphertext.
	pass, _, err := decryptVcenterPasswordWithFallback(logger, cipher, legacyDecryptKeys, raw)
	if err == nil {
		rewrite, rewriteErr := encryptWithPrefix(cipher, pass)
		if rewriteErr != nil {
			logger.Warn("failed to re-encrypt legacy vcenter password with prefix", "error", rewriteErr)
			return pass, "", nil
		}
		return pass, rewrite, nil
	}

	// Decrypt failed: treat a non-trivial value as plaintext and auto-encrypt it.
	if len(raw) <= 2 {
		return nil, "", fmt.Errorf("vcenter password too short to auto-encrypt")
	}
	logger.Warn("unable to decrypt unprefixed vcenter password; treating value as plaintext", "error", err)
	rewrite, rewriteErr := encryptWithPrefix(cipher, []byte(raw))
	if rewriteErr != nil {
		return nil, "", fmt.Errorf("failed to encrypt plaintext vcenter password: %w", rewriteErr)
	}
	return []byte(raw), rewrite, nil
}
// decryptVcenterPasswordWithFallback decrypts ciphertext with the active
// cipher first, then tries each legacy key in order. The bool result reports
// whether a legacy key was needed; on total failure the error from the
// active-key attempt is returned.
func decryptVcenterPasswordWithFallback(logger *slog.Logger, cipher *secrets.Secrets, legacyDecryptKeys [][]byte, encrypted string) ([]byte, bool, error) {
	pass, primaryErr := cipher.Decrypt(encrypted)
	if primaryErr == nil {
		return pass, false, nil
	}
	for _, legacyKey := range legacyDecryptKeys {
		if legacyPass, err := secrets.New(logger, legacyKey).Decrypt(encrypted); err == nil {
			return legacyPass, true, nil
		}
	}
	return nil, false, primaryErr
}
// encryptWithPrefix encrypts plain with the given cipher and prepends the
// storage-format marker so the result is recognizable as ciphertext later.
func encryptWithPrefix(cipher *secrets.Secrets, plain []byte) (string, error) {
	ciphertext, err := cipher.Encrypt(plain)
	if err != nil {
		return "", err
	}
	return encryptedVcenterPasswordPrefix + ciphertext, nil
}
// deriveLegacyDecryptionKeys builds the ordered list of fallback decryption
// keys to try when the active key fails: the host-derived key candidate
// first, then the historical hard-coded fallback key. The active key and
// duplicates are excluded, and each returned key is an independent copy.
func deriveLegacyDecryptionKeys(settingsPath string, activeKey []byte) [][]byte {
	legacy := make([][]byte, 0, 2)
	alreadyHeld := func(candidate []byte) bool {
		for _, k := range legacy {
			if bytes.Equal(k, candidate) {
				return true
			}
		}
		return false
	}
	add := func(candidate []byte) {
		if len(candidate) == 0 || bytes.Equal(candidate, activeKey) || alreadyHeld(candidate) {
			return
		}
		// Copy so callers cannot mutate the stored key through the candidate slice.
		legacy = append(legacy, append([]byte(nil), candidate...))
	}

	hostKey, _ := deriveHostKeyCandidate(settingsPath)
	add(hostKey)
	add([]byte(legacyFallbackEncryptionKey))
	return legacy
}
// deriveEncryptionKey produces the active 32-byte encryption key. An explicit
// settings.encryption_key takes precedence (hashed with SHA-256); otherwise a
// host-derived candidate is used, with a warning when neither a BIOS UUID nor
// a machine-id was available.
func deriveEncryptionKey(logger *slog.Logger, settingsPath string, configuredKey string) []byte {
	configured := strings.TrimSpace(configuredKey)
	if configured != "" {
		digest := sha256.Sum256([]byte(configured))
		logger.Debug("derived encryption key from settings", "setting", "settings.encryption_key")
		return digest[:]
	}
	derived, src := deriveHostKeyCandidate(settingsPath)
	switch src {
	case "bios-uuid":
		logger.Debug("derived encryption key from BIOS UUID")
	case "machine-id":
		logger.Debug("derived encryption key from machine-id")
	default:
		logger.Warn("using host-derived encryption key fallback; set settings.encryption_key for an explicit key")
	}
	return derived
}
func deriveHostKeyCandidate(settingsPath string) ([]byte, string) {
if runtime.GOOS == "linux" {
if data, err := os.ReadFile("/sys/class/dmi/id/product_uuid"); err == nil {
src := strings.TrimSpace(string(data))
if src != "" {
sum := sha256.Sum256([]byte(src))
logger.Debug("derived encryption key from BIOS UUID")
return sum[:]
return sum[:], "bios-uuid"
}
}
if data, err := os.ReadFile("/etc/machine-id"); err == nil {
src := strings.TrimSpace(string(data))
if src != "" {
sum := sha256.Sum256([]byte(src))
logger.Debug("derived encryption key from machine-id")
return sum[:]
return sum[:], "machine-id"
}
}
}
logger.Warn("using fallback encryption key; hardware UUID not available")
return []byte(fallbackEncryptionKey)
hostname, err := os.Hostname()
if err != nil {
hostname = "unknown-host"
}
src := strings.Join([]string{"vctp", runtime.GOOS, strings.TrimSpace(hostname), strings.TrimSpace(settingsPath)}, "|")
sum := sha256.Sum256([]byte(src))
return sum[:], "host-derived"
}
+112
View File
@@ -0,0 +1,112 @@
package main
import (
"io"
"log/slog"
"strings"
"testing"
"vctp/internal/secrets"
)
// testLogger returns a logger whose output is discarded, keeping test
// output clean while still exercising real slog code paths.
func testLogger() *slog.Logger {
	handler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(handler)
}
// mustEncrypt encrypts plain with s, failing the test immediately on error.
func mustEncrypt(t *testing.T, s *secrets.Secrets, plain string) string {
	t.Helper()
	out, err := s.Encrypt([]byte(plain))
	if err != nil {
		t.Fatalf("encrypt failed: %v", err)
	}
	return out
}
// A plaintext credential must be returned verbatim and re-encrypted into the
// prefixed storage format.
func TestResolveVcenterPasswordPlaintextRewrite(t *testing.T) {
	lg := testLogger()
	cipher := secrets.New(lg, []byte("0123456789abcdef0123456789abcdef"))

	pass, rewritten, err := resolveVcenterPassword(lg, cipher, nil, "my-password")
	if err != nil {
		t.Fatalf("resolve failed: %v", err)
	}
	if got := string(pass); got != "my-password" {
		t.Fatalf("unexpected plaintext returned: %q", got)
	}
	if !strings.HasPrefix(rewritten, encryptedVcenterPasswordPrefix) {
		t.Fatalf("expected rewritten prefixed credential, got: %q", rewritten)
	}
}
// Unprefixed ciphertext produced under a legacy key must decrypt via the
// fallback keys and be rewritten under the active key in the prefixed format.
func TestResolveVcenterPasswordUnprefixedLegacyCiphertextRewrite(t *testing.T) {
	lg := testLogger()
	activeKey := []byte("0123456789abcdef0123456789abcdef")
	legacyKey := []byte("abcdef0123456789abcdef0123456789")
	active := secrets.New(lg, activeKey)
	legacyCiphertext := mustEncrypt(t, secrets.New(lg, legacyKey), "legacy-secret")

	pass, rewritten, err := resolveVcenterPassword(lg, active, [][]byte{legacyKey}, legacyCiphertext)
	if err != nil {
		t.Fatalf("resolve failed: %v", err)
	}
	if got := string(pass); got != "legacy-secret" {
		t.Fatalf("unexpected plaintext returned: %q", got)
	}
	if !strings.HasPrefix(rewritten, encryptedVcenterPasswordPrefix) {
		t.Fatalf("expected rewritten prefixed credential, got: %q", rewritten)
	}

	// The rewritten value must decrypt with the active key to the same secret.
	decoded, err := active.Decrypt(strings.TrimPrefix(rewritten, encryptedVcenterPasswordPrefix))
	if err != nil {
		t.Fatalf("rewritten ciphertext did not decrypt with active key: %v", err)
	}
	if got := string(decoded); got != "legacy-secret" {
		t.Fatalf("unexpected rewritten decrypt value: %q", got)
	}
}
// Prefixed ciphertext produced under a legacy key must also decrypt via the
// fallback keys and be refreshed under the active key.
func TestResolveVcenterPasswordPrefixedLegacyCiphertextRewrite(t *testing.T) {
	lg := testLogger()
	activeKey := []byte("0123456789abcdef0123456789abcdef")
	legacyKey := []byte("abcdef0123456789abcdef0123456789")
	active := secrets.New(lg, activeKey)
	raw := encryptedVcenterPasswordPrefix + mustEncrypt(t, secrets.New(lg, legacyKey), "legacy-prefixed-secret")

	pass, rewritten, err := resolveVcenterPassword(lg, active, [][]byte{legacyKey}, raw)
	if err != nil {
		t.Fatalf("resolve failed: %v", err)
	}
	if got := string(pass); got != "legacy-prefixed-secret" {
		t.Fatalf("unexpected plaintext returned: %q", got)
	}
	if !strings.HasPrefix(rewritten, encryptedVcenterPasswordPrefix) {
		t.Fatalf("expected rewritten prefixed credential, got: %q", rewritten)
	}

	// The refreshed value must decrypt with the active key to the same secret.
	decoded, err := active.Decrypt(strings.TrimPrefix(rewritten, encryptedVcenterPasswordPrefix))
	if err != nil {
		t.Fatalf("rewritten ciphertext did not decrypt with active key: %v", err)
	}
	if got := string(decoded); got != "legacy-prefixed-secret" {
		t.Fatalf("unexpected rewritten decrypt value: %q", got)
	}
}
func TestResolveVcenterPasswordShortPlaintextRejected(t *testing.T) {
	// An unprefixed raw value that is too short to be a usable plaintext
	// password must be rejected with a "too short" error.
	logger := testLogger()
	cipher := secrets.New(logger, []byte("0123456789abcdef0123456789abcdef"))
	_, _, err := resolveVcenterPassword(logger, cipher, nil, "ab")
	switch {
	case err == nil:
		t.Fatal("expected short plaintext error, got nil")
	case !strings.Contains(err.Error(), "too short"):
		t.Fatalf("unexpected error: %v", err)
	}
}
+100
View File
@@ -0,0 +1,100 @@
# Phase 0 Baseline and Regression Snapshot
Date captured: 2026-04-20 (Australia/Sydney)
## Baseline metrics (local `db.sqlite3` + `reports/`)
| Area | Metric | Baseline |
| --- | --- | --- |
| Hourly capture | `snapshot_registry` hourly entries | `930` |
| Hourly capture | Hourly compatibility tables (`inventory_hourly_%`) | `930` |
| Hourly capture | Canonical cache rows (`vm_hourly_stats`) | `489865` |
| Hourly capture | Latest hourly snapshot row count (`snapshot_count`) | `52` |
| Hourly capture | Latest hourly snapshot table | `inventory_hourly_1776635926` |
| Daily aggregation | `snapshot_registry` daily entries | `39` |
| Daily aggregation | Daily summary tables (`inventory_daily_summary_%`) | `40` |
| Daily aggregation | Canonical daily rollup rows (`vm_daily_rollup`) | `1779` |
| Daily aggregation | Latest daily summary table | `inventory_daily_summary_20260419` |
| Daily aggregation | Latest daily snapshot row count (`snapshot_count`) | `52` |
| Monthly aggregation | `snapshot_registry` monthly entries | `1` |
| Monthly aggregation | Latest monthly summary table | `inventory_monthly_summary_202601` |
| Monthly aggregation | Latest monthly snapshot row count (`snapshot_count`) | `62` |
| Report generation | Files present in `reports/` | `10339` |
| Report generation | Most recent files | `inventory_hourly_1776635926.xlsx`, `inventory_daily_summary_20260419.xlsx`, `inventory_hourly_1776635626.xlsx` |
Notes:
- `snapshot_runs` rows: `10254`, success distribution: `TRUE=10254`, attempts min/max/avg: `1/2/1.0001`.
- Runtime histograms/counters for long-running jobs are emitted on `/metrics` and are not persisted in SQLite.
- Hourly per-vCenter duration: `vctp_vcenter_snapshot_duration_seconds`
- Daily duration: `vctp_daily_aggregation_duration_seconds`
- Monthly duration: `vctp_monthly_aggregation_duration_seconds`
- Reports available gauge: `vctp_reports_available`
## API/endpoint contract regression snapshot
Source of truth: `server/router/router.go`.
Unauthenticated/public routes:
- `/`
- `/vm/trace`
- `/vcenters`
- `/vcenters/totals`
- `/vcenters/totals/daily`
- `/vcenters/totals/hourly`
- `/snapshots/hourly`
- `/snapshots/daily`
- `/snapshots/monthly`
- `/metrics`
- `/api/auth/login`
- `/assets/*`, `/favicon*`, `/reports/*`, `/swagger*`
Viewer routes (Bearer auth, viewer/admin role):
- `/api/report/inventory`
- `/api/report/updates`
- `/api/report/snapshot`
- `/api/diagnostics/daily-creation`
Admin routes (Bearer auth, admin role):
- `/api/event/vm/create`
- `/api/event/vm/modify`
- `/api/event/vm/move`
- `/api/event/vm/delete`
- `/api/import/vm`
- `/api/inventory/vm/delete`
- `/api/inventory/vm/update`
- `/api/cleanup/updates`
- `/api/snapshots/aggregate`
- `/api/snapshots/hourly/force`
- `/api/snapshots/migrate`
- `/api/snapshots/repair`
- `/api/snapshots/repair/all`
- `/api/snapshots/regenerate-hourly-reports`
- `/api/vcenters/cache/rebuild`
- `/api/encrypt`
- `/debug/pprof/*` (only when enabled)
`/api/auth/me` route:
- Protected by auth middleware (`withAuth`) but no explicit role gate.
## Report filename behavior regression snapshot
Source of truth: `server/handler/reportDownload.go`, `server/handler/snapshots.go`, `internal/report/snapshots.go`.
HTTP download endpoints:
- `GET /api/report/inventory` -> `Content-Disposition: attachment; filename="inventory_report.xlsx"`
- `GET /api/report/updates` -> `Content-Disposition: attachment; filename="updates_report.xlsx"`
- `GET /api/report/snapshot?table=<tableName>` -> `Content-Disposition: attachment; filename="<tableName>.xlsx"`
On-disk generated report filename:
- `SaveTableReport(...)` writes `<reports_dir>/<tableName>.xlsx`
- Snapshot list pages link to `/reports/<tableName>.xlsx`
## Migration guardrails confirmation
- No auth-model changes: route auth wrappers remain unchanged (`withAuth`, `withAuthRole` usage in router).
- SQLite support retained:
- settings default driver remains sqlite (`src/vctp.yml`, `README.md`).
- hourly canonical write path still has SQLite transactional upsert path (`insertHourlyCache`, `insertHourlyBatch`).
- Compatibility mode enabled by default:
- `settings.snapshot_table_compat_mode` default is `true` in settings defaults.
- runtime check falls back to enabled when unset (`snapshotTableCompatModeEnabled()`).
+391
View File
@@ -0,0 +1,391 @@
# Inventory Capture and Aggregation Optimization Plan
## Summary
Optimize for end-to-end runtime with a Postgres-ready design. Keep the current HTTP and report behavior intact, but shift the scheduled data pipeline so it uses canonical append-only/cache tables instead of repeatedly scanning `inventory_hourly_*` tables and regenerating reports inline.
This plan is intended to be implementation-ready for a `codex-5.3` execution pass.
Execution-path decision:
- For the current architecture and migration phases, scheduled daily and monthly aggregation default to the Go path.
- This is a readability-first and current-performance decision, not a claim that Go is inherently faster than a well-designed SQL implementation.
- SQL path is retained for compatibility, backfill, and fallback.
- SQL remains a future optimization candidate on canonical Postgres tables.
- SQL can be promoted to default only after benchmark evidence on canonical Postgres tables shows a clear runtime advantage.
The target architecture is:
1. `vm_hourly_stats` is the canonical hourly fact store.
2. `vm_daily_rollup` is the canonical monthly input.
3. Per-snapshot tables and XLSX generation remain as compatibility and output concerns, not the primary execution path.
## Current State
- Hourly capture already writes both per-snapshot tables and `vm_hourly_stats`.
- Daily aggregation has mixed execution paths:
- SQL union path over `inventory_hourly_*`
- Go path over `vm_hourly_stats` or parallel table scans
- Monthly aggregation has mixed execution paths:
- SQL path over daily or hourly snapshot tables
- Go path over `vm_daily_rollup` or hourly cache
- Lifecycle reconciliation updates both canonical cache tables and prior hourly snapshot tables during the hot path.
- Report generation is still coupled to scheduled capture and aggregation jobs.
- The current UI is rendered through Templ pages and shared `web2`/`web3` CSS classes, but it does not yet match the visual system described in `design.md`.
- Current shipped styling still uses a different blue accent, tighter radii, default system typography, and inconsistent component hierarchy compared with the target design language.
## Implementation Goals
- Reduce hourly capture wall-clock time.
- Reduce daily and monthly aggregation runtime.
- Eliminate repeated historical table scans from the normal scheduled path.
- Keep user-visible HTTP APIs, reports, and auth behavior unchanged.
- Improve UI clarity and consistency so the dashboard, snapshot views, and trace views reflect the design direction in `design.md`.
- Make authentication and role requirements easier to understand from the UI without changing the auth model.
- Preserve compatibility with SQLite for development and small installs.
- Make the runtime architecture cleanly scalable for PostgreSQL production use.
## Implementation Changes
### 1. Hourly Capture Pipeline
- Keep `GetAllVMsWithProps` as the primary vCenter inventory fetch path.
- Preserve single-VM property retrieval only as a fallback path when bulk retrieval is incomplete.
- Replace row-by-row database writes in hourly capture with batched writes.
- For PostgreSQL:
- prefer multi-row insert/upsert or `COPY` into `vm_hourly_stats`
- keep conflict handling on the canonical key
- For SQLite:
- keep transactional batched insert/upsert
- do not attempt PostgreSQL-only ingestion patterns
- During capture, write data to these canonical destinations first:
- `vm_hourly_stats`
- `vm_lifecycle_cache`
- `vcenter_totals`
- `vcenter_latest_totals`
- `vcenter_aggregate_totals` for hourly totals
- Treat `inventory_hourly_<epoch>` as compatibility output, not as the source of truth for downstream jobs.
- Move deletion and event reconciliation to one post-capture reconciliation phase per vCenter.
- In that reconciliation phase, update canonical cache tables first.
- Stop updating prior hourly snapshot tables inline during the capture hot path except where compatibility mode explicitly requires it.
- Remove synchronous XLSX regeneration from hourly capture.
- Scheduled capture should finish once persistence and reconciliation are complete.
- Report generation should run after the capture path, either deferred within the job or via a follow-up stage.
### 2. Daily Aggregation
- Make `vm_hourly_stats` the only normal scheduled input for daily aggregation.
- Scheduled daily jobs must not build `UNION ALL` queries across `inventory_hourly_*`.
- Keep the Go aggregation path as the explicit default scheduled path for the current implementation and migration phases.
- Readability is the primary reason for this default: the Go path is materially easier to follow, test, and debug than the current snapshot-union SQL path.
- Performance is a secondary but still important reason: on the current implementation, Go is expected to outperform the existing SQL union path by avoiding repeated historical table scans.
- Treat the SQL path as non-default compatibility and fallback behavior.
- Do not treat this as a permanent rejection of SQL.
- Only promote SQL to default if benchmark results on canonical Postgres data show a clear, repeatable improvement over the Go path.
- Keep the current SQL union path only for:
- compatibility fallback
- manual repair
- backfill support where needed
- Daily aggregation output must continue writing:
- `inventory_daily_summary_YYYYMMDD`
- `vm_daily_rollup`
- `snapshot_registry` daily record
- refreshed `vcenter_aggregate_totals` daily entries
- Lifecycle refinement should operate on canonical lifecycle data and only use snapshot-table probing as fallback.
- Preserve existing daily semantics for:
- `SamplesPresent`
- `AvgIsPresent`
- weighted CPU/RAM/disk averages
- pool percentages
- creation/deletion time behavior
### 3. Monthly Aggregation
- Make `vm_daily_rollup` the default scheduled input for monthly aggregation.
- Scheduled monthly jobs should not scan hourly snapshot tables in the normal path.
- Keep the Go aggregation path as the explicit default scheduled path for the current implementation and migration phases.
- Readability is the primary reason for this default: the Go path is materially easier to follow, test, and debug than the current SQL path.
- Performance is a secondary but still important reason: on the current implementation, Go is expected to outperform the existing SQL path by avoiding snapshot-table unions and hourly-history scans in the normal case.
- Treat the SQL path as non-default compatibility and fallback behavior.
- Do not treat this as a permanent rejection of SQL.
- Only promote SQL to default if benchmark results on canonical Postgres data show a clear, repeatable improvement over the Go path.
- Keep hourly-based monthly aggregation only for:
- manual rebuilds
- repair/backfill workflows
- validation against old behavior
- Preserve current monthly weighting semantics based on per-day sample volumes.
- Monthly aggregation output must continue writing:
- `inventory_monthly_summary_YYYYMM`
- `snapshot_registry` monthly record
- refreshed `vcenter_aggregate_totals` monthly entries
- Keep report generation behavior unchanged from the user's perspective, but do not keep it on the critical aggregation hot path if it can be deferred safely.
### 4. Storage and Schema
- Keep these tables during migration:
- `inventory_hourly_*`
- `inventory_daily_summary_*`
- `inventory_monthly_summary_*`
- Stop treating hourly snapshot tables as the normal scheduled aggregation source.
- Preserve `snapshot_registry`, but register logical hourly snapshots by timestamp even when downstream jobs no longer depend on hourly table scans.
- Validate or add the following indexes on `vm_hourly_stats` for PostgreSQL:
- `("SnapshotTime")`
- `("Vcenter","SnapshotTime")`
- `("Vcenter","VmId","SnapshotTime")`
- `("Vcenter","VmUuid","SnapshotTime")`
- a name lookup index aligned with current trace queries
- Keep the existing trace-compatible indexes for SQLite.
- After the canonical-path migration is stable, partition `vm_hourly_stats` by snapshot month for PostgreSQL.
- Do not require partitioning for SQLite or tests.
### 5. Compatibility Mode
- Introduce an explicit compatibility mode for legacy snapshot tables.
- When compatibility mode is enabled:
- continue writing `inventory_hourly_*`
- continue generating legacy-compatible daily/monthly summary tables
- continue registering snapshots as today
- When compatibility mode is disabled in a later phase:
- scheduled jobs may skip legacy hourly table creation
- compatibility reports and endpoints must still work from canonical data or compatibility rebuild jobs
- Default to compatibility mode enabled during the transition.
### 6. Scheduling and Job Flow
- Refactor the scheduled pipeline into explicit stages:
1. capture
2. reconcile
3. register and refresh totals caches
4. optional report generation
- Daily aggregation should run only against the completed prior-day hourly data.
- Monthly aggregation should depend on daily rollup completion, not hourly history scans.
- Keep the current cron behavior and auth/UI behavior unchanged while internal data flow changes land.
- Backfill and repair jobs should rebuild canonical caches first, then compatibility tables and reports.
### 7. UI Refresh and Design-System Alignment
- Use `design.md` as the source of truth for the UI refresh, but adapt it pragmatically to this codebase rather than attempting a pixel-perfect clone.
- Introduce semantic theme tokens using `--theme_*` naming in the shared stylesheet layer.
- Replace the current ad hoc `web2` color and radius values with tokenized equivalents for:
- primary text
- weak text
- CTA blue
- borders
- surfaces
- success states
- button spotlight text
- card and ambient shadows
- Update the shared stylesheet source and shipped compiled assets so the new tokens flow through the delivered UI.
- Keep the existing `web2` and `web3` class names if that reduces churn, but rebase them on the new token system.
- Establish a typography strategy that follows `design.md` while remaining deployable:
- prefer Haas and Haas Groot Disp only if licensed webfont delivery is available
- otherwise define a documented fallback stack with similar proportions and spacing behavior
- apply positive letter spacing to body, caption, and button treatments where appropriate
- Normalize component shape language to the design brief:
- buttons at 12px radius
- cards and sections at 16px to 24px radius
- larger containers at 24px to 32px radius where needed
- avoid the current 3px to 6px rounded treatment as the default visual language
- Replace the current flat visual treatment with the documented blue-tinted shadow system, but keep shadows controlled and readable in data-heavy views.
- Refactor shared UI structure in the Templ layer:
- `components/core/header.templ`
- `components/core/footer.templ`
- shared shell/header/card/button/table/form patterns used across `components/views/*`
- Add a reusable page-shell pattern so all primary pages share:
- a consistent hero/header treatment
- action grouping
- content width rules
- section spacing
- responsive table overflow behavior
- Improve the dashboard information architecture in `components/views/index.templ`:
- reduce the current long-form text density
- promote primary navigation and key operational tasks
- move build metadata into secondary status cards
- present auth requirements and role policy as a concise callout rather than dense paragraph copy
- Improve snapshot and vCenter list pages in `components/views/snapshots.templ`:
- stronger table hierarchy
- clearer record counts and grouping
- more intentional page headers and return navigation
- responsive behavior that preserves readability on smaller screens
- Improve the VM trace page in `components/views/vm_trace.templ`:
- upgrade search form layout and input styling
- improve chart framing and diagnostics presentation
- make lifecycle summary cards visually clearer
- preserve dense tabular detail without making the page feel purely utilitarian
- Ensure the auth-enabled experience is visible in the UI:
- clarify that UI pages remain public while APIs require Bearer tokens when auth is enabled
- surface viewer versus admin capability differences in concise language
- keep Swagger and operational links accessible from the main navigation
- Add accessibility and interaction requirements to the UI implementation:
- visible focus states
- sufficient text/background contrast
- keyboard-usable navigation and forms
- table layouts that remain readable with horizontal overflow
- mobile-safe spacing and tap targets
- Keep UI changes implementation-friendly:
- avoid introducing a large frontend framework
- continue using Templ plus shared CSS and existing JS assets
- prefer incremental component replacement over a full frontend rewrite
## Public Interfaces and Settings
- No HTTP API changes are required.
- Keep existing endpoints and report filenames stable.
- No auth-model changes are required for the UI refresh.
- If licensed fonts are not available for deployment, the implementation must ship with a documented fallback stack rather than blocking the UI work.
- Add these settings:
- `settings.capture_write_batch_size`
- default: `1000`
- controls batched DB writes for hourly capture
- `settings.snapshot_table_compat_mode`
- default: `true`
- when `true`, continue writing legacy snapshot tables during migration
- `settings.async_report_generation`
- default: `true`
- when `true`, scheduled jobs defer XLSX generation from the hot path
- Keep existing settings such as:
- `hourly_snapshot_concurrency`
- `monthly_aggregation_granularity`
- retry settings
- cleanup settings
- Scheduled monthly aggregation should ignore hourly granularity unless running a manual or backfill job.
## Execution Order
### Phase 1: Hot-Path Runtime Wins
- Add batched hourly writes.
- Decouple report generation from hourly capture.
- Ensure daily scheduled aggregation reads only from `vm_hourly_stats`.
- Ensure monthly scheduled aggregation reads only from `vm_daily_rollup`.
- Keep compatibility tables enabled.
- Define the UI token layer and shared component mapping before page-level redesign work begins.
### Phase 2: Canonical Dataflow
- Refactor reconciliation so canonical caches are updated first.
- Reduce or eliminate prior-snapshot table mutations during capture.
- Make scheduled aggregation paths canonical-only.
- Keep fallback and repair code for legacy unions/scans.
- Implement the shared page shell, navigation, button, card, table, and form refinements across the existing Templ views.
### Phase 3: Postgres-Ready Scale-Up
- Validate index coverage on canonical tables.
- Add PostgreSQL partitioning for `vm_hourly_stats`.
- Benchmark Go and SQL aggregation paths on representative production-scale data.
- Keep Go as default unless SQL demonstrates a clear, repeatable runtime win on canonical Postgres data.
- Treat the benchmark as a comparison against a canonical-table SQL implementation, not the current snapshot-union SQL path.
- If SQL wins, promote SQL behind a controlled rollout flag first, then make it default.
- Complete page-specific UI refinement for dashboard, snapshots, vCenter totals, and VM trace using the shared tokenized design system.
### Phase 4: Compatibility Reduction
- Keep legacy table output behind `snapshot_table_compat_mode`.
- Once canonical-path validation is complete, allow disabling legacy hourly table generation in scheduled runs.
- Retain explicit backfill and rebuild commands for compatibility tables and reports.
- Clean up obsolete styling rules and duplicated visual patterns once the new UI system is fully adopted.
## Implementation Checklist
### 0. Baseline and Guardrails
- [x] Capture baseline metrics for hourly capture, daily aggregation, monthly aggregation, and report generation.
- [x] Confirm current API/endpoint contract and report filename behavior with a regression snapshot.
- [x] Add new settings with defaults and config wiring:
- [x] `settings.capture_write_batch_size=1000`
- [x] `settings.snapshot_table_compat_mode=true`
- [x] `settings.async_report_generation=true`
- [x] Add/confirm stage-level logging and timing around capture, reconcile, totals refresh, and report generation.
- [x] Document migration guardrails: no auth-model changes, SQLite support retained, compatibility mode enabled by default.
- Evidence snapshot: see `phase0-baseline.md` for metrics, API/report contract snapshot, and guardrail verification.
### 1. Phase 1: Hot-Path Runtime Wins
- [x] Implement batched hourly writes for canonical tables in capture flow.
- [x] Add PostgreSQL multi-row insert/upsert path (or `COPY`) for `vm_hourly_stats`.
- [x] Keep SQLite transactional batched upsert path without PostgreSQL-only ingestion features.
- [x] Decouple XLSX/report generation from capture hot path via async/deferred stage.
- [x] Ensure scheduled daily aggregation reads canonical data from `vm_hourly_stats` only.
- [x] Ensure scheduled monthly aggregation reads canonical data from `vm_daily_rollup` only.
- [x] Keep legacy compatibility tables enabled during this phase.
- [x] Introduce UI token layer (`--theme_*`) and map shared component primitives before page-specific redesign.
### 2. Phase 2: Canonical Dataflow
- [x] Refactor capture/reconcile ordering so canonical caches are updated first.
- [x] Move deletion/event reconciliation to one post-capture phase per vCenter.
- [x] Remove prior-snapshot table mutations from capture hot path (except explicit compatibility needs).
- [x] Keep SQL union/legacy scan paths available only for fallback, repair, and backfill.
- [x] Verify `snapshot_registry` logical hourly registration remains correct without normal hourly table scans.
- [x] Implement shared Templ page shell improvements across header/footer/cards/buttons/tables/forms.
- [x] Refresh dashboard, snapshots, vCenter totals, and VM trace views to the tokenized design system.
### 3. Phase 3: Postgres-Ready Scale-Up
- [x] Validate/add canonical `vm_hourly_stats` indexes for snapshot time, vCenter+time, VM identity+time, and trace lookup.
- [x] Add PostgreSQL monthly partitioning for `vm_hourly_stats` behind migration controls.
- [ ] Benchmark Go vs SQL on canonical Postgres tables using representative production-scale data.
- Benchmark harness implemented via `-benchmark-aggregations` and `-benchmark-runs`; production-scale Postgres run pending.
- [x] Keep Go as scheduled default unless SQL shows clear and repeatable runtime wins.
- [x] If SQL wins, roll out behind a controlled flag before any default switch.
### 4. Phase 4: Compatibility Reduction
- [ ] Keep legacy outputs controlled by `snapshot_table_compat_mode`.
- [ ] Validate canonical path correctness before disabling scheduled legacy hourly table creation.
- [ ] Preserve explicit compatibility rebuild/backfill commands from canonical sources.
- [ ] Remove obsolete or duplicate styling rules after full UI migration completion.
### 5. Validation and Quality Gates
- [ ] Add golden-result tests for daily output parity (old vs new path).
- [ ] Add golden-result tests for monthly output parity (old vs new path).
- [ ] Add lifecycle edge-case coverage (partial presence, missing create times, deletion refinement, pool and resource changes).
- [ ] Add integration tests for canonical write/read paths and totals cache correctness.
- [ ] Add compatibility tests for legacy table generation, reports, and rebuild flows.
- [ ] Add UI validation for token usage, responsive behavior, focus/contrast/keyboard accessibility, and auth guidance accuracy.
- [ ] Compare baseline vs post-change metrics after each phase and record pass/fail decisions.
### 6. Rollout and Documentation
- [ ] Update operator docs for new settings and default behavior.
- [ ] Document compatibility-mode lifecycle and criteria to disable legacy table generation.
- [ ] Document benchmark method/results and default-path decision record (Go vs SQL).
- [ ] Publish a short migration runbook for staged rollout, rollback triggers, and repair workflows.
## Test Plan
### Correctness Tests
- Add golden-result tests comparing old and new daily outputs for the same synthetic hourly dataset.
- Add golden-result tests comparing old and new monthly outputs for the same synthetic daily dataset.
- Include edge cases for:
- partial-day VM presence
- missing creation times
- deletion-time refinement
- pool changes
- CPU and RAM changes across samples
- VMs identified by `VmId`, `VmUuid`, and fallback name matching
### Integration Tests
- Hourly capture writes `vm_hourly_stats`, lifecycle caches, and vCenter totals correctly.
- Daily aggregation reads canonical hourly data without scanning `inventory_hourly_*`.
- Monthly aggregation reads canonical daily rollup without scanning hourly history in the normal path.
- `vcenter_aggregate_totals` remains correct for hourly, daily, and monthly views.
- Trace and totals endpoints keep returning equivalent results before and after migration.
- UI page rendering remains valid for dashboard, snapshot pages, vCenter totals, and VM trace after shared component changes.
### Compatibility Tests
- When `snapshot_table_compat_mode=true`, compatibility snapshot tables still exist and are populated.
- Reports still generate correctly from migrated data.
- Backfill and repair flows can rebuild compatibility outputs from canonical sources.
- UI remains functional when auth is disabled and when auth is enabled with protected API usage documented in-page.
### Performance Tests
- Measure per-vCenter capture duration.
- Measure hourly write throughput.
- Measure daily aggregation runtime.
- Measure monthly aggregation runtime.
- Measure report generation runtime when decoupled from scheduled jobs.
- Capture baseline metrics before refactor and compare after each phase.
- Measure basic UI payload impact after the refresh so stylesheet and JS growth stay controlled.
### UI Validation
- Verify token usage in shared CSS so colors, radii, and shadows are not hard-coded inconsistently across pages.
- Verify responsive behavior for dashboard, snapshot tables, vCenter totals, and VM trace at mobile and desktop widths.
- Verify focus states, contrast, and keyboard access for links, buttons, inputs, and table navigation surfaces.
- Verify that the auth guidance on the dashboard still matches actual route protection and Bearer-token behavior.
## Acceptance Criteria
- Scheduled hourly capture runtime is materially reduced without changing user-visible outputs.
- Scheduled daily aggregation no longer depends on `inventory_hourly_*` scans.
- Scheduled monthly aggregation no longer depends on hourly-history scans.
- Canonical caches become the source of truth for normal scheduled processing.
- Legacy compatibility behavior remains available during migration.
- Existing endpoints, reports, auth behavior, and operational commands continue to work.
- The UI reflects the design direction in `design.md` through tokenized colors, typography, spacing, radius, and shadow usage.
- The dashboard, snapshot pages, vCenter totals view, and VM trace view share a coherent visual system and clearer information hierarchy.
- The refreshed UI remains responsive, accessible, and compatible with the current Templ-based rendering model.
## Assumptions
- Target direction is Postgres-ready and runtime-first.
- Existing endpoints, report filenames, and user-visible semantics must remain stable.
- SQLite remains supported for development, tests, and smaller installs.
- PostgreSQL is the intended scale-up target for larger environments.
- Compatibility snapshot tables should remain enabled by default until canonical-path validation is complete.
+1 -1
View File
@@ -8,7 +8,7 @@ package=./
commit=$(git rev-parse HEAD)
buildtime=$(date +%Y-%m-%dT%T%z)
#Extract the version from yml
package_version=$(grep 'version:' "$package_name.yml" | awk '{print $2}' | tr -d '"' | sed 's/^v//')
package_version=$(grep 'version:' "$package_name-service.yml" | awk '{print $2}' | tr -d '"' | sed 's/^v//')
host_os=$(uname -s | tr '[:upper:]' '[:lower:]')
host_arch=$(uname -m)
+33
View File
@@ -0,0 +1,33 @@
package audit
import (
"log/slog"
"net/http"
)
const authAuditMessage = "auth_audit"
// LogAuthEvent emits a structured auth audit log record.
// It is intentionally generic and should never receive raw credentials or tokens.
func LogAuthEvent(logger *slog.Logger, r *http.Request, event string, outcome string, attrs ...any) {
if logger == nil {
logger = slog.Default()
}
logAttrs := make([]any, 0, 14+len(attrs))
logAttrs = append(logAttrs, "category", "auth", "event", event, "outcome", outcome)
if r != nil {
requestPath := r.URL.RequestURI()
if requestPath == "" {
requestPath = r.URL.Path
}
logAttrs = append(logAttrs,
"method", r.Method,
"path", requestPath,
"remote", r.RemoteAddr,
)
}
logAttrs = append(logAttrs, attrs...)
logger.Info(authAuditMessage, logAttrs...)
}
+193
View File
@@ -0,0 +1,193 @@
package handler
import (
"context"
"errors"
"net/http"
"strings"
"time"
"vctp/internal/auth"
"vctp/server/audit"
"vctp/server/middleware"
"vctp/server/models"
)
const (
	// authLoginFailureMessage is the generic 401 body for any credential
	// failure (including LDAP timeouts) so responses do not reveal which
	// part of the login was wrong.
	authLoginFailureMessage = "invalid username or password"
	// authLoginRequestTimeout bounds a login attempt; it is used both as the
	// LDAP dial timeout and as the request-context deadline below.
	authLoginRequestTimeout = 30 * time.Second
)
// ldapAuthenticator abstracts the LDAP bind-and-group-lookup step used by
// AuthLogin; the indirection allows swapping implementations (e.g. fakes).
type ldapAuthenticator interface {
	AuthenticateAndFetchGroups(ctx context.Context, username string, password string) (auth.LDAPIdentity, error)
}
// jwtService abstracts access-token issuance after a successful LDAP login.
type jwtService interface {
	IssueToken(subject string, roles []string, groups []string) (string, auth.Claims, error)
}
// newLDAPAuthenticator constructs the real LDAP authenticator. It is a
// package-level var, presumably as a seam so tests can replace it — confirm
// against the handler tests.
var newLDAPAuthenticator = func(cfg auth.LDAPConfig) (ldapAuthenticator, error) {
	return auth.NewLDAPAuthenticator(cfg)
}
// newJWTService constructs the real JWT signer; replaceable for the same
// reason as newLDAPAuthenticator.
var newJWTService = func(cfg auth.JWTConfig) (jwtService, error) {
	return auth.NewJWTService(cfg)
}
// AuthLogin authenticates a user against LDAP and returns a signed JWT.
// @Summary Login
// @Description Authenticates a username/password against LDAP and returns a signed access token.
// @Tags auth
// @Accept json
// @Produce json
// @Param payload body models.AuthLoginRequest true "Login credentials"
// @Success 200 {object} models.AuthLoginResponse "Login success"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 401 {object} models.ErrorResponse "Invalid credentials"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Failure 503 {object} models.ErrorResponse "Authentication disabled"
// @Router /api/auth/login [post]
func (h *Handler) AuthLogin(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	// Guard against a partially wired handler; a nil logger is passed to the
	// audit call here because h itself may be nil.
	if h == nil || h.Settings == nil || h.Settings.Values == nil {
		audit.LogAuthEvent(nil, r, "login", "error", "reason", "settings_not_configured")
		writeJSONError(w, http.StatusInternalServerError, "authentication is not configured")
		return
	}
	cfg := h.Settings.Values.Settings
	if !cfg.AuthEnabled {
		audit.LogAuthEvent(h.Logger, r, "login", "deny", "reason", "auth_disabled")
		writeJSONError(w, http.StatusServiceUnavailable, "authentication is disabled")
		return
	}
	var req models.AuthLoginRequest
	if err := decodeJSONBody(w, r, &req); err != nil {
		h.Logger.Error("unable to decode auth login request", "error", err)
		audit.LogAuthEvent(h.Logger, r, "login", "deny", "reason", "invalid_request_json", "error", err)
		writeJSONError(w, http.StatusBadRequest, "invalid JSON body")
		return
	}
	// Username is trimmed; the password is sent to LDAP untrimmed and only
	// trimmed for the emptiness check below.
	username := strings.TrimSpace(req.Username)
	password := req.Password
	if username == "" || strings.TrimSpace(password) == "" {
		audit.LogAuthEvent(h.Logger, r, "login", "deny", "reason", "missing_username_or_password", "username", username)
		writeJSONError(w, http.StatusBadRequest, "username and password are required")
		return
	}
	// Built per-request from current settings via the newLDAPAuthenticator
	// factory seam (swapped out in tests).
	ldapAuth, err := newLDAPAuthenticator(auth.LDAPConfig{
		BindAddress:       cfg.LDAPBindAddress,
		BaseDN:            cfg.LDAPBaseDN,
		TrustCertFile:     cfg.LDAPTrustCertFile,
		DisableValidation: cfg.LDAPDisableValidation,
		Insecure:          cfg.LDAPInsecure,
		DialTimeout:       authLoginRequestTimeout,
	})
	if err != nil {
		h.Logger.Error("failed to initialize ldap authenticator", "error", err)
		audit.LogAuthEvent(h.Logger, r, "login", "error", "reason", "ldap_authenticator_init_failed", "username", username, "error", err)
		writeJSONError(w, http.StatusInternalServerError, "authentication service unavailable")
		return
	}
	ctx, cancel := withRequestTimeout(r, authLoginRequestTimeout)
	defer cancel()
	identity, err := ldapAuth.AuthenticateAndFetchGroups(ctx, username, password)
	if err != nil {
		// All three failure branches below deliberately return the same 401
		// body (authLoginFailureMessage) so callers cannot distinguish bad
		// credentials from timeouts; the audit log records the real reason.
		if errors.Is(err, auth.ErrLDAPInvalidCredentials) {
			audit.LogAuthEvent(h.Logger, r, "login", "deny", "reason", "invalid_credentials", "username", username)
			writeJSONError(w, http.StatusUnauthorized, authLoginFailureMessage)
			return
		}
		if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
			audit.LogAuthEvent(h.Logger, r, "login", "deny", "reason", "ldap_timeout", "username", username, "error", err)
			writeJSONError(w, http.StatusUnauthorized, authLoginFailureMessage)
			return
		}
		audit.LogAuthEvent(h.Logger, r, "login", "deny", "reason", "ldap_authentication_failed", "username", username, "error", err)
		writeJSONError(w, http.StatusUnauthorized, authLoginFailureMessage)
		return
	}
	// Authorization: the user must be in at least one allowed LDAP group AND
	// resolve to at least one application role via the group->role mappings.
	roles := auth.ResolveRoles(identity.Groups, cfg.AuthGroupRoleMappings)
	if !auth.HasAnyGroup(identity.Groups, cfg.LDAPGroups) || len(roles) == 0 {
		audit.LogAuthEvent(h.Logger, r, "login", "deny", "reason", "group_or_role_denied", "username", username, "group_count", len(identity.Groups), "resolved_roles", roles)
		writeJSONError(w, http.StatusUnauthorized, authLoginFailureMessage)
		return
	}
	jwtSvc, err := newJWTService(auth.JWTConfig{
		SigningKeyBase64: cfg.AuthJWTSigningKey,
		Issuer:           cfg.AuthJWTIssuer,
		Audience:         cfg.AuthJWTAudience,
		TokenLifespan:    time.Duration(cfg.AuthTokenLifespanMinutes) * time.Minute,
		ClockSkew:        time.Duration(cfg.AuthClockSkewSeconds) * time.Second,
	})
	if err != nil {
		h.Logger.Error("failed to initialize jwt service", "error", err)
		audit.LogAuthEvent(h.Logger, r, "login", "error", "reason", "jwt_service_init_failed", "username", username, "error", err)
		writeJSONError(w, http.StatusInternalServerError, "authentication service unavailable")
		return
	}
	// Prefer the canonical username returned by LDAP as the token subject,
	// falling back to what the user typed.
	subject := strings.TrimSpace(identity.Username)
	if subject == "" {
		subject = username
	}
	token, claims, err := jwtSvc.IssueToken(subject, roles, identity.Groups)
	if err != nil {
		h.Logger.Error("failed to issue auth token", "username", username, "error", err)
		audit.LogAuthEvent(h.Logger, r, "login", "error", "reason", "token_issue_failed", "username", username, "error", err)
		writeJSONError(w, http.StatusInternalServerError, "failed to issue access token")
		return
	}
	audit.LogAuthEvent(h.Logger, r, "login", "allow", "username", subject, "resolved_roles", roles, "expires_at", claims.ExpiresAt)
	writeJSON(w, http.StatusOK, models.AuthLoginResponse{
		AccessToken: token,
		ExpiresAt:   claims.ExpiresAt,
		TokenType:   "Bearer",
	})
}
// AuthMe returns the currently authenticated identity from validated JWT claims.
// @Summary Who am I
// @Description Returns JWT claims for the currently authenticated bearer token.
// @Description Requires Bearer authentication.
// @Tags auth
// @Produce json
// @Success 200 {object} models.AuthMeResponse "Authenticated identity"
// @Failure 401 {object} models.ErrorResponse "Missing or invalid authentication context"
// @Router /api/auth/me [get]
// @Security BearerAuth
func (h *Handler) AuthMe(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
return
}
claims, ok := middleware.ClaimsFromContext(r.Context())
if !ok {
audit.LogAuthEvent(h.Logger, r, "whoami", "deny", "reason", "missing_auth_context")
writeJSONError(w, http.StatusUnauthorized, "missing authentication context")
return
}
audit.LogAuthEvent(h.Logger, r, "whoami", "allow", "subject", claims.Subject, "roles", claims.Roles)
writeJSON(w, http.StatusOK, models.AuthMeResponse{
Status: "OK",
Subject: claims.Subject,
Roles: claims.Roles,
Groups: claims.Groups,
Issuer: claims.Issuer,
Audience: claims.Audience,
IssuedAt: claims.IssuedAt,
ExpiresAt: claims.ExpiresAt,
NotBefore: claims.NotBefore,
TokenID: claims.ID,
})
}
+294
View File
@@ -0,0 +1,294 @@
package handler
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"net/http"
"net/http/httptest"
"testing"
"time"
"vctp/internal/auth"
"vctp/internal/settings"
"vctp/server/middleware"
"vctp/server/models"
)
// stubLDAPAuthenticator returns a canned identity/error pair, letting tests
// drive AuthLogin without a live directory.
type stubLDAPAuthenticator struct {
	identity auth.LDAPIdentity
	err      error
}

// AuthenticateAndFetchGroups ignores its inputs and replays the canned result.
func (a *stubLDAPAuthenticator) AuthenticateAndFetchGroups(_ context.Context, _ string, _ string) (auth.LDAPIdentity, error) {
	return a.identity, a.err
}
// stubJWTService replays a fixed token/claims/error triple for tests.
type stubJWTService struct {
	token  string
	claims auth.Claims
	err    error
}

// IssueToken ignores its inputs and replays the canned result.
func (svc *stubJWTService) IssueToken(_ string, _ []string, _ []string) (string, auth.Claims, error) {
	return svc.token, svc.claims, svc.err
}
// TestAuthLoginAuthDisabled verifies login returns 503 when AuthEnabled is false.
func TestAuthLoginAuthDisabled(t *testing.T) {
	handler := &Handler{
		Logger:   newTestLogger(),
		Settings: &settings.Settings{Values: &settings.SettingsYML{}},
	}
	body := bytes.NewBufferString(`{"username":"alice","password":"pw"}`)
	rec := httptest.NewRecorder()
	handler.AuthLogin(rec, httptest.NewRequest(http.MethodPost, "/api/auth/login", body))
	if rec.Code != http.StatusServiceUnavailable {
		t.Fatalf("expected status %d, got %d", http.StatusServiceUnavailable, rec.Code)
	}
}
// TestAuthLoginInvalidCredentials verifies a 401 with the uniform failure
// message when LDAP rejects the credentials.
func TestAuthLoginInvalidCredentials(t *testing.T) {
	restore := swapAuthFactoriesForTest(
		func(_ auth.LDAPConfig) (ldapAuthenticator, error) {
			return &stubLDAPAuthenticator{err: auth.ErrLDAPInvalidCredentials}, nil
		},
		func(_ auth.JWTConfig) (jwtService, error) {
			return &stubJWTService{}, nil
		},
	)
	defer restore()
	handler := &Handler{Logger: newTestLogger(), Settings: testAuthEnabledSettings()}
	body := bytes.NewBufferString(`{"username":"alice","password":"pw"}`)
	rec := httptest.NewRecorder()
	handler.AuthLogin(rec, httptest.NewRequest(http.MethodPost, "/api/auth/login", body))
	if rec.Code != http.StatusUnauthorized {
		t.Fatalf("expected status %d, got %d", http.StatusUnauthorized, rec.Code)
	}
	var payload models.ErrorResponse
	if err := json.Unmarshal(rec.Body.Bytes(), &payload); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	if payload.Message != authLoginFailureMessage {
		t.Fatalf("unexpected error message: %q", payload.Message)
	}
}
func TestAuthLoginRejectsUnmappedRoles(t *testing.T) {
restoreFactories := swapAuthFactoriesForTest(
func(_ auth.LDAPConfig) (ldapAuthenticator, error) {
return &stubLDAPAuthenticator{
identity: auth.LDAPIdentity{
Username: "alice",
Groups: []string{"cn=other-group,ou=groups,dc=example,dc=com"},
},
}, nil
},
func(_ auth.JWTConfig) (jwtService, error) {
return &stubJWTService{}, nil
},
)
defer restoreFactories()
h := &Handler{
Logger: newTestLogger(),
Settings: testAuthEnabledSettings(),
}
req := httptest.NewRequest(http.MethodPost, "/api/auth/login", bytes.NewBufferString(`{"username":"alice","password":"pw"}`))
rr := httptest.NewRecorder()
h.AuthLogin(rr, req)
if rr.Code != http.StatusUnauthorized {
t.Fatalf("expected status %d, got %d", http.StatusUnauthorized, rr.Code)
}
}
func TestAuthLoginSuccess(t *testing.T) {
restoreFactories := swapAuthFactoriesForTest(
func(_ auth.LDAPConfig) (ldapAuthenticator, error) {
return &stubLDAPAuthenticator{
identity: auth.LDAPIdentity{
Username: "alice",
UserDN: "cn=alice,ou=users,dc=example,dc=com",
Groups: []string{"cn=vctp-admins,ou=groups,dc=example,dc=com"},
},
}, nil
},
func(_ auth.JWTConfig) (jwtService, error) {
return &stubJWTService{
token: "issued-token",
claims: auth.Claims{
ExpiresAt: time.Unix(1_700_000_000, 0).Unix(),
},
}, nil
},
)
defer restoreFactories()
h := &Handler{
Logger: newTestLogger(),
Settings: testAuthEnabledSettings(),
}
req := httptest.NewRequest(http.MethodPost, "/api/auth/login", bytes.NewBufferString(`{"username":"alice","password":"pw"}`))
rr := httptest.NewRecorder()
h.AuthLogin(rr, req)
if rr.Code != http.StatusOK {
t.Fatalf("expected status %d, got %d: %s", http.StatusOK, rr.Code, rr.Body.String())
}
var payload models.AuthLoginResponse
if err := json.Unmarshal(rr.Body.Bytes(), &payload); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
if payload.AccessToken != "issued-token" {
t.Fatalf("unexpected token: %q", payload.AccessToken)
}
if payload.TokenType != "Bearer" {
t.Fatalf("unexpected token type: %q", payload.TokenType)
}
}
func TestAuthLoginJWTFactoryFailure(t *testing.T) {
restoreFactories := swapAuthFactoriesForTest(
func(_ auth.LDAPConfig) (ldapAuthenticator, error) {
return &stubLDAPAuthenticator{
identity: auth.LDAPIdentity{
Username: "alice",
Groups: []string{"cn=vctp-admins,ou=groups,dc=example,dc=com"},
},
}, nil
},
func(_ auth.JWTConfig) (jwtService, error) {
return nil, errors.New("jwt init failed")
},
)
defer restoreFactories()
h := &Handler{
Logger: newTestLogger(),
Settings: testAuthEnabledSettings(),
}
req := httptest.NewRequest(http.MethodPost, "/api/auth/login", bytes.NewBufferString(`{"username":"alice","password":"pw"}`))
rr := httptest.NewRecorder()
h.AuthLogin(rr, req)
if rr.Code != http.StatusInternalServerError {
t.Fatalf("expected status %d, got %d", http.StatusInternalServerError, rr.Code)
}
}
// TestAuthMeSuccess drives AuthMe through the real RequireAuth middleware
// with a genuinely signed token, then checks the echoed claims.
func TestAuthMeSuccess(t *testing.T) {
	h := &Handler{
		Logger:   newTestLogger(),
		Settings: testAuthEnabledSettings(),
	}
	// Wrap the handler exactly as the router does so ClaimsFromContext is populated.
	protected := middleware.RequireAuth(newTestLogger(), h.Settings)(http.HandlerFunc(h.AuthMe))
	// Build a real JWT service from the same settings the middleware validates against.
	tokenSvc, err := auth.NewJWTService(auth.JWTConfig{
		SigningKeyBase64: h.Settings.Values.Settings.AuthJWTSigningKey,
		Issuer:           h.Settings.Values.Settings.AuthJWTIssuer,
		Audience:         h.Settings.Values.Settings.AuthJWTAudience,
		TokenLifespan:    time.Duration(h.Settings.Values.Settings.AuthTokenLifespanMinutes) * time.Minute,
		ClockSkew:        time.Duration(h.Settings.Values.Settings.AuthClockSkewSeconds) * time.Second,
	})
	if err != nil {
		t.Fatalf("failed to create jwt service: %v", err)
	}
	token, claims, err := tokenSvc.IssueToken("alice", []string{"viewer"}, []string{"cn=vctp-viewers,ou=groups,dc=example,dc=com"})
	if err != nil {
		t.Fatalf("failed to issue token: %v", err)
	}
	req := httptest.NewRequest(http.MethodGet, "/api/auth/me", nil)
	req.Header.Set("Authorization", "Bearer "+token)
	rr := httptest.NewRecorder()
	protected.ServeHTTP(rr, req)
	if rr.Code != http.StatusOK {
		t.Fatalf("expected status %d, got %d: %s", http.StatusOK, rr.Code, rr.Body.String())
	}
	var payload models.AuthMeResponse
	if err := json.Unmarshal(rr.Body.Bytes(), &payload); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// The response must mirror the claims that were issued above.
	if payload.Status != "OK" {
		t.Fatalf("unexpected status: %q", payload.Status)
	}
	if payload.Subject != claims.Subject {
		t.Fatalf("unexpected subject: %q", payload.Subject)
	}
	if payload.Issuer != claims.Issuer || payload.Audience != claims.Audience {
		t.Fatalf("unexpected issuer/audience: %q/%q", payload.Issuer, payload.Audience)
	}
	if payload.TokenID != claims.ID {
		t.Fatalf("unexpected token id: %q", payload.TokenID)
	}
}
// TestAuthMeMissingAuthContext verifies AuthMe returns 401 when called
// without the middleware-provided claims in the request context.
func TestAuthMeMissingAuthContext(t *testing.T) {
	handler := &Handler{Logger: newTestLogger()}
	rec := httptest.NewRecorder()
	handler.AuthMe(rec, httptest.NewRequest(http.MethodGet, "/api/auth/me", nil))
	if rec.Code != http.StatusUnauthorized {
		t.Fatalf("expected status %d, got %d", http.StatusUnauthorized, rec.Code)
	}
}
// TestAuthMeMethodNotAllowed verifies AuthMe rejects non-GET requests.
func TestAuthMeMethodNotAllowed(t *testing.T) {
	handler := &Handler{Logger: newTestLogger()}
	rec := httptest.NewRecorder()
	handler.AuthMe(rec, httptest.NewRequest(http.MethodPost, "/api/auth/me", nil))
	if rec.Code != http.StatusMethodNotAllowed {
		t.Fatalf("expected status %d, got %d", http.StatusMethodNotAllowed, rec.Code)
	}
}
// testAuthEnabledSettings builds a settings fixture with auth enabled, a
// valid base64 signing key, and a single admin group-role mapping.
func testAuthEnabledSettings() *settings.Settings {
	fixture := &settings.Settings{Values: &settings.SettingsYML{}}
	s := &fixture.Values.Settings
	s.AuthEnabled = true
	s.AuthMode = "required"
	s.AuthJWTSigningKey = base64.StdEncoding.EncodeToString([]byte("test-signing-key"))
	s.AuthTokenLifespanMinutes = 120
	s.AuthJWTIssuer = "vctp"
	s.AuthJWTAudience = "vctp-api"
	s.AuthClockSkewSeconds = 60
	s.LDAPBindAddress = "ldaps://ldap.example.com:636"
	s.LDAPBaseDN = "dc=example,dc=com"
	s.AuthGroupRoleMappings = map[string]string{
		"cn=vctp-admins,ou=groups,dc=example,dc=com": "admin",
	}
	return fixture
}
// swapAuthFactoriesForTest replaces the package-level auth factory seams with
// the given stubs and returns a restore func for deferred cleanup.
func swapAuthFactoriesForTest(
	ldapFactory func(auth.LDAPConfig) (ldapAuthenticator, error),
	jwtFactory func(auth.JWTConfig) (jwtService, error),
) func() {
	savedLDAP, savedJWT := newLDAPAuthenticator, newJWTService
	newLDAPAuthenticator, newJWTService = ldapFactory, jwtFactory
	return func() {
		newLDAPAuthenticator, newJWTService = savedLDAP, savedJWT
	}
}
+101
View File
@@ -0,0 +1,101 @@
package handler
import (
"encoding/json"
"testing"
"time"
"vctp/components/views"
)
// TestBuildVcenterChartEncodesClientConfig verifies entries are sorted
// oldest-first and encoded into a three-series chart config.
func TestBuildVcenterChartEncodesClientConfig(t *testing.T) {
	// Deliberately newest-first to exercise the sort.
	entries := []views.VcenterTotalsEntry{
		{RawTime: 2_000, VmCount: 30, VcpuTotal: 80, RamTotalGB: 120},
		{RawTime: 1_000, VmCount: 20, VcpuTotal: 60, RamTotalGB: 90},
	}
	chart := buildVcenterChart(entries)
	if chart.ConfigJSON == "" {
		t.Fatal("expected config json for non-empty vcenter chart")
	}
	var cfg lineChartConfig
	if err := json.Unmarshal([]byte(chart.ConfigJSON), &cfg); err != nil {
		t.Fatalf("failed to decode chart config json: %v", err)
	}
	if len(cfg.Labels) != 2 {
		t.Fatalf("expected 2 labels, got %d", len(cfg.Labels))
	}
	wantOldest := time.Unix(1_000, 0).Local().Format("2006-01-02 15:04:05")
	if cfg.Labels[0] != wantOldest {
		t.Fatalf("expected oldest label first %q, got %q", wantOldest, cfg.Labels[0])
	}
	if len(cfg.Series) != 3 {
		t.Fatalf("expected 3 series, got %d", len(cfg.Series))
	}
	if cfg.Series[0].Values[0] != 20 {
		t.Fatalf("expected first VM value 20, got %v", cfg.Series[0].Values[0])
	}
}
// TestBuildVmTraceChartEncodesPoolState verifies per-pool series activation
// and the resource-pool hover row in the encoded trace chart.
func TestBuildVmTraceChartEncodesPoolState(t *testing.T) {
	entries := []views.VmTraceEntry{
		{RawTime: 1_000, ResourcePool: "Tin", VcpuCount: 4, RamGB: 16},
		{RawTime: 2_000, ResourcePool: "Gold", VcpuCount: 8, RamGB: 24},
	}
	chart := buildVmTraceChart(entries)
	if chart.ConfigJSON == "" {
		t.Fatal("expected config json for non-empty vm trace chart")
	}
	var cfg lineChartConfig
	if err := json.Unmarshal([]byte(chart.ConfigJSON), &cfg); err != nil {
		t.Fatalf("failed to decode vm trace chart config: %v", err)
	}
	if len(cfg.Series) != 6 {
		t.Fatalf("expected 6 series, got %d", len(cfg.Series))
	}
	if len(cfg.HoverRows) != 1 || cfg.HoverRows[0].Name != "Resource Pool" {
		t.Fatalf("expected resource pool hover row, got %#v", cfg.HoverRows)
	}
	hover := cfg.HoverRows[0].Values
	if hover[0] != "Tin" || hover[1] != "Gold" {
		t.Fatalf("unexpected hover row values: %#v", hover)
	}
	// Pool series are non-zero only while the VM sits in that pool.
	if cfg.Series[2].Values[0] == 0 || cfg.Series[2].Values[1] != 0 {
		t.Fatalf("tin series should be active only for first point: %#v", cfg.Series[2].Values)
	}
	if cfg.Series[5].Values[0] != 0 || cfg.Series[5].Values[1] == 0 {
		t.Fatalf("gold series should be active only for second point: %#v", cfg.Series[5].Values)
	}
}
// TestBuildChartsEmptyInput verifies both chart builders yield an empty
// config for nil input.
func TestBuildChartsEmptyInput(t *testing.T) {
	if got := buildVcenterChart(nil); got.ConfigJSON != "" {
		t.Fatalf("expected empty config for empty vcenter input, got %q", got.ConfigJSON)
	}
	if got := buildVmTraceChart(nil); got.ConfigJSON != "" {
		t.Fatalf("expected empty config for empty vm trace input, got %q", got.ConfigJSON)
	}
}
+41
View File
@@ -0,0 +1,41 @@
package handler
import "encoding/json"
type lineChartConfig struct {
Height int `json:"height,omitempty"`
XTicks int `json:"xTicks,omitempty"`
YTicks int `json:"yTicks,omitempty"`
YLabel string `json:"yLabel,omitempty"`
XLabel string `json:"xLabel,omitempty"`
Labels []string `json:"labels"`
TickLabels []string `json:"tickLabels,omitempty"`
Series []lineChartSeries `json:"series"`
HoverRows []lineChartHoverRow `json:"hoverRows,omitempty"`
}
type lineChartSeries struct {
Name string `json:"name"`
Color string `json:"color"`
Values []float64 `json:"values"`
Dash []float64 `json:"dash,omitempty"`
LineWidth float64 `json:"lineWidth,omitempty"`
TooltipFormat string `json:"tooltipFormat,omitempty"`
TooltipHidden bool `json:"tooltipHidden,omitempty"`
}
type lineChartHoverRow struct {
Name string `json:"name"`
Values []string `json:"values"`
}
func encodeLineChartConfig(cfg lineChartConfig) string {
if len(cfg.Labels) == 0 || len(cfg.Series) == 0 {
return ""
}
out, err := json.Marshal(cfg)
if err != nil {
return ""
}
return string(out)
}
+203
View File
@@ -0,0 +1,203 @@
package handler
import (
"database/sql"
"fmt"
"net/http"
"strings"
"time"
"vctp/db"
"vctp/server/models"
)
// DailyCreationDiagnostics returns missing CreationTime diagnostics for a daily summary table.
// @Summary Daily summary CreationTime diagnostics
// @Description Returns counts of daily summary rows missing CreationTime and sample rows for the given date.
// @Description Requires Bearer authentication with the viewer role (admin also allowed).
// @Tags diagnostics
// @Produce json
// @Param date query string true "Daily date (YYYY-MM-DD)"
// @Success 200 {object} models.DailyCreationDiagnosticsResponse "Diagnostics result"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 404 {object} models.ErrorResponse "Summary not found"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Security BearerAuth
// @Router /api/diagnostics/daily-creation [get]
func (h *Handler) DailyCreationDiagnostics(w http.ResponseWriter, r *http.Request) {
	dateValue := strings.TrimSpace(r.URL.Query().Get("date"))
	if dateValue == "" {
		writeJSONError(w, http.StatusBadRequest, "date is required")
		return
	}
	// Parse in the server's local zone so "today" matches the summarizer's notion of a day.
	loc := time.Now().Location()
	parsed, err := time.ParseInLocation("2006-01-02", dateValue, loc)
	if err != nil {
		writeJSONError(w, http.StatusBadRequest, "date must be YYYY-MM-DD")
		return
	}
	// Table name is derived from the validated date and re-checked before being
	// interpolated into SQL below, so the fmt.Sprintf queries are injection-safe.
	tableName := fmt.Sprintf("inventory_daily_summary_%s", parsed.Format("20060102"))
	if _, err := db.SafeTableName(tableName); err != nil {
		writeJSONError(w, http.StatusBadRequest, "invalid summary table name")
		return
	}
	ctx, cancel := withRequestTimeout(r, 10*time.Second)
	defer cancel()
	dbConn := h.Database.DB()
	if !db.TableExists(ctx, dbConn, tableName) {
		writeJSONError(w, http.StatusNotFound, "daily summary table not found")
		return
	}
	// Aggregate counts: total rows, rows missing CreationTime, rows only
	// partially present for the day, and the intersection of the two.
	var totalRows int64
	countQuery := fmt.Sprintf(`SELECT COUNT(*) FROM %s`, tableName)
	if err := dbConn.GetContext(ctx, &totalRows, countQuery); err != nil {
		h.Logger.Warn("daily creation diagnostics count failed", "table", tableName, "error", err)
		writeJSONError(w, http.StatusInternalServerError, "failed to read summary rows")
		return
	}
	var missingTotal int64
	missingQuery := fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE "CreationTime" IS NULL OR "CreationTime" = 0`, tableName)
	if err := dbConn.GetContext(ctx, &missingTotal, missingQuery); err != nil {
		h.Logger.Warn("daily creation diagnostics missing count failed", "table", tableName, "error", err)
		writeJSONError(w, http.StatusInternalServerError, "failed to read missing creation rows")
		return
	}
	var avgIsPresentLtOne int64
	avgPresenceQuery := fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE "AvgIsPresent" IS NOT NULL AND "AvgIsPresent" < 0.999999`, tableName)
	if err := dbConn.GetContext(ctx, &avgIsPresentLtOne, avgPresenceQuery); err != nil {
		h.Logger.Warn("daily creation diagnostics avg-is-present count failed", "table", tableName, "error", err)
		writeJSONError(w, http.StatusInternalServerError, "failed to read avg is present rows")
		return
	}
	var missingPartialCount int64
	missingPartialQuery := fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE ("CreationTime" IS NULL OR "CreationTime" = 0) AND "AvgIsPresent" IS NOT NULL AND "AvgIsPresent" < 0.999999`, tableName)
	if err := dbConn.GetContext(ctx, &missingPartialCount, missingPartialQuery); err != nil {
		h.Logger.Warn("daily creation diagnostics missing partial count failed", "table", tableName, "error", err)
		writeJSONError(w, http.StatusInternalServerError, "failed to read missing partial rows")
		return
	}
	missingPct := 0.0
	if totalRows > 0 {
		missingPct = float64(missingTotal) * 100 / float64(totalRows)
	}
	// Per-vCenter breakdown of missing CreationTime rows. Failures here are
	// best-effort: the response still carries the aggregate counts.
	byVcenter := make([]models.DailyCreationMissingByVcenter, 0)
	byVcenterQuery := fmt.Sprintf(`
		SELECT "Vcenter", COUNT(*) AS missing_count
		FROM %s
		WHERE "CreationTime" IS NULL OR "CreationTime" = 0
		GROUP BY "Vcenter"
		ORDER BY missing_count DESC
	`, tableName)
	if rows, err := dbConn.QueryxContext(ctx, byVcenterQuery); err != nil {
		h.Logger.Warn("daily creation diagnostics by-vcenter failed", "table", tableName, "error", err)
	} else {
		for rows.Next() {
			var vcenter string
			var count int64
			if err := rows.Scan(&vcenter, &count); err != nil {
				// Skip unscannable rows; partial results are still useful.
				continue
			}
			byVcenter = append(byVcenter, models.DailyCreationMissingByVcenter{
				Vcenter:      vcenter,
				MissingCount: count,
			})
		}
		if err := rows.Err(); err != nil {
			h.Logger.Warn("daily creation diagnostics by-vcenter iteration failed", "table", tableName, "error", err)
		}
		rows.Close()
	}
	const sampleLimit = 10
	// collectSamples runs a sample query and scans rows into sample models.
	// Shared by the two sample queries below (was duplicated inline); it
	// defers rows.Close() and checks rows.Err() so iteration errors surface
	// in the log instead of being silently dropped. label distinguishes the
	// two call sites in warn messages.
	collectSamples := func(query string, label string) []models.DailyCreationMissingSample {
		out := make([]models.DailyCreationMissingSample, 0, sampleLimit)
		rows, err := dbConn.QueryxContext(ctx, query)
		if err != nil {
			h.Logger.Warn("daily creation diagnostics "+label+" failed", "table", tableName, "error", err)
			return out
		}
		defer rows.Close()
		for rows.Next() {
			var (
				vcenter                      string
				vmId, vmUuid, name           sql.NullString
				samplesPresent, snapshotTime sql.NullInt64
				avgIsPresent                 sql.NullFloat64
			)
			if err := rows.Scan(&vcenter, &vmId, &vmUuid, &name, &samplesPresent, &avgIsPresent, &snapshotTime); err != nil {
				// Skip unscannable rows; partial results are still useful.
				continue
			}
			out = append(out, models.DailyCreationMissingSample{
				Vcenter:        vcenter,
				VmId:           vmId.String,
				VmUuid:         vmUuid.String,
				Name:           name.String,
				SamplesPresent: samplesPresent.Int64,
				AvgIsPresent:   avgIsPresent.Float64,
				SnapshotTime:   snapshotTime.Int64,
			})
		}
		if err := rows.Err(); err != nil {
			h.Logger.Warn("daily creation diagnostics "+label+" iteration failed", "table", tableName, "error", err)
		}
		return out
	}
	sampleQuery := fmt.Sprintf(`
		SELECT "Vcenter","VmId","VmUuid","Name","SamplesPresent","AvgIsPresent","SnapshotTime"
		FROM %s
		WHERE "CreationTime" IS NULL OR "CreationTime" = 0
		ORDER BY "SamplesPresent" DESC
		LIMIT %d
	`, tableName, sampleLimit)
	samples := collectSamples(sampleQuery, "sample")
	partialSampleQuery := fmt.Sprintf(`
		SELECT "Vcenter","VmId","VmUuid","Name","SamplesPresent","AvgIsPresent","SnapshotTime"
		FROM %s
		WHERE ("CreationTime" IS NULL OR "CreationTime" = 0)
		AND "AvgIsPresent" IS NOT NULL
		AND "AvgIsPresent" < 0.999999
		ORDER BY "SamplesPresent" DESC
		LIMIT %d
	`, tableName, sampleLimit)
	partialSamples := collectSamples(partialSampleQuery, "partial sample")
	response := models.DailyCreationDiagnosticsResponse{
		Status:                        "OK",
		Date:                          parsed.Format("2006-01-02"),
		Table:                         tableName,
		TotalRows:                     totalRows,
		MissingCreationCount:          missingTotal,
		MissingCreationPct:            missingPct,
		AvgIsPresentLtOneCount:        avgIsPresentLtOne,
		MissingCreationPartialCount:   missingPartialCount,
		MissingByVcenter:              byVcenter,
		Samples:                       samples,
		MissingCreationPartialSamples: partialSamples,
	}
	writeJSON(w, http.StatusOK, response)
}
+46 -49
View File
@@ -2,70 +2,67 @@ package handler
import (
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
)
const encryptedValuePrefixV1 = "enc:v1:"

// encryptRequest is the accepted request body. "plaintext" is the canonical
// key; "value" and "message" are accepted as legacy aliases.
type encryptRequest struct {
	Plaintext string `json:"plaintext"`
	Value     string `json:"value"`
	Message   string `json:"message"`
}

// EncryptData encrypts a plaintext value and returns the ciphertext.
// @Summary Encrypt data
// @Description Encrypts a plaintext value and returns the ciphertext.
// @Description Requires Bearer authentication with the admin role.
// @Tags crypto
// @Accept json
// @Produce json
// @Param payload body map[string]string true "Plaintext payload"
// @Success 200 {object} models.StatusMessageResponse "Ciphertext response"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Security BearerAuth
// @Router /api/encrypt [post]
func (h *Handler) EncryptData(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	// Cap the request body at 4 KiB; secrets should be small.
	var req encryptRequest
	if err := json.NewDecoder(http.MaxBytesReader(w, r.Body, 4096)).Decode(&req); err != nil {
		h.Logger.Error("unable to decode encrypt request", "error", err)
		writeJSONError(w, http.StatusBadRequest, "invalid JSON body")
		return
	}
	// First non-empty of plaintext/value/message wins.
	plaintext := strings.TrimSpace(req.Plaintext)
	if plaintext == "" {
		plaintext = strings.TrimSpace(req.Value)
	}
	if plaintext == "" {
		plaintext = strings.TrimSpace(req.Message)
	}
	if plaintext == "" {
		writeJSONError(w, http.StatusBadRequest, "plaintext is required (accepted keys: plaintext, value, message)")
		return
	}
	cipherText, err := h.Secret.Encrypt([]byte(plaintext))
	if err != nil {
		h.Logger.Error("unable to encrypt payload", "error", err)
		writeJSONError(w, http.StatusInternalServerError, "encryption failed")
		return
	}
	// Only the input length is logged, never the plaintext itself.
	h.Logger.Debug("encrypted plaintext payload", "input_length", len(plaintext))
	// "message" mirrors "ciphertext" for callers of the older response shape;
	// "prefixed" carries the enc:v1: storage prefix.
	writeJSON(w, http.StatusOK, map[string]string{
		"status":     "OK",
		"message":    cipherText,
		"prefixed":   encryptedValuePrefixV1 + cipherText,
		"ciphertext": cipherText,
	})
}
+135
View File
@@ -0,0 +1,135 @@
package handler
import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"strings"
"testing"
"vctp/internal/secrets"
)
// newEncryptTestHandler builds a handler wired to a secrets service with a
// fixed 32-byte AES key, returning both so tests can decrypt responses.
func newEncryptTestHandler() (*Handler, *secrets.Secrets) {
	log := newTestLogger()
	secretSvc := secrets.New(log, []byte("0123456789abcdef0123456789abcdef"))
	handler := &Handler{
		Logger: log,
		Secret: secretSvc,
	}
	return handler, secretSvc
}
// decodeResponse unmarshals the recorder's JSON body into a string map,
// failing the test on malformed JSON.
func decodeResponse(t *testing.T, rr *httptest.ResponseRecorder) map[string]string {
	t.Helper()
	resp := map[string]string{}
	if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil {
		t.Fatalf("failed to decode response body %q: %v", rr.Body.String(), err)
	}
	return resp
}
// TestEncryptDataRejectsWrongMethod verifies GET is rejected with 405 and an
// ERROR status body.
func TestEncryptDataRejectsWrongMethod(t *testing.T) {
	handler, _ := newEncryptTestHandler()
	rec := httptest.NewRecorder()
	handler.EncryptData(rec, httptest.NewRequest(http.MethodGet, "/api/encrypt", nil))
	if rec.Code != http.StatusMethodNotAllowed {
		t.Fatalf("expected %d, got %d", http.StatusMethodNotAllowed, rec.Code)
	}
	if body := decodeResponse(t, rec); body["status"] != "ERROR" {
		t.Fatalf("expected status ERROR, got %#v", body)
	}
}
// TestEncryptDataRejectsInvalidJSON verifies a malformed body yields 400 and
// an ERROR status body.
func TestEncryptDataRejectsInvalidJSON(t *testing.T) {
	handler, _ := newEncryptTestHandler()
	rec := httptest.NewRecorder()
	handler.EncryptData(rec, httptest.NewRequest(http.MethodPost, "/api/encrypt", strings.NewReader("{")))
	if rec.Code != http.StatusBadRequest {
		t.Fatalf("expected %d, got %d", http.StatusBadRequest, rec.Code)
	}
	if body := decodeResponse(t, rec); body["status"] != "ERROR" {
		t.Fatalf("expected status ERROR, got %#v", body)
	}
}
// TestEncryptDataAcceptsPlaintextField verifies the canonical "plaintext"
// key: the response carries ciphertext, a prefixed copy, a mirrored message
// field, and the ciphertext round-trips through Decrypt.
func TestEncryptDataAcceptsPlaintextField(t *testing.T) {
	handler, secretSvc := newEncryptTestHandler()
	rec := httptest.NewRecorder()
	handler.EncryptData(rec, httptest.NewRequest(http.MethodPost, "/api/encrypt", strings.NewReader(`{"plaintext":"super-secret"}`)))
	if rec.Code != http.StatusOK {
		t.Fatalf("expected %d, got %d", http.StatusOK, rec.Code)
	}
	body := decodeResponse(t, rec)
	if body["status"] != "OK" {
		t.Fatalf("expected status OK, got %#v", body)
	}
	if body["ciphertext"] == "" || body["prefixed"] == "" {
		t.Fatalf("expected ciphertext+prefixed fields, got %#v", body)
	}
	if !strings.HasPrefix(body["prefixed"], encryptedValuePrefixV1) {
		t.Fatalf("expected prefixed value with %q, got %q", encryptedValuePrefixV1, body["prefixed"])
	}
	if !strings.EqualFold(body["message"], body["ciphertext"]) {
		t.Fatalf("expected message to mirror ciphertext, got %#v", body)
	}
	decrypted, err := secretSvc.Decrypt(body["ciphertext"])
	if err != nil {
		t.Fatalf("unable to decrypt ciphertext response: %v", err)
	}
	if string(decrypted) != "super-secret" {
		t.Fatalf("unexpected decrypted value %q", string(decrypted))
	}
}
// TestEncryptDataAcceptsLegacyValueField verifies the legacy "value" alias
// is still accepted and its ciphertext decrypts to the original input.
func TestEncryptDataAcceptsLegacyValueField(t *testing.T) {
	handler, secretSvc := newEncryptTestHandler()
	payload := bytes.NewBufferString(`{"value":"legacy-input"}`)
	rec := httptest.NewRecorder()
	handler.EncryptData(rec, httptest.NewRequest(http.MethodPost, "/api/encrypt", payload))
	if rec.Code != http.StatusOK {
		t.Fatalf("expected %d, got %d", http.StatusOK, rec.Code)
	}
	body := decodeResponse(t, rec)
	encrypted := body["ciphertext"]
	if encrypted == "" {
		t.Fatalf("expected ciphertext in response, got %#v", body)
	}
	decrypted, err := secretSvc.Decrypt(encrypted)
	if err != nil {
		t.Fatalf("unable to decrypt ciphertext response: %v", err)
	}
	if string(decrypted) != "legacy-input" {
		t.Fatalf("unexpected decrypted value %q", string(decrypted))
	}
}
// TestEncryptDataRejectsMissingPayloadValue verifies an empty JSON object
// (none of the accepted keys) yields 400 with an ERROR status body.
func TestEncryptDataRejectsMissingPayloadValue(t *testing.T) {
	handler, _ := newEncryptTestHandler()
	rec := httptest.NewRecorder()
	handler.EncryptData(rec, httptest.NewRequest(http.MethodPost, "/api/encrypt", strings.NewReader(`{}`)))
	if rec.Code != http.StatusBadRequest {
		t.Fatalf("expected %d, got %d", http.StatusBadRequest, rec.Code)
	}
	if body := decodeResponse(t, rec); body["status"] != "ERROR" {
		t.Fatalf("expected status ERROR, got %#v", body)
	}
}
+24
View File
@@ -0,0 +1,24 @@
package handler
import (
"fmt"
"net/http"
)
// legacyAPISetting names the settings key that re-enables the deprecated API.
const legacyAPISetting = "settings.enable_legacy_api"

// legacyAPIEnabled reports whether the deprecated API is switched on; a
// nil/unconfigured handler counts as disabled.
func (h *Handler) legacyAPIEnabled() bool {
	configured := h != nil && h.Settings != nil && h.Settings.Values != nil
	return configured && h.Settings.Values.Settings.EnableLegacyAPI
}
// denyLegacyAPI writes a 410 Gone response for a deprecated endpoint unless
// the legacy API has been re-enabled in settings. It returns true when the
// request was blocked (the caller must stop processing) and false when the
// gate is open.
func (h *Handler) denyLegacyAPI(w http.ResponseWriter, endpoint string) bool {
	if h.legacyAPIEnabled() {
		return false
	}
	// legacyAPIEnabled deliberately tolerates a nil receiver (it returns
	// false), so guard here as well before dereferencing h.Logger; otherwise
	// a partially constructed handler would panic instead of denying cleanly.
	if h != nil && h.Logger != nil {
		h.Logger.Warn("legacy endpoint request blocked", "endpoint", endpoint, "setting", legacyAPISetting)
	}
	writeJSONError(w, http.StatusGone, fmt.Sprintf("%s is deprecated and disabled; set %s=true to temporarily re-enable", endpoint, legacyAPISetting))
	return true
}
+115
View File
@@ -0,0 +1,115 @@
package handler
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
"vctp/internal/settings"
)
// newLegacyGateHandler builds a minimal Handler whose settings toggle the
// legacy-API gate to the requested state.
func newLegacyGateHandler(enabled bool) *Handler {
	values := &settings.SettingsYML{}
	values.Settings.EnableLegacyAPI = enabled
	return &Handler{
		Logger:   newTestLogger(),
		Settings: &settings.Settings{Values: values},
	}
}
// TestDenyLegacyAPIDisabledByDefault confirms the gate blocks legacy
// endpoints when the setting is off: the call reports the request as denied,
// answers 410 Gone, and mentions the deprecation in the body.
func TestDenyLegacyAPIDisabledByDefault(t *testing.T) {
	h := newLegacyGateHandler(false)
	rec := httptest.NewRecorder()
	if !h.denyLegacyAPI(rec, "/api/event/vm/create") {
		t.Fatal("expected legacy API to be denied by default")
	}
	if rec.Code != http.StatusGone {
		t.Fatalf("expected %d, got %d", http.StatusGone, rec.Code)
	}
	if body := rec.Body.String(); !strings.Contains(body, "deprecated") {
		t.Fatalf("unexpected response body: %s", body)
	}
}
// TestDenyLegacyAPIEnabledViaSettings confirms that enabling the setting
// opens the gate: the request is not denied and nothing is written to the
// response body.
func TestDenyLegacyAPIEnabledViaSettings(t *testing.T) {
	h := newLegacyGateHandler(true)
	rec := httptest.NewRecorder()
	if h.denyLegacyAPI(rec, "/api/event/vm/create") {
		t.Fatal("expected legacy API to be allowed when setting is enabled")
	}
	if rec.Body.Len() != 0 {
		t.Fatalf("expected no response body write, got: %s", rec.Body.String())
	}
}
// TestVmCreateEventHonorsLegacyGate exercises the VM-create event endpoint
// under both gate states: with the legacy API disabled the handler must
// answer 410 Gone, and with it enabled the invalid JSON body must surface
// as a 400 Bad Request.
func TestVmCreateEventHonorsLegacyGate(t *testing.T) {
	t.Run("disabled", func(t *testing.T) {
		h := newLegacyGateHandler(false)
		req := httptest.NewRequest(http.MethodPost, "/api/event/vm/create", strings.NewReader("{invalid"))
		rr := httptest.NewRecorder()
		h.VmCreateEvent(rr, req)
		if rr.Code != http.StatusGone {
			t.Fatalf("expected %d, got %d", http.StatusGone, rr.Code)
		}
	})
	t.Run("enabled", func(t *testing.T) {
		h := newLegacyGateHandler(true)
		// Malformed JSON shows the gate check runs first: only an open gate
		// lets the body reach the decoder and produce a 400.
		req := httptest.NewRequest(http.MethodPost, "/api/event/vm/create", strings.NewReader("{invalid"))
		rr := httptest.NewRecorder()
		h.VmCreateEvent(rr, req)
		if rr.Code != http.StatusBadRequest {
			t.Fatalf("expected %d when gate is open, got %d", http.StatusBadRequest, rr.Code)
		}
	})
}
// TestLegacyInventoryEndpointsAreGatedWhenDisabled verifies that each legacy
// inventory mutation endpoint answers 410 Gone with a deprecation message
// while the legacy-API setting is off.
func TestLegacyInventoryEndpointsAreGatedWhenDisabled(t *testing.T) {
	h := newLegacyGateHandler(false)
	cases := []struct {
		name   string
		method string
		path   string
		body   string
		call   func(*Handler, *httptest.ResponseRecorder, *http.Request)
	}{
		{
			name:   "import vm",
			method: http.MethodPost,
			path:   "/api/import/vm",
			body:   `{"name":"vm1"}`,
			call:   func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) { h.VmImport(rr, req) },
		},
		{
			// Query parameters are supplied, proving the gate rejects the
			// request even when the input would otherwise be valid.
			name:   "cleanup vm",
			method: http.MethodDelete,
			path:   "/api/inventory/vm/delete?vm_id=vm-1&datacenter_name=dc1",
			call:   func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) { h.VmCleanup(rr, req) },
		},
		{
			name:   "update vm details",
			method: http.MethodPost,
			path:   "/api/inventory/vm/update",
			call:   func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) { h.VmUpdateDetails(rr, req) },
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			req := httptest.NewRequest(tc.method, tc.path, strings.NewReader(tc.body))
			rr := httptest.NewRecorder()
			tc.call(h, rr, req)
			if rr.Code != http.StatusGone {
				t.Fatalf("expected %d, got %d", http.StatusGone, rr.Code)
			}
			if !strings.Contains(rr.Body.String(), "deprecated") {
				t.Fatalf("expected deprecated response, got: %s", rr.Body.String())
			}
		})
	}
}
+153
View File
@@ -0,0 +1,153 @@
package handler
import (
"context"
"net/http"
"net/http/httptest"
"strings"
"testing"
"vctp/server/models"
)
// TestMutatingHandlersRejectWrongMethod sends a GET to every mutating
// handler and asserts each one answers 405 with a "method not allowed"
// body. The shared handler only needs a logger because the method check is
// expected to run before any other dependency is touched.
func TestMutatingHandlersRejectWrongMethod(t *testing.T) {
	h := &Handler{Logger: newTestLogger()}
	tests := []struct {
		name string
		path string
		call func(*Handler, *httptest.ResponseRecorder, *http.Request)
	}{
		{
			name: "auth login",
			path: "/api/auth/login",
			call: func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) {
				h.AuthLogin(rr, req)
			},
		},
		{
			name: "snapshot force hourly",
			path: "/api/snapshots/hourly/force",
			call: func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) {
				h.SnapshotForceHourly(rr, req)
			},
		},
		{
			name: "snapshot aggregate",
			path: "/api/snapshots/aggregate",
			call: func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) {
				h.SnapshotAggregateForce(rr, req)
			},
		},
		{
			name: "snapshot migrate",
			path: "/api/snapshots/migrate",
			call: func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) {
				h.SnapshotMigrate(rr, req)
			},
		},
		{
			name: "snapshot regenerate hourly",
			path: "/api/snapshots/regenerate-hourly-reports",
			call: func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) {
				h.SnapshotRegenerateHourlyReports(rr, req)
			},
		},
		{
			name: "vm create event",
			path: "/api/event/vm/create",
			call: func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) {
				h.VmCreateEvent(rr, req)
			},
		},
		{
			name: "vm modify event",
			path: "/api/event/vm/modify",
			call: func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) {
				h.VmModifyEvent(rr, req)
			},
		},
		{
			name: "vm move event",
			path: "/api/event/vm/move",
			call: func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) {
				h.VmMoveEvent(rr, req)
			},
		},
		{
			name: "vm delete event",
			path: "/api/event/vm/delete",
			call: func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) {
				h.VmDeleteEvent(rr, req)
			},
		},
		{
			name: "vm import",
			path: "/api/import/vm",
			call: func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) {
				h.VmImport(rr, req)
			},
		},
		{
			name: "vm update details",
			path: "/api/inventory/vm/update",
			call: func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) {
				h.VmUpdateDetails(rr, req)
			},
		},
		{
			name: "vm cleanup",
			path: "/api/inventory/vm/delete",
			call: func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) {
				h.VmCleanup(rr, req)
			},
		},
		{
			name: "vcenter cleanup",
			path: "/api/cleanup/vcenter",
			call: func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) {
				h.VcCleanup(rr, req)
			},
		},
		{
			name: "update cleanup",
			path: "/api/cleanup/updates",
			call: func(h *Handler, rr *httptest.ResponseRecorder, req *http.Request) {
				h.UpdateCleanup(rr, req)
			},
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			// GET against POST/DELETE-only endpoints; a body is still
			// supplied so the handler cannot fail earlier on a nil body.
			req := httptest.NewRequest(http.MethodGet, tc.path, strings.NewReader("{}"))
			rr := httptest.NewRecorder()
			tc.call(h, rr, req)
			if rr.Code != http.StatusMethodNotAllowed {
				t.Fatalf("expected %d, got %d", http.StatusMethodNotAllowed, rr.Code)
			}
			if !strings.Contains(rr.Body.String(), "method not allowed") {
				t.Fatalf("expected method not allowed response, got: %s", rr.Body.String())
			}
		})
	}
}
// TestVcenterLoginFailuresAreHandled checks that vCenter-touching helpers
// degrade gracefully when login fails: calculateNewDiskSize reports a zero
// disk size and AddVmToInventory returns a zero id plus an error.
// NOTE(review): the source URL points at invalid.local, so the login is
// presumably meant to fail fast without network access — confirm this does
// not stall in offline CI environments.
func TestVcenterLoginFailuresAreHandled(t *testing.T) {
	h := &Handler{Logger: newTestLogger()}
	event := models.CloudEventReceived{}
	event.CloudEvent.Source = "https://invalid.local/sdk"
	disk := h.calculateNewDiskSize(context.Background(), event)
	if disk != 0 {
		t.Fatalf("expected disk size 0 on login failure, got %f", disk)
	}
	// NOTE(review): AddVmToInventory takes ctx as the second argument; Go
	// convention puts context first — consider reordering at the definition.
	id, err := h.AddVmToInventory(event, context.Background(), 0)
	if err == nil {
		t.Fatal("expected error on login failure")
	}
	if id != 0 {
		t.Fatalf("expected id 0 on login failure, got %d", id)
	}
}
+12 -20
View File
@@ -1,8 +1,6 @@
package handler
import (
"context"
"encoding/json"
"fmt"
"net/http"
"vctp/internal/report"
@@ -11,25 +9,22 @@ import (
// InventoryReportDownload returns the inventory report as an XLSX download.
// @Summary Download inventory report
// @Description Generates an inventory XLSX report and returns it as a file download.
// @Description Requires Bearer authentication with the viewer role (admin also allowed).
// @Tags reports
// @Produce application/vnd.openxmlformats-officedocument.spreadsheetml.sheet
// @Success 200 {file} file "Inventory XLSX report"
// @Failure 500 {object} map[string]string "Report generation failed"
// @Failure 500 {object} models.ErrorResponse "Report generation failed"
// @Security BearerAuth
// @Router /api/report/inventory [get]
func (h *Handler) InventoryReportDownload(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
ctx, cancel := withRequestTimeout(r, reportRequestTimeout)
defer cancel()
// Generate the XLSX report
reportData, err := report.CreateInventoryReport(h.Logger, h.Database, ctx)
if err != nil {
h.Logger.Error("Failed to create report", "error", err)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusInternalServerError)
json.NewEncoder(w).Encode(map[string]string{
"status": "ERROR",
"message": fmt.Sprintf("Unable to create xlsx report: '%s'", err),
})
writeJSONError(w, http.StatusInternalServerError, fmt.Sprintf("Unable to create xlsx report: '%s'", err))
return
}
@@ -45,25 +40,22 @@ func (h *Handler) InventoryReportDownload(w http.ResponseWriter, r *http.Request
// UpdateReportDownload returns the updates report as an XLSX download.
// @Summary Download updates report
// @Description Generates an updates XLSX report and returns it as a file download.
// @Description Requires Bearer authentication with the viewer role (admin also allowed).
// @Tags reports
// @Produce application/vnd.openxmlformats-officedocument.spreadsheetml.sheet
// @Success 200 {file} file "Updates XLSX report"
// @Failure 500 {object} map[string]string "Report generation failed"
// @Failure 500 {object} models.ErrorResponse "Report generation failed"
// @Security BearerAuth
// @Router /api/report/updates [get]
func (h *Handler) UpdateReportDownload(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
ctx, cancel := withRequestTimeout(r, reportRequestTimeout)
defer cancel()
// Generate the XLSX report
reportData, err := report.CreateUpdatesReport(h.Logger, h.Database, ctx)
if err != nil {
h.Logger.Error("Failed to create report", "error", err)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusInternalServerError)
json.NewEncoder(w).Encode(map[string]string{
"status": "ERROR",
"message": fmt.Sprintf("Unable to create xlsx report: '%s'", err),
})
writeJSONError(w, http.StatusInternalServerError, fmt.Sprintf("Unable to create xlsx report: '%s'", err))
return
}
+46
View File
@@ -0,0 +1,46 @@
package handler
import (
"context"
"encoding/json"
"errors"
"io"
"net/http"
"time"
)
const (
defaultRequestTimeout = 2 * time.Minute
reportRequestTimeout = 10 * time.Minute
longRunningRequestTimeout = 2 * time.Hour
defaultJSONBodyLimitBytes = 1 << 20 // 1 MiB
)
func withRequestTimeout(r *http.Request, timeout time.Duration) (context.Context, context.CancelFunc) {
base := context.Background()
if r != nil {
base = r.Context()
}
if timeout <= 0 {
return base, func() {}
}
return context.WithTimeout(base, timeout)
}
func decodeJSONBody(w http.ResponseWriter, r *http.Request, dst any) error {
if r == nil || r.Body == nil {
return errors.New("request body is required")
}
decoder := json.NewDecoder(http.MaxBytesReader(w, r.Body, defaultJSONBodyLimitBytes))
if err := decoder.Decode(dst); err != nil {
return err
}
var trailing any
if err := decoder.Decode(&trailing); err != io.EOF {
if err == nil {
return errors.New("request body must contain only one JSON object")
}
return err
}
return nil
}
+41
View File
@@ -0,0 +1,41 @@
package handler
import (
"encoding/json"
"net/http"
"vctp/server/models"
)
// writeJSON serializes payload as JSON to w with the given status code.
// Content-Type is set before WriteHeader (headers are immutable afterwards).
// The encode error is discarded: once the status line is written there is no
// way to report a serialization failure to the client.
func writeJSON(w http.ResponseWriter, statusCode int, payload any) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(statusCode)
	_ = json.NewEncoder(w).Encode(payload)
}

// writeJSONStatus writes a status-only response body with the given code.
func writeJSONStatus(w http.ResponseWriter, statusCode int, status string) {
	writeJSON(w, statusCode, models.StatusResponse{
		Status: status,
	})
}

// writeJSONStatusMessage writes a status-plus-message response body.
func writeJSONStatusMessage(w http.ResponseWriter, statusCode int, status, message string) {
	writeJSON(w, statusCode, models.StatusMessageResponse{
		Status:  status,
		Message: message,
	})
}

// writeJSONOK writes the canonical 200 {"status":"OK"} response.
func writeJSONOK(w http.ResponseWriter) {
	writeJSONStatus(w, http.StatusOK, "OK")
}

// writeJSONOKMessage writes a 200 OK response with an extra message.
func writeJSONOKMessage(w http.ResponseWriter, message string) {
	writeJSONStatusMessage(w, http.StatusOK, "OK", message)
}

// writeJSONError writes an ERROR-status body with the given code and message.
func writeJSONError(w http.ResponseWriter, statusCode int, message string) {
	writeJSON(w, statusCode, models.ErrorResponse{
		Status:  "ERROR",
		Message: message,
	})
}
+36 -22
View File
@@ -1,28 +1,36 @@
package handler
import (
"context"
"encoding/json"
"net/http"
"strings"
"time"
"vctp/internal/settings"
"vctp/internal/tasks"
)
// SnapshotAggregateForce forces regeneration of a daily or monthly summary table.
// @Summary Force snapshot aggregation
// @Description Forces regeneration of a daily or monthly summary table for a specified date or month.
// @Description Requires Bearer authentication with the admin role.
// @Tags snapshots
// @Produce json
// @Param type query string true "Aggregation type: daily or monthly"
// @Param date query string true "Daily date (YYYY-MM-DD) or monthly date (YYYY-MM)"
// @Success 200 {object} map[string]string "Aggregation complete"
// @Failure 400 {object} map[string]string "Invalid request"
// @Failure 500 {object} map[string]string "Server error"
// @Param granularity query string false "Monthly aggregation granularity: hourly or daily"
// @Success 200 {object} models.StatusResponse "Aggregation complete"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Security BearerAuth
// @Router /api/snapshots/aggregate [post]
func (h *Handler) SnapshotAggregateForce(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
return
}
snapshotType := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("type")))
dateValue := strings.TrimSpace(r.URL.Query().Get("date"))
granularity := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("granularity")))
startedAt := time.Now()
loc := time.Now().Location()
@@ -35,11 +43,29 @@ func (h *Handler) SnapshotAggregateForce(w http.ResponseWriter, r *http.Request)
return
}
ctx := context.Background()
if granularity != "" && snapshotType != "monthly" {
h.Logger.Debug("Snapshot aggregation ignoring granularity for non-monthly request",
"type", snapshotType,
"granularity", granularity,
)
granularity = ""
}
if snapshotType == "monthly" && granularity != "" && granularity != "hourly" && granularity != "daily" {
h.Logger.Warn("Snapshot aggregation invalid granularity", "granularity", granularity)
writeJSONError(w, http.StatusBadRequest, "granularity must be hourly or daily")
return
}
ctx, cancel := withRequestTimeout(r, longRunningRequestTimeout)
defer cancel()
settingsCopy := *h.Settings.Values
if granularity != "" {
settingsCopy.Settings.MonthlyAggregationGranularity = granularity
}
ct := &tasks.CronTask{
Logger: h.Logger,
Database: h.Database,
Settings: h.Settings,
Settings: &settings.Settings{Logger: h.Logger, SettingsPath: h.Settings.SettingsPath, Values: &settingsCopy},
}
switch snapshotType {
@@ -63,7 +89,7 @@ func (h *Handler) SnapshotAggregateForce(w http.ResponseWriter, r *http.Request)
writeJSONError(w, http.StatusBadRequest, "date must be YYYY-MM")
return
}
h.Logger.Info("Starting monthly snapshot aggregation", "date", parsed.Format("2006-01"), "force", true)
h.Logger.Info("Starting monthly snapshot aggregation", "date", parsed.Format("2006-01"), "force", true, "granularity", granularity)
if err := ct.AggregateMonthlySummary(ctx, parsed, true); err != nil {
h.Logger.Error("Monthly snapshot aggregation failed", "date", parsed.Format("2006-01"), "error", err)
writeJSONError(w, http.StatusInternalServerError, err.Error())
@@ -78,20 +104,8 @@ func (h *Handler) SnapshotAggregateForce(w http.ResponseWriter, r *http.Request)
h.Logger.Info("Snapshot aggregation completed",
"type", snapshotType,
"date", dateValue,
"granularity", granularity,
"duration", time.Since(startedAt),
)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(map[string]string{
"status": "OK",
})
}
func writeJSONError(w http.ResponseWriter, status int, message string) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(status)
json.NewEncoder(w).Encode(map[string]string{
"status": "ERROR",
"message": message,
})
writeJSONOK(w)
}
+14 -11
View File
@@ -1,8 +1,6 @@
package handler
import (
"context"
"encoding/json"
"net/http"
"strings"
"time"
@@ -12,22 +10,30 @@ import (
// SnapshotForceHourly triggers an on-demand hourly snapshot run.
// @Summary Trigger hourly snapshot (manual)
// @Description Manually trigger an hourly snapshot for all configured vCenters. Requires confirmation text to avoid accidental execution.
// @Description Requires Bearer authentication with the admin role.
// @Tags snapshots
// @Accept json
// @Produce json
// @Param confirm query string true "Confirmation text; must be 'FORCE'"
// @Success 200 {object} map[string]string "Snapshot started"
// @Failure 400 {object} map[string]string "Invalid request"
// @Failure 500 {object} map[string]string "Server error"
// @Success 200 {object} models.StatusResponse "Snapshot started"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Security BearerAuth
// @Router /api/snapshots/hourly/force [post]
func (h *Handler) SnapshotForceHourly(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
return
}
confirm := strings.TrimSpace(r.URL.Query().Get("confirm"))
if strings.ToUpper(confirm) != "FORCE" {
writeJSONError(w, http.StatusBadRequest, "confirm must be 'FORCE'")
return
}
ctx := context.Background()
ctx, cancel := withRequestTimeout(r, longRunningRequestTimeout)
defer cancel()
ct := &tasks.CronTask{
Logger: h.Logger,
Database: h.Database,
@@ -37,15 +43,12 @@ func (h *Handler) SnapshotForceHourly(w http.ResponseWriter, r *http.Request) {
started := time.Now()
h.Logger.Info("Manual hourly snapshot requested")
if err := ct.RunVcenterSnapshotHourly(ctx, h.Logger.With("manual", true)); err != nil {
if err := ct.RunVcenterSnapshotHourly(ctx, h.Logger.With("manual", true), true); err != nil {
h.Logger.Error("Manual hourly snapshot failed", "error", err)
writeJSONError(w, http.StatusInternalServerError, err.Error())
return
}
h.Logger.Info("Manual hourly snapshot completed", "duration", time.Since(started))
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(map[string]string{
"status": "OK",
})
writeJSONOK(w)
}
+31 -16
View File
@@ -1,38 +1,53 @@
package handler
import (
"context"
"encoding/json"
"net/http"
"vctp/internal/report"
"vctp/server/models"
)
// SnapshotMigrate rebuilds the snapshot registry and normalizes hourly table names.
// @Summary Migrate snapshot registry
// @Description Rebuilds the snapshot registry from existing tables and renames hourly tables to epoch-based names.
// @Description Requires Bearer authentication with the admin role.
// @Tags snapshots
// @Produce json
// @Success 200 {object} map[string]interface{} "Migration results"
// @Failure 500 {object} map[string]string "Server error"
// @Success 200 {object} models.SnapshotMigrationResponse "Migration results"
// @Failure 500 {object} models.SnapshotMigrationResponse "Server error"
// @Security BearerAuth
// @Router /api/snapshots/migrate [post]
func (h *Handler) SnapshotMigrate(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
if r.Method != http.MethodPost {
writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
return
}
ctx, cancel := withRequestTimeout(r, reportRequestTimeout)
defer cancel()
stats, err := report.MigrateSnapshotRegistry(ctx, h.Database)
if err != nil {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusInternalServerError)
json.NewEncoder(w).Encode(map[string]interface{}{
"status": "ERROR",
"error": err.Error(),
"stats": stats,
writeJSON(w, http.StatusInternalServerError, models.SnapshotMigrationResponse{
Status: "ERROR",
Error: err.Error(),
Stats: models.SnapshotMigrationStats{
HourlyRenamed: stats.HourlyRenamed,
HourlyRegistered: stats.HourlyRegistered,
DailyRegistered: stats.DailyRegistered,
MonthlyRegistered: stats.MonthlyRegistered,
Errors: stats.Errors,
},
})
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(map[string]interface{}{
"status": "OK",
"stats": stats,
writeJSON(w, http.StatusOK, models.SnapshotMigrationResponse{
Status: "OK",
Stats: models.SnapshotMigrationStats{
HourlyRenamed: stats.HourlyRenamed,
HourlyRegistered: stats.HourlyRegistered,
DailyRegistered: stats.DailyRegistered,
MonthlyRegistered: stats.MonthlyRegistered,
Errors: stats.Errors,
},
})
}
+20 -14
View File
@@ -1,24 +1,31 @@
package handler
import (
"encoding/json"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"vctp/internal/report"
"vctp/server/models"
)
// SnapshotRegenerateHourlyReports regenerates missing hourly snapshot XLSX reports on disk.
// @Summary Regenerate hourly snapshot reports
// @Description Regenerates XLSX reports for hourly snapshots when the report files are missing or empty.
// @Description Requires Bearer authentication with the admin role.
// @Tags snapshots
// @Produce json
// @Success 200 {object} map[string]interface{} "Regeneration summary"
// @Failure 500 {object} map[string]string "Server error"
// @Success 200 {object} models.SnapshotRegenerateReportsResponse "Regeneration summary"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Security BearerAuth
// @Router /api/snapshots/regenerate-hourly-reports [post]
func (h *Handler) SnapshotRegenerateHourlyReports(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
return
}
ctx := r.Context()
reportsDir := strings.TrimSpace(h.Settings.Values.Settings.ReportsDir)
if reportsDir == "" {
@@ -46,7 +53,7 @@ func (h *Handler) SnapshotRegenerateHourlyReports(w http.ResponseWriter, r *http
skipped++
continue
}
if _, err := report.SaveTableReport(h.Logger, h.Database, ctx, rec.TableName, reportsDir); err != nil {
if _, err := report.SaveTableReport(h.Logger, h.Database, ctx, rec.TableName, reportsDir, h.Settings); err != nil {
errors++
h.Logger.Warn("failed to regenerate hourly report", "table", rec.TableName, "error", err)
continue
@@ -54,15 +61,14 @@ func (h *Handler) SnapshotRegenerateHourlyReports(w http.ResponseWriter, r *http
regenerated++
}
resp := map[string]interface{}{
"status": "OK",
"total": len(records),
"regenerated": regenerated,
"skipped": skipped,
"errors": errors,
"reports_dir": reportsDir,
"snapshotType": "hourly",
resp := models.SnapshotRegenerateReportsResponse{
Status: "OK",
Total: len(records),
Regenerated: regenerated,
Skipped: skipped,
Errors: errors,
ReportsDir: reportsDir,
SnapshotType: "hourly",
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(resp)
writeJSON(w, http.StatusOK, resp)
}
+219
View File
@@ -0,0 +1,219 @@
package handler
import (
"context"
"fmt"
"net/http"
"strconv"
"strings"
"time"
"vctp/db"
"vctp/internal/report"
"vctp/server/models"
)
// SnapshotRepair scans existing daily summaries and backfills missing SnapshotTime and lifecycle fields.
// @Summary Repair daily summaries
// @Description Backfills SnapshotTime and lifecycle info for existing daily summary tables and reruns monthly lifecycle refinement using hourly data.
// @Description Requires Bearer authentication with the admin role.
// @Tags snapshots
// @Produce json
// @Success 200 {object} models.SnapshotRepairResponse
// @Security BearerAuth
// @Router /api/snapshots/repair [post]
func (h *Handler) SnapshotRepair(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	// Repair can scan every daily summary table; bound the work with the
	// same long-running timeout used by the other heavyweight snapshot
	// endpoints (SnapshotForceHourly, SnapshotAggregateForce) instead of
	// relying on the unbounded request context.
	ctx, cancel := withRequestTimeout(r, longRunningRequestTimeout)
	defer cancel()
	h.Logger.Info("snapshot repair started", "scope", "daily")
	repaired, failed := h.repairDailySummaries(ctx, time.Now())
	h.Logger.Info("snapshot repair finished", "daily_repaired", repaired, "daily_failed", failed)
	// The response model carries the counters as strings.
	writeJSON(w, http.StatusOK, models.SnapshotRepairResponse{
		Status:   "OK",
		Repaired: strconv.Itoa(repaired),
		Failed:   strconv.Itoa(failed),
	})
}
// repairDailySummaries walks every registered daily summary table up to now
// and backfills its SnapshotTime and creation/deletion lifecycle fields from
// the hourly snapshots falling inside that day. It returns how many tables
// were repaired and how many failed; a failed registry listing counts as a
// single failure. Individual table failures are logged and skipped so one
// bad table does not abort the whole pass.
func (h *Handler) repairDailySummaries(ctx context.Context, now time.Time) (repaired int, failed int) {
	dbConn := h.Database.DB()
	dailyRecs, err := report.SnapshotRecordsWithFallback(ctx, h.Database, "daily", "inventory_daily_summary_", "20060102", time.Time{}, now)
	if err != nil {
		h.Logger.Warn("failed to list daily summaries", "error", err)
		return 0, 1
	}
	for _, rec := range dailyRecs {
		h.Logger.Debug("repair daily summary table", "table", rec.TableName, "snapshot_time", rec.SnapshotTime)
		// Repair window: the 24-hour span starting at the summary's
		// snapshot time.
		dayStart := rec.SnapshotTime
		dayEnd := dayStart.Add(24 * time.Hour)
		if err := db.EnsureSummaryTable(ctx, dbConn, rec.TableName); err != nil {
			h.Logger.Warn("ensure summary table failed", "table", rec.TableName, "error", err)
			failed++
			continue
		}
		hourlyRecs, err := report.SnapshotRecordsWithFallback(ctx, h.Database, "hourly", "inventory_hourly_", "epoch", dayStart, dayEnd)
		if err != nil || len(hourlyRecs) == 0 {
			h.Logger.Warn("no hourly snapshots for repair window", "table", rec.TableName, "error", err)
			failed++
			continue
		}
		// Column list selected from each hourly table; identifiers are
		// quoted to preserve their mixed case.
		cols := []string{
			`"InventoryId"`, `"Name"`, `"Vcenter"`, `"VmId"`, `"EventKey"`, `"CloudId"`, `"CreationTime"`,
			`"DeletionTime"`, `"ResourcePool"`, `"Datacenter"`, `"Cluster"`, `"Folder"`,
			`"ProvisionedDisk"`, `"VcpuCount"`, `"RamGB"`, `"IsTemplate"`, `"PoweredOn"`,
			`"SrmPlaceholder"`, `"VmUuid"`, `"SnapshotTime"`,
		}
		// Rows flagged as templates are filtered out of the union.
		union, err := buildUnionFromRecords(hourlyRecs, cols, `COALESCE(CAST("IsTemplate" AS TEXT), '') NOT IN ('TRUE','true','1')`)
		if err != nil {
			h.Logger.Warn("failed to build union for repair", "table", rec.TableName, "error", err)
			failed++
			continue
		}
		h.Logger.Debug("built hourly union for repair", "table", rec.TableName, "hourly_tables", len(hourlyRecs))
		// Backfill SnapshotTime first; lifecycle refinement follows only
		// when the backfill succeeded.
		if err := db.BackfillSnapshotTimeFromUnion(ctx, dbConn, rec.TableName, union); err != nil {
			h.Logger.Warn("failed to backfill snapshot time", "table", rec.TableName, "error", err)
			failed++
			continue
		}
		h.Logger.Debug("snapshot time backfill complete", "table", rec.TableName)
		if err := db.RefineCreationDeletionFromUnion(ctx, dbConn, rec.TableName, union); err != nil {
			h.Logger.Warn("failed to refine lifecycle during repair", "table", rec.TableName, "error", err)
			failed++
			continue
		}
		h.Logger.Debug("lifecycle refinement complete", "table", rec.TableName)
		h.Logger.Info("repair applied", "table", rec.TableName, "actions", "snapshot_time+lifecycle")
		repaired++
	}
	return repaired, failed
}
// SnapshotRepairSuite runs a sequence of repair routines to fix older deployments in one call.
// It rebuilds the snapshot registry, syncs vcenter totals, repairs daily summaries, and refines monthly lifecycle data.
// Step failures are logged but do not abort later steps; the response always reports the daily/monthly counters.
// @Summary Run full snapshot repair suite
// @Description Rebuilds snapshot registry, backfills per-vCenter totals, repairs daily summaries (SnapshotTime/lifecycle), and refines monthly lifecycle.
// @Description Requires Bearer authentication with the admin role.
// @Tags snapshots
// @Produce json
// @Success 200 {object} models.SnapshotRepairSuiteResponse
// @Security BearerAuth
// @Router /api/snapshots/repair/all [post]
func (h *Handler) SnapshotRepairSuite(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	// The suite can touch every snapshot table; bound it with the same
	// long-running timeout used by the other heavyweight snapshot endpoints
	// instead of relying on the unbounded request context.
	ctx, cancel := withRequestTimeout(r, longRunningRequestTimeout)
	defer cancel()
	dbConn := h.Database.DB()
	// Step 1: rebuild snapshot registry from existing tables.
	h.Logger.Info("repair suite step", "step", "snapshot_registry")
	if stats, err := report.MigrateSnapshotRegistry(ctx, h.Database); err != nil {
		h.Logger.Warn("snapshot registry migration failed", "error", err)
	} else {
		h.Logger.Info("snapshot registry migration complete", "hourly_renamed", stats.HourlyRenamed, "daily_registered", stats.DailyRegistered, "monthly_registered", stats.MonthlyRegistered, "errors", stats.Errors)
	}
	// Step 2: backfill vcenter_totals from registry hourly tables.
	h.Logger.Info("repair suite step", "step", "vcenter_totals")
	if err := db.SyncVcenterTotalsFromSnapshots(ctx, dbConn); err != nil {
		h.Logger.Warn("sync vcenter totals failed", "error", err)
	}
	// Step 3: repair daily summaries (snapshot time + lifecycle).
	h.Logger.Info("repair suite step", "step", "daily_summaries")
	dailyRepaired, dailyFailed := h.repairDailySummaries(ctx, time.Now())
	// Step 4: refine monthly lifecycle using daily summaries (requires SnapshotTime now present after step 3).
	h.Logger.Info("repair suite step", "step", "monthly_refine")
	monthlyRefined, monthlyFailed := h.refineMonthlyFromDaily(ctx, time.Now())
	// The response model carries the counters as strings.
	writeJSON(w, http.StatusOK, models.SnapshotRepairSuiteResponse{
		Status:         "OK",
		DailyRepaired:  strconv.Itoa(dailyRepaired),
		DailyFailed:    strconv.Itoa(dailyFailed),
		MonthlyRefined: strconv.Itoa(monthlyRefined),
		MonthlyFailed:  strconv.Itoa(monthlyFailed),
	})
}
// refineMonthlyFromDaily reruns lifecycle refinement for each monthly
// summary table using that month's daily summaries as the source. It returns
// the number of monthly tables refined and the number that failed; a failed
// daily listing counts as a single failure. Per-table failures are logged
// and skipped so one bad month does not abort the rest.
func (h *Handler) refineMonthlyFromDaily(ctx context.Context, now time.Time) (refined int, failed int) {
	dbConn := h.Database.DB()
	dailyRecs, err := report.SnapshotRecordsWithFallback(ctx, h.Database, "daily", "inventory_daily_summary_", "20060102", time.Time{}, now)
	if err != nil {
		h.Logger.Warn("failed to list daily summaries for monthly refine", "error", err)
		return 0, 1
	}
	// Group daily tables by month (YYYYMM).
	grouped := make(map[string][]report.SnapshotRecord)
	for _, rec := range dailyRecs {
		key := rec.SnapshotTime.Format("200601")
		grouped[key] = append(grouped[key], rec)
	}
	// Column list shared by the UNION across daily tables; identifiers are
	// quoted to preserve their mixed case.
	cols := []string{
		`"InventoryId"`, `"Name"`, `"Vcenter"`, `"VmId"`, `"EventKey"`, `"CloudId"`, `"CreationTime"`,
		`"DeletionTime"`, `"ResourcePool"`, `"Datacenter"`, `"Cluster"`, `"Folder"`,
		`"ProvisionedDisk"`, `"VcpuCount"`, `"RamGB"`, `"IsTemplate"`, `"PoweredOn"`,
		`"SrmPlaceholder"`, `"VmUuid"`, `"SnapshotTime"`,
	}
	// NOTE(review): map iteration order is random, so months are processed
	// in nondeterministic order — harmless as each month is independent.
	for monthKey, recs := range grouped {
		summaryTable := fmt.Sprintf("inventory_monthly_summary_%s", monthKey)
		h.Logger.Debug("monthly refine", "table", summaryTable, "daily_tables", len(recs))
		if err := db.EnsureSummaryTable(ctx, dbConn, summaryTable); err != nil {
			h.Logger.Warn("ensure monthly summary failed", "table", summaryTable, "error", err)
			failed++
			continue
		}
		// Rows flagged as templates are filtered out of the union.
		union, err := buildUnionFromRecords(recs, cols, `COALESCE(CAST("IsTemplate" AS TEXT), '') NOT IN ('TRUE','true','1')`)
		if err != nil {
			h.Logger.Warn("failed to build union for monthly refine", "table", summaryTable, "error", err)
			failed++
			continue
		}
		if err := db.RefineCreationDeletionFromUnion(ctx, dbConn, summaryTable, union); err != nil {
			h.Logger.Warn("failed to refine monthly lifecycle", "table", summaryTable, "error", err)
			failed++
			continue
		}
		h.Logger.Debug("monthly refine applied", "table", summaryTable)
		refined++
	}
	return refined, failed
}
// buildUnionFromRecords assembles a UNION ALL query over the snapshot tables
// in recs, selecting the given columns and applying the optional WHERE
// filter to every branch. Tables failing name validation are skipped; an
// error is returned when no table is given or none survives validation.
func buildUnionFromRecords(recs []report.SnapshotRecord, columns []string, where string) (string, error) {
	if len(recs) == 0 {
		return "", fmt.Errorf("no tables provided for union")
	}
	selectCols := strings.Join(columns, ", ")
	var selects []string
	for _, rec := range recs {
		// Only table names passing validation are interpolated into SQL.
		if db.ValidateTableName(rec.TableName) != nil {
			continue
		}
		stmt := fmt.Sprintf(`SELECT %s FROM %s`, selectCols, rec.TableName)
		if where != "" {
			stmt += " WHERE " + where
		}
		selects = append(selects, stmt)
	}
	if len(selects) == 0 {
		return "", fmt.Errorf("no valid tables for union")
	}
	return strings.Join(selects, "\nUNION ALL\n"), nil
}
+13 -20
View File
@@ -1,8 +1,6 @@
package handler
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
@@ -51,35 +49,28 @@ func (h *Handler) SnapshotMonthlyList(w http.ResponseWriter, r *http.Request) {
// SnapshotReportDownload streams a snapshot table as XLSX.
// @Summary Download snapshot report
// @Description Downloads a snapshot table as an XLSX file.
// @Description Requires Bearer authentication with the viewer role (admin also allowed).
// @Tags snapshots
// @Produce application/vnd.openxmlformats-officedocument.spreadsheetml.sheet
// @Param table query string true "Snapshot table name"
// @Success 200 {file} file "Snapshot XLSX report"
// @Failure 400 {object} map[string]string "Invalid request"
// @Failure 500 {object} map[string]string "Server error"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Security BearerAuth
// @Router /api/report/snapshot [get]
func (h *Handler) SnapshotReportDownload(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
ctx, cancel := withRequestTimeout(r, reportRequestTimeout)
defer cancel()
tableName := r.URL.Query().Get("table")
if tableName == "" {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(map[string]string{
"status": "ERROR",
"message": "Missing table parameter",
})
writeJSONError(w, http.StatusBadRequest, "Missing table parameter")
return
}
reportData, err := report.CreateTableReport(h.Logger, h.Database, ctx, tableName)
reportData, err := report.CreateTableReport(h.Logger, h.Database, ctx, tableName, h.Settings)
if err != nil {
h.Logger.Error("Failed to create snapshot report", "error", err, "table", tableName)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusInternalServerError)
json.NewEncoder(w).Encode(map[string]string{
"status": "ERROR",
"message": fmt.Sprintf("Unable to create snapshot report: '%s'", err),
})
writeJSONError(w, http.StatusInternalServerError, fmt.Sprintf("Unable to create snapshot report: '%s'", err))
return
}
@@ -91,7 +82,8 @@ func (h *Handler) SnapshotReportDownload(w http.ResponseWriter, r *http.Request)
}
func (h *Handler) renderSnapshotList(w http.ResponseWriter, r *http.Request, snapshotType string, title string, renderer func([]views.SnapshotEntry) templ.Component) {
ctx := context.Background()
ctx, cancel := withRequestTimeout(r, defaultRequestTimeout)
defer cancel()
if err := report.EnsureSnapshotRegistry(ctx, h.Database); err != nil {
h.Logger.Error("Failed to ensure snapshot registry", "error", err)
w.WriteHeader(http.StatusInternalServerError)
@@ -118,10 +110,11 @@ func (h *Handler) renderSnapshotList(w http.ResponseWriter, r *http.Request, sna
case "monthly":
group = record.SnapshotTime.Format("2006")
}
count := max(record.SnapshotCount, 0)
entries = append(entries, views.SnapshotEntry{
Label: label,
Link: "/reports/" + url.PathEscape(record.TableName) + ".xlsx",
Count: record.SnapshotCount,
Count: count,
Group: group,
})
}
+10
View File
@@ -0,0 +1,10 @@
package handler
import (
"io"
"log/slog"
)
// newTestLogger builds a *slog.Logger that discards all output, so tests can
// exercise handler code paths without producing log noise.
func newTestLogger() *slog.Logger {
	discardHandler := slog.NewTextHandler(io.Discard, nil)
	return slog.New(discardHandler)
}
+18 -9
View File
@@ -1,7 +1,6 @@
package handler
import (
"context"
"fmt"
"net/http"
)
@@ -9,13 +8,26 @@ import (
// UpdateCleanup removes orphaned update records.
// @Summary Cleanup updates (deprecated)
// @Description Deprecated: Removes update records that are no longer associated with a VM.
// @Description Requires Bearer authentication with the admin role.
// @Tags maintenance
// @Deprecated
// @Produce text/plain
// @Success 200 {string} string "Cleanup completed"
// @Failure 500 {string} string "Server error"
// @Security BearerAuth
// @Router /api/cleanup/updates [delete]
func (h *Handler) UpdateCleanup(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodDelete {
writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
return
}
if h.denyLegacyAPI(w, "/api/cleanup/updates") {
return
}
ctx, cancel := withRequestTimeout(r, defaultRequestTimeout)
defer cancel()
/*
// Get the current time
now := time.Now()
@@ -31,20 +43,17 @@ func (h *Handler) UpdateCleanup(w http.ResponseWriter, r *http.Request) {
}
h.Logger.Debug("database params", "params", params)
err := h.Database.Queries().CleanupUpdates(context.Background(), params)
err := h.Database.Queries().CleanupUpdates(ctx, params)
*/
//err := h.Database.Queries().InventoryCleanupTemplates(context.Background())
err := h.Database.Queries().CleanupUpdatesNullVm(context.Background())
//err := h.Database.Queries().InventoryCleanupTemplates(ctx)
err := h.Database.Queries().CleanupUpdatesNullVm(ctx)
if err != nil {
h.Logger.Error("Error received cleaning updates table", "error", err)
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Delete Request unsuccessful %s\n", err)
writeJSONError(w, http.StatusInternalServerError, fmt.Sprintf("Delete Request unsuccessful %s", err))
} else {
h.Logger.Debug("Processed update cleanup successfully")
w.WriteHeader(http.StatusOK)
// TODO - return some JSON
fmt.Fprintf(w, "Processed update cleanup successfully")
writeJSONOKMessage(w, "Processed update cleanup successfully")
}
}
+18 -35
View File
@@ -1,9 +1,7 @@
package handler
import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"net/http"
@@ -16,11 +14,21 @@ import (
// @Deprecated
// @Produce json
// @Param vc_url query string true "vCenter URL"
// @Success 200 {object} map[string]string "Cleanup completed"
// @Failure 400 {object} map[string]string "Invalid request"
// @Success 200 {object} models.StatusMessageResponse "Cleanup completed"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Router /api/cleanup/vcenter [delete]
func (h *Handler) VcCleanup(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
if r.Method != http.MethodDelete {
writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
return
}
if h.denyLegacyAPI(w, "/api/cleanup/vcenter") {
return
}
ctx, cancel := withRequestTimeout(r, defaultRequestTimeout)
defer cancel()
// Get the parameters
vcUrl := r.URL.Query().Get("vc_url")
@@ -31,21 +39,11 @@ func (h *Handler) VcCleanup(w http.ResponseWriter, r *http.Request) {
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
h.Logger.Error("No VMs found for vcenter", "url", vcUrl)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(map[string]string{
"status": "ERROR",
"message": fmt.Sprintf("No match to vcenter details specified. vc_url: '%s'", vcUrl),
})
writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("No match to vcenter details specified. vc_url: '%s'", vcUrl))
return
} else {
h.Logger.Error("Error checking for vcenter to cleanup", "error", err, "url", vcUrl)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(map[string]string{
"status": "ERROR",
"message": fmt.Sprintf("Error checking for vcenter to cleanup. error: '%s'", err),
})
writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("Error checking for vcenter to cleanup. error: '%s'", err))
return
}
} else {
@@ -53,33 +51,18 @@ func (h *Handler) VcCleanup(w http.ResponseWriter, r *http.Request) {
err = h.Database.Queries().InventoryCleanupVcenter(ctx, vcUrl)
if err != nil {
h.Logger.Error("Error cleaning up VMs from Inventory table", "error", err, "url", vcUrl)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(map[string]string{
"status": "ERROR",
"message": fmt.Sprintf("Error cleaning up VMs from Inventory table. error: '%s'", err),
})
writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("Error cleaning up VMs from Inventory table. error: '%s'", err))
return
} else {
// Successful cleanup
h.Logger.Debug("VMs successfully removed from inventory for vcenter", "url", vcUrl)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(map[string]string{
"status": "OK",
"message": fmt.Sprintf("Removed VMs from Inventory table for vcenter '%s'", vcUrl),
})
writeJSONOKMessage(w, fmt.Sprintf("Removed VMs from Inventory table for vcenter '%s'", vcUrl))
return
}
}
} else {
h.Logger.Error("Parameters not correctly specified", "url", vcUrl)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(map[string]string{
"status": "ERROR",
"message": fmt.Sprintf("Parameters not correctly specified. vc_url: '%s'", vcUrl),
})
writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("Parameters not correctly specified. vc_url: '%s'", vcUrl))
return
}
}
+164
View File
@@ -0,0 +1,164 @@
package handler
import (
"context"
"fmt"
"math"
"net/http"
"strings"
"time"
"vctp/db"
"vctp/internal/vcenter"
"vctp/server/models"
)
// VcenterCacheRebuild force-regenerates cached vCenter reference data in the database.
// @Summary Rebuild vCenter object cache
// @Description Rebuilds cached folder/resource-pool/host(cluster+datacenter) references from vCenter and rewrites the database cache tables.
// @Description Requires Bearer authentication with the admin role.
// @Tags vcenters
// @Produce json
// @Param vcenter query string false "Optional single vCenter URL to rebuild; defaults to all configured vCenters"
// @Success 200 {object} models.VcenterCacheRebuildResponse "Cache rebuild summary"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 405 {object} models.ErrorResponse "Method not allowed"
// @Failure 500 {object} models.VcenterCacheRebuildResponse "All rebuild attempts failed"
// @Security BearerAuth
// @Router /api/vcenters/cache/rebuild [post]
func (h *Handler) VcenterCacheRebuild(w http.ResponseWriter, r *http.Request) {
	// Only POST is accepted; this endpoint rewrites database cache tables.
	if r.Method != http.MethodPost {
		writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	ctx := r.Context()
	// Reload settings so config changes take effect without a restart.
	// NOTE(review): any error from ReadYMLSettings is not checked here —
	// confirm it logs failures internally.
	h.Settings.ReadYMLSettings()
	requested := strings.TrimSpace(r.URL.Query().Get("vcenter"))
	configured := h.Settings.Values.Settings.VcenterAddresses
	// Build the target list: all configured vCenters, or only the one
	// named by the optional ?vcenter= parameter.
	targets := make([]string, 0, len(configured))
	for _, raw := range configured {
		vcURL := strings.TrimSpace(raw)
		if vcURL == "" {
			continue
		}
		if requested != "" && vcURL != requested {
			continue
		}
		targets = append(targets, vcURL)
	}
	// A requested vCenter that is not in configuration is a client error,
	// distinct from having no configuration at all.
	if requested != "" && len(targets) == 0 {
		writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("requested vcenter is not configured: %s", requested))
		return
	}
	if len(targets) == 0 {
		writeJSONError(w, http.StatusBadRequest, "no vcenter addresses configured")
		return
	}
	// Make sure the cache tables exist before any per-vCenter rebuild runs.
	if err := db.EnsureVcenterReferenceCacheTables(ctx, h.Database.DB()); err != nil {
		h.Logger.Error("failed to ensure vcenter cache tables", "error", err)
		writeJSONError(w, http.StatusInternalServerError, "failed to ensure vcenter cache tables")
		return
	}
	resp := models.VcenterCacheRebuildResponse{
		Status:  "OK",
		Total:   len(targets),
		Results: make([]models.VcenterCacheRebuildResult, 0, len(targets)),
	}
	// Rebuild each target independently: one failure does not stop the rest,
	// and every attempt is recorded in the per-vCenter results slice.
	for _, vcURL := range targets {
		start := time.Now()
		result := models.VcenterCacheRebuildResult{
			Vcenter: vcURL,
		}
		folderEntries, poolEntries, hostEntries, err := h.rebuildOneVcenterCache(ctx, vcURL)
		// Round the duration to milliseconds for a stable JSON representation.
		result.DurationSeconds = math.Round(time.Since(start).Seconds()*1000) / 1000
		if err != nil {
			result.Error = err.Error()
			resp.Failed++
			h.Logger.Warn("vcenter cache rebuild failed", "vcenter", vcURL, "error", err)
		} else {
			result.FolderEntries = folderEntries
			result.ResourcePoolEntries = poolEntries
			result.HostEntries = hostEntries
			resp.Succeeded++
			h.Logger.Info("vcenter cache rebuild completed", "vcenter", vcURL, "folder_entries", folderEntries, "resource_pool_entries", poolEntries, "host_entries", hostEntries, "duration", time.Since(start))
		}
		resp.Results = append(resp.Results, result)
	}
	// Tri-state outcome: all succeeded -> 200 OK; all failed -> 500 ERROR;
	// mixed -> 200 PARTIAL (callers inspect Results for per-target errors).
	switch {
	case resp.Failed == 0:
		resp.Status = "OK"
		writeJSON(w, http.StatusOK, resp)
	case resp.Succeeded == 0:
		resp.Status = "ERROR"
		writeJSON(w, http.StatusInternalServerError, resp)
	default:
		resp.Status = "PARTIAL"
		writeJSON(w, http.StatusOK, resp)
	}
}
// rebuildOneVcenterCache connects to a single vCenter, rebuilds its folder,
// resource-pool, and host lookups, and rewrites the database cache rows for
// that vCenter. It returns the number of folder, resource-pool, and host
// entries persisted, in that order. On any error the partially cleared or
// written cache rows are left as-is for the next rebuild attempt.
func (h *Handler) rebuildOneVcenterCache(ctx context.Context, vcURL string) (int, int, int, error) {
	vc := vcenter.New(h.Logger, h.VcCreds)
	if err := vc.Login(vcURL); err != nil {
		return 0, 0, 0, fmt.Errorf("unable to connect to vcenter: %w", err)
	}
	// Always log out on exit. A detached context with its own 5s timeout is
	// used so logout still runs even after the request context is cancelled.
	defer func() {
		logoutCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 5*time.Second)
		defer cancel()
		if err := vc.Logout(logoutCtx); err != nil {
			h.Logger.Warn("vcenter cache rebuild logout failed", "vcenter", vcURL, "error", err)
		}
	}()
	// Fetch all three lookups from vCenter before touching the database, so
	// a vCenter-side failure never clears existing cache rows.
	folderLookup, err := vc.BuildFolderPathLookup()
	if err != nil {
		return 0, 0, 0, fmt.Errorf("failed to build folder cache from vcenter: %w", err)
	}
	resourcePools, err := vc.BuildResourcePoolLookup()
	if err != nil {
		return 0, 0, 0, fmt.Errorf("failed to build resource pool cache from vcenter: %w", err)
	}
	hostLookup, err := vc.BuildHostLookup()
	if err != nil {
		return 0, 0, 0, fmt.Errorf("failed to build host cache from vcenter: %w", err)
	}
	dbConn := h.Database.DB()
	// Clear-then-upsert: drop this vCenter's old rows so entries that no
	// longer exist in vCenter do not linger in the cache.
	if err := db.ClearVcenterReferenceCache(ctx, dbConn, vcURL); err != nil {
		return 0, 0, 0, fmt.Errorf("failed to clear existing vcenter cache rows: %w", err)
	}
	// One shared timestamp marks every row written by this rebuild pass.
	lastSeen := time.Now().Unix()
	folderCount := 0
	for folderRef, folderPath := range folderLookup {
		if err := db.UpsertVcenterFolderCache(ctx, dbConn, vcURL, folderRef, folderPath, lastSeen); err != nil {
			return 0, 0, 0, fmt.Errorf("failed to persist folder cache: %w", err)
		}
		folderCount++
	}
	poolCount := 0
	for poolRef, poolName := range resourcePools {
		if err := db.UpsertVcenterResourcePoolCache(ctx, dbConn, vcURL, poolRef, poolName, lastSeen); err != nil {
			return 0, 0, 0, fmt.Errorf("failed to persist resource pool cache: %w", err)
		}
		poolCount++
	}
	hostCount := 0
	for hostRef, entry := range hostLookup {
		if err := db.UpsertVcenterHostCache(ctx, dbConn, vcURL, hostRef, entry.Cluster, entry.Datacenter, lastSeen); err != nil {
			return 0, 0, 0, fmt.Errorf("failed to persist host cache: %w", err)
		}
		hostCount++
	}
	return folderCount, poolCount, hostCount, nil
}
+163 -130
View File
@@ -1,6 +1,7 @@
package handler
import (
"context"
"fmt"
"net/http"
"net/url"
@@ -11,18 +12,21 @@ import (
"vctp/db"
)
const (
vcenterHourlyDetailWindowDays = 45
vcenterDailyDefaultLimit = 400
vcenterMonthlyDefaultLimit = 200
)
// VcenterList renders a list of vCenters being monitored.
// @Summary List vCenters
// @Description Lists all vCenters with recorded snapshot totals.
// @Description Lists all vCenters with recorded snapshot totals, linking to the fast daily aggregated totals page.
// @Tags vcenters
// @Produce text/html
// @Success 200 {string} string "HTML page"
// @Router /vcenters [get]
func (h *Handler) VcenterList(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
if err := db.SyncVcenterTotalsFromSnapshots(ctx, h.Database.DB()); err != nil {
h.Logger.Warn("failed to sync vcenter totals", "error", err)
}
vcs, err := db.ListVcenters(ctx, h.Database.DB())
if err != nil {
http.Error(w, fmt.Sprintf("failed to list vcenters: %v", err), http.StatusInternalServerError)
@@ -32,7 +36,7 @@ func (h *Handler) VcenterList(w http.ResponseWriter, r *http.Request) {
for _, vc := range vcs {
links = append(links, views.VcenterLink{
Name: vc,
Link: "/vcenters/totals?vcenter=" + url.QueryEscape(vc),
Link: "/vcenters/totals/daily?vcenter=" + url.QueryEscape(vc),
})
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
@@ -41,49 +45,117 @@ func (h *Handler) VcenterList(w http.ResponseWriter, r *http.Request) {
}
}
// VcenterTotals renders totals for a vCenter.
// VcenterTotals keeps backward compatibility with the original endpoint and routes to the new pages.
// @Summary vCenter totals
// @Description Shows per-snapshot totals for a vCenter.
// @Description Redirect-style handler for compatibility; use /vcenters/totals/daily or /vcenters/totals/hourly.
// @Tags vcenters
// @Produce text/html
// @Param vcenter query string true "vCenter URL"
// @Param type query string false "hourly|daily|monthly (default: hourly)"
// @Param limit query int false "Limit results (default 200)"
// @Param type query string false "hourly|daily|monthly"
// @Param limit query int false "Limit results"
// @Success 200 {string} string "HTML page"
// @Failure 400 {string} string "Missing vcenter"
// @Router /vcenters/totals [get]
func (h *Handler) VcenterTotals(w http.ResponseWriter, r *http.Request) {
switch strings.ToLower(strings.TrimSpace(r.URL.Query().Get("type"))) {
case "hourly", "hourly-detail", "detail", "detailed":
h.VcenterTotalsHourlyDetailed(w, r)
return
case "monthly":
h.vcenterTotalsLegacyMonthly(w, r)
return
default:
h.VcenterTotalsDaily(w, r)
return
}
}
// VcenterTotalsDaily renders the daily-aggregation totals page for one vCenter.
// @Summary vCenter daily totals
// @Description Shows daily aggregated VM count/vCPU/RAM totals for a vCenter (cache-backed for fast loading).
// @Tags vcenters
// @Produce text/html
// @Param vcenter query string true "vCenter URL"
// @Param limit query int false "Limit results (default 400)"
// @Success 200 {string} string "HTML page"
// @Failure 400 {string} string "Missing vcenter"
// @Router /vcenters/totals/daily [get]
func (h *Handler) VcenterTotalsDaily(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vc := r.URL.Query().Get("vcenter")
vc, ok := requiredVcenterParam(w, r)
if !ok {
return
}
limit := parsePositiveLimit(r, vcenterDailyDefaultLimit)
rows, err := db.ListVcenterTotalsByType(ctx, h.Database.DB(), vc, "daily", limit)
if err != nil {
http.Error(w, fmt.Sprintf("failed to list daily totals: %v", err), http.StatusInternalServerError)
return
}
h.renderVcenterTotalsPage(ctx, w, vc, "daily", rows)
}
// VcenterTotalsHourlyDetailed renders a detailed hourly page over the most recent 45 days.
// @Summary vCenter hourly totals (45 days)
// @Description Shows detailed hourly VM count/vCPU/RAM totals for the latest 45 days.
// @Tags vcenters
// @Produce text/html
// @Param vcenter query string true "vCenter URL"
// @Success 200 {string} string "HTML page"
// @Failure 400 {string} string "Missing vcenter"
// @Router /vcenters/totals/hourly [get]
func (h *Handler) VcenterTotalsHourlyDetailed(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vc, ok := requiredVcenterParam(w, r)
if !ok {
return
}
since := time.Now().AddDate(0, 0, -vcenterHourlyDetailWindowDays)
rows, err := db.ListVcenterHourlyTotalsSince(ctx, h.Database.DB(), vc, since)
if err != nil {
http.Error(w, fmt.Sprintf("failed to list hourly totals: %v", err), http.StatusInternalServerError)
return
}
h.renderVcenterTotalsPage(ctx, w, vc, "hourly45", rows)
}
func (h *Handler) vcenterTotalsLegacyMonthly(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
vc, ok := requiredVcenterParam(w, r)
if !ok {
return
}
limit := parsePositiveLimit(r, vcenterMonthlyDefaultLimit)
rows, err := db.ListVcenterTotalsByType(ctx, h.Database.DB(), vc, "monthly", limit)
if err != nil {
http.Error(w, fmt.Sprintf("failed to list monthly totals: %v", err), http.StatusInternalServerError)
return
}
h.renderVcenterTotalsPage(ctx, w, vc, "monthly", rows)
}
func requiredVcenterParam(w http.ResponseWriter, r *http.Request) (string, bool) {
vc := strings.TrimSpace(r.URL.Query().Get("vcenter"))
if vc == "" {
http.Error(w, "vcenter is required", http.StatusBadRequest)
return
return "", false
}
viewType := strings.ToLower(r.URL.Query().Get("type"))
if viewType == "" {
viewType = "hourly"
return vc, true
}
func parsePositiveLimit(r *http.Request, defaultLimit int) int {
if defaultLimit <= 0 {
defaultLimit = 200
}
switch viewType {
case "hourly", "daily", "monthly":
default:
viewType = "hourly"
}
if viewType == "hourly" {
if err := db.SyncVcenterTotalsFromSnapshots(ctx, h.Database.DB()); err != nil {
h.Logger.Warn("failed to sync vcenter totals", "error", err)
if raw := strings.TrimSpace(r.URL.Query().Get("limit")); raw != "" {
if parsed, err := strconv.Atoi(raw); err == nil && parsed > 0 {
return parsed
}
}
limit := 200
if l := r.URL.Query().Get("limit"); l != "" {
if v, err := strconv.Atoi(l); err == nil && v > 0 {
limit = v
}
}
rows, err := db.ListVcenterTotalsByType(ctx, h.Database.DB(), vc, viewType, limit)
if err != nil {
http.Error(w, fmt.Sprintf("failed to list totals: %v", err), http.StatusInternalServerError)
return
}
return defaultLimit
}
func (h *Handler) renderVcenterTotalsPage(ctx context.Context, w http.ResponseWriter, vc string, viewType string, rows []db.VcenterTotalRow) {
entries := make([]views.VcenterTotalsEntry, 0, len(rows))
for _, row := range rows {
entries = append(entries, views.VcenterTotalsEntry{
@@ -105,28 +177,26 @@ func (h *Handler) VcenterTotals(w http.ResponseWriter, r *http.Request) {
func buildVcenterMeta(vcenter string, viewType string) views.VcenterTotalsMeta {
active := viewType
if active == "" {
active = "hourly"
active = "daily"
}
meta := views.VcenterTotalsMeta{
ViewType: active,
TypeLabel: "Hourly",
HourlyLink: "/vcenters/totals?vcenter=" + url.QueryEscape(vcenter) + "&type=hourly",
DailyLink: "/vcenters/totals?vcenter=" + url.QueryEscape(vcenter) + "&type=daily",
MonthlyLink: "/vcenters/totals?vcenter=" + url.QueryEscape(vcenter) + "&type=monthly",
HourlyClass: "web3-button",
DailyClass: "web3-button",
MonthlyClass: "web3-button",
ViewType: active,
TypeLabel: "Daily",
HourlyLink: "/vcenters/totals/hourly?vcenter=" + url.QueryEscape(vcenter),
DailyLink: "/vcenters/totals/daily?vcenter=" + url.QueryEscape(vcenter),
HourlyClass: "web3-button",
DailyClass: "web3-button",
}
switch active {
case "daily":
meta.TypeLabel = "Daily"
meta.DailyClass = "web3-button active"
case "hourly45", "hourly":
meta.ViewType = "hourly45"
meta.TypeLabel = fmt.Sprintf("Hourly (last %d days)", vcenterHourlyDetailWindowDays)
meta.HourlyClass = "web3-button active"
case "monthly":
meta.TypeLabel = "Monthly"
meta.MonthlyClass = "web3-button active"
default:
meta.ViewType = "hourly"
meta.HourlyClass = "web3-button active"
meta.ViewType = "daily"
meta.DailyClass = "web3-button active"
}
return meta
}
@@ -141,91 +211,54 @@ func buildVcenterChart(entries []views.VcenterTotalsEntry) views.VcenterChartDat
plot = append(plot, entries[i])
}
width := 1200.0
height := 260.0
plotWidth := width - 60.0
startX := 40.0
maxVal := float64(0)
labels := make([]string, 0, len(plot))
tickLabels := make([]string, 0, len(plot))
vmValues := make([]float64, 0, len(plot))
vcpuValues := make([]float64, 0, len(plot))
ramValues := make([]float64, 0, len(plot))
for _, e := range plot {
if float64(e.VmCount) > maxVal {
maxVal = float64(e.VmCount)
}
if float64(e.VcpuTotal) > maxVal {
maxVal = float64(e.VcpuTotal)
}
if float64(e.RamTotalGB) > maxVal {
maxVal = float64(e.RamTotalGB)
}
t := time.Unix(e.RawTime, 0).Local()
labels = append(labels, t.Format("2006-01-02 15:04:05"))
tickLabels = append(tickLabels, t.Format("01-02 15:04"))
vmValues = append(vmValues, float64(e.VmCount))
vcpuValues = append(vcpuValues, float64(e.VcpuTotal))
ramValues = append(ramValues, float64(e.RamTotalGB))
}
if maxVal == 0 {
maxVal = 1
}
stepX := plotWidth
if len(plot) > 1 {
stepX = plotWidth / float64(len(plot)-1)
}
pointsVm := ""
pointsVcpu := ""
pointsRam := ""
for i, e := range plot {
x := startX + float64(i)*stepX
yVm := 10 + (1-(float64(e.VmCount)/maxVal))*height
yVcpu := 10 + (1-(float64(e.VcpuTotal)/maxVal))*height
yRam := 10 + (1-(float64(e.RamTotalGB)/maxVal))*height
if i == 0 {
pointsVm = fmt.Sprintf("%.1f,%.1f", x, yVm)
pointsVcpu = fmt.Sprintf("%.1f,%.1f", x, yVcpu)
pointsRam = fmt.Sprintf("%.1f,%.1f", x, yRam)
} else {
pointsVm = pointsVm + " " + fmt.Sprintf("%.1f,%.1f", x, yVm)
pointsVcpu = pointsVcpu + " " + fmt.Sprintf("%.1f,%.1f", x, yVcpu)
pointsRam = pointsRam + " " + fmt.Sprintf("%.1f,%.1f", x, yRam)
}
}
gridX := []float64{}
if len(plot) > 1 {
for i := 0; i < len(plot); i++ {
gridX = append(gridX, startX+float64(i)*stepX)
}
}
gridY := []float64{}
for i := 0; i <= 4; i++ {
gridY = append(gridY, 10+float64(i)*(height/4))
}
yTicks := []views.ChartTick{}
for i := 0; i <= 4; i++ {
val := maxVal * float64(4-i) / 4
pos := 10 + float64(i)*(height/4)
yTicks = append(yTicks, views.ChartTick{Pos: pos, Label: fmt.Sprintf("%.0f", val)})
}
xTicks := []views.ChartTick{}
maxTicks := 6
stepIdx := 1
if len(plot) > 1 {
stepIdx = (len(plot)-1)/maxTicks + 1
}
for idx := 0; idx < len(plot); idx += stepIdx {
x := startX + float64(idx)*stepX
label := time.Unix(plot[idx].RawTime, 0).Local().Format("01-02 15:04")
xTicks = append(xTicks, views.ChartTick{Pos: x, Label: label})
}
if len(plot) > 1 {
lastIdx := len(plot) - 1
xLast := startX + float64(lastIdx)*stepX
labelLast := time.Unix(plot[lastIdx].RawTime, 0).Local().Format("01-02 15:04")
if len(xTicks) == 0 || xTicks[len(xTicks)-1].Pos != xLast {
xTicks = append(xTicks, views.ChartTick{Pos: xLast, Label: labelLast})
}
cfg := lineChartConfig{
Height: 360,
XTicks: 6,
YTicks: 5,
YLabel: "Totals",
XLabel: "Snapshots (oldest left, newest right)",
Labels: labels,
TickLabels: tickLabels,
Series: []lineChartSeries{
{
Name: "VMs",
Color: "#2563eb",
Values: vmValues,
TooltipFormat: "int",
LineWidth: 2.5,
},
{
Name: "vCPU",
Color: "#16a34a",
Values: vcpuValues,
TooltipFormat: "int",
LineWidth: 2.5,
},
{
Name: "RAM (GB)",
Color: "#ea580c",
Values: ramValues,
TooltipFormat: "int",
LineWidth: 2.5,
},
},
}
return views.VcenterChartData{
PointsVm: pointsVm,
PointsVcpu: pointsVcpu,
PointsRam: pointsRam,
Width: int(width),
Height: int(height),
GridX: gridX,
GridY: gridY,
YTicks: yTicks,
XTicks: xTicks,
ConfigJSON: encodeLineChartConfig(cfg),
}
}
+23 -37
View File
@@ -1,9 +1,7 @@
package handler
import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"net/http"
@@ -11,17 +9,30 @@ import (
)
// VmCleanup removes a VM from inventory by ID and datacenter.
// @Summary Cleanup VM inventory entry
// @Description Removes a VM inventory entry by VM ID and datacenter name.
// @Summary Cleanup VM inventory entry (deprecated)
// @Description Deprecated: Removes a VM inventory entry by VM ID and datacenter name.
// @Description Requires Bearer authentication with the admin role.
// @Tags inventory
// @Deprecated
// @Produce json
// @Param vm_id query string true "VM ID"
// @Param datacenter_name query string true "Datacenter name"
// @Success 200 {object} map[string]string "Cleanup completed"
// @Failure 400 {object} map[string]string "Invalid request"
// @Success 200 {object} models.StatusMessageResponse "Cleanup completed"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Security BearerAuth
// @Router /api/inventory/vm/delete [delete]
func (h *Handler) VmCleanup(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
if r.Method != http.MethodDelete {
writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
return
}
if h.denyLegacyAPI(w, "/api/inventory/vm/delete") {
return
}
ctx, cancel := withRequestTimeout(r, defaultRequestTimeout)
defer cancel()
// Get the parameters
vmId := r.URL.Query().Get("vm_id")
@@ -38,21 +49,11 @@ func (h *Handler) VmCleanup(w http.ResponseWriter, r *http.Request) {
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
h.Logger.Error("No VM found matching parameters", "vm_id", vmId, "datacenter_name", datacenterName)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(map[string]string{
"status": "ERROR",
"message": fmt.Sprintf("No match to VM details specified. vm_id: '%s', datacenter_name: '%s'", vmId, datacenterName),
})
writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("No match to VM details specified. vm_id: '%s', datacenter_name: '%s'", vmId, datacenterName))
return
} else {
h.Logger.Error("Error checking for VM to cleanup", "error", err, "vm_id", vmId, "datacenter_name", datacenterName)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(map[string]string{
"status": "ERROR",
"message": fmt.Sprintf("Error checking for VM to cleanup. error: '%s'", err),
})
writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("Error checking for VM to cleanup. error: '%s'", err))
return
}
} else {
@@ -68,33 +69,18 @@ func (h *Handler) VmCleanup(w http.ResponseWriter, r *http.Request) {
err = h.Database.Queries().InventoryCleanup(ctx, params)
if err != nil {
h.Logger.Error("Error cleaning up VM from Inventory table", "error", err, "vm_id", vmId, "datacenter_name", datacenterName)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(map[string]string{
"status": "ERROR",
"message": fmt.Sprintf("Error cleaning up VM from Inventory table. error: '%s'", err),
})
writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("Error cleaning up VM from Inventory table. error: '%s'", err))
return
} else {
// Successful cleanup
h.Logger.Debug("VM successfully removed from inventory", "vm_name", vm.Name, "iid", vm.Iid, "vm_id", vmId, "datacenter_name", datacenterName)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(map[string]string{
"status": "OK",
"message": fmt.Sprintf("VM '%s' removed from Inventory table", vm.Name),
})
writeJSONOKMessage(w, fmt.Sprintf("VM '%s' removed from Inventory table", vm.Name))
return
}
}
} else {
h.Logger.Error("Parameters not correctly specified", "vm_id", vmId, "datacenter_name", datacenterName)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
json.NewEncoder(w).Encode(map[string]string{
"status": "ERROR",
"message": fmt.Sprintf("Parameters not correctly specified. vm_id: '%s', datacenter_name: '%s'", vmId, datacenterName),
})
writeJSONError(w, http.StatusBadRequest, fmt.Sprintf("Parameters not correctly specified. vm_id: '%s', datacenter_name: '%s'", vmId, datacenterName))
return
}
}
+23 -26
View File
@@ -1,11 +1,9 @@
package handler
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"io"
"net/http"
"runtime"
"strconv"
@@ -17,16 +15,29 @@ import (
// VmCreateEvent records a VM creation CloudEvent.
// @Summary Record VM create event (deprecated)
// @Description Deprecated: Parses a VM create CloudEvent and stores the event data.
// @Description Requires Bearer authentication with the admin role.
// @Tags events
// @Deprecated
// @Accept json
// @Produce text/plain
// @Produce json
// @Param event body models.CloudEventReceived true "CloudEvent payload"
// @Success 200 {string} string "Create event processed"
// @Failure 400 {string} string "Invalid request"
// @Failure 500 {string} string "Server error"
// @Success 200 {object} models.StatusMessageResponse "Create event processed"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Security BearerAuth
// @Router /api/event/vm/create [post]
func (h *Handler) VmCreateEvent(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
return
}
if h.denyLegacyAPI(w, "/api/event/vm/create") {
return
}
ctx, cancel := withRequestTimeout(r, defaultRequestTimeout)
defer cancel()
var (
unixTimestamp int64
//numVcpus int32
@@ -34,21 +45,10 @@ func (h *Handler) VmCreateEvent(w http.ResponseWriter, r *http.Request) {
//datacenter string
)
reqBody, err := io.ReadAll(r.Body)
if err != nil {
h.Logger.Error("Invalid data received", "error", err)
fmt.Fprintf(w, "Invalid data received")
w.WriteHeader(http.StatusInternalServerError)
return
} else {
h.Logger.Debug("received input data", "length", len(reqBody))
}
// Decode the JSON body into CloudEventReceived struct
var event models.CloudEventReceived
if err := json.Unmarshal(reqBody, &event); err != nil {
if err := decodeJSONBody(w, r, &event); err != nil {
h.Logger.Error("unable to decode json", "error", err)
http.Error(w, "Invalid JSON body", http.StatusBadRequest)
writeJSONError(w, http.StatusBadRequest, "Invalid JSON body")
return
} else {
h.Logger.Debug("successfully decoded JSON")
@@ -93,23 +93,20 @@ func (h *Handler) VmCreateEvent(w http.ResponseWriter, r *http.Request) {
h.Logger.Debug("database params", "params", params)
// Insert the new inventory record into the database
result, err := h.Database.Queries().CreateEvent(context.Background(), params)
result, err := h.Database.Queries().CreateEvent(ctx, params)
if err != nil {
h.Logger.Error("unable to perform database insert", "error", err)
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Error : %v\n", err)
writeJSONError(w, http.StatusInternalServerError, fmt.Sprintf("Error: %v", err))
return
} else {
h.Logger.Debug("created database record", "insert_result", result)
}
//h.Logger.Debug("received create request", "body", string(reqBody))
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Create Request : %v\n", result)
writeJSONOKMessage(w, fmt.Sprintf("Create request processed: %v", result))
}
// prettyPrint comes from https://gist.github.com/sfate/9d45f6c5405dc4c9bf63bf95fe6d1a7c
func prettyPrint(args ...interface{}) {
func prettyPrint(args ...any) {
var caller string
timeNow := time.Now().Format("01-02-2006 15:04:05")
+22 -26
View File
@@ -1,11 +1,8 @@
package handler
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
"vctp/db/queries"
@@ -15,36 +12,38 @@ import (
// VmDeleteEvent records a VM deletion CloudEvent in the inventory.
// @Summary Record VM delete event (deprecated)
// @Description Deprecated: Parses a VM delete CloudEvent and marks the VM as deleted in inventory.
// @Description Requires Bearer authentication with the admin role.
// @Tags events
// @Deprecated
// @Accept json
// @Produce text/plain
// @Produce json
// @Param event body models.CloudEventReceived true "CloudEvent payload"
// @Success 200 {string} string "Delete event processed"
// @Failure 400 {string} string "Invalid request"
// @Failure 500 {string} string "Server error"
// @Success 200 {object} models.StatusMessageResponse "Delete event processed"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Security BearerAuth
// @Router /api/event/vm/delete [post]
func (h *Handler) VmDeleteEvent(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
writeJSONError(w, http.StatusMethodNotAllowed, "method not allowed")
return
}
if h.denyLegacyAPI(w, "/api/event/vm/delete") {
return
}
ctx, cancel := withRequestTimeout(r, defaultRequestTimeout)
defer cancel()
var (
deletedTimestamp int64
)
reqBody, err := io.ReadAll(r.Body)
if err != nil {
h.Logger.Error("Invalid data received", "error", err)
fmt.Fprintf(w, "Invalid data received")
w.WriteHeader(http.StatusInternalServerError)
return
} else {
//h.Logger.Debug("received input data", "length", len(reqBody))
}
// Decode the JSON body into CloudEventReceived struct
var event models.CloudEventReceived
if err := json.Unmarshal(reqBody, &event); err != nil {
if err := decodeJSONBody(w, r, &event); err != nil {
h.Logger.Error("unable to decode json", "error", err)
prettyPrint(event)
http.Error(w, "Invalid JSON body", http.StatusBadRequest)
writeJSONError(w, http.StatusBadRequest, "Invalid JSON body")
return
} else {
h.Logger.Debug("successfully decoded deletion type cloud event", "vm_id", event.CloudEvent.Data.VM.VM.Value)
@@ -68,17 +67,14 @@ func (h *Handler) VmDeleteEvent(w http.ResponseWriter, r *http.Request) {
DatacenterName: sql.NullString{String: event.CloudEvent.Data.Datacenter.Name, Valid: event.CloudEvent.Data.Datacenter.Name != ""},
}
h.Logger.Debug("database params", "params", params)
err = h.Database.Queries().InventoryMarkDeleted(context.Background(), params)
err = h.Database.Queries().InventoryMarkDeleted(ctx, params)
if err != nil {
h.Logger.Error("Error received marking VM as deleted", "error", err)
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Delete Request unsuccessful %s\n", err)
writeJSONError(w, http.StatusInternalServerError, fmt.Sprintf("Delete Request unsuccessful %s", err))
} else {
h.Logger.Debug("Processed VM Deletion event successfully")
w.WriteHeader(http.StatusOK)
// TODO - return some JSON
fmt.Fprintf(w, "Processed VM Deletion event successfully")
writeJSONOKMessage(w, "Processed VM Deletion event successfully")
}
//h.Logger.Debug("received delete request", "body", string(reqBody))

Some files were not shown because too many files have changed in this diff Show More